Basic type definitions in WebRTC, to be reused as a library later

This post walks through a piece of code that defines the basic data types commonly used in WebRTC. The goal is to stop agonizing over data-type choices in cross-platform development and simply use int64, uint32, and so on, which makes code more readable and cheaper to port. Following the WebRTC project's unit-testing practice, I also wrote a simple unit-test case of my own to start building a unit-testing habit.


/*
 * libjingle
 * Copyright 2004 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright notice,
 *     this list of conditions and the following disclaimer in the documentation
 *     and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef TALK_BASE_BASICTYPES_H_
#define TALK_BASE_BASICTYPES_H_

#include <stddef.h>  // for NULL, size_t

#if !(defined(_MSC_VER) && (_MSC_VER < 1600))
#include <stdint.h>  // for uintptr_t
#endif

#ifdef HAVE_CONFIG_H
#include "config.h"  // NOLINT
#endif

#include "talk/base/constructormagic.h"

#if !defined(INT_TYPES_DEFINED)
#define INT_TYPES_DEFINED

#ifdef COMPILER_MSVC  // This whole block provides int64 and uint64.
typedef unsigned __int64 uint64;
typedef __int64 int64;
#ifndef INT64_C
#define INT64_C(x) x ## I64
#endif
#ifndef UINT64_C
#define UINT64_C(x) x ## UI64
#endif
#define INT64_F "I64"
#else  // COMPILER_MSVC
// On Mac OS X, cssmconfig.h defines uint64 as uint64_t
// TODO(fbarchard): Use long long for compatibility with chromium on BSD/OSX.
#if defined(OSX)
typedef uint64_t uint64;
typedef int64_t int64;
#ifndef INT64_C
#define INT64_C(x) x ## LL
#endif
#ifndef UINT64_C
#define UINT64_C(x) x ## ULL
#endif
#define INT64_F "l"
#elif defined(__LP64__)
typedef unsigned long uint64;  // NOLINT
typedef long int64;  // NOLINT
#ifndef INT64_C
#define INT64_C(x) x ## L
#endif
#ifndef UINT64_C
#define UINT64_C(x) x ## UL
#endif
#define INT64_F "l"
#else  // __LP64__
typedef unsigned long long uint64;  // NOLINT
typedef long long int64;  // NOLINT
#ifndef INT64_C
#define INT64_C(x) x ## LL
#endif
#ifndef UINT64_C
#define UINT64_C(x) x ## ULL
#endif
#define INT64_F "ll"
#endif  // __LP64__
#endif  // COMPILER_MSVC
// The block above provides the 64-bit integer types.
typedef unsigned int uint32;
typedef int int32;
typedef unsigned short uint16;  // NOLINT
typedef short int16;  // NOLINT
typedef unsigned char uint8;
typedef signed char int8;
#endif  // INT_TYPES_DEFINED

// Detect whether the compiler targets x86 or x64.
#if defined(__x86_64__) || defined(_M_X64) || \
    defined(__i386__) || defined(_M_IX86)
#define CPU_X86 1
#endif
// Detect whether the compiler targets ARM.
#if defined(__arm__) || defined(_M_ARM)
#define CPU_ARM 1
#endif
#if defined(CPU_X86) && defined(CPU_ARM)
#error CPU_X86 and CPU_ARM both defined.
#endif
// The block above detects x86 vs. ARM; only one architecture may be defined.

#if !defined(ARCH_CPU_BIG_ENDIAN) && !defined(ARCH_CPU_LITTLE_ENDIAN)
// x86, arm or GCC provided __BYTE_ORDER__ macros
#if CPU_X86 || CPU_ARM ||  \
  (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define ARCH_CPU_LITTLE_ENDIAN
#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define ARCH_CPU_BIG_ENDIAN
#else
#error ARCH_CPU_BIG_ENDIAN or ARCH_CPU_LITTLE_ENDIAN should be defined.
#endif
#endif
#if defined(ARCH_CPU_BIG_ENDIAN) && defined(ARCH_CPU_LITTLE_ENDIAN)
#error ARCH_CPU_BIG_ENDIAN and ARCH_CPU_LITTLE_ENDIAN both defined.
#endif
// The block above selects big-endian vs. little-endian byte order.

#ifdef WIN32
typedef int socklen_t;
#endif
// For socket code on Windows, where socklen_t may not be defined.

// The following only works for C++
#ifdef __cplusplus
namespace talk_base {
  template<class T> inline T _min(T a, T b) { return (a > b) ? b : a; }
  template<class T> inline T _max(T a, T b) { return (a < b) ? b : a; }

  // For wait functions that take a number of milliseconds, kForever indicates
  // unlimited time.
  const int kForever = -1;
}

#define ALIGNP(p, t) \
    (reinterpret_cast<uint8*>(((reinterpret_cast<uintptr_t>(p) + \
    ((t) - 1)) & ~((t) - 1))))
#define IS_ALIGNED(p, a) (!((uintptr_t)(p) & ((a) - 1)))

// Note: UNUSED is also defined in common.h
#ifndef UNUSED
#define UNUSED(x) Unused(static_cast<const void*>(&x))
#define UNUSED2(x, y) Unused(static_cast<const void*>(&x)); \
    Unused(static_cast<const void*>(&y))
#define UNUSED3(x, y, z) Unused(static_cast<const void*>(&x)); \
    Unused(static_cast<const void*>(&y)); \
    Unused(static_cast<const void*>(&z))
#define UNUSED4(x, y, z, a) Unused(static_cast<const void*>(&x)); \
    Unused(static_cast<const void*>(&y)); \
    Unused(static_cast<const void*>(&z)); \
    Unused(static_cast<const void*>(&a))
#define UNUSED5(x, y, z, a, b) Unused(static_cast<const void*>(&x)); \
    Unused(static_cast<const void*>(&y)); \
    Unused(static_cast<const void*>(&z)); \
    Unused(static_cast<const void*>(&a)); \
    Unused(static_cast<const void*>(&b))
inline void Unused(const void*) {}
#endif  // UNUSED

// Use these to declare and define a static local variable (static T;) so that
// it is leaked so that its destructors are not called at exit.
#define LIBJINGLE_DEFINE_STATIC_LOCAL(type, name, arguments) \
  static type& name = *new type arguments

#endif  // __cplusplus
#endif  // TALK_BASE_BASICTYPES_H_


The code above defines the basic integer types, the CPU architecture macros, and the byte-order macros. From now on I can just use them when writing code, instead of agonizing over whether to use int, short, or long; writing int64 or uint32 directly is both easier to read and more convenient.
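As a quick illustration (a minimal sketch of my own, not part of the original header) of how the fixed-width types combine with the INT64_C and INT64_F macros for portable 64-bit literals and printf formatting:

#include "talk/base/basictypes.h"
#include <stdio.h>

int main() {
	// A constant wider than 32 bits needs the INT64_C suffix macro to stay portable.
	int64 total_bytes = INT64_C(5000000000);
	uint32 packet_count = 1200;

	// INT64_F expands to the printf length modifier for 64-bit integers on the
	// current platform ("I64" on MSVC, "l" or "ll" elsewhere).
	printf("sent %" INT64_F "d bytes in %u packets\n", total_bytes, packet_count);
	return 0;
}
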
The header itself is cross-platform. Seeing that most WebRTC code ships with unit tests, I imitated that pattern and wrote a simple test of my own, to gradually build a unit-testing mindset (a further sketch covering the remaining helper macros follows after the listing):

#include "talk/base/basictypes.h"
#include <stdio.h>
#include <assert.h>

void Test()
{
	// assert the size of each type
	assert(sizeof(int8) == 1);
	assert(sizeof(uint8) == 1);
	assert(sizeof(int16) == 2);
	assert(sizeof(uint16) == 2);
	assert(sizeof(int32) == 4);
	assert(sizeof(uint32) == 4);
	assert(sizeof(int64) == 8);
	assert(sizeof(uint64) == 8);

	//assert signed and unsigned
	int8 i8 = -1;
	assert(i8 == -1);
	uint8 ui8 = -1;
	assert(ui8 > 0);
	int16 i16 = -1;
	assert(i16 == -1);
	uint16 ui16 = -1;
	assert(ui16 > 0);
	int32 i32 = -1;
	assert(i32 == -1);
	uint32 ui32 = -1;
	assert(ui32 > 0);
	int64 i64 = -1;
	assert(i64 == -1);
	uint64 ui64 = -1;
	assert(ui64 > 0);

	// assert CPU arch (this test assumes an x86 build)
#ifdef CPU_X86
	assert(1);
#else
	assert(0);
#endif

	//assert little_endian
#ifdef ARCH_CPU_LITTLE_ENDIAN//little_endian
	assert(1);
#else
	assert(0);
#endif

	// assert static local variable (leaked on purpose so its destructor never runs)
	LIBJINGLE_DEFINE_STATIC_LOCAL(int, testname, ());  // defines a leaked static int
	testname = 100;
	assert(testname == 100);

	//assert _max, _min
#ifdef __cplusplus
	assert(talk_base::_max(10, 5) == 10);
	assert(talk_base::_max(10, 50) == 50);
	assert(talk_base::_max(10, -50) == 10);
	assert(talk_base::_min(10, 5) == 5);
	assert(talk_base::_min(10, 50) == 10);
	assert(talk_base::_min(10, -50) == -50);
	printf("Assert Success!\n");
#endif 
}
int main()
{
	Test();
	return 0;
}
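
The test above does not touch the alignment and UNUSED helpers defined in the header; here is a minimal sketch of my own (hypothetical buffer and function names) of how they could be exercised in the same style:

#include "talk/base/basictypes.h"
#include <assert.h>

void TestHelpers()
{
	// ALIGNP rounds a pointer up to the next multiple of the alignment
	// (which must be a power of two); IS_ALIGNED checks the result.
	char buffer[64];
	uint8* aligned = ALIGNP(buffer + 1, 16);
	assert(IS_ALIGNED(aligned, 16));
	assert(aligned >= reinterpret_cast<uint8*>(buffer + 1));

	// UNUSED silences "unused variable" warnings without removing the variable.
	int placeholder = 0;
	UNUSED(placeholder);
}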

Getting these basic building blocks right one step at a time, so they can serve as my own library later on.