集成 RTC SDK 后,你可以使用其中接口快速构建基础应用,实现基本实时音视频通话;你也能通过阅读代码,了解音视频通话的最佳实践。
本步骤介绍如何创建一个新项目;如需集成到已有项目,请跳过本步骤。
请参考 Get Started with React Native (without a Framework) 创建一个 React Native 项目。
在终端中进入项目根目录,运行如下命令安装依赖项:
# Using npm npm install @volcengine/react-native-rtc # OR using Yarn yarn add @volcengine/react-native-rtc
在项目的 ./build.gradle
文件中添加如下 Maven 仓库地址:allprojects { repositories { ... maven { url 'https://artifact.bytedance.com/repository/Volcengine/' } } }
在项目的 ./gradle.properties
文件中添加 android.enableJetifier=true
,解决兼容性问题。并按需修改 reactNativeArchitectures
属性用于配置需要编译的架构。SDK 已在内部声明所需权限,无需手动添加。
其它注意事项可参考:
为方便后续步骤说明,此处假定封装的文件路径为项目根目录下的 ./src/core/index.ts
。
import {
  RTCManager,
  IEngine,
  IRoom,
  RTCVideoEventHandler,
  RTCRoomEventHandler,
  IJoinRoomProps,
  ICreateRTCEngineOptions,
  StreamIndex,
  IVideoCanvas,
} from '@volcengine/react-native-rtc';

/**
 * Thin wrapper around the RTC SDK that owns one engine and one room
 * instance, exposed app-wide as a singleton (see the default export).
 */
class RTCClient {
  manager?: RTCManager;
  engine?: IEngine | null;
  room?: IRoom | null;

  constructor() {
    this.manager = new RTCManager();
  }

  /** Engine-level APIs */

  /** Creates the underlying RTC engine for the given app id. */
  async createEngine({ appID }: ICreateRTCEngineOptions) {
    this.engine = await this.manager!.createRTCEngine({ appID });
  }

  /** Registers engine (video) event callbacks; no-op before createEngine. */
  setRTCVideoEventHandler(handlers: RTCVideoEventHandler) {
    this.engine?.setRtcVideoEventHandler(handlers);
  }

  /** Registers room event callbacks; no-op before createRoom. */
  setRTCRoomEventHandler(handlers: RTCRoomEventHandler) {
    this.room?.setRTCRoomEventHandler(handlers);
  }

  startAudioCapture() {
    return this.engine?.startAudioCapture();
  }

  startVideoCapture() {
    return this.engine?.startVideoCapture();
  }

  /** Binds the local preview canvas; rejects negative stream indices with 0. */
  setLocalVideoCanvas(streamIndex: StreamIndex, canvas: IVideoCanvas) {
    if (streamIndex < 0) {
      return 0;
    }
    return this.engine?.setLocalVideoCanvas(streamIndex, canvas);
  }

  /** Tears down room and engine; safe to call when nothing was created. */
  destroyEngine() {
    this.leaveRoom();
    this.room?.destroy();
    this.room = null;
    this.manager!.destroyRTCEngine();
    this.engine = null;
  }

  /** Room-level APIs */

  /** Joins the current room; callers may override the placeholder token. */
  joinRoom(params: IJoinRoomProps) {
    return this.room?.joinRoom({
      token: 'Your token',
      ...params,
    });
  }

  leaveRoom() {
    this.room?.leaveRoom();
  }

  /** Creates (and remembers) a room instance for the given room id. */
  createRoom(roomId: string) {
    this.room = this.engine?.createRTCRoom(roomId);
    return this.room;
  }
}

export default new RTCClient();
为方便后续步骤说明,此处假定封装的文件路径为项目根目录下的 ./src/core/handlers.ts
。
import { RTCVideoEventHandler, RTCRoomEventHandler, UserInfo, MediaStreamType, } from '@volcengine/react-native-rtc'; function convertObjectForPrinting(obj: Record<string, string>) { if (typeof obj !== 'object') { return obj; } const result: Record<string, string> = {}; const properties = Object.getOwnPropertyNames(obj); properties.forEach(prop => { const descriptor = Object.getOwnPropertyDescriptor(obj, prop); if (descriptor && typeof descriptor.get === 'function') { try { result[prop] = descriptor.get.call(obj); } catch (error) {} } else if (typeof obj[prop] !== 'function') { result[prop] = obj[prop]; } }); return result; } const logger = (fnName: string, executer?: any) => { /** 获取函数名称 */ const f = (...args: unknown[]) => { console.log( `------ ${fnName} : ${args .map(arg => typeof arg === 'object' ? JSON.stringify( Array.isArray(arg) ? [...arg].map(convertObjectForPrinting) : convertObjectForPrinting(arg as any), ) : arg, ) .join(' | ')}`, ); executer?.(...args); }; f.name = fnName; return f; }; const useRTCVideoListeners = (): RTCVideoEventHandler => { // ...some hooks return { onUserStartAudioCapture: logger('onUserStartAudioCapture'), onUserStopAudioCapture: logger('onUserStopAudioCapture'), onUserStartVideoCapture: logger('onUserStartVideoCapture'), }; }; const useRTCRoomListeners = (): RTCRoomEventHandler => { // ...some hooks const apis = { onUserJoined: logger('onUserJoined', (userInfo: UserInfo) => { // ... } ), onUserPublishStream: logger( 'onUserPublishStream', (uid: string, type: MediaStreamType) => { // ... }, ), }; return apis; }; export { useRTCVideoListeners, useRTCRoomListeners, };
为方便后续步骤说明,此处假定封装的文件路径为项目根目录下的 ./src/page/index.tsx
。
import React, { useEffect, useState } from 'react';
import { Platform, KeyboardAvoidingView } from 'react-native';
import {
  ChannelProfile,
  NativeViewComponent,
  StreamIndex,
  RenderMode,
} from '@volcengine/react-native-rtc';
import { request, PERMISSIONS } from 'react-native-permissions';
import RTCClient from '../core';
import { useRTCRoomListeners, useRTCVideoListeners } from '../core/handlers';

/** Stable id binding the native rendering view to the local video canvas. */
const viewId = 'my-view';

const ExamplePage = () => {
  const [isViewLoaded, setViewLoaded] = useState<boolean>(false);
  const engineEventListeners = useRTCVideoListeners();
  const roomEventListeners = useRTCRoomListeners();

  /** Camera/microphone permissions must be granted before capture starts. */
  const requestDevicePermission = async () => {
    if (Platform.OS === 'ios') {
      await request(PERMISSIONS.IOS.CAMERA);
      await request(PERMISSIONS.IOS.MICROPHONE);
    } else {
      await request(PERMISSIONS.ANDROID.CAMERA);
      await request(PERMISSIONS.ANDROID.RECORD_AUDIO);
    }
  };

  const handleViewLoad = () => {
    setViewLoaded(true);
  };

  /** Full bring-up: permissions → engine → canvas → room → local capture. */
  const initialize = async () => {
    /** 获取权限 */
    await requestDevicePermission();
    /** 初始化引擎 */
    await RTCClient.createEngine({
      appID: 'Your AppID',
    });
    /** 设置相关回调函数 */
    RTCClient.setRTCVideoEventHandler(engineEventListeners);
    /** 设置本地渲染视图 */
    RTCClient.setLocalVideoCanvas(StreamIndex.STREAM_INDEX_MAIN, {
      viewId,
      renderMode: RenderMode.ByteRTCRenderModeFill,
    });
    /** 创建房间实例 */
    RTCClient.createRoom('Your roomId, for example, 123456');
    /** 设置相关回调函数 */
    RTCClient.setRTCRoomEventHandler(roomEventListeners);
    /** 加入房间 */
    RTCClient.joinRoom({
      userId: 'Your userId, for example, 123456',
      roomConfigs: {
        profile: ChannelProfile.CHANNEL_PROFILE_COMMUNICATION,
        isAutoPublish: true,
        isAutoSubscribeAudio: true,
        isAutoSubscribeVideo: true,
      },
    });
    /** 采集本地流 */
    RTCClient.startVideoCapture();
    RTCClient.startAudioCapture();
  };

  useEffect(() => {
    if (isViewLoaded) {
      // initialize() is async: a synchronous try/catch can never observe its
      // rejection (the original `try { initialize(); } catch {}` silently
      // dropped all errors). Attach the handler to the promise instead.
      initialize().catch(error => {
        console.error('RTC initialization failed:', error);
      });
    }
  }, [isViewLoaded]);

  return (
    <KeyboardAvoidingView behavior={Platform.OS === 'ios' ? 'padding' : 'height'}>
      <NativeViewComponent
        viewId={viewId}
        onLoad={handleViewLoad}
        kind={
          Platform.select({
            android: 'TextureView',
            ios: 'UIView',
          })!
        }
      />
    </KeyboardAvoidingView>
  );
};

export default ExamplePage;
至此,我们实现了基本的音视频通话。
在实现音视频通话后,如有其它高阶需求,可参考如下文档: