diff --git a/Android/APIExample-Audio/gradle.properties b/Android/APIExample-Audio/gradle.properties
index e59680392..317242bfe 100644
--- a/Android/APIExample-Audio/gradle.properties
+++ b/Android/APIExample-Audio/gradle.properties
@@ -23,4 +23,4 @@ android.nonFinalResIds=false
# read enable simple filter section on README first before set this flag to TRUE
simpleFilter = false
-rtc_sdk_version = 4.6.0
\ No newline at end of file
+rtc_sdk_version = 4.6.2
\ No newline at end of file
diff --git a/Android/APIExample-Compose/app/src/main/AndroidManifest.xml b/Android/APIExample-Compose/app/src/main/AndroidManifest.xml
index 1a465bdef..544914aae 100644
--- a/Android/APIExample-Compose/app/src/main/AndroidManifest.xml
+++ b/Android/APIExample-Compose/app/src/main/AndroidManifest.xml
@@ -28,6 +28,8 @@
android:exported="true"
android:label="@string/app_name"
android:theme="@style/Theme.APIExampleCompose"
+ android:supportsPictureInPicture="true"
+ android:screenOrientation="portrait"
android:configChanges="screenSize|screenLayout|orientation|smallestScreenSize">
@@ -36,12 +38,14 @@
+
-
+
\ No newline at end of file
diff --git a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/NavGraph.kt b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/NavGraph.kt
index efb9315c9..bb60306a8 100644
--- a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/NavGraph.kt
+++ b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/NavGraph.kt
@@ -1,5 +1,6 @@
package io.agora.api.example.compose
+import android.util.Log
import androidx.compose.runtime.Composable
import androidx.navigation.NavType
import androidx.navigation.compose.NavHost
@@ -9,6 +10,7 @@ import androidx.navigation.navArgument
import io.agora.api.example.compose.model.Component
import io.agora.api.example.compose.model.Components
import io.agora.api.example.compose.model.Example
+import io.agora.api.example.compose.samples.cleanupPictureInPictureState
import io.agora.api.example.compose.ui.example.Example
import io.agora.api.example.compose.ui.home.Home
import io.agora.api.example.compose.ui.settings.Settings
@@ -48,7 +50,15 @@ fun NavGraph() {
val example = component.examples[exampleIndex]
Example(
example = example,
- onBackClick = { navController.popBackStack() },
+ onBackClick = {
+ Log.d("PiPDebug", "NavGraph: onBackClick called for example: ${example.name}")
+ // Special handling for PictureInPicture example
+ if (example.name == R.string.example_pictureinpicture) {
+ Log.d("PiPDebug", "NavGraph: Cleaning up PictureInPicture state")
+ cleanupPictureInPictureState()
+ }
+ navController.popBackStack()
+ },
)
}
}
diff --git a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/model/Examples.kt b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/model/Examples.kt
index 47abfdc76..50b4f63f5 100644
--- a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/model/Examples.kt
+++ b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/model/Examples.kt
@@ -20,7 +20,7 @@ import io.agora.api.example.compose.samples.MediaPlayer
import io.agora.api.example.compose.samples.MediaRecorder
import io.agora.api.example.compose.samples.OriginAudioData
import io.agora.api.example.compose.samples.OriginVideoData
-import io.agora.api.example.compose.samples.PictureInPictureEntrance
+import io.agora.api.example.compose.samples.PictureInPicture
import io.agora.api.example.compose.samples.PlayAudioFiles
import io.agora.api.example.compose.samples.PreCallTest
import io.agora.api.example.compose.samples.RTMPStreaming
@@ -54,7 +54,7 @@ val AdvanceExampleList = listOf(
Example(R.string.example_originvideodata) { OriginVideoData() },
Example(R.string.example_customvideosource) { CustomVideoSource() },
Example(R.string.example_customvideorender) { CustomVideoRender() },
- Example(R.string.example_pictureinpicture) { PictureInPictureEntrance(it) },
+ Example(R.string.example_pictureinpicture) { PictureInPicture() },
Example(R.string.example_joinmultichannel) { JoinMultiChannel() },
Example(R.string.example_channelencryption) { ChannelEncryption() },
Example(R.string.example_playaudiofiles) { PlayAudioFiles() },
diff --git a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/LiveStreaming.kt b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/LiveStreaming.kt
index 05bf81439..60ca08848 100644
--- a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/LiveStreaming.kt
+++ b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/LiveStreaming.kt
@@ -1,5 +1,6 @@
package io.agora.api.example.compose.samples
+import android.util.Log
import android.widget.Toast
import androidx.activity.compose.rememberLauncherForActivityResult
import androidx.activity.result.contract.ActivityResultContracts
@@ -57,6 +58,17 @@ import io.agora.rtc2.video.VideoEncoderConfiguration
import io.agora.rtc2.video.VideoEncoderConfiguration.AdvanceOptions
import io.agora.rtc2.video.WatermarkOptions
+private const val TAG = "LiveStreaming"
+
+private fun getVideoScenarioName(scenario: Constants.VideoScenario): String {
+ return when (scenario) {
+ Constants.VideoScenario.APPLICATION_SCENARIO_GENERAL -> "General"
+ Constants.VideoScenario.APPLICATION_SCENARIO_MEETING -> "Meeting"
+ Constants.VideoScenario.APPLICATION_SCENARIO_1V1 -> "1V1"
+ Constants.VideoScenario.APPLICATION_SCENARIO_LIVESHOW -> "Live Show"
+ }
+}
+
@Composable
fun LiveStreaming() {
val context = LocalContext.current
@@ -70,6 +82,9 @@ fun LiveStreaming() {
var localStats by remember { mutableStateOf(VideoStatsInfo()) }
var remoteStats by remember { mutableStateOf(VideoStatsInfo()) }
var clientRole by remember { mutableStateOf(Constants.CLIENT_ROLE_AUDIENCE) }
+ val settingsValues = remember { mutableStateMapOf<String, Any>().apply {
+ put("videoScenario", Constants.VideoScenario.APPLICATION_SCENARIO_LIVESHOW)
+ } }
val rtcEngine = remember {
RtcEngine.create(RtcEngineConfig().apply {
@@ -177,6 +192,10 @@ fun LiveStreaming() {
), 100, 15
)
)
+ // Set default video scenario
+ val defaultScenario = Constants.VideoScenario.APPLICATION_SCENARIO_LIVESHOW
+ val ret = setVideoScenario(defaultScenario)
+ Log.d(TAG, "onItemSelected: setVideoScenario " + getVideoScenarioName(defaultScenario) + " ret=" + ret)
}
}
DisposableEffect(lifecycleOwner) {
@@ -192,6 +211,11 @@ fun LiveStreaming() {
if (allGranted) {
// Permission is granted
Toast.makeText(context, R.string.permission_granted, Toast.LENGTH_LONG).show()
+ // Set video scenario before joining channel
+ val scenario = settingsValues["videoScenario"] as? Constants.VideoScenario
+ ?: Constants.VideoScenario.APPLICATION_SCENARIO_LIVESHOW
+ val ret = rtcEngine.setVideoScenario(scenario)
+ Log.d(TAG, "onItemSelected: setVideoScenario " + getVideoScenarioName(scenario) + " ret=" + ret)
val mediaOptions = ChannelMediaOptions()
mediaOptions.channelProfile = Constants.CHANNEL_PROFILE_LIVE_BROADCASTING
mediaOptions.clientRoleType = clientRole
@@ -214,6 +238,7 @@ fun LiveStreaming() {
localStats = localStats,
remoteStats = remoteStats,
localLarge = localLarge,
+ settingsValues = settingsValues,
onSwitch = {
localLarge = !localLarge
},
@@ -248,12 +273,12 @@ private fun LiveStreamingView(
localLarge: Boolean = true,
localStats: VideoStatsInfo = VideoStatsInfo(),
remoteStats: VideoStatsInfo = VideoStatsInfo(),
+ settingsValues: MutableMap<String, Any> = mutableMapOf(),
onSwitch: () -> Unit = {},
onJoinClick: (String) -> Unit,
onLeaveClick: () -> Unit
) {
var openSettingSheet by rememberSaveable { mutableStateOf(false) }
- val settingsValues = remember { mutableStateMapOf<String, Any>() }
Box {
Column {
@@ -349,9 +374,17 @@ private fun LiveStreamingView(
rtcEngine = rtcEngine,
role = clientRole,
remoteUid = remoteUid,
+ isJoined = isJoined,
values = settingsValues,
onValueChanged = { key, value ->
settingsValues[key] = value
+ // Update video scenario immediately if not joined
+ if (key == "videoScenario" && !isJoined) {
+ val scenario = value as? Constants.VideoScenario
+ ?: Constants.VideoScenario.APPLICATION_SCENARIO_LIVESHOW
+ val ret = rtcEngine?.setVideoScenario(scenario) ?: -1
+ Log.d(TAG, "onItemSelected: setVideoScenario " + getVideoScenarioName(scenario) + " ret=" + ret)
+ }
}
)
}
@@ -387,15 +420,34 @@ private fun LiveStreamingSettingView(
rtcEngine: RtcEngine? = null,
role: Int = Constants.CLIENT_ROLE_AUDIENCE,
remoteUid: Int = 0,
+ isJoined: Boolean = false,
values: Map<String, Any> = emptyMap(),
onValueChanged: (String, Any) -> Unit = { _, _ -> }
) {
val context = LocalContext.current
+ val videoScenario = values["videoScenario"] as? Constants.VideoScenario
+ ?: Constants.VideoScenario.APPLICATION_SCENARIO_LIVESHOW
Column(
modifier = modifier,
horizontalAlignment = Alignment.CenterHorizontally
) {
+ // Video Scenario selection - must be set before joining channel
+ DropdownMenuRaw(
+ title = stringResource(id = R.string.video_scenario),
+ options = listOf(
+ stringResource(id = R.string.video_scenario_general) to Constants.VideoScenario.APPLICATION_SCENARIO_GENERAL,
+ stringResource(id = R.string.video_scenario_meeting) to Constants.VideoScenario.APPLICATION_SCENARIO_MEETING,
+ stringResource(id = R.string.video_scenario_1v1) to Constants.VideoScenario.APPLICATION_SCENARIO_1V1,
+ stringResource(id = R.string.video_scenario_liveshow) to Constants.VideoScenario.APPLICATION_SCENARIO_LIVESHOW,
+ ),
+ selectedValue = videoScenario,
+ enable = !isJoined,
+ onSelected = { _, option ->
+ onValueChanged("videoScenario", option.second)
+ }
+ )
+ Divider(modifier = Modifier.padding(horizontal = 16.dp))
if (role == Constants.CLIENT_ROLE_AUDIENCE) {
SwitchRaw(
title = stringResource(id = R.string.open_low_latency_live),
diff --git a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/PictureInPicture.kt b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/PictureInPicture.kt
index 7af8f1aa3..f675c0528 100644
--- a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/PictureInPicture.kt
+++ b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/PictureInPicture.kt
@@ -2,34 +2,34 @@ package io.agora.api.example.compose.samples
import android.app.AppOpsManager
import android.app.PictureInPictureParams
+import android.content.Context
+import android.content.ContextWrapper
import android.content.Intent
import android.graphics.RectF
import android.os.Build
-import android.os.Bundle
import android.os.Process
+import android.util.Log
import android.util.Rational
import android.widget.Toast
import androidx.activity.ComponentActivity
import androidx.activity.compose.rememberLauncherForActivityResult
-import androidx.activity.compose.setContent
import androidx.activity.result.contract.ActivityResultContracts
+import androidx.compose.foundation.layout.Box
import androidx.compose.foundation.layout.Column
import androidx.compose.foundation.layout.Spacer
-import androidx.compose.foundation.layout.WindowInsets
-import androidx.compose.foundation.layout.consumeWindowInsets
import androidx.compose.foundation.layout.fillMaxSize
+import androidx.compose.foundation.layout.fillMaxWidth
import androidx.compose.foundation.layout.height
import androidx.compose.foundation.layout.padding
-import androidx.compose.foundation.layout.safeDrawing
import androidx.compose.material3.Button
import androidx.compose.material3.Text
import androidx.compose.runtime.Composable
+import androidx.compose.runtime.DisposableEffect
import androidx.compose.runtime.LaunchedEffect
import androidx.compose.runtime.getValue
import androidx.compose.runtime.mutableIntStateOf
import androidx.compose.runtime.mutableStateOf
import androidx.compose.runtime.remember
-import androidx.compose.runtime.saveable.rememberSaveable
import androidx.compose.runtime.setValue
import androidx.compose.ui.Modifier
import androidx.compose.ui.graphics.toAndroidRectF
@@ -38,20 +38,18 @@ import androidx.compose.ui.layout.onGloballyPositioned
import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.platform.LocalLifecycleOwner
import androidx.compose.ui.platform.LocalSoftwareKeyboardController
-import androidx.compose.ui.res.stringResource
import androidx.compose.ui.unit.dp
import androidx.lifecycle.DefaultLifecycleObserver
-import androidx.lifecycle.Lifecycle
import androidx.lifecycle.LifecycleOwner
+import androidx.core.app.PictureInPictureModeChangedInfo
+import androidx.core.util.Consumer
import io.agora.api.example.compose.BuildConfig
import io.agora.api.example.compose.R
import io.agora.api.example.compose.data.SettingPreferences
-import io.agora.api.example.compose.ui.common.APIExampleScaffold
import io.agora.api.example.compose.ui.common.ChannelNameInput
import io.agora.api.example.compose.ui.common.TwoVideoView
import io.agora.api.example.compose.ui.common.TwoVideoViewType
import io.agora.api.example.compose.ui.common.VideoStatsInfo
-import io.agora.api.example.compose.ui.theme.APIExampleComposeTheme
import io.agora.api.example.compose.utils.TokenUtils
import io.agora.rtc2.ChannelMediaOptions
import io.agora.rtc2.Constants
@@ -61,25 +59,102 @@ import io.agora.rtc2.RtcEngineConfig
import io.agora.rtc2.video.VideoCanvas
import io.agora.rtc2.video.VideoEncoderConfiguration
+// Global state storage that persists across component recreation
+private val globalLocalUid = mutableIntStateOf(0)
+private val globalRemoteUid = mutableIntStateOf(0)
+private val globalChannelName = mutableStateOf("")
+private val globalIsJoined = mutableStateOf(false)
+private val isInPipTransition = mutableStateOf(false)
+private val isPageLeaving = mutableStateOf(false) // Flag to track if user is truly leaving the page
+private var globalCleanupFunction: (() -> Unit)? = null // Global cleanup function
+// Helper function to find Activity from Context
+private fun Context.findActivity(): ComponentActivity {
+ var context = this
+ while (context is ContextWrapper) {
+ if (context is ComponentActivity) return context
+ context = context.baseContext
+ }
+ throw IllegalStateException("Picture in picture should be called in the context of an Activity")
+}
+
+// Correct PiP state management following Android official guidelines
@Composable
-fun PictureInPictureEntrance(back: () -> Unit) {
- val context = LocalContext.current
- val intent = Intent(context, PictureInPictureActivity::class.java)
- context.startActivity(intent)
- back()
+private fun rememberIsInPipMode(): Boolean {
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
+ val activity = LocalContext.current.findActivity()
+ var pipMode by remember { mutableStateOf(activity.isInPictureInPictureMode) }
+ DisposableEffect(activity) {
+ val observer = Consumer<PictureInPictureModeChangedInfo> { info ->
+ pipMode = info.isInPictureInPictureMode
+ }
+ activity.addOnPictureInPictureModeChangedListener(observer)
+ onDispose { activity.removeOnPictureInPictureModeChangedListener(observer) }
+ }
+ return pipMode
+ } else {
+ return false
+ }
+}
+
+// Public function to clean up global state when user leaves the page
+fun cleanupPictureInPictureState() {
+ Log.d("PiPDebug", "cleanupPictureInPictureState called")
+ globalCleanupFunction?.invoke()
}
@Composable
-private fun PictureInPicture() {
+fun PictureInPicture() {
val context = LocalContext.current as ComponentActivity
- var isPipOn by rememberSaveable { mutableStateOf(false) }
+ // Use the correct PiP state management
+ val isPipOn = rememberIsInPipMode()
+
+ Log.d("PiPDebug", "PictureInPicture: Current isPipOn = $isPipOn")
+
+ // Function to mark that user is leaving the page
+ fun markPageLeaving() {
+ isPageLeaving.value = true
+ Log.d("PiPDebug", "Marked page as leaving - global state will be cleared on next dispose")
+ }
+
+ // Register cleanup function globally
+ LaunchedEffect(Unit) {
+ globalCleanupFunction = { markPageLeaving() }
+ }
+
+ // Add LaunchedEffect to handle PiP mode changes
+ LaunchedEffect(isPipOn) {
+ Log.d("PiPDebug", "PiP mode changed to: $isPipOn")
+ // Mark that we're in a PiP transition
+ isInPipTransition.value = true
+ // Note: We can't access localUid and rtcEngine here as they're defined later
+ // The video setup will be handled in the render callbacks
+ }
+
+ // Add DisposableEffect to track lifecycle
+ DisposableEffect(Unit) {
+ onDispose {
+ // Only clear global state when user is truly leaving the page (not during PiP transitions)
+ if (isPageLeaving.value) {
+ Log.d("PiPDebug", "DisposableEffect: User is leaving page, clearing global state")
+ globalLocalUid.intValue = 0
+ globalRemoteUid.intValue = 0
+ globalChannelName.value = ""
+ globalIsJoined.value = false
+ isPageLeaving.value = false // Reset flag
+ } else {
+ Log.d("PiPDebug", "DisposableEffect: Component recreation (PiP transition), preserving global state")
+ }
+ }
+ }
val lifecycleOwner = LocalLifecycleOwner.current
val keyboard = LocalSoftwareKeyboardController.current
- var isJoined by rememberSaveable { mutableStateOf(false) }
- var channelName by rememberSaveable { mutableStateOf("") }
- var localUid by rememberSaveable { mutableIntStateOf(0) }
- var remoteUid by rememberSaveable { mutableIntStateOf(0) }
+ // Use global state directly to avoid duplication
+ var isJoined by globalIsJoined
+ var channelName by globalChannelName
+ var localUid by globalLocalUid
+ var remoteUid by globalRemoteUid
+
var localStats by remember { mutableStateOf(VideoStatsInfo()) }
var remoteStats by remember { mutableStateOf(VideoStatsInfo()) }
val videoViewBound = remember { RectF() }
@@ -176,13 +251,7 @@ private fun PictureInPicture() {
}
}
LaunchedEffect(lifecycleOwner) {
- context.addOnPictureInPictureModeChangedListener { info ->
- isPipOn = info.isInPictureInPictureMode
- if (lifecycleOwner.lifecycle.currentState < Lifecycle.State.STARTED) {
- context.finish()
- }
- }
- lifecycleOwner.lifecycle.addObserver(object: DefaultLifecycleObserver {
+ lifecycleOwner.lifecycle.addObserver(object : DefaultLifecycleObserver {
override fun onDestroy(owner: LifecycleOwner) {
rtcEngine.stopPreview()
rtcEngine.leaveChannel()
@@ -216,19 +285,31 @@ private fun PictureInPicture() {
TwoVideoView(
modifier = Modifier
.height(350.dp)
- .onGloballyPositioned {
+ .onGloballyPositioned { layoutCoordinates ->
videoViewBound.set(
- it
+ layoutCoordinates
.boundsInWindow()
.toAndroidRectF()
)
+ val boundsInWindow = layoutCoordinates.boundsInWindow()
+ Log.d("PiPDebug", "VideoView distance from top: ${boundsInWindow.top}px")
},
type = TwoVideoViewType.Row,
localUid = localUid,
remoteUid = remoteUid,
localStats = localStats,
remoteStats = remoteStats,
- localRender = { view, id, _ ->
+ localRender = { view, id, isFirstSetup ->
+ Log.d("PiPDebug", "localRender: view=$view, id=$id, isFirstSetup=$isFirstSetup, isJoined=$isJoined, isPipOn=$isPipOn")
+ // Clear previous view first
+ rtcEngine.setupLocalVideo(
+ VideoCanvas(
+ null,
+ Constants.RENDER_MODE_HIDDEN,
+ id
+ )
+ )
+ // Then set up new view
rtcEngine.setupLocalVideo(
VideoCanvas(
view,
@@ -237,8 +318,19 @@ private fun PictureInPicture() {
)
)
rtcEngine.startPreview()
+ Log.d("PiPDebug", "localRender: started preview")
},
- remoteRender = { view, id, _ ->
+ remoteRender = { view, id, isFirstSetup ->
+ Log.d("PiPDebug", "remoteRender: view=$view, id=$id, isFirstSetup=$isFirstSetup, remoteUid=$remoteUid, isPipOn=$isPipOn")
+ // Clear previous view first
+ rtcEngine.setupRemoteVideo(
+ VideoCanvas(
+ null,
+ Constants.RENDER_MODE_HIDDEN,
+ id
+ )
+ )
+ // Then set up new view
rtcEngine.setupRemoteVideo(
VideoCanvas(
view,
@@ -246,95 +338,90 @@ private fun PictureInPicture() {
id
)
)
+ Log.d("PiPDebug", "remoteRender: setup completed")
})
}
if (isPipOn) {
- videoView()
+ Log.d("PiPDebug", "PictureInPicture: Rendering PiP mode - localUid: $localUid, remoteUid: $remoteUid, " +
+ "isJoined: $isJoined")
+ // In PiP mode, render only the video content without any scaffold or app bar
+ // Use fillMaxSize to ensure video takes full available space in PiP window
+ Box(modifier = Modifier.fillMaxSize()) {
+ videoView()
+ }
} else {
- APIExampleComposeTheme {
- APIExampleScaffold(
- topBarTitle = stringResource(id = R.string.example_pictureinpicture),
- showSettingIcon = false,
- showBackNavigationIcon = true,
- onBackClick = { context.finish() },
- ) { paddingValues ->
- Column(
- modifier = Modifier
- .fillMaxSize()
- .consumeWindowInsets(WindowInsets.safeDrawing)
- .padding(paddingValues)
- ) {
- videoView()
- Spacer(modifier = Modifier.weight(1f))
+ Log.d("PiPDebug", "PictureInPicture: Rendering normal mode - full UI")
+ // Normal mode with full UI - let Example component handle the scaffold
+ Column(modifier = Modifier.fillMaxWidth()) {
+ videoView()
+ Spacer(modifier = Modifier.weight(1f))
- Button(
- modifier = Modifier.padding(16.dp, 8.dp),
- enabled = isJoined,
- onClick = {
- if (Build.VERSION.SDK_INT >= 26) {
- val appOpsManager: AppOpsManager =
- context.getSystemService(AppOpsManager::class.java)
- if (appOpsManager.checkOpNoThrow(
- AppOpsManager.OPSTR_PICTURE_IN_PICTURE,
- Process.myUid(),
- context.packageName
- ) == AppOpsManager.MODE_ALLOWED
- ) {
- context.enterPictureInPictureMode(
- PictureInPictureParams.Builder()
- .setAspectRatio(
- Rational(
- videoViewBound.width().toInt(),
- videoViewBound.height().toInt()
- )
- )
- .build()
+ Button(
+ modifier = Modifier.padding(16.dp, 8.dp),
+ enabled = isJoined,
+ onClick = {
+ if (Build.VERSION.SDK_INT >= 26) {
+ val appOpsManager: AppOpsManager =
+ context.getSystemService(AppOpsManager::class.java)
+ if (appOpsManager.checkOpNoThrow(
+ AppOpsManager.OPSTR_PICTURE_IN_PICTURE,
+ Process.myUid(),
+ context.packageName
+ ) == AppOpsManager.MODE_ALLOWED
+ ) {
+ context.enterPictureInPictureMode(
+ PictureInPictureParams.Builder()
+ .setAspectRatio(
+ Rational(
+ videoViewBound.width().toInt(),
+ videoViewBound.height().toInt()
+ )
)
- val homeIntent = Intent(Intent.ACTION_MAIN)
- homeIntent.addCategory(Intent.CATEGORY_HOME)
- context.startActivity(homeIntent)
- isPipOn = true
- }
- }
- }
- ) {
- Text(text = "Enter Picture-in-Picture Mode")
- }
-
- ChannelNameInput(
- channelName = channelName,
- isJoined = isJoined,
- onJoinClick = {
- channelName = it
- keyboard?.hide()
- permissionLauncher.launch(
- arrayOf(
- android.Manifest.permission.RECORD_AUDIO,
- android.Manifest.permission.CAMERA
- )
+ .setActions(emptyList()) // Hide system actions (back button, etc.)
+ .build()
)
- },
- onLeaveClick = {
- rtcEngine.stopPreview()
- rtcEngine.leaveChannel()
+ val homeIntent = Intent(Intent.ACTION_MAIN)
+ homeIntent.addCategory(Intent.CATEGORY_HOME)
+ context.startActivity(homeIntent)
+ // isPipOn is now managed by rememberIsInPipMode(), no need to manually set
+ } else {
+ Toast.makeText(
+ context,
+ "Picture-in-Picture permission is not granted",
+ Toast.LENGTH_SHORT
+ ).show()
}
- )
+ } else {
+ Toast.makeText(
+ context,
+ "Picture-in-Picture requires Android 8.0 (API 26) or higher",
+ Toast.LENGTH_SHORT
+ ).show()
+ }
}
+ ) {
+ Text(text = "Enter Picture-in-Picture Mode")
}
- }
- }
-
-}
-
-class PictureInPictureActivity : ComponentActivity() {
-
- override fun onCreate(savedInstanceState: Bundle?) {
- super.onCreate(savedInstanceState)
- setContent {
- PictureInPicture()
+ ChannelNameInput(
+ channelName = channelName,
+ isJoined = isJoined,
+ onJoinClick = {
+ channelName = it
+ keyboard?.hide()
+ permissionLauncher.launch(
+ arrayOf(
+ android.Manifest.permission.RECORD_AUDIO,
+ android.Manifest.permission.CAMERA
+ )
+ )
+ },
+ onLeaveClick = {
+ rtcEngine.stopPreview()
+ rtcEngine.leaveChannel()
+ }
+ )
}
}
-
}
\ No newline at end of file
diff --git a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/ScreenSharing.kt b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/ScreenSharing.kt
index 920a1c8fb..855a8a01a 100644
--- a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/ScreenSharing.kt
+++ b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/samples/ScreenSharing.kt
@@ -1,5 +1,13 @@
package io.agora.api.example.compose.samples
+import android.Manifest
+import android.app.Activity
+import android.content.Context
+import android.content.ContextWrapper
+import android.os.Build
+import android.os.Handler
+import android.os.Looper
+import android.util.DisplayMetrics
import android.util.Log
import android.view.View
import android.widget.Toast
@@ -21,7 +29,6 @@ import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.platform.LocalLifecycleOwner
import androidx.compose.ui.platform.LocalSoftwareKeyboardController
import androidx.compose.ui.res.stringResource
-import androidx.compose.ui.tooling.preview.Preview
import androidx.compose.ui.unit.dp
import io.agora.api.example.compose.BuildConfig
import io.agora.api.example.compose.R
@@ -43,6 +50,13 @@ import io.agora.rtc2.ScreenCaptureParameters
import io.agora.rtc2.video.VideoCanvas
import io.agora.rtc2.video.VideoEncoderConfiguration
+private tailrec fun Context.findActivity(): Activity =
+ when (this) {
+ is Activity -> this
+ is ContextWrapper -> this.baseContext.findActivity()
+ else -> throw IllegalArgumentException("Could not find activity!")
+ }
+
@Composable
fun ScreenSharing() {
val context = LocalContext.current
@@ -57,6 +71,9 @@ fun ScreenSharing() {
var screenUid by rememberSaveable { mutableIntStateOf(0) }
var isScreenPreview by rememberSaveable { mutableStateOf(true) }
var isScreenSharing by rememberSaveable { mutableStateOf(false) }
+ var shareScreenOnly by rememberSaveable { mutableStateOf(true) }
+
+ val handler = remember { Handler(Looper.getMainLooper()) }
val screenCaptureParameters = remember {
ScreenCaptureParameters()
@@ -64,7 +81,7 @@ fun ScreenSharing() {
val rtcEngine = remember {
RtcEngine.create(RtcEngineConfig().apply {
mAreaCode = SettingPreferences.getArea()
- mContext = context
+ mContext = context.applicationContext
mAppId = BuildConfig.AGORA_APP_ID
mEventHandler = object : IRtcEngineEventHandler() {
override fun onJoinChannelSuccess(channel: String?, uid: Int, elapsed: Int) {
@@ -115,6 +132,8 @@ fun ScreenSharing() {
localStats.copy(localVideoStats = stats).let {
localStats = it
}
+ Log.d("onLocalVideoStats","${stats?.captureFrameWidth} ${stats?.captureFrameHeight} " +
+ "${stats?.encodedFrameWidth} ${stats?.encodedFrameHeight}")
}
override fun onLocalAudioStats(stats: LocalAudioStats?) {
@@ -144,6 +163,36 @@ fun ScreenSharing() {
}
}
+ override fun onLocalVideoEvent(source: Constants.VideoSourceType?, event: Int) {
+ super.onLocalVideoEvent(source, event)
+ handler.post {
+ when (event) {
+
+ Constants.LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_WINDOW_HIDDEN -> {
+ Toast.makeText(context, "Shared app moved to background", Toast.LENGTH_LONG).show()
+ }
+
+ Constants.LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN -> {
+ Toast.makeText(context, "Shared app restored to foreground", Toast.LENGTH_LONG).show()
+ }
+
+ Constants.LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_STOPPED_BY_USER -> {
+ Toast.makeText(
+ context,
+ "Screen sharing stopped by user in status bar",
+ Toast.LENGTH_LONG
+ )
+ .show()
+ }
+
+ Constants.LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_SYSTEM_INTERNAL_ERROR -> {
+ Toast.makeText(context, "Screen sharing error occurred", Toast.LENGTH_LONG).show()
+ }
+ }
+ }
+
+ }
+
override fun onLocalVideoStateChanged(
source: Constants.VideoSourceType?,
state: Int,
@@ -191,7 +240,24 @@ fun ScreenSharing() {
// Permission is granted
Toast.makeText(context, R.string.permission_granted, Toast.LENGTH_LONG).show()
+ // Set share screen only parameter for Android 14+
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.UPSIDE_DOWN_CAKE) {
+ if (shareScreenOnly) {
+ rtcEngine.setParameters("{\"rtc.video.share_screen_only\":true}")
+ } else {
+ rtcEngine.setParameters("{\"rtc.video.share_screen_only\":false}")
+ }
+ }
+
+ val metrics = DisplayMetrics()
+ context.findActivity().windowManager.defaultDisplay.getRealMetrics(metrics)
+ screenCaptureParameters.videoCaptureParameters.width = 720
+ screenCaptureParameters.videoCaptureParameters.height =
+ (720 * 1.0f / metrics.widthPixels * metrics.heightPixels).toInt()
+ screenCaptureParameters.videoCaptureParameters.framerate = 15
+
rtcEngine.startScreenCapture(screenCaptureParameters)
+
val mediaOptions = ChannelMediaOptions()
mediaOptions.channelProfile = Constants.CHANNEL_PROFILE_LIVE_BROADCASTING
mediaOptions.clientRoleType = Constants.CLIENT_ROLE_BROADCASTER
@@ -202,6 +268,7 @@ fun ScreenSharing() {
mediaOptions.publishScreenCaptureAudio = true
mediaOptions.publishScreenCaptureVideo = true
TokenUtils.gen(channelName, 0) {
+
rtcEngine.joinChannel(it, channelName, 0, mediaOptions)
}
} else {
@@ -218,6 +285,10 @@ fun ScreenSharing() {
remoteStats = remoteStats,
isScreenPreview = isScreenPreview,
isScreenSharing = isScreenSharing,
+ shareScreenOnly = shareScreenOnly,
+ onShareScreenOnly = {
+ shareScreenOnly = it
+ },
localRender = { view, id ->
val videoCanvas = VideoCanvas(view, Constants.RENDER_MODE_FIT, id)
videoCanvas.sourceType = Constants.VideoSourceType.VIDEO_SOURCE_SCREEN_PRIMARY.value
@@ -227,7 +298,7 @@ fun ScreenSharing() {
rtcEngine.startPreview(Constants.VideoSourceType.VIDEO_SOURCE_SCREEN_PRIMARY)
},
remoteRender = { view, id ->
- rtcEngine.setupRemoteVideo(VideoCanvas(view, Constants.RENDER_MODE_HIDDEN, id))
+ rtcEngine.setupRemoteVideo(VideoCanvas(view, Constants.RENDER_MODE_FIT, id))
},
onJoinClick = {
if (it.isEmpty()) {
@@ -238,8 +309,8 @@ fun ScreenSharing() {
channelName = it
permissionLauncher.launch(
arrayOf(
- android.Manifest.permission.RECORD_AUDIO,
- android.Manifest.permission.CAMERA,
+ Manifest.permission.RECORD_AUDIO,
+ Manifest.permission.CAMERA,
)
)
},
@@ -254,7 +325,7 @@ fun ScreenSharing() {
screenUid = localUid
} else {
screenUid = 0
- val videoCanvas = VideoCanvas(null, Constants.RENDER_MODE_HIDDEN, localUid)
+ val videoCanvas = VideoCanvas(null, Constants.RENDER_MODE_FIT, localUid)
videoCanvas.sourceType = Constants.VideoSourceType.VIDEO_SOURCE_SCREEN_PRIMARY.value
videoCanvas.mirrorMode = Constants.VIDEO_MIRROR_MODE_DISABLED
rtcEngine.setupLocalVideo(videoCanvas)
@@ -291,12 +362,14 @@ private fun ScreenSharingView(
remoteRender: (view: View, id: Int) -> Unit = { _, _ -> },
isScreenPreview: Boolean = true,
isScreenSharing: Boolean = false,
+ shareScreenOnly: Boolean = false,
onJoinClick: (String) -> Unit,
onLeaveClick: () -> Unit,
onScreenPreview: (Boolean) -> Unit,
onScreenAudio: (Boolean) -> Unit,
onScreenScenario: (Constants.ScreenScenarioType) -> Unit,
onScreenVolume: (Float) -> Unit,
+ onShareScreenOnly: (Boolean) -> Unit,
) {
Column {
TwoVideoView(
@@ -325,6 +398,15 @@ private fun ScreenSharingView(
) {
onScreenAudio(it)
}
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.UPSIDE_DOWN_CAKE) {
+ SwitchRaw(
+ title = stringResource(id = R.string.screen_sharing_share_screen_only),
+ checked = shareScreenOnly,
+ enable = !isJoined
+ ) {
+ onShareScreenOnly(it)
+ }
+ }
DropdownMenuRaw(
title = "Scenario Type",
options = listOf(
@@ -353,19 +435,20 @@ private fun ScreenSharingView(
}
-@Preview
-@Composable
-private fun ScreenSharingViewPreview() {
- ScreenSharingView(
- channelName = "test",
- isJoined = false,
- localUid = 1,
- onJoinClick = {},
- onLeaveClick = { },
- onScreenPreview = {},
- onScreenAudio = {},
- onScreenScenario = {}
- ) {
-
- }
-}
+//@Preview
+//@Composable
+//private fun ScreenSharingViewPreview() {
+// ScreenSharingView(
+// channelName = "test",
+// isJoined = false,
+// localUid = 1,
+// onJoinClick = {},
+// onLeaveClick = { },
+// onScreenPreview = {},
+// onScreenAudio = {},
+// onScreenScenario = {},
+// onShareScreenOnly = {},
+// ) {
+//
+// }
+//}
diff --git a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/ui/common/Widgets.kt b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/ui/common/Widgets.kt
index e30668597..2d7e83a67 100644
--- a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/ui/common/Widgets.kt
+++ b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/ui/common/Widgets.kt
@@ -349,6 +349,7 @@ fun DropdownMenuRaw(
value = text,
onValueChange = {},
readOnly = true,
+ enabled = enable,
singleLine = true,
trailingIcon = { ExposedDropdownMenuDefaults.TrailingIcon(expanded = expanded && enable) },
colors = TextFieldDefaults.colors(
diff --git a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/ui/example/Example.kt b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/ui/example/Example.kt
index 8e8e1e5e5..97b0760bf 100644
--- a/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/ui/example/Example.kt
+++ b/Android/APIExample-Compose/app/src/main/java/io/agora/api/example/compose/ui/example/Example.kt
@@ -1,5 +1,9 @@
package io.agora.api.example.compose.ui.example
+import android.content.Context
+import android.content.ContextWrapper
+import android.os.Build
+import androidx.activity.ComponentActivity
import androidx.compose.foundation.layout.Box
import androidx.compose.foundation.layout.WindowInsets
import androidx.compose.foundation.layout.consumeWindowInsets
@@ -7,30 +11,74 @@ import androidx.compose.foundation.layout.fillMaxSize
import androidx.compose.foundation.layout.padding
import androidx.compose.foundation.layout.safeDrawing
import androidx.compose.runtime.Composable
+import androidx.compose.runtime.DisposableEffect
+import androidx.compose.runtime.getValue
+import androidx.compose.runtime.mutableStateOf
+import androidx.compose.runtime.remember
+import androidx.compose.runtime.setValue
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
+import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.res.stringResource
+import androidx.core.app.PictureInPictureModeChangedInfo
+import androidx.core.util.Consumer
import io.agora.api.example.compose.model.Example
import io.agora.api.example.compose.ui.common.APIExampleScaffold
+private fun Context.findActivity(): ComponentActivity {
+ var context = this
+ while (context is ContextWrapper) {
+ if (context is ComponentActivity) return context
+ context = context.baseContext
+ }
+ throw IllegalStateException("Picture in picture should be called in the context of an Activity")
+}
+
+@Composable
+private fun rememberIsInPipMode(): Boolean {
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
+ val activity = LocalContext.current.findActivity()
+ var pipMode by remember { mutableStateOf(activity.isInPictureInPictureMode) }
+ DisposableEffect(activity) {
+ val observer = Consumer<PictureInPictureModeChangedInfo> { info ->
+ pipMode = info.isInPictureInPictureMode
+ }
+ activity.addOnPictureInPictureModeChangedListener(observer)
+ onDispose { activity.removeOnPictureInPictureModeChangedListener(observer) }
+ }
+ return pipMode
+ } else {
+ return false
+ }
+}
+
+
@Composable
fun Example(
example: Example,
onBackClick: () -> Unit,
) {
- APIExampleScaffold(
- topBarTitle = stringResource(id = example.name),
- showBackNavigationIcon = true,
- onBackClick = onBackClick,
- ) { paddingValues ->
+ val isInPictureInPictureMode = rememberIsInPipMode()
+ if (isInPictureInPictureMode) {
Box(
- modifier = Modifier
- .fillMaxSize()
- .consumeWindowInsets(WindowInsets.safeDrawing)
- .padding(paddingValues),
- contentAlignment = Alignment.Center
+ modifier = Modifier.fillMaxSize(), contentAlignment = Alignment.Center
) {
example.content(onBackClick)
}
+ } else {
+ APIExampleScaffold(
+ topBarTitle = stringResource(id = example.name),
+ showBackNavigationIcon = true,
+ onBackClick = onBackClick,
+ ) { paddingValues ->
+ Box(
+ modifier = Modifier
+ .fillMaxSize()
+ .consumeWindowInsets(WindowInsets.safeDrawing)
+ .padding(paddingValues)
+ ) {
+ example.content(onBackClick)
+ }
+ }
}
}
\ No newline at end of file
diff --git a/Android/APIExample-Compose/app/src/main/res/values/strings.xml b/Android/APIExample-Compose/app/src/main/res/values/strings.xml
index 74a399a35..4c0119cb7 100644
--- a/Android/APIExample-Compose/app/src/main/res/values/strings.xml
+++ b/Android/APIExample-Compose/app/src/main/res/values/strings.xml
@@ -74,6 +74,7 @@
Transcoding Or NotScreen Sharing Local PreviewScreen Sharing Audio
+ Share Screen OnlyPlease move the red icon to experience the 3D audio effectMute AudioBeauty
@@ -107,4 +108,9 @@
Permission GrantedPermission DeniedEnable Video
+ Video Scenario
+ General
+ Meeting
+ 1V1
+ Live Show
\ No newline at end of file
diff --git a/Android/APIExample-Compose/gradle.properties b/Android/APIExample-Compose/gradle.properties
index f5ef10dee..69df9661f 100644
--- a/Android/APIExample-Compose/gradle.properties
+++ b/Android/APIExample-Compose/gradle.properties
@@ -22,4 +22,4 @@ kotlin.code.style=official
# thereby reducing the size of the R class for that library
android.nonTransitiveRClass=true
-rtc_sdk_version = 4.6.0
\ No newline at end of file
+rtc_sdk_version = 4.6.2
\ No newline at end of file
diff --git a/Android/APIExample/README.md b/Android/APIExample/README.md
index ee36e5f0c..4b4feb347 100644
--- a/Android/APIExample/README.md
+++ b/Android/APIExample/README.md
@@ -44,18 +44,14 @@ This project contains third-party beauty integration examples, which cannot be e
without configuring resources and certificates. The resource certificate configuration method is as
follows:
-#### SenseTime
+#### Agora
-1. Contact SenseTime customer service to obtain the download link and certificate of the beauty sdk
-2. Unzip the beauty sdk, and copy the following resources to the corresponding path
+1. Contact Agora technical support to obtain beauty resources
+2. Put the beauty resources in the corresponding path
-| SenseTime Beauty SDK Path | Location |
-|----------------------------------------------------------------------|----------------------------------------------------------|
-| Android/models | app/src/main/assets/beauty_sensetime/models |
-| Android/smaple/SenseMeEffects/app/src/main/assets/sticker_face_shape | app/src/main/assets/beauty_sensetime/sticker_face_shape |
-| Android/smaple/SenseMeEffects/app/src/main/assets/style_lightly | app/src/main/assets/beauty_sensetime/style_lightly |
-| Android/smaple/SenseMeEffects/app/src/main/assets/makeup_lip | app/src/main/assets/beauty_sensetime/makeup_lip |
-| SenseME.lic | app/src/main/assets/beauty_sensetime/license/SenseME.lic |
+| Agora Beauty Resources | Location |
+|------------------------|----------------------------------|
+| beauty resources | app/src/main/assets/beauty_agora |
#### FaceUnity
@@ -68,21 +64,6 @@ follows:
| sticker resource(e.g. fashi.bundle) | app/src/main/assets/beauty_faceunity/sticker |
| authpack.java | app/src/main/java/io/agora/api/example/examples/advanced/beauty/authpack.java |
-#### ByteDance
-
-1. Contact ByteDance customer service to obtain the download link and certificate of the beauty sdk
-2. Unzip the ByteDance beauty resource and copy the following files/directories to the corresponding path
-
-| ByteDance Beauty Resources | Location |
-|---------------------------------|--------------------------------------|
-| resource/LicenseBag.bundle | app/src/main/assets/beauty_bytedance |
-| resource/ModelResource.bundle | app/src/main/assets/beauty_bytedance |
-| resource/ComposeMakeup.bundle | app/src/main/assets/beauty_bytedance |
-| resource/StickerResource.bundle | app/src/main/assets/beauty_bytedance |
-| resource/StickerResource.bundle | app/src/main/assets/beauty_bytedance |
-
-3. Modify the LICENSE_NAME in the app/src/main/java/io/agora/api/example/examples/advanced/beauty/ByteDanceBeauty.java file to the name of the applied certificate file.
-
### For Agora Extension Developers
diff --git a/Android/APIExample/README.zh.md b/Android/APIExample/README.zh.md
index b372acf0b..67fde8368 100644
--- a/Android/APIExample/README.zh.md
+++ b/Android/APIExample/README.zh.md
@@ -41,18 +41,14 @@
本项目包含第三方美颜集成示例,在没有配置资源和证书的情况下,默认是无法启用的。资源证书配置方法如下:
-#### 商汤美颜
+#### 声网美颜
-1. 联系商汤客服获取美颜sdk下载链接以及证书
-2. 解压美颜sdk,并将以下资源复制到对应路径
+1. 联系声网技术支持获取美颜资源
+2. 将美颜资源放到对应路径下
-| 商汤SDK文件/目录 | 项目路径 |
-|----------------------------------------------------------------------|----------------------------------------------------------|
-| Android/models | app/src/main/assets/beauty_sensetime/models |
-| Android/smaple/SenseMeEffects/app/src/main/assets/sticker_face_shape | app/src/main/assets/beauty_sensetime/sticker_face_shape |
-| Android/smaple/SenseMeEffects/app/src/main/assets/style_lightly | app/src/main/assets/beauty_sensetime/style_lightly |
-| Android/smaple/SenseMeEffects/app/src/main/assets/makeup_lip | app/src/main/assets/beauty_sensetime/makeup_lip |
-| SenseME.lic | app/src/main/assets/beauty_sensetime/license/SenseME.lic |
+| 美颜资源 | 项目路径 |
+|------|----------------------------------|
+| 美颜资源 | app/src/main/assets/beauty_agora |
#### 相芯美颜
@@ -65,22 +61,6 @@
| 贴纸资源(如fashi.bundle) | app/src/main/assets/beauty_faceunity/sticker |
| 证书authpack.java | app/src/main/java/io/agora/api/example/examples/advanced/beauty/authpack.java |
-#### 字节美颜
-
-1. 联系字节客服获取美颜sdk下载链接以及证书
-2. 解压字节/火山美颜资源并复制以下文件/目录到对应路径下
-
-| 字节SDK文件/目录 | 项目路径 |
-|--------------------------------------------------|-------------------------------------------------------|
-| resource/LicenseBag.bundle | app/src/main/assets/beauty_bytedance |
-| resource/ModelResource.bundle | app/src/main/assets/beauty_bytedance |
-| resource/ComposeMakeup.bundle | app/src/main/assets/beauty_bytedance |
-| resource/StickerResource.bundle | app/src/main/assets/beauty_bytedance |
-| resource/StickerResource.bundle | app/src/main/assets/beauty_bytedance |
-
-3.
-修改app/src/main/java/io/agora/api/example/examples/advanced/beauty/ByteDanceBeauty.java文件里LICENSE_NAME为申请到的证书文件名
-
### 对于Agora Extension开发者
从4.0.0SDK开始,Agora SDK支持插件系统和开放的云市场帮助开发者发布自己的音视频插件,本项目包含了一个SimpleFilter示例,默认是禁用的状态,如果需要开启编译和使用需要完成以下步骤:
diff --git a/Android/APIExample/agora-simple-filter/build.gradle b/Android/APIExample/agora-simple-filter/build.gradle
index cb784e784..e27008bf7 100644
--- a/Android/APIExample/agora-simple-filter/build.gradle
+++ b/Android/APIExample/agora-simple-filter/build.gradle
@@ -36,7 +36,7 @@ android {
externalNativeBuild {
cmake {
path "src/main/cpp/CMakeLists.txt"
- version "3.10.2"
+ version "3.22.1"
}
}
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraBase.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraBase.h
index 537fd5fae..99397d4fe 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraBase.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraBase.h
@@ -262,19 +262,15 @@ class AList {
} // namespace util
/**
- * The channel profile.
+ * @brief The channel profile.
*/
enum CHANNEL_PROFILE_TYPE {
/**
- * 0: Communication.
- *
- * This profile prioritizes smoothness and applies to the one-to-one scenario.
+ * 0: Communication. Use this profile when there are only two users in the channel.
*/
CHANNEL_PROFILE_COMMUNICATION = 0,
/**
- * 1: (Default) Live Broadcast.
- *
- * This profile prioritizes supporting a large audience in a live broadcast channel.
+ * 1: Live streaming. Use this profile when there are more than two users in the channel.
*/
CHANNEL_PROFILE_LIVE_BROADCASTING = 1,
/**
@@ -283,8 +279,8 @@ enum CHANNEL_PROFILE_TYPE {
*/
CHANNEL_PROFILE_GAME __deprecated = 2,
/**
- * 3: Cloud Gaming.
- *
+ * 3: Cloud gaming. The scenario is optimized for latency. Use this profile if the use case requires
+ * frequent interactions between users.
* @deprecated This profile is deprecated.
*/
CHANNEL_PROFILE_CLOUD_GAMING __deprecated = 3,
@@ -451,59 +447,75 @@ enum WARN_CODE_TYPE {
};
/**
- * The error codes.
+ * @brief Error codes.
+ *
+ * @details
+ * An error code indicates that the SDK encountered an unrecoverable error that requires application
+ * intervention. For example, an error is returned when the camera fails to open, and the app needs
+ * to inform the user that the camera cannot be used.
+ *
*/
enum ERROR_CODE_TYPE {
/**
- * 0: No error occurs.
+ * 0: No error.
*/
ERR_OK = 0,
// 1~1000
/**
- * 1: A general error occurs (no specified reason).
+ * 1: General error with no classified reason. Try calling the method again.
*/
ERR_FAILED = 1,
/**
- * 2: The argument is invalid. For example, the specific channel name
- * includes illegal characters.
+ * 2: An invalid parameter is used. For example, the specified channel name includes illegal
+ * characters. Reset the parameter.
*/
ERR_INVALID_ARGUMENT = 2,
/**
- * 3: The SDK module is not ready. Choose one of the following solutions:
- * - Check the audio device.
- * - Check the completeness of the app.
- * - Reinitialize the RTC engine.
+ * 3: The SDK is not ready. Possible reasons include the following:
+ * - The initialization of `IRtcEngine` fails. Reinitialize the `IRtcEngine`.
+ * - No user has joined the channel when the method is called. Check the code logic.
+ * - The user has not left the channel when the `rate` or `complain` method is called. Check the
+ * code logic.
+ * - The audio module is disabled.
+ * - The program is not complete.
*/
ERR_NOT_READY = 3,
/**
- * 4: The SDK does not support this function.
+ * 4: The `IRtcEngine` does not support the request. Possible reasons include the following:
+ * - The built-in encryption mode is incorrect, or the SDK fails to load the external encryption
+ * library. Check the encryption mode setting, or reload the external encryption library.
*/
ERR_NOT_SUPPORTED = 4,
/**
- * 5: The request is rejected.
+ * 5: The request is rejected. Possible reasons include the following:
+ * - The `IRtcEngine` initialization fails. Reinitialize the `IRtcEngine`.
+ * - The channel name is set as the empty string `""` when joining the channel. Reset the channel
+ * name.
+ * - When the `joinChannelEx` method is called to join multiple channels, the specified channel name
+ * is already in use. Reset the channel name.
*/
ERR_REFUSED = 5,
/**
- * 6: The buffer size is not big enough to store the returned data.
+ * 6: The buffer size is insufficient to store the returned data.
*/
ERR_BUFFER_TOO_SMALL = 6,
/**
- * 7: The SDK is not initialized before calling this method.
+ * 7: A method is called before the initialization of `IRtcEngine`. Ensure that the `IRtcEngine`
+ * object is initialized before using this method.
*/
ERR_NOT_INITIALIZED = 7,
/**
- * 8: The state is invalid.
+ * 8: Invalid state.
*/
ERR_INVALID_STATE = 8,
/**
- * 9: No permission. This is for internal use only, and does
- * not return to the app through any method or callback.
+ * 9: Permission to access is not granted. Check whether your app has access to the audio and video
+ * device.
*/
ERR_NO_PERMISSION = 9,
/**
- * 10: An API timeout occurs. Some API methods require the SDK to return the
- * execution result, and this error occurs if the request takes too long
- * (more than 10 seconds) for the SDK to process.
+ * 10: A timeout occurs. Some API calls require the SDK to return the execution result. This error
+ * occurs if the SDK takes too long (more than 10 seconds) to return the result.
*/
ERR_TIMEDOUT = 10,
/**
@@ -529,126 +541,112 @@ enum ERROR_CODE_TYPE {
*/
ERR_NET_DOWN = 14,
/**
- * 17: The request to join the channel is rejected. This error usually occurs
- * when the user is already in the channel, and still calls the method to join
- * the channel, for example, \ref agora::rtc::IRtcEngine::joinChannel "joinChannel()".
+ * 17: The request to join the channel is rejected. Possible reasons include the following:
+ * - The user is already in the channel. Agora recommends that you use the
+ * `onConnectionStateChanged` callback to see whether the user is in the channel. Do not call this
+ * method to join the channel unless you receive the `CONNECTION_STATE_DISCONNECTED` (1) state.
+ * - After calling `startEchoTest` for the call test, the user tries to join the channel without
+ * calling `stopEchoTest` to end the current test. To join a channel, the call test must be ended by
+ * calling `stopEchoTest`.
*/
ERR_JOIN_CHANNEL_REJECTED = 17,
/**
- * 18: The request to leave the channel is rejected. This error usually
- * occurs when the user has already left the channel, and still calls the
- * method to leave the channel, for example, \ref agora::rtc::IRtcEngine::leaveChannel
- * "leaveChannel".
+ * 18: Fails to leave the channel. Possible reasons include the following:
+ * - The user has left the channel before calling the `leaveChannel(const LeaveChannelOptions&
+ * options)` method. Stop calling this
+ * method to clear this error.
+ * - The user calls the `leaveChannel(const LeaveChannelOptions& options)` method to leave the
+ * channel before joining the channel.
+ * In this case, no extra operation is needed.
*/
ERR_LEAVE_CHANNEL_REJECTED = 18,
/**
- * 19: The resources have been occupied and cannot be reused.
+ * 19: Resources are already in use.
*/
ERR_ALREADY_IN_USE = 19,
/**
- * 20: The SDK gives up the request due to too many requests. This is for
- * internal use only, and does not return to the app through any method or callback.
+ * 20: The request is abandoned by the SDK, possibly because the request has been sent too
+ * frequently.
*/
ERR_ABORTED = 20,
/**
- * 21: On Windows, specific firewall settings can cause the SDK to fail to
- * initialize and crash.
+ * 21: The `IRtcEngine` fails to initialize and has crashed because of specific Windows firewall
+ * settings.
*/
ERR_INIT_NET_ENGINE = 21,
/**
- * 22: The app uses too much of the system resource and the SDK
- * fails to allocate any resource.
+ * 22: The SDK fails to allocate resources because your app uses too many system resources or system
+ * resources are insufficient.
*/
ERR_RESOURCE_LIMITED = 22,
/**
- * 101: The App ID is invalid, usually because the data format of the App ID is incorrect.
- *
- * Solution: Check the data format of your App ID. Ensure that you use the correct App ID to
- * initialize the Agora service.
+ * 23: The function is prohibited. Please allow it in the console, or contact Agora technical support.
+ * @technical preview
+ */
+ ERR_FUNC_IS_PROHIBITED = 23,
+ /**
+ * 101: The specified App ID is invalid. Rejoin the channel with a valid App ID.
*/
ERR_INVALID_APP_ID = 101,
/**
- * 102: The specified channel name is invalid. Please try to rejoin the
- * channel with a valid channel name.
+ * 102: The specified channel name is invalid. A possible reason is that the parameter's data type
+ * is incorrect. Rejoin the channel with a valid channel name.
*/
ERR_INVALID_CHANNEL_NAME = 102,
/**
- * 103: Fails to get server resources in the specified region. Please try to
- * specify another region when calling \ref agora::rtc::IRtcEngine::initialize
- * "initialize".
+ * 103: Fails to get server resources in the specified region. Try another region when initializing
+ * `IRtcEngine`.
*/
ERR_NO_SERVER_RESOURCES = 103,
/**
- * 109: The token has expired, usually for the following reasons:
- * - Timeout for token authorization: Once a token is generated, you must use it to access the
- * Agora service within 24 hours. Otherwise, the token times out and you can no longer use it.
- * - The token privilege expires: To generate a token, you need to set a timestamp for the token
- * privilege to expire. For example, If you set it as seven days, the token expires seven days
- * after its usage. In that case, you can no longer access the Agora service. The users cannot
- * make calls, or are kicked out of the channel.
- *
- * Solution: Regardless of whether token authorization times out or the token privilege expires,
- * you need to generate a new token on your server, and try to join the channel.
+ * 109: The current token has expired. Apply for a new token on the server and call `renewToken`.
*/
ERR_TOKEN_EXPIRED = 109,
/**
- * 110: The token is invalid, usually for one of the following reasons:
- * - Did not provide a token when joining a channel in a situation where the project has enabled
- * the App Certificate.
- * - Tried to join a channel with a token in a situation where the project has not enabled the App
- * Certificate.
- * - The App ID, user ID and channel name that you use to generate the token on the server do not
- * match those that you use when joining a channel.
- *
- * Solution:
- * - Before joining a channel, check whether your project has enabled the App certificate. If yes,
- * you must provide a token when joining a channel; if no, join a channel without a token.
- * - When using a token to join a channel, ensure that the App ID, user ID, and channel name that
- * you use to generate the token is the same as the App ID that you use to initialize the Agora
- * service, and the user ID and channel name that you use to join the channel.
+ * 110: Invalid token. Typical reasons include the following:
+ * - App Certificate is enabled in Agora Console, but the code still uses App ID for authentication.
+ * Once App Certificate is enabled for a project, you must use token-based authentication.
+ * - The `uid` used to generate the token is not the same as the `uid` used to join the channel.
*/
ERR_INVALID_TOKEN = 110,
/**
- * 111: The internet connection is interrupted. This applies to the Agora Web
- * SDK only.
+ * 111: The network connection is interrupted. The SDK triggers this callback when it loses
+ * connection with the server for more than four seconds after the connection is established.
*/
ERR_CONNECTION_INTERRUPTED = 111, // only used in web sdk
/**
- * 112: The internet connection is lost. This applies to the Agora Web SDK
- * only.
+ * 112: The network connection is lost. Occurs when the SDK cannot reconnect to Agora's edge server
+ * 10 seconds after its connection to the server is interrupted.
*/
ERR_CONNECTION_LOST = 112, // only used in web sdk
/**
- * 113: The user is not in the channel when calling the
- * \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage()" method.
+ * 113: The user is not in the channel when calling the `sendStreamMessage` method.
*/
ERR_NOT_IN_CHANNEL = 113,
/**
- * 114: The data size is over 1024 bytes when the user calls the
- * \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage()" method.
+ * 114: The data size exceeds 1 KB when calling the `sendStreamMessage` method.
*/
ERR_SIZE_TOO_LARGE = 114,
/**
- * 115: The bitrate of the sent data exceeds the limit of 6 Kbps when the
- * user calls the \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage()".
+ * 115: The data bitrate exceeds 6 KB/s when calling the `sendStreamMessage` method.
*/
ERR_BITRATE_LIMIT = 115,
/**
- * 116: Too many data streams (over 5) are created when the user
- * calls the \ref agora::rtc::IRtcEngine::createDataStream "createDataStream()" method.
+ * 116: More than five data streams are created when calling the `createDataStream(int* streamId,
+ * const DataStreamConfig& config)` method.
*/
ERR_TOO_MANY_DATA_STREAMS = 116,
/**
- * 117: A timeout occurs for the data stream transmission.
+ * 117: The data stream transmission times out.
*/
ERR_STREAM_MESSAGE_TIMEOUT = 117,
/**
- * 119: Switching the user role fails. Please try to rejoin the channel.
+ * 119: Switching roles fails, try rejoining the channel.
*/
ERR_SET_CLIENT_ROLE_NOT_AUTHORIZED = 119,
/**
- * 120: MediaStream decryption fails. The user may have tried to join the channel with a wrong
- * password. Check your settings or try rejoining the channel.
+ * 120: Media streams decryption fails. The user might use an incorrect password to join the
+ * channel. Check the entered password, or tell the user to try rejoining the channel.
*/
ERR_DECRYPTION_FAILED = 120,
/**
@@ -656,18 +654,16 @@ enum ERROR_CODE_TYPE {
*/
ERR_INVALID_USER_ID = 121,
/**
- * 122: DataStream decryption fails. The peer may have tried to join the channel with a wrong
- * password, or did't enable datastream encryption
+ * 122: Data streams decryption fails. The user might use an incorrect password to join the channel.
+ * Check the entered password, or tell the user to try rejoining the channel.
*/
ERR_DATASTREAM_DECRYPTION_FAILED = 122,
/**
- * 123: The app is banned by the server.
+ * 123: The user is banned from the server.
*/
ERR_CLIENT_IS_BANNED_BY_SERVER = 123,
/**
- * 130: Encryption is enabled when the user calls the
- * \ref agora::rtc::IRtcEngine::addPublishStreamUrl "addPublishStreamUrl()" method
- * (CDN live streaming does not support encrypted streams).
+ * 130: The SDK does not support pushing encrypted streams to CDN.
*/
ERR_ENCRYPTED_STREAM_NOT_ALLOWED_PUBLISH = 130,
@@ -677,8 +673,7 @@ enum ERROR_CODE_TYPE {
ERR_LICENSE_CREDENTIAL_INVALID = 131,
/**
- * 134: The user account is invalid, usually because the data format of the user account is
- * incorrect.
+ * 134: The user account is invalid, possibly because it contains invalid parameters.
*/
ERR_INVALID_USER_ACCOUNT = 134,
@@ -705,7 +700,13 @@ enum ERROR_CODE_TYPE {
ERR_CERT_REQUEST = 168,
// PcmSend Error num
+ /**
+ * 200: Unsupported PCM format.
+ */
ERR_PCMSEND_FORMAT = 200, // unsupport pcm format
+ /**
+ * 201: Buffer overflow. The PCM data is sent too quickly.
+ */
ERR_PCMSEND_BUFFEROVERFLOW = 201, // buffer overflow, the pcm send rate too quickly
/// @cond
@@ -749,43 +750,43 @@ enum ERROR_CODE_TYPE {
/// @endcond
// 1001~2000
/**
- * 1001: Fails to load the media engine.
+ * 1001: The SDK fails to load the media engine.
*/
ERR_LOAD_MEDIA_ENGINE = 1001,
/**
- * 1005: Audio device module: A general error occurs in the Audio Device Module (no specified
- * reason). Check if the audio device is used by another app, or try
- * rejoining the channel.
+ * 1005: A general error occurs (no specified reason). Check whether the audio device is already in
+ * use by another app, or try rejoining the channel.
*/
ERR_ADM_GENERAL_ERROR = 1005,
/**
- * 1008: Audio Device Module: An error occurs in initializing the playback
- * device.
+ * 1008: An error occurs when initializing the playback device. Check whether the playback device is
+ * already in use by another app, or try rejoining the channel.
*/
ERR_ADM_INIT_PLAYOUT = 1008,
/**
- * 1009: Audio Device Module: An error occurs in starting the playback device.
+ * 1009: An error occurs when starting the playback device. Check the playback device.
*/
ERR_ADM_START_PLAYOUT = 1009,
/**
- * 1010: Audio Device Module: An error occurs in stopping the playback device.
+ * 1010: An error occurs when stopping the playback device.
*/
ERR_ADM_STOP_PLAYOUT = 1010,
/**
- * 1011: Audio Device Module: An error occurs in initializing the recording
- * device.
+ * 1011: An error occurs when initializing the recording device. Check the recording device, or try
+ * rejoining the channel.
*/
ERR_ADM_INIT_RECORDING = 1011,
/**
- * 1012: Audio Device Module: An error occurs in starting the recording device.
+ * 1012: An error occurs when starting the recording device. Check the recording device.
*/
ERR_ADM_START_RECORDING = 1012,
/**
- * 1013: Audio Device Module: An error occurs in stopping the recording device.
+ * 1013: An error occurs when stopping the recording device.
*/
ERR_ADM_STOP_RECORDING = 1013,
/**
- * 1501: Video Device Module: The camera is not authorized.
+ * 1501: Permission to access the camera is not granted. Check whether your app has been granted
+ * permission to access the camera.
*/
ERR_VDM_CAMERA_NOT_AUTHORIZED = 1501,
};
@@ -818,11 +819,11 @@ enum LICENSE_ERROR_TYPE {
};
/**
- * The operational permission of the SDK on the audio session.
+ * @brief The operation permissions of the SDK on the audio session.
*/
enum AUDIO_SESSION_OPERATION_RESTRICTION {
/**
- * 0: No restriction; the SDK can change the audio session.
+ * 0: No restriction, the SDK can change the audio session.
*/
AUDIO_SESSION_OPERATION_RESTRICTION_NONE = 0,
/**
@@ -834,13 +835,13 @@ enum AUDIO_SESSION_OPERATION_RESTRICTION {
*/
AUDIO_SESSION_OPERATION_RESTRICTION_CONFIGURE_SESSION = 1 << 1,
/**
- * 4: The SDK keeps the audio session active when the user leaves the
- * channel, for example, to play an audio file in the background.
+ * 4: The SDK keeps the audio session active when the user leaves the channel, for example, to play
+ * an audio file in the background.
*/
AUDIO_SESSION_OPERATION_RESTRICTION_DEACTIVATE_SESSION = 1 << 2,
/**
- * 128: Completely restricts the operational permission of the SDK on the
- * audio session; the SDK cannot change the audio session.
+ * 128: Completely restricts the operation permissions of the SDK on the audio session; the SDK
+ * cannot change the audio session.
*/
AUDIO_SESSION_OPERATION_RESTRICTION_ALL = 1 << 7,
};
@@ -849,7 +850,7 @@ typedef const char* user_id_t;
typedef void* view_t;
/**
- * The definition of the UserInfo struct.
+ * @brief The information of the user.
*/
struct UserInfo {
/**
@@ -878,17 +879,18 @@ typedef util::AList UserList;
namespace rtc {
/**
- * Reasons for a user being offline.
+ * @brief Reasons for a user being offline.
*/
enum USER_OFFLINE_REASON_TYPE {
/**
- * 0: The user leaves the current channel.
+ * 0: The user quits the call.
*/
USER_OFFLINE_QUIT = 0,
/**
- * 1: The SDK times out and the user drops offline because no data packet was received within a
- * certain period of time. If a user quits the call and the message is not passed to the SDK (due
- * to an unreliable channel), the SDK assumes that the user drops offline.
+ * 1: The SDK times out and the user drops offline because no data packet is received within a
+ * certain period of time.
+ * @note If the user quits the call and the message is not passed to the SDK (due to an unreliable
+ * channel), the SDK assumes the user dropped offline.
*/
USER_OFFLINE_DROPPED = 1,
/**
@@ -897,14 +899,32 @@ enum USER_OFFLINE_REASON_TYPE {
USER_OFFLINE_BECOME_AUDIENCE = 2,
};
+/**
+ * @brief The interface class.
+ */
enum INTERFACE_ID_TYPE {
+ /**
+ * 1: The `IAudioDeviceManager` interface class.
+ */
AGORA_IID_AUDIO_DEVICE_MANAGER = 1,
+ /**
+ * 2: The `IVideoDeviceManager` interface class.
+ */
AGORA_IID_VIDEO_DEVICE_MANAGER = 2,
+ /**
+ * This interface class is deprecated.
+ */
AGORA_IID_PARAMETER_ENGINE = 3,
+ /**
+ * 4: The `IMediaEngine` interface class.
+ */
AGORA_IID_MEDIA_ENGINE = 4,
AGORA_IID_AUDIO_ENGINE = 5,
AGORA_IID_VIDEO_ENGINE = 6,
AGORA_IID_RTC_CONNECTION = 7,
+ /**
+ * This interface class is deprecated.
+ */
AGORA_IID_SIGNALING_ENGINE = 8,
AGORA_IID_MEDIA_ENGINE_REGULATOR = 9,
AGORA_IID_LOCAL_SPATIAL_AUDIO = 11,
@@ -915,7 +935,7 @@ enum INTERFACE_ID_TYPE {
};
/**
- * The network quality types.
+ * @brief Network quality types.
*/
enum QUALITY_TYPE {
/**
@@ -924,16 +944,15 @@ enum QUALITY_TYPE {
*/
QUALITY_UNKNOWN __deprecated = 0,
/**
- * 1: The quality is excellent.
+ * 1: The network quality is excellent.
*/
QUALITY_EXCELLENT = 1,
/**
- * 2: The quality is quite good, but the bitrate may be slightly
- * lower than excellent.
+ * 2: The network quality is quite good, but the bitrate may be slightly lower than excellent.
*/
QUALITY_GOOD = 2,
/**
- * 3: Users can feel the communication slightly impaired.
+ * 3: Users can feel the communication is slightly impaired.
*/
QUALITY_POOR = 3,
/**
@@ -941,11 +960,11 @@ enum QUALITY_TYPE {
*/
QUALITY_BAD = 4,
/**
- * 5: Users can barely communicate.
+ * 5: The quality is so bad that users can barely communicate.
*/
QUALITY_VBAD = 5,
/**
- * 6: Users cannot communicate at all.
+ * 6: The network is down and users cannot communicate at all.
*/
QUALITY_DOWN = 6,
/**
@@ -953,7 +972,7 @@ enum QUALITY_TYPE {
*/
QUALITY_UNSUPPORTED = 7,
/**
- * 8: Detecting the network quality.
+ * 8: The last-mile network probe test is in progress.
*/
QUALITY_DETECTING = 8,
};
@@ -977,29 +996,29 @@ enum FIT_MODE_TYPE {
};
/**
- * The rotation information.
+ * @brief The clockwise rotation of the video.
*/
enum VIDEO_ORIENTATION {
/**
- * 0: Rotate the video by 0 degree clockwise.
+ * 0: (Default) No rotation.
*/
VIDEO_ORIENTATION_0 = 0,
/**
- * 90: Rotate the video by 90 degrees clockwise.
+ * 90: 90 degrees.
*/
VIDEO_ORIENTATION_90 = 90,
/**
- * 180: Rotate the video by 180 degrees clockwise.
+ * 180: 180 degrees.
*/
VIDEO_ORIENTATION_180 = 180,
/**
- * 270: Rotate the video by 270 degrees clockwise.
+ * 270: 270 degrees.
*/
VIDEO_ORIENTATION_270 = 270
};
/**
- * The video frame rate.
+ * @brief The video frame rate.
*/
enum FRAME_RATE {
/**
@@ -1027,7 +1046,8 @@ enum FRAME_RATE {
*/
FRAME_RATE_FPS_30 = 30,
/**
- * 60: 60 fps. Applies to Windows and macOS only.
+ * 60: 60 fps.
+ * @note For Windows and macOS only.
*/
FRAME_RATE_FPS_60 = 60,
};
@@ -1041,85 +1061,97 @@ enum FRAME_HEIGHT {
};
/**
- * Types of the video frame.
+ * @brief The video frame type.
*/
enum VIDEO_FRAME_TYPE {
- /** 0: A black frame. */
+ /**
+ * 0: A black frame.
+ */
VIDEO_FRAME_TYPE_BLANK_FRAME = 0,
- /** 3: Key frame. */
+ /**
+ * 3: Key frame.
+ */
VIDEO_FRAME_TYPE_KEY_FRAME = 3,
- /** 4: Delta frame. */
+ /**
+ * 4: Delta frame.
+ */
VIDEO_FRAME_TYPE_DELTA_FRAME = 4,
- /** 5: The B frame.*/
+ /**
+ * 5: The B frame.
+ */
VIDEO_FRAME_TYPE_B_FRAME = 5,
- /** 6: A discarded frame. */
+ /**
+ * 6: A discarded frame.
+ */
VIDEO_FRAME_TYPE_DROPPABLE_FRAME = 6,
- /** Unknown frame. */
+ /**
+ * Unknown frame.
+ */
VIDEO_FRAME_TYPE_UNKNOW
};
/**
- * Video output orientation modes.
+ * @brief Video output orientation mode.
*/
enum ORIENTATION_MODE {
/**
- * 0: The output video always follows the orientation of the captured video. The receiver takes
- * the rotational information passed on from the video encoder. This mode applies to scenarios
- * where video orientation can be adjusted on the receiver:
+ * 0: (Default) The output video always follows the orientation of the captured video. The receiver
+ * takes the rotational information passed on from the video encoder. This mode applies to scenarios
+ * where video orientation can be adjusted on the receiver.
* - If the captured video is in landscape mode, the output video is in landscape mode.
* - If the captured video is in portrait mode, the output video is in portrait mode.
*/
ORIENTATION_MODE_ADAPTIVE = 0,
/**
- * 1: Landscape mode. In this mode, the SDK always outputs videos in landscape (horizontal) mode.
- * If the captured video is in portrait mode, the video encoder crops it to fit the output.
- * Applies to situations where the receiving end cannot process the rotational information. For
- * example, CDN live streaming.
+ * 1: In this mode, the SDK always outputs videos in landscape (horizontal) mode. If the captured
+ * video is in portrait mode, the video encoder crops it to fit the output. Applies to situations
+ * where the receiving end cannot process the rotational information. For example, CDN live
+ * streaming.
*/
ORIENTATION_MODE_FIXED_LANDSCAPE = 1,
/**
- * 2: Portrait mode. In this mode, the SDK always outputs video in portrait (portrait) mode. If
- * the captured video is in landscape mode, the video encoder crops it to fit the output. Applies
- * to situations where the receiving end cannot process the rotational information. For example,
- * CDN live streaming.
+ * 2: In this mode, the SDK always outputs video in portrait (portrait) mode. If the captured video
+ * is in landscape mode, the video encoder crops it to fit the output. Applies to situations where
+ * the receiving end cannot process the rotational information. For example, CDN live streaming.
*/
ORIENTATION_MODE_FIXED_PORTRAIT = 2,
};
/**
- * (For future use) Video degradation preferences under limited bandwidth.
+ * @brief Video degradation preferences when the bandwidth is a constraint.
*/
enum DEGRADATION_PREFERENCE {
/**
- * -1: (Default) SDK uses degradation preference according to setVideoScenario API settings, real-time network state and other relevant data information.
- * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then MAINTAIN_BALANCED is used. If not, then MAINTAIN_RESOLUTION is used.
- * Also if network state has changed, SDK may change this parameter between MAINTAIN_FRAMERATE、MAINTAIN_BALANCED and MAINTAIN_RESOLUTION automatically to get the best QOE.
- * We recommend using this option.
- */
+ * -1: (Default) Automatic mode. The SDK will automatically select MAINTAIN_FRAMERATE,
+ * MAINTAIN_BALANCED or MAINTAIN_RESOLUTION based on the video scenario you set, in order to achieve
+ * the best overall quality of experience (QoE).
+ */
MAINTAIN_AUTO = -1,
/**
- * 0: (Deprecated) Prefers to reduce the video frame rate while maintaining video quality during
- * video encoding under limited bandwidth. This degradation preference is suitable for scenarios
- * where video quality is prioritized.
- * @note In the COMMUNICATION channel profile, the resolution of the video sent may change, so
- * remote users need to handle this issue.
+ * 0: Prefers to reduce the video frame rate while maintaining video resolution during video
+ * encoding under limited bandwidth. This degradation preference is suitable for scenarios where
+ * video quality is prioritized.
*/
MAINTAIN_QUALITY = 0,
/**
- * 1: Prefers to reduce the video quality while maintaining the video frame rate during video
- * encoding under limited bandwidth. This degradation preference is suitable for scenarios where
- * smoothness is prioritized and video quality is allowed to be reduced.
+ * 1: Reduces the video resolution while maintaining the video frame rate during video encoding
+ * under limited bandwidth. This degradation preference is suitable for scenarios where smoothness
+ * is prioritized and video quality is allowed to be reduced.
*/
MAINTAIN_FRAMERATE = 1,
/**
- * 2: Reduces the video frame rate and video quality simultaneously during video encoding under
- * limited bandwidth. MAINTAIN_BALANCED has a lower reduction than MAINTAIN_RESOLUTION and
- * MAINTAIN_FRAMERATE, and this preference is suitable for scenarios where both smoothness and
- * video quality are a priority.
+ * 2: Reduces the video frame rate and video resolution simultaneously during video encoding under
+ * limited bandwidth. The MAINTAIN_BALANCED has a lower reduction than MAINTAIN_QUALITY and
+ * MAINTAIN_FRAMERATE, and this preference is suitable for scenarios where both smoothness and video
+ * quality are a priority.
+ * @note The resolution of the video sent may change, so remote users need to handle this issue. See
+ * `onVideoSizeChanged`.
*/
MAINTAIN_BALANCED = 2,
/**
- * 3: Degrade framerate in order to maintain resolution.
+ * 3: Reduces the video frame rate while maintaining the video resolution during video encoding
+ * under limited bandwidth. This degradation preference is suitable for scenarios where video
+ * quality is prioritized.
*/
MAINTAIN_RESOLUTION = 3,
/**
@@ -1129,15 +1161,15 @@ enum DEGRADATION_PREFERENCE {
};
/**
- * The definition of the VideoDimensions struct.
+ * @brief The video dimension.
*/
struct VideoDimensions {
/**
- * The width of the video, in pixels.
+ * The width (pixels) of the video.
*/
int width;
/**
- * The height of the video, in pixels.
+ * The height (pixels) of the video.
*/
int height;
VideoDimensions() : width(640), height(480) {}
@@ -1174,38 +1206,57 @@ const int DEFAULT_MIN_BITRATE = -1;
const int DEFAULT_MIN_BITRATE_EQUAL_TO_TARGET_BITRATE = -2;
/**
- * screen sharing supported capability level.
+ * @brief The highest frame rate supported by the screen sharing device.
*/
enum SCREEN_CAPTURE_FRAMERATE_CAPABILITY {
+ /**
+ * 0: The device supports the frame rate of up to 15 fps.
+ */
SCREEN_CAPTURE_FRAMERATE_CAPABILITY_15_FPS = 0,
+ /**
+ * 1: The device supports the frame rate of up to 30 fps.
+ */
SCREEN_CAPTURE_FRAMERATE_CAPABILITY_30_FPS = 1,
+ /**
+ * 2: The device supports the frame rate of up to 60 fps.
+ */
SCREEN_CAPTURE_FRAMERATE_CAPABILITY_60_FPS = 2,
};
/**
- * Video codec capability levels.
+ * @brief The level of the codec capability.
*/
enum VIDEO_CODEC_CAPABILITY_LEVEL {
- /** No specified level */
+ /**
+ * -1: Unsupported video type. Currently, only H.264 and H.265 formats are supported. If the video
+ * is in another format, this value will be returned.
+ */
CODEC_CAPABILITY_LEVEL_UNSPECIFIED = -1,
- /** Only provide basic support for the codec type */
+ /**
+ * 5: Supports encoding and decoding videos up to 1080p and 30 fps.
+ */
CODEC_CAPABILITY_LEVEL_BASIC_SUPPORT = 5,
- /** Can process 1080p video at a rate of approximately 30 fps. */
+ /**
+ * 10: Supports encoding and decoding videos up to 1080p and 30 fps.
+ */
CODEC_CAPABILITY_LEVEL_1080P30FPS = 10,
- /** Can process 1080p video at a rate of approximately 60 fps. */
+ /**
+ * 20: Supports encoding and decoding videos up to 1080p and 60 fps.
+ */
CODEC_CAPABILITY_LEVEL_1080P60FPS = 20,
- /** Can process 4k video at a rate of approximately 30 fps. */
+ /**
+ * 30: Supports encoding and decoding videos up to 4K and 30 fps.
+ */
CODEC_CAPABILITY_LEVEL_4K60FPS = 30,
};
/**
- * The video codec types.
+ * @brief Video codec types.
*/
enum VIDEO_CODEC_TYPE {
/**
- * 0: (Default) SDK will automatically adjust the codec type according to country and region or real-time network state and other relevant data information.
- * Also if network state is changed, SDK may change codec automatically to get the best QOE.
- * We recommend use this option.
+ * 0: (Default) Unspecified codec format. The SDK automatically matches the appropriate codec format
+ * based on the current video stream's resolution and device performance.
*/
VIDEO_CODEC_NONE = 0,
/**
@@ -1247,23 +1298,26 @@ enum VIDEO_CODEC_TYPE {
};
/**
- * Camera focal length type.
+ * @brief The camera focal length types.
+ *
+ * @note This enumeration class applies to Android and iOS only.
+ *
*/
enum CAMERA_FOCAL_LENGTH_TYPE {
/**
- * By default, there are no wide-angle and ultra-wide-angle properties.
+ * 0: (Default) Standard lens.
*/
CAMERA_FOCAL_LENGTH_DEFAULT = 0,
/**
- * Lens with focal length from 24mm to 35mm.
+ * 1: Wide-angle lens.
*/
CAMERA_FOCAL_LENGTH_WIDE_ANGLE = 1,
/**
- * Lens with focal length of less than 24mm.
+ * 2: Ultra-wide-angle lens.
*/
CAMERA_FOCAL_LENGTH_ULTRA_WIDE = 2,
/**
- * Telephoto lens.
+ * 3: (For iOS only) Telephoto lens.
*/
CAMERA_FOCAL_LENGTH_TELEPHOTO = 3,
};
@@ -1361,7 +1415,7 @@ struct SenderOptions {
};
/**
- * Audio codec types.
+ * @brief The codec type of audio.
*/
enum AUDIO_CODEC_TYPE {
/**
@@ -1385,11 +1439,11 @@ enum AUDIO_CODEC_TYPE {
/** 7: AAC. */
// AUDIO_CODEC_AAC = 7,
/**
- * 8: AAC LC.
+ * 8: LC-AAC.
*/
AUDIO_CODEC_AACLC = 8,
/**
- * 9: HE AAC.
+ * 9: HE-AAC.
*/
AUDIO_CODEC_HEAAC = 9,
/**
@@ -1411,77 +1465,77 @@ enum AUDIO_CODEC_TYPE {
};
/**
- * Audio encoding types of the audio encoded frame observer.
+ * @brief Audio encoding type.
*/
enum AUDIO_ENCODING_TYPE {
/**
- * AAC encoding format, 16000 Hz sampling rate, bass quality. A file with an audio duration of 10
- * minutes is approximately 1.2 MB after encoding.
+ * 0x010101: AAC encoding format, 16000 Hz sampling rate, bass quality. A file with an audio
+ * duration of 10 minutes is approximately 1.2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_16000_LOW = 0x010101,
/**
- * AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio
+ * 0x010102: AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio
* duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_16000_MEDIUM = 0x010102,
/**
- * AAC encoding format, 32000 Hz sampling rate, bass quality. A file with an audio duration of 10
- * minutes is approximately 1.2 MB after encoding.
+ * 0x010201: AAC encoding format, 32000 Hz sampling rate, bass quality. A file with an audio
+ * duration of 10 minutes is approximately 1.2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_32000_LOW = 0x010201,
/**
- * AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio
+ * 0x010202: AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio
* duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_32000_MEDIUM = 0x010202,
/**
- * AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration
- * of 10 minutes is approximately 3.5 MB after encoding.
+ * 0x010203: AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio
+ * duration of 10 minutes is approximately 3.5 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_32000_HIGH = 0x010203,
/**
- * AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio
+ * 0x010302: AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio
* duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_48000_MEDIUM = 0x010302,
/**
- * AAC encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration
- * of 10 minutes is approximately 3.5 MB after encoding.
+ * 0x010303: AAC encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio
+ * duration of 10 minutes is approximately 3.5 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_48000_HIGH = 0x010303,
/**
- * OPUS encoding format, 16000 Hz sampling rate, bass quality. A file with an audio duration of 10
- * minutes is approximately 2 MB after encoding.
+ * 0x020101: OPUS encoding format, 16000 Hz sampling rate, bass quality. A file with an audio
+ * duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_OPUS_16000_LOW = 0x020101,
/**
- * OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio
- * duration of 10 minutes is approximately 2 MB after encoding.
+ * 0x020102: OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an
+ * audio duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_OPUS_16000_MEDIUM = 0x020102,
/**
- * OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio
- * duration of 10 minutes is approximately 2 MB after encoding.
+ * 0x020302: OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an
+ * audio duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM = 0x020302,
/**
- * OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration
- * of 10 minutes is approximately 3.5 MB after encoding.
+ * 0x020303: OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio
+ * duration of 10 minutes is approximately 3.5 MB after encoding.
*/
AUDIO_ENCODING_TYPE_OPUS_48000_HIGH = 0x020303,
};
/**
- * The adaptation mode of the watermark.
+ * @brief The adaptation mode of the watermark.
*/
enum WATERMARK_FIT_MODE {
/**
- * Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in
- * #WatermarkOptions. The settings in `WatermarkRatio` are invalid.
+ * 0: Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in
+ * `WatermarkOptions`. The settings in `WatermarkRatio` are invalid.
*/
FIT_MODE_COVER_POSITION = 0,
/**
- * Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and
+ * 1: Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and
* `positionInPortraitMode` in `WatermarkOptions` are invalid.
*/
FIT_MODE_USE_IMAGE_RATIO = 1,
@@ -1508,7 +1562,7 @@ struct EncodedAudioFrameAdvancedSettings {
};
/**
- * The definition of the EncodedAudioFrameInfo struct.
+ * @brief Audio information after encoding.
*/
struct EncodedAudioFrameInfo {
EncodedAudioFrameInfo()
@@ -1526,25 +1580,23 @@ struct EncodedAudioFrameInfo {
advancedSettings(rhs.advancedSettings),
captureTimeMs(rhs.captureTimeMs) {}
/**
- * The audio codec: #AUDIO_CODEC_TYPE.
+ * Audio Codec type: `AUDIO_CODEC_TYPE`.
*/
AUDIO_CODEC_TYPE codec;
/**
- * The sample rate (Hz) of the audio frame.
+ * Audio sample rate (Hz).
*/
int sampleRateHz;
/**
- * The number of samples per audio channel.
- *
- * If this value is not set, it is 1024 for AAC, or 960 for OPUS by default.
+ * The number of audio samples per channel.
*/
int samplesPerChannel;
/**
- * The number of audio channels of the audio frame.
+ * The number of audio channels.
*/
int numberOfChannels;
/**
- * The advanced settings of the audio frame.
+ * This function is currently not supported.
*/
EncodedAudioFrameAdvancedSettings advancedSettings;
@@ -1603,78 +1655,86 @@ enum H264PacketizeMode {
};
/**
- * Video stream types.
+ * @brief The type of video streams.
*/
enum VIDEO_STREAM_TYPE {
/**
- * 0: The high-quality video stream, which has the highest resolution and bitrate.
+ * 0: High-quality video stream, that is, a video stream with the highest resolution and bitrate.
*/
VIDEO_STREAM_HIGH = 0,
/**
- * 1: The low-quality video stream, which has the lowest resolution and bitrate.
+ * 1: Low-quality video stream, that is, a video stream with the lowest resolution and bitrate.
*/
VIDEO_STREAM_LOW = 1,
/**
- * 4: The video stream of layer_1, which has a lower resolution and bitrate than VIDEO_STREAM_HIGH.
+ * 4: Video stream layer 1. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_HIGH.
*/
VIDEO_STREAM_LAYER_1 = 4,
/**
- * 5: The video stream of layer_2, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_1.
+ * 5: Video stream layer 2. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_1.
*/
VIDEO_STREAM_LAYER_2 = 5,
/**
- * 6: The video stream of layer_3, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_2.
+ * 6: Video stream layer 3. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_2.
*/
VIDEO_STREAM_LAYER_3 = 6,
/**
- * 7: The video stream of layer_4, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_3.
+ * 7: Video stream layer 4. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_3.
*/
VIDEO_STREAM_LAYER_4 = 7,
/**
- * 8: The video stream of layer_5, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_4.
+ * 8: Video stream layer 5. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_4.
*/
VIDEO_STREAM_LAYER_5 = 8,
/**
- * 9: The video stream of layer_6, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_5.
+ * 9: Video stream layer 6. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_5.
*/
VIDEO_STREAM_LAYER_6 = 9,
};
+/**
+ * @brief Video subscription options.
+ */
struct VideoSubscriptionOptions {
/**
- * The type of the video stream to subscribe to.
- *
- * The default value is `VIDEO_STREAM_HIGH`, which means the high-quality
- * video stream.
+ * The video stream type that you want to subscribe to. The default value is VIDEO_STREAM_HIGH,
+ * indicating that the high-quality video streams are subscribed. See `VIDEO_STREAM_TYPE`.
*/
Optional type;
/**
- * Whether to subscribe to encoded video data only:
- * - `true`: Subscribe to encoded video data only.
- * - `false`: (Default) Subscribe to decoded video data.
+ * Whether to subscribe to encoded video frames only:
+ * - `true`: Subscribe to the encoded video data (structured data) only; the SDK does not decode or
+ * render raw video data.
+ * - `false`: (Default) Subscribe to both raw video data and encoded video data.
*/
Optional encodedFrameOnly;
VideoSubscriptionOptions() {}
};
-/** The maximum length of the user account.
+/**
+ * @brief The maximum length of the user account.
*/
enum MAX_USER_ACCOUNT_LENGTH_TYPE {
- /** The maximum length of the user account is 256 bytes.
+ /**
+ * The maximum length of the user account is 256 bytes.
*/
MAX_USER_ACCOUNT_LENGTH = 256
};
/**
- * The definition of the EncodedVideoFrameInfo struct, which contains the information of the
- * external encoded video frame.
+ * @brief Information about externally encoded video frames.
*/
struct EncodedVideoFrameInfo {
EncodedVideoFrameInfo()
- : uid(0),
- codecType(VIDEO_CODEC_H264),
+ : codecType(VIDEO_CODEC_H264),
width(0),
height(0),
framesPerSecond(0),
@@ -1687,8 +1747,7 @@ struct EncodedVideoFrameInfo {
presentationMs(-1) {}
EncodedVideoFrameInfo(const EncodedVideoFrameInfo& rhs)
- : uid(rhs.uid),
- codecType(rhs.codecType),
+ : codecType(rhs.codecType),
width(rhs.width),
height(rhs.height),
framesPerSecond(rhs.framesPerSecond),
@@ -1702,7 +1761,6 @@ struct EncodedVideoFrameInfo {
EncodedVideoFrameInfo& operator=(const EncodedVideoFrameInfo& rhs) {
if (this == &rhs) return *this;
- uid = rhs.uid;
codecType = rhs.codecType;
width = rhs.width;
height = rhs.height;
@@ -1718,51 +1776,47 @@ struct EncodedVideoFrameInfo {
}
/**
- * ID of the user that pushes the the external encoded video frame..
- */
- uid_t uid;
- /**
- * The codec type of the local video stream. See #VIDEO_CODEC_TYPE. The default value is
- * `VIDEO_CODEC_H265 (3)`.
+ * The codec type of the local video stream. See `VIDEO_CODEC_TYPE`. The default value is
+ * `VIDEO_CODEC_H264 (2)`.
*/
VIDEO_CODEC_TYPE codecType;
/**
- * The width (px) of the video frame.
+ * Width (pixel) of the video frame.
*/
int width;
/**
- * The height (px) of the video frame.
+ * Height (pixel) of the video frame.
*/
int height;
/**
* The number of video frames per second.
- * When this parameter is not 0, you can use it to calculate the Unix timestamp of the external
+ * When this parameter is not `0`, you can use it to calculate the Unix timestamp of externally
* encoded video frames.
*/
int framesPerSecond;
/**
- * The video frame type: #VIDEO_FRAME_TYPE.
+ * The video frame type. See `VIDEO_FRAME_TYPE`.
*/
VIDEO_FRAME_TYPE frameType;
/**
- * The rotation information of the video frame: #VIDEO_ORIENTATION.
+ * The rotation information of the video frame. See `VIDEO_ORIENTATION`.
*/
VIDEO_ORIENTATION rotation;
/**
- * The track ID of the video frame.
+ * Reserved for future use.
*/
int trackId; // This can be reserved for multiple video tracks, we need to create different ssrc
// and additional payload for later implementation.
/**
- * This is a input parameter which means the timestamp for capturing the video.
+ * The Unix timestamp (ms) for capturing the external encoded video frames.
*/
int64_t captureTimeMs;
/**
- * The timestamp for decoding the video.
+ * The Unix timestamp (ms) for decoding the external encoded video frames.
*/
int64_t decodeTimeMs;
/**
- * The stream type of video frame.
+ * The type of video streams. See `VIDEO_STREAM_TYPE`.
*/
VIDEO_STREAM_TYPE streamType;
@@ -1771,62 +1825,67 @@ struct EncodedVideoFrameInfo {
};
/**
- * Video compression preference.
+ * @brief Compression preference for video encoding.
*/
enum COMPRESSION_PREFERENCE {
/**
- * (Default) SDK uses compression preference according to setVideoScenario API settings, real-time network state and other relevant data information.
- * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then PREFER_QUALITY is used. If not, then PREFER_LOW_LATENCY is used.
- * Also if network state has changed, SDK may change this parameter between PREFER_QUALITY and PREFER_LOW_LATENCY automatically to get the best QOE.
- * We recommend using this option.
- */
+ * -1: (Default) Automatic mode. The SDK will automatically select PREFER_LOW_LATENCY or
+ * PREFER_QUALITY based on the video scenario you set to achieve the best user experience.
+ */
PREFER_COMPRESSION_AUTO = -1,
/**
- * Prefer low latency, usually used in real-time communication where low latency is the number one priority.
- */
+ * 0: Low latency preference. The SDK compresses video frames to reduce latency. This preference is
+ * suitable for scenarios where smoothness is prioritized and reduced video quality is acceptable.
+ */
PREFER_LOW_LATENCY = 0,
/**
- * Prefer quality in sacrifice of a degree of latency, usually around 30ms ~ 150ms, depends target fps
- */
+ * 1: High quality preference. The SDK compresses video frames while maintaining video quality. This
+ * preference is suitable for scenarios where video quality is prioritized.
+ */
PREFER_QUALITY = 1,
};
/**
- * The video encoder type preference.
+ * @brief Video encoder preference.
*/
enum ENCODING_PREFERENCE {
/**
- *Default .
+ * -1: Adaptive preference. The SDK automatically selects the optimal encoding type for encoding
+ * based on factors such as platform and device type.
*/
PREFER_AUTO = -1,
/**
- * Software encoding.
+ * 0: Software coding preference. The SDK prefers software encoders for video encoding.
*/
PREFER_SOFTWARE = 0,
/**
- * Hardware encoding
+ * 1: Hardware encoding preference. The SDK prefers a hardware encoder for video encoding. When the
+ * device does not support hardware encoding, the SDK automatically uses software encoding and
+ * reports the currently used video encoder type through `hwEncoderAccelerating` in the
+ * `onLocalVideoStats` callback.
*/
PREFER_HARDWARE = 1,
};
/**
- * The definition of the AdvanceOptions struct.
+ * @brief Advanced options for video encoding.
*/
struct AdvanceOptions {
/**
- * The video encoder type preference..
+ * Video encoder preference. See `ENCODING_PREFERENCE`.
*/
ENCODING_PREFERENCE encodingPreference;
/**
- * Video compression preference.
+ * Compression preference for video encoding. See `COMPRESSION_PREFERENCE`.
*/
COMPRESSION_PREFERENCE compressionPreference;
/**
- * Whether to encode and send the alpha data to the remote when alpha data is present.
- * The default value is false.
- */
+ * Whether to encode and send the Alpha data present in the video frame to the remote end:
+ * - `true`: Encode and send Alpha data.
+ * - `false`: (Default) Do not encode and send Alpha data.
+ */
bool encodeAlpha;
AdvanceOptions() : encodingPreference(PREFER_AUTO),
@@ -1848,19 +1907,22 @@ struct AdvanceOptions {
};
/**
- * Video mirror mode types.
+ * @brief Video mirror mode.
*/
enum VIDEO_MIRROR_MODE_TYPE {
/**
- * 0: The mirror mode determined by the SDK.
+ * 0: The SDK determines the mirror mode.
+ * - For the mirror mode of the local video view: If you use a front camera, the SDK enables the
+ * mirror mode by default; if you use a rear camera, the SDK disables the mirror mode by default.
+ * - For the remote user: The mirror mode is disabled by default.
*/
VIDEO_MIRROR_MODE_AUTO = 0,
/**
- * 1: Enable the mirror mode.
+ * 1: Enable mirror mode.
*/
VIDEO_MIRROR_MODE_ENABLED = 1,
/**
- * 2: Disable the mirror mode.
+ * 2: Disable mirror mode.
*/
VIDEO_MIRROR_MODE_DISABLED = 2,
};
@@ -1901,26 +1963,49 @@ enum HDR_CAPABILITY {
HDR_CAPABILITY_SUPPORTED = 1,
};
-/** Supported codec type bit mask. */
+/**
+ * @brief The bit mask of the codec type.
+ */
enum CODEC_CAP_MASK {
- /** 0: No codec support. */
+ /**
+ * (0): The device does not support encoding or decoding.
+ */
CODEC_CAP_MASK_NONE = 0,
- /** bit 1: Hardware decoder support flag. */
+ /**
+ * (1 << 0): The device supports hardware decoding.
+ */
CODEC_CAP_MASK_HW_DEC = 1 << 0,
- /** bit 2: Hardware encoder support flag. */
+ /**
+ * (1 << 1): The device supports hardware encoding.
+ */
CODEC_CAP_MASK_HW_ENC = 1 << 1,
- /** bit 3: Software decoder support flag. */
+ /**
+ * (1 << 2): The device supports software decoding.
+ */
CODEC_CAP_MASK_SW_DEC = 1 << 2,
- /** bit 4: Software encoder support flag. */
+ /**
+ * (1 << 3): The device supports software encoding.
+ */
CODEC_CAP_MASK_SW_ENC = 1 << 3,
};
+/**
+ * @brief The level of the codec capability.
+ */
struct CodecCapLevels {
+ /**
+ * Hardware decoding capability level, which represents the device's ability to perform hardware
+ * decoding on videos of different quality. See `VIDEO_CODEC_CAPABILITY_LEVEL`.
+ */
VIDEO_CODEC_CAPABILITY_LEVEL hwDecodingLevel;
+ /**
+ * Software decoding capability level, which represents the device's ability to perform software
+ * decoding on videos of different quality. See `VIDEO_CODEC_CAPABILITY_LEVEL`.
+ */
VIDEO_CODEC_CAPABILITY_LEVEL swDecodingLevel;
CodecCapLevels()
@@ -1928,138 +2013,103 @@ struct CodecCapLevels {
swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {}
};
-/** The codec support information. */
+/**
+ * @brief The codec capability of the SDK.
+ */
struct CodecCapInfo {
- /** The codec type: #VIDEO_CODEC_TYPE. */
+ /**
+ * The video codec types. See `VIDEO_CODEC_TYPE`.
+ */
VIDEO_CODEC_TYPE codecType;
- /** The codec support flag. */
+ /**
+ * Bit mask of the codec types in SDK. See `CODEC_CAP_MASK`.
+ */
int codecCapMask;
- /** The codec capability level, estimated based on the device hardware.*/
+ /**
+ * Codec capability of the SDK. See `CodecCapLevels`.
+ */
CodecCapLevels codecLevels;
CodecCapInfo() : codecType(VIDEO_CODEC_NONE), codecCapMask(0) {}
};
-/** FocalLengthInfo contains the IDs of the front and rear cameras, along with the wide-angle types.
+/**
+ * @brief Focal length information supported by the camera, including the camera direction and focal
+ * length type.
+ *
+ * @note This struct applies to Android and iOS only.
+ *
*/
struct FocalLengthInfo {
- /** The camera direction. */
+ /**
+ * The camera direction. See `CAMERA_DIRECTION`.
+ */
int cameraDirection;
- /** Camera focal segment type. */
+ /**
+ * The focal length type. See `CAMERA_FOCAL_LENGTH_TYPE`.
+ */
CAMERA_FOCAL_LENGTH_TYPE focalLengthType;
};
/**
- * The definition of the VideoEncoderConfiguration struct.
+ * @brief Video encoder configurations.
*/
struct VideoEncoderConfiguration {
/**
- * The video encoder code type: #VIDEO_CODEC_TYPE.
+ * The codec type of the local video stream. See `VIDEO_CODEC_TYPE`.
*/
VIDEO_CODEC_TYPE codecType;
/**
- * The video dimension: VideoDimensions.
+ * The dimensions of the encoded video (px). See `VideoDimensions`. This parameter measures the
+ * video encoding quality in the format of length × width. The default value is 960 × 540. You can
+ * set a custom value.
*/
VideoDimensions dimensions;
/**
- * The frame rate of the video. You can set it manually, or choose one from #FRAME_RATE.
+ * The frame rate (fps) of the encoding video frame. The default value is 15. See `FRAME_RATE`.
*/
int frameRate;
/**
- * The bitrate (Kbps) of the video.
- *
- * Refer to the **Video Bitrate Table** below and set your bitrate. If you set a bitrate beyond
- * the proper range, the SDK automatically adjusts it to a value within the range. You can also
- * choose from the following options:
- *
- * - #STANDARD_BITRATE: (Recommended) Standard bitrate mode. In this mode, the bitrates differ
- * between the Live Broadcast and Communication profiles:
- * - In the Communication profile, the video bitrate is the same as the base bitrate.
- * - In the Live Broadcast profile, the video bitrate is twice the base bitrate.
- * - #COMPATIBLE_BITRATE: Compatible bitrate mode. The compatible bitrate mode. In this mode, the
- * bitrate stays the same regardless of the profile. If you choose this mode for the Live
- * Broadcast profile, the video frame rate may be lower than the set value.
- *
- * Agora uses different video codecs for different profiles to optimize the user experience. For
- * example, the communication profile prioritizes the smoothness while the live-broadcast profile
- * prioritizes the video quality (a higher bitrate). Therefore, We recommend setting this
- * parameter as #STANDARD_BITRATE.
- *
- * | Resolution | Frame Rate (fps) | Maximum Bitrate (Kbps) |
- * |------------------------|------------------|------------------------|
- * | 120 * 120 | 15 | 150 |
- * | 120 * 160 | 15 | 186 |
- * | 180 * 180 | 15 | 270 |
- * | 180 * 240 | 15 | 336 |
- * | 180 * 320 | 15 | 420 |
- * | 240 * 240 | 15 | 420 |
- * | 240 * 320 | 15 | 522 |
- * | 240 * 424 | 15 | 648 |
- * | 360 * 360 | 15 | 774 |
- * | 360 * 360 | 30 | 1162 |
- * | 360 * 480 | 15 | 966 |
- * | 360 * 480 | 30 | 1407 |
- * | 360 * 640 | 15 | 1200 |
- * | 360 * 640 | 30 | 1696 |
- * | 480 * 480 | 15 | 1200 |
- * | 480 * 480 | 30 | 1696 |
- * | 480 * 640 | 10 | 1164 |
- * | 480 * 640 | 15 | 1445 |
- * | 480 * 640 | 30 | 2041 |
- * | 480 * 848 | 15 | 1735 |
- * | 480 * 848 | 30 | 2445 |
- * | 540 * 960 | 15 | 2029 |
- * | 540 * 960 | 30 | 2852 |
- * | 720 * 960 | 15 | 2443 |
- * | 720 * 960 | 30 | 3434 |
- * | 720 * 1280 | 15 | 2938 |
- * | 720 * 1280 | 30 | 4113 |
- * | 1080 * 1920 | 15 | 4914 |
- * | 1080 * 1920 | 30 | 6819 |
- * | 1080 * 1920 | 60 | 9380 |
- * | 2560 * 1440 | 15 | 7040 |
- * | 2560 * 1440 | 30 | 9700 |
- * | 2560 * 1440 | 60 | 13230 |
- * | 3840 * 2160 | 15 | 11550 |
- * | 3840 * 2160 | 30 | 15726 |
- * | 3840 * 2160 | 60 | 21133 |
+ * The encoding bitrate (Kbps) of the video. This parameter does not need to be set; keeping the
+ * default value `STANDARD_BITRATE` is sufficient. The SDK automatically matches the most suitable
+ * bitrate based on the video resolution and frame rate you have set. For the correspondence between
+ * video resolution and frame rate, see `Video profile`.
+ * - STANDARD_BITRATE (0): (Recommended) Standard bitrate mode.
+ * - COMPATIBLE_BITRATE (-1): Adaptive bitrate mode. In general, Agora suggests that you do not use
+ * this value.
*/
int bitrate;
/**
- * The minimum encoding bitrate (Kbps).
- *
- * The Agora SDK automatically adjusts the encoding bitrate to adapt to the
- * network conditions.
- *
- * Using a value greater than the default value forces the video encoder to
- * output high-quality images but may cause more packet loss and hence
- * sacrifice the smoothness of the video transmission. That said, unless you
- * have special requirements for image quality, Agora does not recommend
- * changing this value.
- *
- * @note
- * This parameter applies to the live-broadcast profile only.
+ * The minimum encoding bitrate (Kbps) of the video.
+ * The SDK automatically adjusts the encoding bitrate to adapt to the network conditions. Using a
+ * value greater than the default value forces the video encoder to output high-quality images but
+ * may cause more packet loss and sacrifice the smoothness of the video transmission. Unless you
+ * have special requirements for image quality, Agora does not recommend changing this value.
+ * @note This parameter only applies to the interactive streaming profile.
*/
int minBitrate;
/**
- * The video orientation mode: #ORIENTATION_MODE.
+ * The orientation mode of the encoded video. See `ORIENTATION_MODE`.
*/
ORIENTATION_MODE orientationMode;
/**
- * The video degradation preference under limited bandwidth: #DEGRADATION_PREFERENCE.
+ * Video degradation preference under limited bandwidth. See `DEGRADATION_PREFERENCE`.
+ * @note When this parameter is set to MAINTAIN_FRAMERATE (1) or MAINTAIN_BALANCED (2),
+ * `orientationMode` needs to be set to ORIENTATION_MODE_ADAPTIVE (0) at the same time, otherwise
+ * the setting will not take effect.
*/
DEGRADATION_PREFERENCE degradationPreference;
/**
- * The mirror mode is disabled by default
- * If mirror_type is set to VIDEO_MIRROR_MODE_ENABLED, then the video frame would be mirrored
- * before encoding.
+ * Sets the mirror mode of the published local video stream. It only affects the video that the
+ * remote user sees. See `VIDEO_MIRROR_MODE_TYPE`.
+ * @note By default, the video is not mirrored.
*/
VIDEO_MIRROR_MODE_TYPE mirrorMode;
/**
- * The advanced options for the video encoder configuration. See AdvanceOptions.
+ * Advanced options for video encoding. See `AdvanceOptions`.
*/
AdvanceOptions advanceOptions;
@@ -2120,26 +2170,34 @@ struct VideoEncoderConfiguration {
};
/**
- * The configurations for the data stream.
+ * @brief The configurations for the data stream.
+ *
+ * @details
+ * The following table shows the SDK behaviors under different parameter settings:
+ * | `syncWithAudio` | `ordered` | SDK behaviors |
+ * | --------------- | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+ * | `false` | `false` | The SDK triggers the `onStreamMessage` callback immediately after the receiver receives a data packet. |
+ * | `true` | `false` | If the data packet delay is within the audio delay, the SDK triggers the `onStreamMessage` callback when the synchronized audio packet is played out. If the data packet delay exceeds the audio delay, the SDK triggers the `onStreamMessage` callback as soon as the data packet is received. |
+ * | `false` | `true` | If the delay of a data packet is less than five seconds, the SDK corrects the order of the data packet. If the delay of a data packet exceeds five seconds, the SDK discards the data packet. |
+ * | `true` | `true` | If the delay of the data packet is within the range of the audio delay, the SDK corrects the order of the data packet. If the delay of a data packet exceeds the audio delay, the SDK discards this data packet. |
+ *
*/
struct DataStreamConfig {
/**
* Whether to synchronize the data packet with the published audio packet.
- * - `true`: Synchronize the data packet with the audio packet.
- * - `false`: Do not synchronize the data packet with the audio packet.
- *
+ * - `true`: Synchronize the data packet with the audio packet. This setting is suitable for special
+ * scenarios such as lyrics synchronization.
+ * - `false`: Do not synchronize the data packet with the audio packet. This setting is suitable for
+ * scenarios where data packets need to arrive at the receiving end immediately.
* When you set the data packet to synchronize with the audio, then if the data packet delay is
* within the audio delay, the SDK triggers the `onStreamMessage` callback when the synchronized
- * audio packet is played out. Do not set this parameter as true if you need the receiver to
- * receive the data packet immediately. Agora recommends that you set this parameter to `true`
- * only when you need to implement specific functions, for example lyric synchronization.
+ * audio packet is played out.
*/
bool syncWithAudio;
/**
* Whether the SDK guarantees that the receiver receives the data in the sent order.
* - `true`: Guarantee that the receiver receives the data in the sent order.
* - `false`: Do not guarantee that the receiver receives the data in the sent order.
- *
* Do not set this parameter as `true` if you need the receiver to receive the data packet
* immediately.
*/
@@ -2147,38 +2205,42 @@ struct DataStreamConfig {
};
/**
- * The definition of SIMULCAST_STREAM_MODE
+ * @brief The mode in which the video stream is sent.
*/
enum SIMULCAST_STREAM_MODE {
- /*
- * disable simulcast stream until receive request for enable simulcast stream by other broadcaster
+ /**
+ * -1: By default, do not send the low-quality video stream until a subscription request for the
+ * low-quality video stream is received from the receiving end, then automatically start sending
+ * low-quality video stream.
*/
AUTO_SIMULCAST_STREAM = -1,
- /*
- * disable simulcast stream
+ /**
+ * 0: Never send low-quality video stream.
*/
DISABLE_SIMULCAST_STREAM = 0,
- /*
- * always enable simulcast stream
+ /**
+ * 1: Always send low-quality video stream.
*/
ENABLE_SIMULCAST_STREAM = 1,
};
/**
- * The configuration of the low-quality video stream.
+ * @brief The configuration of the low-quality video stream.
*/
struct SimulcastStreamConfig {
/**
- * The video frame dimension: VideoDimensions. The default value is 160 × 120.
+ * The video dimension. See `VideoDimensions`. The default value is 50% of the high-quality video
+ * stream.
*/
VideoDimensions dimensions;
/**
- * The video bitrate (Kbps), represented by an instantaneous value. The default value of the log
- * level is 5.
+ * Video bitrate (Kbps). The default value is -1. This parameter does not need to be set. The SDK
+ * automatically matches the most suitable bitrate based on the video resolution and frame rate you
+ * set.
*/
int kBitrate;
/**
- * The capture frame rate (fps) of the local video. The default value is 5.
+ * The frame rate (fps) of the local video. The default value is 5.
*/
int framerate;
SimulcastStreamConfig() : dimensions(160, 120), kBitrate(65), framerate(5) {}
@@ -2189,97 +2251,93 @@ struct SimulcastStreamConfig {
};
/**
- * The configuration of the multi-layer video stream.
+ * @brief Configure video streams of different quality levels.
+ *
* @since v4.6.0
*/
struct SimulcastConfig {
/**
- * The index of multi-layer video stream
+ * @brief Index of video streams of different quality levels.
*/
enum StreamLayerIndex {
/**
- * 0: The video stream of layer_1, which has a lower resolution and bitrate than STREAM_HIGH.
+ * (0): Video stream layer_1, with lower resolution and bitrate than VIDEO_STREAM_HIGH.
*/
STREAM_LAYER_1 = 0,
/**
- * 1: The video stream of layer_2, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_1.
+ * (1): Video stream layer_2, with lower resolution and bitrate than VIDEO_STREAM_LAYER_1.
*/
STREAM_LAYER_2 = 1,
/**
- * 2: The video stream of layer_3, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_2.
+ * (2): Video stream layer_3, with lower resolution and bitrate than VIDEO_STREAM_LAYER_2.
*/
STREAM_LAYER_3 = 2,
/**
- * 3: The video stream of layer_4, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_3.
+ * (3): Video stream layer_4, with lower resolution and bitrate than VIDEO_STREAM_LAYER_3.
*/
STREAM_LAYER_4 = 3,
/**
- * 4: The video stream of layer_5, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_4.
+ * (4): Video stream layer_5, with lower resolution and bitrate than VIDEO_STREAM_LAYER_4.
*/
STREAM_LAYER_5 = 4,
/**
- * 5: The video stream of layer_6, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_5.
+ * (5): Video stream layer_6, with lower resolution and bitrate than VIDEO_STREAM_LAYER_5.
*/
STREAM_LAYER_6 = 5,
/**
- * 6: The low-quality video stream, which has the lowest resolution and bitrate.
+ * (6): Low-quality video stream, with the lowest resolution and bitrate.
*/
STREAM_LOW = 6,
/**
- * 7: Max count of video stream layers
+ * (7): Maximum number of video stream layers.
*/
STREAM_LAYER_COUNT_MAX = 7
};
/**
- * The configuration of a specific layer in the multi-layer video stream.
+ * @brief Configures the parameters of a specific layer in multi-quality video streams.
+ *
+ * @details
+ * Used to configure the resolution, frame rate, and enable status of a specific layer in
+ * multi-quality video streams.
+ *
*/
struct StreamLayerConfig {
/**
- * The video frame dimension. The default value is 0.
+ * Video frame size. Default is 0. See `VideoDimensions`.
*/
VideoDimensions dimensions;
/**
- * The capture frame rate (fps) of the local video. The default value is 0.
+ * Frame rate (fps) of the local video capture. Default is 0.
*/
int framerate;
/**
- * Whether to enable the corresponding layer of video stream. The default value is false.
- * - true: Enable the corresponding layer of video stream
- * - false: (Default) Disable the corresponding layer of video stream
+ * Whether to enable the video stream for the corresponding layer. Default is false.
+ * - `true`: Enables the video stream for the corresponding layer.
+ * - `false`: (Default) Disables the video stream for the corresponding layer.
*/
bool enable;
StreamLayerConfig() : dimensions(0, 0), framerate(0), enable(false) {}
};
/**
- * The array of StreamLayerConfig, which contains STREAM_LAYER_COUNT_MAX layers of video stream at most.
+ * Configurations for multi-layer streaming: `StreamLayerConfig`.
*/
StreamLayerConfig configs[STREAM_LAYER_COUNT_MAX];
/**
- * Whether to enable fallback publishing. When set to true, it allows dynamic disabling of multiple streams when the performance or network of the publishing end is poor. The order of disabling is layer1->layer6.
- * - true: Enable fallback publishing.
- * - false: (Default) Disable fallback publishing.
- *
- * @details The system guarantees that even under poor network conditions or limited
- * device capabilities, at least the major stream and lowest-resolution minor stream
- * will be maintained for basic video continuity.
- *
+ * Whether to enable fallback publishing:
+ * - `true`: Enable fallback publishing. When the device performance or network is poor at the
+ * publishing end, the SDK will dynamically disable multiple video streams of different quality
+ * levels, from layer1 to layer6. At least the video streams of the highest and lowest quality are
+ * retained to maintain basic video continuity.
+ * - `false`: (Default) Disable fallback publishing.
*/
bool publish_fallback_enable;
- /**
- * Whether to enable on-demand publishing. When set to true, a simulcast layer will only be published
- * when there are subscribers requesting that layer.
- * - true: (Default) Enable on-demand publishing.
- * - false: Disable on-demand publishing. All enabled simulcast layers will be published regardless
- * of subscription status.
- */
- bool publish_on_demand;
- SimulcastConfig(): publish_fallback_enable(false), publish_on_demand(true) {}
+ SimulcastConfig(): publish_fallback_enable(false) {}
};
/**
- * The location of the target area relative to the screen or window. If you do not set this parameter,
- * the SDK selects the whole screen or window.
+ * @brief The location of the target area relative to the screen or window. If you do not set this
+ * parameter, the SDK selects the whole screen or window.
*/
struct Rectangle {
/**
@@ -2291,11 +2349,11 @@ struct Rectangle {
*/
int y;
/**
- * The width of the region.
+ * The width of the target area.
*/
int width;
/**
- * The height of the region.
+ * The height of the target area.
*/
int height;
@@ -2304,26 +2362,28 @@ struct Rectangle {
};
/**
- * The position and size of the watermark on the screen.
+ * @brief The position and size of the watermark on the screen.
*
+ * @details
* The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and
* `widthRatio`:
- * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark, which
- * determines the distance from the upper left corner of the watermark to the upper left corner of
- * the screen. The `widthRatio` determines the width of the watermark.
+ * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark,
+ * which determines the distance from the upper left corner of the watermark to the upper left
+ * corner of the screen.
+ * - The `widthRatio` determines the width of the watermark.
+ *
*/
struct WatermarkRatio {
/**
* The x-coordinate of the upper left corner of the watermark. The horizontal position relative to
- * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is
- * the upper left corner of the watermark. The value range is [0.0,1.0], and the default value is
- * 0.
+ * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is the
+ * upper left corner of the watermark. The value range is [0.0,1.0], and the default value is 0.
*/
float xRatio;
/**
- * The y-coordinate of the upper left corner of the watermark. The vertical position relative to
- * the origin, where the upper left corner of the screen is the origin, and the y-coordinate is
- * the upper left corner of the screen. The value range is [0.0,1.0], and the default value is 0.
+ * The y-coordinate of the upper left corner of the watermark. The vertical position relative to the
+ * origin, where the upper left corner of the screen is the origin, and the y-coordinate is the
+ * upper left corner of the screen. The value range is [0.0,1.0], and the default value is 0.
*/
float yRatio;
/**
@@ -2339,36 +2399,40 @@ struct WatermarkRatio {
};
/**
- * Configurations of the watermark image.
+ * @brief Watermark image configurations.
+ *
+ * @details
+ * Configuration options for setting the watermark image to be added.
+ *
*/
struct WatermarkOptions {
/**
- * Whether or not the watermark image is visible in the local video preview:
- * - true: (Default) The watermark image is visible in preview.
- * - false: The watermark image is not visible in preview.
+ * Whether the watermark is visible in the local preview view:
+ * - `true`: (Default) The watermark is visible in the local preview view.
+ * - `false`: The watermark is not visible in the local preview view.
*/
bool visibleInPreview;
/**
- * When the adaptation mode of the watermark is `FIT_MODE_COVER_POSITION`, it is used to set the
- * area of the watermark image in landscape mode. See #FIT_MODE_COVER_POSITION for details.
+ * When the adaptation mode of the watermark is FIT_MODE_COVER_POSITION, it is used to set the area
+ * of the watermark image in landscape mode. See `Rectangle`.
*/
Rectangle positionInLandscapeMode;
/**
- * When the adaptation mode of the watermark is `FIT_MODE_COVER_POSITION`, it is used to set the
- * area of the watermark image in portrait mode. See #FIT_MODE_COVER_POSITION for details.
+ * When the adaptation mode of the watermark is FIT_MODE_COVER_POSITION, it is used to set the area
+ * of the watermark image in portrait mode. See `Rectangle`.
*/
Rectangle positionInPortraitMode;
/**
- * When the watermark adaptation mode is `FIT_MODE_USE_IMAGE_RATIO`, this parameter is used to set
- * the watermark coordinates. See WatermarkRatio for details.
+ * When the watermark adaptation mode is FIT_MODE_USE_IMAGE_RATIO, this parameter is used to set the
+ * watermark coordinates. See `WatermarkRatio`.
*/
WatermarkRatio watermarkRatio;
/**
- * The adaptation mode of the watermark. See #WATERMARK_FIT_MODE for details.
+ * The adaptation mode of the watermark. See `WATERMARK_FIT_MODE`.
*/
WATERMARK_FIT_MODE mode;
/**
- * The z-order of the watermark image. The default value is 0.
+ * Layer order of the watermark image. The default value is 0.
*/
int zOrder;
@@ -2381,17 +2445,17 @@ struct WatermarkOptions {
};
/**
- * @brief The source type of the watermark.
- *
+ * @brief Type of watermark source.
+ *
* @since 4.6.0
*/
enum WATERMARK_SOURCE_TYPE {
/**
- * 0: The watermark source is an image.
+ * (0): The watermark source is an image.
*/
IMAGE = 0,
/**
- * 1: The watermark source is a buffer.
+ * (1): The watermark source is a buffer.
*/
BUFFER = 1,
/**
@@ -2476,34 +2540,38 @@ struct WatermarkLiteral {
};
/**
- * @brief Defines the configuration for a buffer watermark.
+ * @brief Configures the format, size, and pixel buffer of the watermark image.
*
* @since 4.6.0
+ *
+ * @details
+ * Defines the buffer data structure of the watermark image, including image width, height, format,
+ * length, and image data buffer.
+ *
*/
struct WatermarkBuffer {
/**
- * The width of the watermark buffer.
+ * Width of the watermark buffer, in pixels.
*/
int width;
/**
- * The height of the watermark buffer.
+ * Height of the watermark buffer, in pixels.
*/
int height;
/**
- * The length of the watermark buffer.
+ * Length of the watermark buffer, in bytes.
*/
int length;
/**
- * The format of the watermark buffer. The default value is #VIDEO_PIXEL_I420.
- * Currently supports: #VIDEO_PIXEL_I420, #VIDEO_PIXEL_RGBA, #VIDEO_PIXEL_BGRA, and #VIDEO_PIXEL_NV21.
+ * Format of the watermark buffer. See `VIDEO_PIXEL_FORMAT`. Default is VIDEO_PIXEL_I420. Currently
+ * supported formats include: VIDEO_PIXEL_I420, VIDEO_PIXEL_RGBA, VIDEO_PIXEL_BGRA, and
+ * VIDEO_PIXEL_NV21.
*/
media::base::VIDEO_PIXEL_FORMAT format;
/**
- * The buffer data of the watermark.
- *
- * @note If used asynchronously, copy the buffer to memory that will not be released.
+ * Buffer data of the watermark.
*/
const uint8_t* buffer;
@@ -2511,22 +2579,22 @@ struct WatermarkBuffer {
};
/**
- * @brief Defines the configuration for a watermark.
+ * @brief Used to configure watermark-related information.
*
* @since 4.6.0
*/
struct WatermarkConfig {
/**
- * The unique identifier of the watermark. It is recommended to use a UUID.
+ * Unique identifier for the watermark. It is recommended to use a UUID.
*/
const char* id;
/**
- * The watermark source type. See #WATERMARK_SOURCE_TYPE for details.
+ * Type of the watermark. See `WATERMARK_SOURCE_TYPE`.
*/
WATERMARK_SOURCE_TYPE type;
union {
/**
- * The watermark buffer. See WatermarkBuffer.
+ * Buffer of the watermark. See `WatermarkBuffer`.
*/
WatermarkBuffer buffer;
/**
@@ -2542,15 +2610,13 @@ struct WatermarkConfig {
*/
WatermarkLiteral literal;
/**
- * The URL of the image file for the watermark. The default value is NULL.
- *
- * @note If used asynchronously, copy the URL to memory that will not be released.
+ * URL of the watermark image file. Default value is NULL.
*/
const char* imageUrl;
};
/**
- * The options of the watermark. See WatermarkOptions.
+ * Options for the watermark. See `WatermarkOptions`.
*/
WatermarkOptions options;
@@ -2558,7 +2624,7 @@ struct WatermarkConfig {
};
/**
- * @brief Defines how data is transmitted across multiple network paths.
+ * @brief The transmission mode of data over multiple network paths.
*
* @since 4.6.0
*/
@@ -2568,51 +2634,52 @@ enum MultipathMode {
*/
Duplicate= 0,
/**
- * Dynamic mode, the data is transmitted only over the path that the internal algorithm determines to be optimal for transmission quality.
- */
+ * (1): Dynamic transmission mode. The SDK dynamically selects the optimal path for data
+ * transmission based on the current network conditions to improve transmission performance.
+ */
Dynamic
};
/**
- * @brief Defines the types of network paths used in multipath transmission.
+ * @brief Network path types used in multipath transmission.
*
* @since 4.6.0
- */
+ */
enum MultipathType {
/**
- * The local area network (LAN) path.
+ * (0): Local Area Network (LAN) path.
*/
LAN = 0,
/**
- * The Wi-Fi path.
+ * (1): Wi-Fi path.
*/
WIFI,
/**
- * The mobile network path.
+ * (2): Mobile network path.
*/
Mobile,
/**
- * An unknown or unspecified network path.
+ * (99): Unknown or unspecified network path.
*/
Unknown = 99
};
/**
- * @brief Contains statistics for a specific network path in multipath transmission.
+ * @brief Statistical information about a specific network path.
*
* @since 4.6.0
*/
struct PathStats {
/**
- * The type of the path.
+ * Types of network path. See `MultipathType`.
*/
MultipathType type;
/**
- * The transmission bitrate of the path.
+ * The transmission bitrate of the path in Kbps.
*/
int txKBitRate;
/**
- * The receiving bitrate of the path.
+ * The receiving bitrate of the path in Kbps.
*/
int rxKBitRate;
PathStats() : type(Unknown), txKBitRate(0), rxKBitRate(0) {}
@@ -2620,41 +2687,41 @@ struct PathStats {
};
/**
- * @brief Aggregates statistics for all network paths used in multipath transmission.
+ * @brief Aggregates statistics of each network path in multipath transmission.
*
* @since 4.6.0
*/
struct MultipathStats {
/**
- * The number of bytes transmitted over the LAN path.
+ * The total number of bytes sent over the LAN path.
*/
uint32_t lanTxBytes;
/**
- * The number of bytes received over the LAN path.
+ * The total number of bytes received over the LAN path.
*/
uint32_t lanRxBytes;
/**
- * The number of bytes transmitted over the Wi-Fi path.
+ * The total number of bytes sent over the Wi-Fi path.
*/
uint32_t wifiTxBytes;
/**
- * The number of bytes received over the Wi-Fi path.
+ * The total number of bytes received over the Wi-Fi path.
*/
uint32_t wifiRxBytes;
/**
- * The number of bytes transmitted over the mobile network path.
+ * The total number of bytes sent over the mobile network path.
*/
uint32_t mobileTxBytes;
/**
- * The number of bytes received over the mobile network path.
+ * The total number of bytes received over the mobile network path.
*/
uint32_t mobileRxBytes;
/**
- * The number of active paths.
+ * The number of active transmission paths.
*/
int activePathNum;
/**
- * “An array of statistics for each active path.
+ * An array of statistics for each active transmission path. See `PathStats`.
*/
const PathStats* pathStats;
MultipathStats()
@@ -2669,63 +2736,63 @@ struct MultipathStats {
};
/**
- * The definition of the RtcStats struct.
+ * @brief Statistics of a call session.
*/
struct RtcStats {
/**
- * The call duration (s), represented by an aggregate value.
+ * Call duration of the local user in seconds, represented by an aggregate value.
*/
unsigned int duration;
/**
- * The total number of bytes transmitted, represented by an aggregate value.
+ * The number of bytes sent.
*/
unsigned int txBytes;
/**
- * The total number of bytes received, represented by an aggregate value.
+ * The number of bytes received.
*/
unsigned int rxBytes;
/**
- * The total number of audio bytes sent (bytes), represented by an aggregate value.
+ * The total number of audio bytes sent, represented by an aggregate value.
*/
unsigned int txAudioBytes;
/**
- * The total number of video bytes sent (bytes), represented by an aggregate value.
+ * The total number of video bytes sent, represented by an aggregate value.
*/
unsigned int txVideoBytes;
/**
- * The total number of audio bytes received (bytes), represented by an aggregate value.
+ * The total number of audio bytes received, represented by an aggregate value.
*/
unsigned int rxAudioBytes;
/**
- * The total number of video bytes received (bytes), represented by an aggregate value.
+ * The total number of video bytes received, represented by an aggregate value.
*/
unsigned int rxVideoBytes;
/**
- * The transmission bitrate (Kbps), represented by an instantaneous value.
+ * The actual bitrate (Kbps) while sending the local video stream.
*/
unsigned short txKBitRate;
/**
- * The receiving bitrate (Kbps), represented by an instantaneous value.
+ * The receiving bitrate (Kbps).
*/
unsigned short rxKBitRate;
/**
- * Audio receiving bitrate (Kbps), represented by an instantaneous value.
+ * The bitrate (Kbps) of receiving the audio.
*/
unsigned short rxAudioKBitRate;
/**
- * The audio transmission bitrate (Kbps), represented by an instantaneous value.
+ * The bitrate (Kbps) of sending the audio packet.
*/
unsigned short txAudioKBitRate;
/**
- * The video receive bitrate (Kbps), represented by an instantaneous value.
+ * The bitrate (Kbps) of receiving the video.
*/
unsigned short rxVideoKBitRate;
/**
- * The video transmission bitrate (Kbps), represented by an instantaneous value.
+ * The bitrate (Kbps) of sending the video.
*/
unsigned short txVideoKBitRate;
/**
- * The VOS client-server latency (ms).
+ * The client-to-server delay (milliseconds).
*/
unsigned short lastmileDelay;
/**
@@ -2733,49 +2800,49 @@ struct RtcStats {
*/
unsigned int userCount;
/**
- * The app CPU usage (%).
+ * Application CPU usage (%).
* @note
* - The value of `cpuAppUsage` is always reported as 0 in the `onLeaveChannel` callback.
- * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system
- * limitations.
+ * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations.
*/
double cpuAppUsage;
/**
* The system CPU usage (%).
- *
* For Windows, in the multi-kernel environment, this member represents the average CPU usage. The
* value = (100 - System Idle Progress in Task Manager)/100.
* @note
* - The value of `cpuTotalUsage` is always reported as 0 in the `onLeaveChannel` callback.
- * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system
- * limitations.
+ * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations.
*/
double cpuTotalUsage;
/**
- * The round-trip time delay from the client to the local router.
- * @note On Android, to get `gatewayRtt`, ensure that you add the
- * `android.permission.ACCESS_WIFI_STATE` permission after `` in the
- * `AndroidManifest.xml` file in your project.
+ * The round-trip time delay (ms) from the client to the local router.
+ * @note
+ * This property is disabled on devices running iOS 14 or later, and enabled on devices running
+ * versions earlier than iOS 14 by default.
+ * To enable this property on devices running iOS 14 or later, contact `technical support`.
+ * On Android, to get `gatewayRtt`, ensure that you add the `android.permission.ACCESS_WIFI_STATE`
+ * permission after `` in the `AndroidManifest.xml` file in your project.
*/
int gatewayRtt;
/**
- * The memory usage ratio of the app (%).
+ * The memory ratio occupied by the app (%).
* @note This value is for reference only. Due to system limitations, you may not get this value.
*/
double memoryAppUsageRatio;
/**
- * The memory usage ratio of the system (%).
+ * The memory occupied by the system (%).
* @note This value is for reference only. Due to system limitations, you may not get this value.
*/
double memoryTotalUsageRatio;
/**
- * The memory usage of the app (KB).
+ * The memory size occupied by the app (KB).
* @note This value is for reference only. Due to system limitations, you may not get this value.
*/
int memoryAppUsageInKbytes;
/**
- * The time elapsed from the when the app starts connecting to an Agora channel
- * to when the connection is established. 0 indicates that this member does not apply.
+ * The duration (ms) between the SDK starts connecting and the connection is established. If the
+ * value reported is 0, it means invalid.
*/
int connectTimeMs;
/**
@@ -2824,11 +2891,13 @@ struct RtcStats {
*/
int firstVideoKeyFrameRenderedDurationAfterUnmute;
/**
- * The packet loss rate of sender(broadcaster).
+ * The packet loss rate (%) from the client to the Agora server before applying the anti-packet-loss
+ * algorithm.
*/
int txPacketLossRate;
/**
- * The packet loss rate of receiver(audience).
+ * The packet loss rate (%) from the Agora server to the client before applying the anti-packet-loss
+ * algorithm.
*/
int rxPacketLossRate;
/**
@@ -2876,41 +2945,41 @@ struct RtcStats {
};
/**
- * User role types.
+ * @brief The user role in the interactive live streaming.
*/
enum CLIENT_ROLE_TYPE {
/**
- * 1: Broadcaster. A broadcaster can both send and receive streams.
+ * 1: Host. A host can both send and receive streams.
*/
CLIENT_ROLE_BROADCASTER = 1,
/**
- * 2: Audience. An audience member can only receive streams.
+ * 2: (Default) Audience. An audience member can only receive streams.
*/
CLIENT_ROLE_AUDIENCE = 2,
};
/**
- * Quality change of the local video in terms of target frame rate and target bit rate since last
- * count.
+ * @brief Quality change of the local video in terms of target frame rate and target bit rate since
+ * last count.
*/
enum QUALITY_ADAPT_INDICATION {
/**
- * 0: The quality of the local video stays the same.
+ * 0: The local video quality stays the same.
*/
ADAPT_NONE = 0,
/**
- * 1: The quality improves because the network bandwidth increases.
+ * 1: The local video quality improves because the network bandwidth increases.
*/
ADAPT_UP_BANDWIDTH = 1,
/**
- * 2: The quality worsens because the network bandwidth decreases.
+ * 2: The local video quality deteriorates because the network bandwidth decreases.
*/
ADAPT_DOWN_BANDWIDTH = 2,
};
/**
- * The latency level of an audience member in interactive live streaming. This enum takes effect
- * only when the user role is set to `CLIENT_ROLE_AUDIENCE`.
+ * @brief The latency level of an audience member in interactive live streaming. This enum takes
+ * effect only when the user role is set to CLIENT_ROLE_AUDIENCE.
*/
enum AUDIENCE_LATENCY_LEVEL_TYPE {
/**
@@ -2918,13 +2987,13 @@ enum AUDIENCE_LATENCY_LEVEL_TYPE {
*/
AUDIENCE_LATENCY_LEVEL_LOW_LATENCY = 1,
/**
- * 2: Ultra low latency.
+ * 2: (Default) Ultra low latency.
*/
AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY = 2,
};
/**
- * The detailed options of a user.
+ * @brief Setting of user role properties.
*/
struct ClientRoleOptions {
/**
@@ -2937,21 +3006,25 @@ struct ClientRoleOptions {
};
/**
- * Quality of experience (QoE) of the local user when receiving a remote audio stream.
+ * @brief The Quality of Experience (QoE) of the local user when receiving a remote audio stream.
*/
enum EXPERIENCE_QUALITY_TYPE {
- /** 0: QoE of the local user is good. */
+ /**
+ * 0: The QoE of the local user is good.
+ */
EXPERIENCE_QUALITY_GOOD = 0,
- /** 1: QoE of the local user is poor. */
+ /**
+ * 1: The QoE of the local user is poor.
+ */
EXPERIENCE_QUALITY_BAD = 1,
};
/**
- * Reasons why the QoE of the local user when receiving a remote audio stream is poor.
+ * @brief Reasons why the QoE of the local user when receiving a remote audio stream is poor.
*/
enum EXPERIENCE_POOR_REASON {
/**
- * 0: No reason, indicating good QoE of the local user.
+ * 0: No reason, indicating a good QoE of the local user.
*/
EXPERIENCE_REASON_NONE = 0,
/**
@@ -2967,42 +3040,47 @@ enum EXPERIENCE_POOR_REASON {
*/
WIRELESS_SIGNAL_POOR = 4,
/**
- * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each
- * other. As a result, audio transmission quality is undermined.
+ * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each other.
+ * As a result, audio transmission quality is undermined.
*/
WIFI_BLUETOOTH_COEXIST = 8,
};
/**
- * Audio AINS mode
+ * @brief AI noise suppression modes.
*/
enum AUDIO_AINS_MODE {
/**
- * AINS mode with soft suppression level.
+ * 0: (Default) Balance mode. This mode allows for a balanced performance on noise suppression and
+ * time delay.
*/
AINS_MODE_BALANCED = 0,
/**
- * AINS mode with high suppression level.
+ * 1: Aggressive mode. In scenarios where high performance on noise suppression is required, such as
+ * live streaming outdoor events, this mode reduces noise more dramatically, but may sometimes
+ * affect the original character of the audio.
*/
AINS_MODE_AGGRESSIVE = 1,
/**
- * AINS mode with high suppression level and ultra-low-latency
+ * 2: Aggressive mode with low latency. The noise suppression delay of this mode is about only half
+ * of that of the balance and aggressive modes. It is suitable for scenarios that have high
+ * requirements on noise suppression with low latency, such as singing together online in real time.
*/
AINS_MODE_ULTRALOWLATENCY = 2
};
/**
- * Audio profile types.
+ * @brief The audio profile.
*/
enum AUDIO_PROFILE_TYPE {
/**
* 0: The default audio profile.
- * - For the Communication profile:
+ * - For the interactive streaming profile: A sample rate of 48 kHz, music encoding, mono, and a
+ * bitrate of up to 64 Kbps.
+ * - For the communication profile:
* - Windows: A sample rate of 16 kHz, audio encoding, mono, and a bitrate of up to 16 Kbps.
* - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18
- * Kbps. of up to 16 Kbps.
- * - For the Live-broadcast profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate
- * of up to 64 Kbps.
+ * Kbps.
*/
AUDIO_PROFILE_DEFAULT = 0,
/**
@@ -3014,10 +3092,9 @@ enum AUDIO_PROFILE_TYPE {
*/
AUDIO_PROFILE_MUSIC_STANDARD = 2,
/**
- * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps.
- *
- * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set
- * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`.
+ * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps. To implement
+ * stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels`
+ * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`.
*/
AUDIO_PROFILE_MUSIC_STANDARD_STEREO = 3,
/**
@@ -3025,38 +3102,37 @@ enum AUDIO_PROFILE_TYPE {
*/
AUDIO_PROFILE_MUSIC_HIGH_QUALITY = 4,
/**
- * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps.
- *
- * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set
- * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`.
+ * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps. To implement
+ * stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels`
+ * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`.
*/
AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO = 5,
/**
- * 6: A sample rate of 16 kHz, audio encoding, mono, and Acoustic Echo Cancellation (AES) enabled.
+ * 6: A sample rate of 16 kHz, audio encoding, mono, and Acoustic Echo Cancellation (AEC) enabled.
*/
AUDIO_PROFILE_IOT = 6,
+ /**
+ * Enumerator boundary.
+ */
AUDIO_PROFILE_NUM = 7
};
/**
- * The audio scenario.
+ * @brief The audio scenarios.
*/
enum AUDIO_SCENARIO_TYPE {
/**
- * 0: Automatic scenario, where the SDK chooses the appropriate audio quality according to the
- * user role and audio route.
+ * 0: (Default) Automatic scenario match, where the SDK chooses the appropriate audio quality
+ * according to the user role and audio route.
*/
AUDIO_SCENARIO_DEFAULT = 0,
/**
- * 3: (Recommended) The live gaming scenario, which needs to enable gaming
- * audio effects in the speaker. Choose this scenario to achieve high-fidelity
- * music playback.
+ * 3: High-quality audio scenario, where users mainly play music. For example, instrument tutoring.
*/
AUDIO_SCENARIO_GAME_STREAMING = 3,
/**
- * 5: The chatroom scenario, which needs to keep recording when setClientRole to audience.
- * Normally, app developer can also use mute api to achieve the same result,
- * and we implement this 'non-orthogonal' behavior only to make API backward compatible.
+ * 5: Chatroom scenario, where users need to frequently switch the user role or mute and unmute the
+ * microphone. For example, education scenarios.
*/
AUDIO_SCENARIO_CHATROOM = 5,
/**
@@ -3065,7 +3141,7 @@ enum AUDIO_SCENARIO_TYPE {
*/
AUDIO_SCENARIO_CHORUS = 7,
/**
- * 8: Meeting
+ * 8: Meeting scenario that mainly contains the human voice.
*/
AUDIO_SCENARIO_MEETING = 8,
/**
@@ -3074,17 +3150,18 @@ enum AUDIO_SCENARIO_TYPE {
*/
AUDIO_SCENARIO_AI_SERVER = 9,
/**
- * 10: AI Client.
+ * 10: AI conversation scenario, which is only applicable to scenarios where the user interacts with
+ * the conversational AI agent created by `Conversational AI Engine`.
*/
AUDIO_SCENARIO_AI_CLIENT = 10,
/**
- * 11: The number of enumerations.
+ * The number of enumerations.
*/
AUDIO_SCENARIO_NUM = 11,
};
/**
- * The format of the video frame.
+ * @brief The format of the video frame.
*/
struct VideoFormat {
OPTIONAL_ENUM_SIZE_T{
@@ -3097,15 +3174,15 @@ struct VideoFormat {
};
/**
- * The width (px) of the video.
+ * The width (px) of the video frame. The default value is 960.
*/
int width; // Number of pixels.
/**
- * The height (px) of the video.
+ * The height (px) of the video frame. The default value is 540.
*/
int height; // Number of pixels.
/**
- * The video frame rate (fps).
+ * The video frame rate (fps). The default value is 15.
*/
int fps;
VideoFormat() : width(FRAME_WIDTH_960), height(FRAME_HEIGHT_540), fps(FRAME_RATE_FPS_15) {}
@@ -3127,73 +3204,114 @@ struct VideoFormat {
};
/**
- * Video content hints.
+ * @brief The content hint for screen sharing.
*/
enum VIDEO_CONTENT_HINT {
/**
- * (Default) No content hint. In this case, the SDK balances smoothness with sharpness.
+ * (Default) No content hint.
*/
CONTENT_HINT_NONE,
/**
- * Choose this option if you prefer smoothness or when
- * you are sharing motion-intensive content such as a video clip, movie, or video game.
- *
- *
+ * Motion-intensive content. Choose this option if you prefer smoothness or when you are sharing a
+ * video clip, movie, or video game.
*/
CONTENT_HINT_MOTION,
/**
- * Choose this option if you prefer sharpness or when you are
- * sharing montionless content such as a picture, PowerPoint slide, ot text.
- *
+ * Motionless content. Choose this option if you prefer sharpness or when you are sharing a picture,
+ * PowerPoint slides, or texts.
*/
CONTENT_HINT_DETAILS
};
/**
- * The screen sharing scenario.
+ * @brief The screen sharing scenario.
*/
enum SCREEN_SCENARIO_TYPE {
/**
- * 1: Document. This scenario prioritizes the video quality of screen sharing and reduces the
- * latency of the shared video for the receiver. If you share documents, slides, and tables,
- * you can set this scenario.
+ * 1: (Default) Document. This scenario prioritizes the video quality of screen sharing and reduces
+ * the latency of the shared video for the receiver. If you share documents, slides, and tables, you
+ * can set this scenario.
*/
SCREEN_SCENARIO_DOCUMENT = 1,
/**
- * 2: Game. This scenario prioritizes the smoothness of screen sharing. If you share games, you
- * can set this scenario.
+ * 2: Game. This scenario prioritizes the smoothness of screen sharing. If you share games, you can
+ * set this scenario.
*/
SCREEN_SCENARIO_GAMING = 2,
/**
- * 3: Video. This scenario prioritizes the smoothness of screen sharing. If you share movies or
- * live videos, you can set this scenario.
+ * 3: Video. This scenario prioritizes the smoothness of screen sharing. If you share movies or live
+ * videos, you can set this scenario.
*/
SCREEN_SCENARIO_VIDEO = 3,
/**
- * 4: Remote control. This scenario prioritizes the video quality of screen sharing and reduces
- * the latency of the shared video for the receiver. If you share the device desktop being
- * remotely controlled, you can set this scenario.
+ * 4: Remote control. This scenario prioritizes the video quality of screen sharing and reduces the
+ * latency of the shared video for the receiver. If you share the device desktop being remotely
+ * controlled, you can set this scenario.
*/
SCREEN_SCENARIO_RDC = 4,
};
/**
- * The video application scenario type.
+ * @brief The video application scenarios.
*/
enum VIDEO_APPLICATION_SCENARIO_TYPE {
/**
- * 0: Default Scenario.
+ * 0: (Default) The general scenario.
*/
APPLICATION_SCENARIO_GENERAL = 0,
/**
- * 1: Meeting Scenario. This scenario is the best QoE practice of meeting application.
+ * 1: The meeting scenario.
+ * `APPLICATION_SCENARIO_MEETING` (1) is suitable for meeting scenarios. The SDK automatically
+ * enables the following strategies:
+ * - In meeting scenarios where low-quality video streams are required to have a high bitrate, the
+ * SDK automatically enables multiple technologies used to deal with network congestions, to enhance
+ * the performance of the low-quality streams and to ensure the smooth reception by subscribers.
+ * - The SDK monitors the number of subscribers to the high-quality video stream in real time and
+ * dynamically adjusts its configuration based on the number of subscribers.
+ * - If nobody subscribes to the high-quality stream, the SDK automatically reduces its bitrate
+ * and frame rate to save upstream bandwidth.
+ * - If someone subscribes to the high-quality stream, the SDK resets the high-quality stream to
+ * the `VideoEncoderConfiguration` configuration used in the most recent calling of
+ * `setVideoEncoderConfiguration`. If no configuration has been set by the user previously, the
+ * following values are used:
+ * - Resolution: (Windows and macOS) 1280 × 720; (Android and iOS) 960 × 540
+ * - Frame rate: 15 fps
+ * - Bitrate: (Windows and macOS) 1600 Kbps; (Android and iOS) 1000 Kbps
+ * - The SDK monitors the number of subscribers to the low-quality video stream in real time and
+ * dynamically enables or disables it based on the number of subscribers.
+ * - If nobody subscribes to the low-quality stream, the SDK automatically disables it to save
+ * upstream bandwidth.
+ * - If someone subscribes to the low-quality stream, the SDK enables the low-quality stream and
+ * resets it to the `SimulcastStreamConfig` configuration used in the most recent calling of
+ * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`. If no
+ * configuration has been set by the user previously, the following
+ * values are used:
+ * - Resolution: 480 × 272
+ * - Frame rate: 15 fps
+ * - Bitrate: 500 Kbps
+ * @note If the user has called `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const
+ * SimulcastStreamConfig& streamConfig)` to set that never send low-quality video
+ * stream ( `DISABLE_SIMULCAST_STREAM` ), the dynamic adjustment of the low-quality stream in
+ * meeting scenarios will not take effect.
*/
APPLICATION_SCENARIO_MEETING = 1,
/**
- * 2: Video Call Scenario. This scenario is used to optimize the video experience in video application, like 1v1 video call.
+ * 2: 1v1 video call scenario.
+ * `APPLICATION_SCENARIO_1V1` (2) This is applicable to the `one to one live` scenario. To meet the
+ * requirements for low latency and high-quality video in this scenario, the SDK optimizes its
+ * strategies, improving performance in terms of video quality, first frame rendering, latency on
+ * mid-to-low-end devices, and smoothness under weak network conditions.
+ * @note This enumeration value is only applicable to the broadcaster vs. broadcaster scenario.
*/
APPLICATION_SCENARIO_1V1 = 2,
/**
- * 3: Live Show Scenario. This scenario is used to optimize the video experience in video live show.
+ * 3: Live show scenario.
+ * `APPLICATION_SCENARIO_LIVESHOW` (3) This is applicable to the `show room` scenario. In this
+ * scenario, fast video rendering and high image quality are crucial. The SDK implements several
+ * performance optimizations, including automatically enabling accelerated audio and video frame
+ * rendering to minimize first-frame latency (no need to call `enableInstantMediaRendering` ), and
+ * B-frame encoding to achieve better image quality and bandwidth efficiency. The SDK also provides
+ * enhanced video quality and smooth playback, even in poor network conditions or on lower-end
+ * devices.
*/
APPLICATION_SCENARIO_LIVESHOW = 3,
};
@@ -3222,39 +3340,57 @@ enum VIDEO_QOE_PREFERENCE_TYPE {
};
/**
- * The brightness level of the video image captured by the local camera.
+ * @brief The brightness level of the video image captured by the local camera.
*/
enum CAPTURE_BRIGHTNESS_LEVEL_TYPE {
- /** -1: The SDK does not detect the brightness level of the video image.
- * Wait a few seconds to get the brightness level from `CAPTURE_BRIGHTNESS_LEVEL_TYPE` in the next
- * callback.
+ /**
+ * -1: The SDK does not detect the brightness level of the video image. Wait a few seconds to get
+ * the brightness level from `captureBrightnessLevel` in the next callback.
*/
CAPTURE_BRIGHTNESS_LEVEL_INVALID = -1,
- /** 0: The brightness level of the video image is normal.
+ /**
+ * 0: The brightness level of the video image is normal.
*/
CAPTURE_BRIGHTNESS_LEVEL_NORMAL = 0,
- /** 1: The brightness level of the video image is too bright.
+ /**
+ * 1: The brightness level of the video image is too bright.
*/
CAPTURE_BRIGHTNESS_LEVEL_BRIGHT = 1,
- /** 2: The brightness level of the video image is too dark.
+ /**
+ * 2: The brightness level of the video image is too dark.
*/
CAPTURE_BRIGHTNESS_LEVEL_DARK = 2,
};
+/**
+ * @brief Camera stabilization modes.
+ *
+ * @details
+ * The camera stabilization effect increases in the order of 1 < 2 < 3, and the latency will also
+ * increase accordingly.
+ *
+ */
enum CAMERA_STABILIZATION_MODE {
- /** The camera stabilization mode is disabled.
+ /**
+ * -1: (Default) Camera stabilization mode off.
*/
CAMERA_STABILIZATION_MODE_OFF = -1,
- /** device choose stabilization mode automatically.
+ /**
+ * 0: Automatic camera stabilization. The system automatically selects a stabilization mode based on
+ * the status of the camera. However, the latency is relatively high in this mode, so it is
+ * recommended not to use this enumeration.
*/
CAMERA_STABILIZATION_MODE_AUTO = 0,
- /** stabilization mode level 1.
+ /**
+ * 1: (Recommended) Level 1 camera stabilization.
*/
CAMERA_STABILIZATION_MODE_LEVEL_1 = 1,
- /** stabilization mode level 2.
+ /**
+ * 2: Level 2 camera stabilization.
*/
CAMERA_STABILIZATION_MODE_LEVEL_2 = 2,
- /** stabilization mode level 3.
+ /**
+ * 3: Level 3 camera stabilization.
*/
CAMERA_STABILIZATION_MODE_LEVEL_3 = 3,
/** The maximum level of the camera stabilization mode.
@@ -3263,7 +3399,7 @@ enum CAMERA_STABILIZATION_MODE {
};
/**
- * Local audio states.
+ * @brief The state of the local audio.
*/
enum LOCAL_AUDIO_STREAM_STATE {
/**
@@ -3271,7 +3407,7 @@ enum LOCAL_AUDIO_STREAM_STATE {
*/
LOCAL_AUDIO_STREAM_STATE_STOPPED = 0,
/**
- * 1: The capturing device starts successfully.
+ * 1: The local audio capturing device starts successfully.
*/
LOCAL_AUDIO_STREAM_STATE_RECORDING = 1,
/**
@@ -3285,7 +3421,7 @@ enum LOCAL_AUDIO_STREAM_STATE {
};
/**
- * Local audio state error codes.
+ * @brief Reasons for local audio state changes.
*/
enum LOCAL_AUDIO_STREAM_REASON {
/**
@@ -3298,43 +3434,56 @@ enum LOCAL_AUDIO_STREAM_REASON {
*/
LOCAL_AUDIO_STREAM_REASON_FAILURE = 1,
/**
- * 2: No permission to use the local audio device. Remind your users to grant permission.
+ * 2: No permission to use the local audio capturing device. Remind your users to grant permission.
*/
LOCAL_AUDIO_STREAM_REASON_DEVICE_NO_PERMISSION = 2,
/**
- * 3: (Android and iOS only) The local audio capture device is used. Remind your users to check
- * whether another application occupies the microphone. Local audio capture automatically resume
- * after the microphone is idle for about five seconds. You can also try to rejoin the channel
- * after the microphone is idle.
+ * 3: (Android and iOS only) The local audio capture device is already in use. Remind your users to
+ * check whether another application occupies the microphone. Local audio capture automatically
+ * resumes after the microphone is idle for about five seconds. You can also try to rejoin the
+ * channel after the microphone is idle.
*/
LOCAL_AUDIO_STREAM_REASON_DEVICE_BUSY = 3,
/**
- * 4: The local audio capture failed.
+ * 4: The local audio capture fails.
*/
LOCAL_AUDIO_STREAM_REASON_RECORD_FAILURE = 4,
/**
- * 5: The local audio encoding failed.
+ * 5: The local audio encoding fails.
*/
LOCAL_AUDIO_STREAM_REASON_ENCODE_FAILURE = 5,
- /** 6: The SDK cannot find the local audio recording device.
+ /**
+ * 6: (Windows and macOS only) No local audio capture device. Remind your users to check whether the
+ * microphone is connected to the device properly in the control panel of the device or if the
+ * microphone is working properly.
*/
LOCAL_AUDIO_STREAM_REASON_NO_RECORDING_DEVICE = 6,
- /** 7: The SDK cannot find the local audio playback device.
+ /**
+ * 7: (Windows and macOS only) No local audio playback device. Remind your users to check whether the
+ * speaker is connected to the device properly in the control panel of the device or if the speaker
+ * is working properly.
*/
LOCAL_AUDIO_STREAM_REASON_NO_PLAYOUT_DEVICE = 7,
/**
- * 8: The local audio capturing is interrupted by the system call.
+ * 8: (Android and iOS only) The local audio capture is interrupted by a system call, smart
+ * assistants, or alarm clock. Prompt your users to end the phone call, smart assistants, or alarm
+ * clock if the local audio capture is required.
*/
LOCAL_AUDIO_STREAM_REASON_INTERRUPTED = 8,
- /** 9: An invalid audio capture device ID.
+ /**
+ * 9: (Windows only) The ID of the local audio-capture device is invalid. Prompt the user to check
+ * the audio capture device ID.
*/
LOCAL_AUDIO_STREAM_REASON_RECORD_INVALID_ID = 9,
- /** 10: An invalid audio playback device ID.
+ /**
+ * 10: (Windows only) The ID of the local audio-playback device is invalid. Prompt the user to check
+ * the audio playback device ID.
*/
LOCAL_AUDIO_STREAM_REASON_PLAYOUT_INVALID_ID = 10,
};
-/** Local video state types.
+/**
+ * @brief Local video state types.
*/
enum LOCAL_VIDEO_STREAM_STATE {
/**
@@ -3342,8 +3491,8 @@ enum LOCAL_VIDEO_STREAM_STATE {
*/
LOCAL_VIDEO_STREAM_STATE_STOPPED = 0,
/**
- * 1: The local video capturing device starts successfully. The SDK also reports this state when
- * you call `startScreenCaptureByWindowId` to share a maximized window.
+ * 1: The local video capturing device starts successfully. The SDK also reports this state when you
+ * call `startScreenCaptureByWindowId` to share a maximized window.
*/
LOCAL_VIDEO_STREAM_STATE_CAPTURING = 1,
/**
@@ -3357,7 +3506,30 @@ enum LOCAL_VIDEO_STREAM_STATE {
};
/**
- * Local video state error codes.
+ * @brief The local video event type.
+ * @since v4.6.1
+ */
+enum LOCAL_VIDEO_EVENT_TYPE {
+ /**
+ * 1: (Android only) The screen capture window is hidden.
+ */
+ LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_WINDOW_HIDDEN = 1,
+ /**
+ * 2: (Android only) The screen capture window is recovered from hidden.
+ */
+ LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN = 2,
+ /**
+ * 3: (Android only) The screen capture is stopped by user.
+ */
+ LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_STOPPED_BY_USER = 3,
+ /**
+ * 4: (Android only) An internal error occurs during the screen capture.
+ */
+ LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_SYSTEM_INTERNAL_ERROR = 4,
+};
+
+/**
+ * @brief Reasons for local video state changes.
*/
enum LOCAL_VIDEO_STREAM_REASON {
/**
@@ -3369,35 +3541,34 @@ enum LOCAL_VIDEO_STREAM_REASON {
*/
LOCAL_VIDEO_STREAM_REASON_FAILURE = 1,
/**
- * 2: No permission to use the local video capturing device. Remind the user to grant permission
+ * 2: No permission to use the local video capturing device. Prompt the user to grant permissions
* and rejoin the channel.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_NO_PERMISSION = 2,
/**
- * 3: The local video capturing device is in use. Remind the user to check whether another
- * application occupies the camera.
+ * 3: The local video capturing device is in use. Prompt the user to check if the camera is being
+ * used by another app, or try to rejoin the channel.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_BUSY = 3,
/**
- * 4: The local video capture fails. Remind the user to check whether the video capture device
- * is working properly or the camera is occupied by another application, and then to rejoin the
- * channel.
+ * 4: The local video capture fails. Prompt the user to check whether the video capture device is
+ * working properly, whether the camera is used by another app, or try to rejoin the channel.
*/
LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE = 4,
/**
- * 5: The local video encoder is not supported.
+ * 5: The local video encoding fails.
*/
LOCAL_VIDEO_STREAM_REASON_CODEC_NOT_SUPPORT = 5,
/**
- * 6: (iOS only) The app is in the background. Remind the user that video capture cannot be
+ * 6: (iOS only) The app is in the background. Prompt the user that video capture cannot be
* performed normally when the app is in the background.
*/
LOCAL_VIDEO_STREAM_REASON_CAPTURE_INBACKGROUND = 6,
/**
- * 7: (iOS only) The current application window is running in Slide Over, Split View, or Picture
- * in Picture mode, and another app is occupying the camera. Remind the user that the application
- * cannot capture video properly when the app is running in Slide Over, Split View, or Picture in
- * Picture mode and another app is occupying the camera.
+ * 7: (iOS only) The current app window is running in Slide Over, Split View, or Picture in Picture
+ * mode, and another app is occupying the camera. Prompt the user that the app cannot capture video
+ * properly when it is running in Slide Over, Split View, or Picture in Picture mode and another app
+ * is occupying the camera.
*/
LOCAL_VIDEO_STREAM_REASON_CAPTURE_MULTIPLE_FOREGROUND_APPS = 7,
/**
@@ -3407,23 +3578,28 @@ enum LOCAL_VIDEO_STREAM_REASON {
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_NOT_FOUND = 8,
/**
- * 9: (macOS and Windows only) The video capture device currently in use is disconnected (such as being
- * unplugged).
+ * 9: (macOS and Windows only) The video capture device currently in use is disconnected (such as
+ * being unplugged).
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_DISCONNECTED = 9,
/**
- * 10: (macOS and Windows only) The SDK cannot find the video device in the video device list.
- * Check whether the ID of the video device is valid.
+ * 10: (macOS and Windows only) The SDK cannot find the video device in the video device list. Check
+ * whether the ID of the video device is valid.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_INVALID_ID = 10,
/**
- * 14: (Android only) Video capture was interrupted, possibly due to the camera being occupied
- * or some policy reasons such as background termination.
+ * 14: (Android only) Video capture is interrupted. Possible reasons include the following:
+ * - The camera is being used by another app. Prompt the user to check if the camera is being used
+ * by another app.
+ * - The current app has been switched to the background. You can use foreground services to notify
+ * the operating system and ensure that the app can still collect video when it switches to the
+ * background.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_INTERRUPT = 14,
/**
- * 15: (Android only) The device may need to be shut down and restarted to restore camera
- * function, or there may be a persistent hardware problem.
+ * 15: (Android only) The video capture device encounters an error. Prompt the user to close and
+ * restart the camera to restore functionality. If this operation does not solve the problem, check
+ * if the camera has a hardware failure.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_FATAL_ERROR = 15,
/**
@@ -3431,58 +3607,78 @@ enum LOCAL_VIDEO_STREAM_REASON {
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_SYSTEM_PRESSURE = 101,
/**
- * 11: (macOS only) The shared window is minimized when you call `startScreenCaptureByWindowId`
- * to share a window. The SDK cannot share a minimized window. You can cancel the minimization
- * of this window at the application layer, for example by maximizing this window.
+ * 11: (macOS and Windows only) The shared window is minimized when you call the
+ * `startScreenCaptureByWindowId` method to share a window. The SDK cannot share a minimized window.
+ * Please prompt the user to unminimize the shared window.
*/
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_MINIMIZED = 11,
/**
- * 12: (macOS and Windows only) The error code indicates that a window shared by the window ID
- * has been closed or a full-screen window shared by the window ID has exited full-screen mode.
- * After exiting full-screen mode, remote users cannot see the shared window. To prevent remote
- * users from seeing a black screen, Agora recommends that you immediately stop screen sharing.
- *
- * Common scenarios for reporting this error code:
- * - When the local user closes the shared window, the SDK reports this error code.
- * - The local user shows some slides in full-screen mode first, and then shares the windows of
- * the slides. After the user exits full-screen mode, the SDK reports this error code.
- * - The local user watches a web video or reads a web document in full-screen mode first, and
- * then shares the window of the web video or document. After the user exits full-screen mode,
- * the SDK reports this error code.
+ * 12: (macOS and Windows only) The error code indicates that a window shared by the window ID has
+ * been closed or a full-screen window shared by the window ID has exited full-screen mode. After
+ * exiting full-screen mode, remote users cannot see the shared window. To prevent remote users from
+ * seeing a black screen, Agora recommends that you immediately stop screen sharing.
+ * Common scenarios reporting this error code:
+ * - The local user closes the shared window.
+ * - The local user shows some slides in full-screen mode first, and then shares the windows of the
+ * slides. After the user exits full-screen mode, the SDK reports this error code.
+ * - The local user watches a web video or reads a web document in full-screen mode first, and then
+ * shares the window of the web video or document. After the user exits full-screen mode, the SDK
+ * reports this error code.
*/
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_CLOSED = 12,
- /** 13: The local screen capture window is occluded. */
+ /**
+ * 13: (Windows only) The window being shared is overlapped by another window, so the overlapped
+ * area is blacked out by the SDK during window sharing.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_OCCLUDED = 13,
/** 20: The local screen capture window is not supported. */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_NOT_SUPPORTED = 20,
- /** 21: The screen capture fails. */
+ /**
+ * 21: (Windows and Android only) The currently captured window has no data.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_FAILURE = 21,
- /** 22: No permision to capture screen. */
+ /**
+ * 22: (Windows and macOS only) No permission for screen capture.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_NO_PERMISSION = 22,
/**
- * 24: (Windows Only) An unexpected error (possibly due to window block failure) occurs during the
- * screen sharing process, resulting in performance degradation. However, the screen sharing
- * process itself is functioning normally.
+ * 24: (Windows only) An unexpected error occurred during screen sharing (possibly due to window
+ * blocking failure), resulting in decreased performance, but the screen sharing process itself was
+ * not affected.
+ * @note During screen sharing, if blocking a specific window fails due to device driver issues, the
+ * SDK will report this event and automatically fall back to sharing the entire screen. If your use
+ * case requires masking specific windows to protect privacy, we recommend listening for this event
+ * and implementing additional privacy protection mechanisms when it is triggered.
*/
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_AUTO_FALLBACK = 24,
- /** 25: (Windows only) The local screen capture window is currently hidden and not visible on the
- desktop. */
+ /**
+ * 25: (Windows only) The window for the current screen capture is hidden and not visible on the
+ * current screen.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_HIDDEN = 25,
- /** 26: (Windows only) The local screen capture window is recovered from its hidden state. */
+ /**
+ * 26: (Windows only) The window for screen capture has been restored from hidden state.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN = 26,
- /** 27: (Windows and macOS only) The window is recovered from miniminzed */
+ /**
+ * 27: (macOS and Windows only) The window for screen capture has been restored from the minimized
+ * state.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_MINIMIZED = 27,
/**
- * 28: The screen capture paused.
- *
- * Common scenarios for reporting this error code:
- * - When the desktop switch to the secure desktop such as UAC dialog or the Winlogon desktop on
- * Windows platform, the SDK reports this error code.
+ * 28: (Windows only) Screen capture has been paused. Common scenarios reporting this error code:
+ * The current screen may have been switched to a secure desktop, such as a UAC dialog box or
+ * Winlogon desktop.
*/
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_PAUSED = 28,
- /** 29: The screen capture is resumed. */
+ /**
+ * 29: (Windows only) Screen capture has resumed from paused state.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_RESUMED = 29,
- /** 30: The shared display has been disconnected */
+ /**
+ * 30: (Windows and macOS only) The display used for screen capture is disconnected. The current
+ * screen sharing has been paused. Prompt the user to restart the screen sharing.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_DISPLAY_DISCONNECTED = 30,
/* 30: (HMOS only) ScreenCapture stopped by user */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_STOPPED_BY_USER = 31,
@@ -3490,18 +3686,18 @@ enum LOCAL_VIDEO_STREAM_REASON {
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_INTERRUPTED_BY_OTHER = 32,
/* 32: (HMOS only) ScreenCapture stopped by SIM call */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_STOPPED_BY_CALL = 33,
- /* 34: HDR Video Source fallback to SDR */
- LOCAL_AUDIO_STREAM_REASON_VIDEO_SOURCE_HDR_TO_SDR = 34,
+ /** 34: (Windows only) Some windows of the exclude window list failed to be excluded from the screen capture. */
+ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_EXCLUDE_WINDOW_FAILED = 34,
};
/**
- * Remote audio states.
+ * @brief Remote audio states.
*/
enum REMOTE_AUDIO_STATE {
/**
- * 0: The remote audio is in the default state. The SDK reports this state in the case of
- * `REMOTE_AUDIO_REASON_LOCAL_MUTED(3)`, `REMOTE_AUDIO_REASON_REMOTE_MUTED(5)`, or
- * `REMOTE_AUDIO_REASON_REMOTE_OFFLINE(7)`.
+ * 0: The remote audio is in the initial state. The SDK reports this state in the case of
+ * `REMOTE_AUDIO_REASON_LOCAL_MUTED`, `REMOTE_AUDIO_REASON_REMOTE_MUTED` or
+ * `REMOTE_AUDIO_REASON_REMOTE_OFFLINE`.
*/
REMOTE_AUDIO_STATE_STOPPED =
0, // Default state, audio is started or remote user disabled/muted audio stream
@@ -3510,30 +3706,30 @@ enum REMOTE_AUDIO_STATE {
*/
REMOTE_AUDIO_STATE_STARTING = 1, // The first audio frame packet has been received
/**
- * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the
- * case of `REMOTE_AUDIO_REASON_NETWORK_RECOVERY(2)`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED(4)`, or
- * `REMOTE_AUDIO_REASON_REMOTE_UNMUTED(6)`.
+ * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the case
+ * of `REMOTE_AUDIO_REASON_NETWORK_RECOVERY`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED` or
+ * `REMOTE_AUDIO_REASON_REMOTE_UNMUTED`.
*/
REMOTE_AUDIO_STATE_DECODING =
2, // The first remote audio frame has been decoded or fronzen state ends
/**
* 3: The remote audio is frozen. The SDK reports this state in the case of
- * `REMOTE_AUDIO_REASON_NETWORK_CONGESTION(1)`.
+ * `REMOTE_AUDIO_REASON_NETWORK_CONGESTION`.
*/
REMOTE_AUDIO_STATE_FROZEN = 3, // Remote audio is frozen, probably due to network issue
/**
* 4: The remote audio fails to start. The SDK reports this state in the case of
- * `REMOTE_AUDIO_REASON_INTERNAL(0)`.
+ * `REMOTE_AUDIO_REASON_INTERNAL`.
*/
REMOTE_AUDIO_STATE_FAILED = 4, // Remote audio play failed
};
/**
- * Reasons for the remote audio state change.
+ * @brief The reason for the remote audio state change.
*/
enum REMOTE_AUDIO_STATE_REASON {
/**
- * 0: The SDK reports this reason when the video state changes.
+ * 0: The SDK reports this reason when the audio state changes.
*/
REMOTE_AUDIO_REASON_INTERNAL = 0,
/**
@@ -3545,23 +3741,19 @@ enum REMOTE_AUDIO_STATE_REASON {
*/
REMOTE_AUDIO_REASON_NETWORK_RECOVERY = 2,
/**
- * 3: The local user stops receiving the remote audio stream or
- * disables the audio module.
+ * 3: The local user stops receiving the remote audio stream or disables the audio module.
*/
REMOTE_AUDIO_REASON_LOCAL_MUTED = 3,
/**
- * 4: The local user resumes receiving the remote audio stream or
- * enables the audio module.
+ * 4: The local user resumes receiving the remote audio stream or enables the audio module.
*/
REMOTE_AUDIO_REASON_LOCAL_UNMUTED = 4,
/**
- * 5: The remote user stops sending the audio stream or disables the
- * audio module.
+ * 5: The remote user stops sending the audio stream or disables the audio module.
*/
REMOTE_AUDIO_REASON_REMOTE_MUTED = 5,
/**
- * 6: The remote user resumes sending the audio stream or enables the
- * audio module.
+ * 6: The remote user resumes sending the audio stream or enables the audio module.
*/
REMOTE_AUDIO_REASON_REMOTE_UNMUTED = 6,
/**
@@ -3579,14 +3771,13 @@ enum REMOTE_AUDIO_STATE_REASON {
};
/**
- * The state of the remote video.
+ * @brief The state of the remote video stream.
*/
enum REMOTE_VIDEO_STATE {
/**
- * 0: The remote video is in the default state. The SDK reports this state in the case of
- * `REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED (3)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED (5)`,
- * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE (7)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK
- * (8)`.
+ * 0: The remote video is in the initial state. The SDK reports this state in the case of
+ * `REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED`, `REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED`, or
+ * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE`.
*/
REMOTE_VIDEO_STATE_STOPPED = 0,
/**
@@ -3594,23 +3785,25 @@ enum REMOTE_VIDEO_STATE {
*/
REMOTE_VIDEO_STATE_STARTING = 1,
/**
- * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the
- * case of `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY (2)`,
- * `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED (4)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED (6)`,
- * or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY (9)`.
+ * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the case
+ * of `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY`, `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED`,
+ * `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED`, or
+ * `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY`.
*/
REMOTE_VIDEO_STATE_DECODING = 2,
- /** 3: The remote video is frozen, probably due to
- * #REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION (1).
+ /**
+ * 3: The remote video is frozen. The SDK reports this state in the case of
+ * `REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION` or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK`.
*/
REMOTE_VIDEO_STATE_FROZEN = 3,
- /** 4: The remote video fails to start. The SDK reports this state in the case of
- * `REMOTE_VIDEO_STATE_REASON_INTERNAL (0)`.
+ /**
+ * 4: The remote video fails to start. The SDK reports this state in the case of
+ * `REMOTE_VIDEO_STATE_REASON_INTERNAL`.
*/
REMOTE_VIDEO_STATE_FAILED = 4,
};
/**
- * The reason for the remote video state change.
+ * @brief The reason for the remote video state change.
*/
enum REMOTE_VIDEO_STATE_REASON {
/**
@@ -3622,7 +3815,7 @@ enum REMOTE_VIDEO_STATE_REASON {
*/
REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION = 1,
/**
- * 2: Network recovery.
+ * 2: Network is recovered.
*/
REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY = 2,
/**
@@ -3645,12 +3838,14 @@ enum REMOTE_VIDEO_STATE_REASON {
* 7: The remote user leaves the channel.
*/
REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE = 7,
- /** 8: The remote audio-and-video stream falls back to the audio-only stream
- * due to poor network conditions.
+ /**
+ * 8: The remote audio-and-video stream falls back to the audio-only stream due to poor network
+ * conditions.
*/
REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK = 8,
- /** 9: The remote audio-only stream switches back to the audio-and-video
- * stream after the network conditions improve.
+ /**
+ * 9: The remote audio-only stream switches back to the audio-and-video stream after the network
+ * conditions improve.
*/
REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY = 9,
/** (Internal use only) 10: The remote video stream type change to low stream type
@@ -3659,11 +3854,13 @@ enum REMOTE_VIDEO_STATE_REASON {
/** (Internal use only) 11: The remote video stream type change to high stream type
*/
REMOTE_VIDEO_STATE_REASON_VIDEO_STREAM_TYPE_CHANGE_TO_HIGH = 11,
- /** (iOS only) 12: The app of the remote user is in background.
+ /**
+ * 12: (iOS only) The remote user's app has switched to the background.
*/
REMOTE_VIDEO_STATE_REASON_SDK_IN_BACKGROUND = 12,
- /** 13: The remote video stream is not supported by the decoder
+ /**
+ * 13: The local video decoder does not support decoding the remote video stream.
*/
REMOTE_VIDEO_STATE_REASON_CODEC_NOT_SUPPORT = 13,
@@ -3771,19 +3968,22 @@ enum REMOTE_VIDEO_DOWNSCALE_LEVEL {
};
/**
- * The volume information of users.
+ * @brief The volume information of users.
*/
struct AudioVolumeInfo {
/**
- * User ID of the speaker.
- * - In the local user's callback, `uid` = 0.
- * - In the remote users' callback, `uid` is the user ID of a remote user whose instantaneous
- * volume is one of the three highest.
+ * The user ID.
+ * - In the local user's callback, `uid` is 0.
+ * - In the remote users' callback, `uid` is the user ID of a remote user whose instantaneous volume
+ * is the highest.
*/
uid_t uid;
/**
* The volume of the user. The value ranges between 0 (the lowest volume) and 255 (the highest
- * volume). If the user calls `startAudioMixing`, the value of volume is the volume after audio
+ * volume). If the local user enables audio capturing and calls `muteLocalAudioStream` and sets it to
+ * `true` to mute, the value of `volume` indicates the volume of locally captured audio signal. If
+ * the user calls `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)`,
+ * the value of `volume` indicates the volume after audio
* mixing.
*/
unsigned int volume; // [0,255]
@@ -3794,14 +3994,14 @@ struct AudioVolumeInfo {
* @note
* - The `vad` parameter does not report the voice activity status of remote users. In a remote
* user's callback, the value of `vad` is always 1.
- * - To use this parameter, you must set `reportVad` to true when calling
+ * - To use this parameter, you must set `reportVad` to `true` when calling
* `enableAudioVolumeIndication`.
*/
unsigned int vad;
/**
- * The voice pitch (Hz) of the local user. The value ranges between 0.0 and 4000.0.
- * @note The `voicePitch` parameter does not report the voice pitch of remote users. In the
- * remote users' callback, the value of `voicePitch` is always 0.0.
+ * The voice pitch of the local user. The value ranges between 0.0 and 4000.0.
+ * @note The `voicePitch` parameter does not report the voice pitch of remote users. In the remote
+ * users' callback, the value of `voicePitch` is always 0.0.
*/
double voicePitch;
@@ -3809,10 +4009,13 @@ struct AudioVolumeInfo {
};
/**
- * The audio device information.
+ * @brief The audio device information.
+ *
+ * @note This class is for Android only.
+ *
*/
struct DeviceInfo {
- /*
+ /**
* Whether the audio device supports ultra-low-latency capture and playback:
* - `true`: The device supports ultra-low-latency capture and playback.
* - `false`: The device does not support ultra-low-latency capture and playback.
@@ -3829,13 +4032,13 @@ class IPacketObserver {
public:
virtual ~IPacketObserver() {}
/**
- * The definition of the Packet struct.
+ * @brief Configurations for the `Packet` instance.
*/
struct Packet {
/**
* The buffer address of the sent or received data.
- * @note Agora recommends setting `buffer` to a value larger than 2048 bytes. Otherwise, you
- * may encounter undefined behaviors (such as crashes).
+ * @note Agora recommends setting `buffer` to a value larger than 2048 bytes. Otherwise, you may
+ * encounter undefined behaviors (such as crashes).
*/
const unsigned char* buffer;
/**
@@ -3846,62 +4049,70 @@ class IPacketObserver {
Packet() : buffer(OPTIONAL_NULLPTR), size(0) {}
};
/**
- * Occurs when the SDK is ready to send the audio packet.
- * @param packet The audio packet to be sent: Packet.
- * @return Whether to send the audio packet:
- * - true: Send the packet.
- * - false: Do not send the packet, in which case the audio packet will be discarded.
+ * @brief Occurs when the local user sends an audio packet.
+ *
+ * @param packet The sent audio packet, see `Packet`.
+ *
+ * @return
+ * - `true`: The audio packet is sent successfully.
+ * - `false`: The audio packet is discarded.
*/
virtual bool onSendAudioPacket(Packet& packet) = 0;
/**
- * Occurs when the SDK is ready to send the video packet.
- * @param packet The video packet to be sent: Packet.
- * @return Whether to send the video packet:
- * - true: Send the packet.
- * - false: Do not send the packet, in which case the audio packet will be discarded.
+ * @brief Occurs when the local user sends a video packet.
+ *
+ * @param packet The sent video packet, see `Packet`.
+ *
+ * @return
+ * - `true`: The video packet is sent successfully.
+ * - `false`: The video packet is discarded.
*/
virtual bool onSendVideoPacket(Packet& packet) = 0;
/**
- * Occurs when the audio packet is received.
- * @param packet The received audio packet: Packet.
- * @return Whether to process the audio packet:
- * - true: Process the packet.
- * - false: Do not process the packet, in which case the audio packet will be discarded.
+ * @brief Occurs when the local user receives an audio packet.
+ *
+ * @param packet The received audio packet, see `Packet`.
+ *
+ * @return
+ * - `true`: The audio packet is received successfully.
+ * - `false`: The audio packet is discarded.
*/
virtual bool onReceiveAudioPacket(Packet& packet) = 0;
/**
- * Occurs when the video packet is received.
- * @param packet The received video packet: Packet.
- * @return Whether to process the audio packet:
- * - true: Process the packet.
- * - false: Do not process the packet, in which case the video packet will be discarded.
+ * @brief Occurs when the local user receives a video packet.
+ *
+ * @param packet The received video packet, see `Packet`.
+ *
+ * @return
+ * - `true`: The video packet is received successfully.
+ * - `false`: The video packet is discarded.
*/
virtual bool onReceiveVideoPacket(Packet& packet) = 0;
};
/**
- * Audio sample rate types.
+ * @brief The audio sampling rate of the stream to be pushed to the CDN.
*/
enum AUDIO_SAMPLE_RATE_TYPE {
/**
- * 32000: 32 KHz.
+ * 32000: 32 kHz
*/
AUDIO_SAMPLE_RATE_32000 = 32000,
/**
- * 44100: 44.1 KHz.
+ * 44100: 44.1 kHz
*/
AUDIO_SAMPLE_RATE_44100 = 44100,
/**
- * 48000: 48 KHz.
+ * 48000: (Default) 48 kHz
*/
AUDIO_SAMPLE_RATE_48000 = 48000,
};
/**
- * The codec type of the output video.
+ * @brief The codec type of the output video.
*/
enum VIDEO_CODEC_TYPE_FOR_STREAM {
/**
- * 1: H.264.
+ * 1: (Default) H.264.
*/
VIDEO_CODEC_H264_FOR_STREAM = 1,
/**
@@ -3911,30 +4122,31 @@ enum VIDEO_CODEC_TYPE_FOR_STREAM {
};
/**
- * Video codec profile types.
+ * @brief Video codec profile types.
*/
enum VIDEO_CODEC_PROFILE_TYPE {
/**
- * 66: Baseline video codec profile. Generally used in video calls on mobile phones.
+ * 66: Baseline video codec profile; generally used for video calls on mobile phones.
*/
VIDEO_CODEC_PROFILE_BASELINE = 66,
/**
- * 77: Main video codec profile. Generally used in mainstream electronics, such as MP4 players,
+ * 77: Main video codec profile; generally used in mainstream electronics such as MP4 players,
* portable video players, PSP, and iPads.
*/
VIDEO_CODEC_PROFILE_MAIN = 77,
/**
- * 100: High video codec profile. Generally used in high-resolution broadcasts or television.
+ * 100: (Default) High video codec profile; generally used in high-resolution live streaming or
+ * television.
*/
VIDEO_CODEC_PROFILE_HIGH = 100,
};
/**
- * Self-defined audio codec profile.
+ * @brief Self-defined audio codec profile.
*/
enum AUDIO_CODEC_PROFILE_TYPE {
/**
- * 0: LC-AAC.
+ * 0: (Default) LC-AAC.
*/
AUDIO_CODEC_PROFILE_LC_AAC = 0,
/**
@@ -3942,13 +4154,13 @@ enum AUDIO_CODEC_PROFILE_TYPE {
*/
AUDIO_CODEC_PROFILE_HE_AAC = 1,
/**
- * 2: HE-AAC v2.
+ * 2: HE-AAC v2.
*/
AUDIO_CODEC_PROFILE_HE_AAC_V2 = 2,
};
/**
- * Local audio statistics.
+ * @brief Local audio statistics.
*/
struct LocalAudioStats {
/**
@@ -3973,7 +4185,7 @@ struct LocalAudioStats {
*/
unsigned short txPacketLossRate;
/**
- * The audio delay of the device, contains record and playout delay
+ * The audio device module delay (ms) when playing or recording audio.
*/
int audioDeviceDelay;
/**
@@ -3981,27 +4193,26 @@ struct LocalAudioStats {
*/
int audioPlayoutDelay;
/**
- * The signal delay estimated from audio in-ear monitoring (ms).
+ * The ear monitor delay (ms), which is the delay from microphone input to headphone output.
*/
int earMonitorDelay;
/**
- * The signal delay estimated during the AEC process from nearin and farin (ms).
+ * Acoustic echo cancellation (AEC) module estimated delay (ms), which is the signal delay from
+ * when audio is played locally to when it is captured locally.
*/
int aecEstimatedDelay;
};
/**
- * States of the Media Push.
+ * @brief States of the Media Push.
*/
enum RTMP_STREAM_PUBLISH_STATE {
/**
- * 0: The Media Push has not started or has ended. This state is also triggered after you remove a
- * RTMP or RTMPS stream from the CDN by calling `removePublishStreamUrl`.
+ * 0: The Media Push has not started or has ended.
*/
RTMP_STREAM_PUBLISH_STATE_IDLE = 0,
/**
- * 1: The SDK is connecting to Agora's streaming server and the CDN server. This state is
- * triggered after you call the `addPublishStreamUrl` method.
+ * 1: The streaming server and CDN server are being connected.
*/
RTMP_STREAM_PUBLISH_STATE_CONNECTING = 1,
/**
@@ -4010,42 +4221,37 @@ enum RTMP_STREAM_PUBLISH_STATE {
*/
RTMP_STREAM_PUBLISH_STATE_RUNNING = 2,
/**
- * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the
- * streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this
- * state.
- * - If the SDK successfully resumes the streaming, #RTMP_STREAM_PUBLISH_STATE_RUNNING (2)
- * returns.
+ * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the streaming
+ * is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this state.
+ * - If the SDK successfully resumes the streaming, RTMP_STREAM_PUBLISH_STATE_RUNNING (2) returns.
* - If the streaming does not resume within 60 seconds or server errors occur,
- * #RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. You can also reconnect to the server by calling
- * the `removePublishStreamUrl` and `addPublishStreamUrl` methods.
+ * RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. If you feel that 60 seconds is too long, you can
+ * also actively try to reconnect.
*/
RTMP_STREAM_PUBLISH_STATE_RECOVERING = 3,
/**
- * 4: The RTMP or RTMPS streaming fails. See the `errCode` parameter for the detailed error
- * information. You can also call the `addPublishStreamUrl` method to publish the RTMP or RTMPS
- * streaming again.
+ * 4: The RTMP or RTMPS streaming fails. After a failure, you can troubleshoot the cause of the
+ * error through the returned error code.
*/
RTMP_STREAM_PUBLISH_STATE_FAILURE = 4,
/**
- * 5: The SDK is disconnecting to Agora's streaming server and the CDN server. This state is
- * triggered after you call the `removePublishStreamUrl` method.
+ * 5: The SDK is disconnecting from the Agora streaming server and CDN. When you call
+ * `stopRtmpStream` to stop the Media Push normally, the SDK reports the Media Push state as
+ * `RTMP_STREAM_PUBLISH_STATE_DISCONNECTING` and `RTMP_STREAM_PUBLISH_STATE_IDLE` in sequence.
*/
RTMP_STREAM_PUBLISH_STATE_DISCONNECTING = 5,
};
/**
- * Error codes of the RTMP or RTMPS streaming.
+ * @brief Reasons for changes in the status of RTMP or RTMPS streaming.
*/
enum RTMP_STREAM_PUBLISH_REASON {
/**
- * 0: The RTMP or RTMPS streaming publishes successfully.
+ * 0: The RTMP or RTMPS streaming has not started or has ended.
*/
RTMP_STREAM_PUBLISH_REASON_OK = 0,
/**
- * 1: Invalid argument used. If, for example, you do not call the `setLiveTranscoding` method to
- * configure the LiveTranscoding parameters before calling the addPublishStreamUrl method, the SDK
- * returns this error. Check whether you set the parameters in the `setLiveTranscoding` method
- * properly.
+ * 1: Invalid argument used. Check the parameter setting.
*/
RTMP_STREAM_PUBLISH_REASON_INVALID_ARGUMENT = 1,
/**
@@ -4053,13 +4259,11 @@ enum RTMP_STREAM_PUBLISH_REASON {
*/
RTMP_STREAM_PUBLISH_REASON_ENCRYPTED_STREAM_NOT_ALLOWED = 2,
/**
- * 3: Timeout for the RTMP or RTMPS streaming. Call the `addPublishStreamUrl` method to publish
- * the streaming again.
+ * 3: Timeout for the RTMP or RTMPS streaming.
*/
RTMP_STREAM_PUBLISH_REASON_CONNECTION_TIMEOUT = 3,
/**
- * 4: An error occurs in Agora's streaming server. Call the `addPublishStreamUrl` method to
- * publish the streaming again.
+ * 4: An error occurs in Agora's streaming server.
*/
RTMP_STREAM_PUBLISH_REASON_INTERNAL_SERVER_ERROR = 4,
/**
@@ -4075,7 +4279,8 @@ enum RTMP_STREAM_PUBLISH_REASON {
*/
RTMP_STREAM_PUBLISH_REASON_REACH_LIMIT = 7,
/**
- * 8: The host manipulates other hosts' URLs. Check your app logic.
+ * 8: The host manipulates other hosts' URLs. For example, the host updates or stops other hosts'
+ * streams. Check your app logic.
*/
RTMP_STREAM_PUBLISH_REASON_NOT_AUTHORIZED = 8,
/**
@@ -4083,20 +4288,19 @@ enum RTMP_STREAM_PUBLISH_REASON {
*/
RTMP_STREAM_PUBLISH_REASON_STREAM_NOT_FOUND = 9,
/**
- * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL
- * format is correct.
+ * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL format
+ * is correct.
*/
RTMP_STREAM_PUBLISH_REASON_FORMAT_NOT_SUPPORTED = 10,
/**
- * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check
- * your application code logic.
+ * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check your
+ * application code logic.
*/
RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER =
11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h
/**
- * 13: The `updateRtmpTranscoding` or `setLiveTranscoding` method is called to update the
- * transcoding configuration in a scenario where there is streaming without transcoding. Check
- * your application code logic.
+ * 13: The `updateRtmpTranscoding` method is called to update the transcoding configuration in a
+ * scenario where there is streaming without transcoding. Check your application code logic.
*/
RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM =
13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h
@@ -4109,25 +4313,28 @@ enum RTMP_STREAM_PUBLISH_REASON {
*/
RTMP_STREAM_PUBLISH_REASON_INVALID_APPID =
15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h
- /** invalid privilege. */
+ /**
+ * 16: Your project does not have permission to use streaming services.
+ */
RTMP_STREAM_PUBLISH_REASON_INVALID_PRIVILEGE = 16,
/**
- * 100: The streaming has been stopped normally. After you call `removePublishStreamUrl` to stop
- * streaming, the SDK returns this value.
+ * 100: The streaming has been stopped normally. After you stop the Media Push, the SDK returns this
+ * value.
*/
RTMP_STREAM_UNPUBLISH_REASON_OK = 100,
};
-/** Events during the RTMP or RTMPS streaming. */
+/**
+ * @brief Events during the Media Push.
+ */
enum RTMP_STREAMING_EVENT {
/**
- * 1: An error occurs when you add a background image or a watermark image to the RTMP or RTMPS
- * stream.
+ * 1: An error occurs when you add a background image or a watermark image in the Media Push.
*/
RTMP_STREAMING_EVENT_FAILED_LOAD_IMAGE = 1,
/**
- * 2: The streaming URL is already being used for CDN live streaming. If you want to start new
- * streaming, use a new streaming URL.
+ * 2: The streaming URL is already being used for Media Push. If you want to start new streaming,
+ * use a new streaming URL.
*/
RTMP_STREAMING_EVENT_URL_ALREADY_IN_USE = 2,
/**
@@ -4135,48 +4342,52 @@ enum RTMP_STREAMING_EVENT {
*/
RTMP_STREAMING_EVENT_ADVANCED_FEATURE_NOT_SUPPORT = 3,
/**
- * 4: Client request too frequently.
+ * 4: Reserved.
*/
RTMP_STREAMING_EVENT_REQUEST_TOO_OFTEN = 4,
};
/**
- * Image properties.
+ * @brief Image properties.
+ *
+ * @details
+ * This class sets the properties of the watermark and background images in the live video.
+ *
*/
typedef struct RtcImage {
/**
- *The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter
- *is 1024 bytes.
+ * The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter
+ * is 1024 bytes.
*/
const char* url;
/**
- * The x coordinate (pixel) of the image on the video frame (taking the upper left corner of the
- * video frame as the origin).
+ * The x-coordinate (px) of the image on the video frame (taking the upper left corner of the video
+ * frame as the origin).
*/
int x;
/**
- * The y coordinate (pixel) of the image on the video frame (taking the upper left corner of the
- * video frame as the origin).
+ * The y-coordinate (px) of the image on the video frame (taking the upper left corner of the video
+ * frame as the origin).
*/
int y;
/**
- * The width (pixel) of the image on the video frame.
+ * The width (px) of the image on the video frame.
*/
int width;
/**
- * The height (pixel) of the image on the video frame.
+ * The height (px) of the image on the video frame.
*/
int height;
/**
- * The layer index of the watermark or background image. When you use the watermark array to add
- * a watermark or multiple watermarks, you must pass a value to `zOrder` in the range [1,255];
- * otherwise, the SDK reports an error. In other cases, zOrder can optionally be passed in the
+ * The layer index of the watermark or background image. When you use the watermark array to add a
+ * watermark or multiple watermarks, you must pass a value to `zOrder` in the range [1,255];
+ * otherwise, the SDK reports an error. In other cases, `zOrder` can optionally be passed in the
* range [0,255], with 0 being the default value. 0 means the bottom layer and 255 means the top
* layer.
*/
int zOrder;
- /** The transparency level of the image. The value ranges between 0.0 and 1.0:
- *
+ /**
+ * The transparency of the watermark or background image. The range of the value is [0.0,1.0]:
* - 0.0: Completely transparent.
* - 1.0: (Default) Opaque.
*/
@@ -4185,10 +4396,12 @@ typedef struct RtcImage {
RtcImage() : url(OPTIONAL_NULLPTR), x(0), y(0), width(0), height(0), zOrder(0), alpha(1.0) {}
} RtcImage;
/**
- * The configuration for advanced features of the RTMP or RTMPS streaming with transcoding.
+ * @brief The configuration for advanced features of the RTMP or RTMPS streaming with transcoding.
*
+ * @details
* If you want to enable the advanced features of streaming with transcoding, contact
- * support@agora.io.
+ * `support@agora.io`.
+ *
*/
struct LiveStreamAdvancedFeature {
LiveStreamAdvancedFeature() : featureName(OPTIONAL_NULLPTR), opened(false) {}
@@ -4207,63 +4420,66 @@ struct LiveStreamAdvancedFeature {
/**
* Whether to enable the advanced features of streaming with transcoding:
- * - `true`: Enable the advanced feature.
- * - `false`: (Default) Disable the advanced feature.
+ * - `true`: Enable the advanced features.
+ * - `false`: (Default) Do not enable the advanced features.
*/
bool opened;
};
/**
- * Connection state types.
+ * @brief Connection states.
*/
enum CONNECTION_STATE_TYPE {
/**
* 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of
* the following phases:
- * - The initial state before calling the `joinChannel` method.
- * - The app calls the `leaveChannel` method.
+ * - The initial state before calling the `joinChannel(const char* token, const char* channelId,
+ * uid_t uid, const ChannelMediaOptions& options)` method.
+ * - The app calls the `leaveChannel()` method.
*/
CONNECTION_STATE_DISCONNECTED = 1,
/**
* 2: The SDK is connecting to the Agora edge server. This state indicates that the SDK is
- * establishing a connection with the specified channel after the app calls `joinChannel`.
- * - If the SDK successfully joins the channel, it triggers the `onConnectionStateChanged`
- * callback and the connection state switches to `CONNECTION_STATE_CONNECTED`.
+ * establishing a connection with the specified channel after the app calls `joinChannel(const char*
+ * token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)`.
+ * - If the SDK successfully joins the channel, it triggers the `onConnectionStateChanged` callback
+ * and the connection state switches to CONNECTION_STATE_CONNECTED.
* - After the connection is established, the SDK also initializes the media and triggers
* `onJoinChannelSuccess` when everything is ready.
*/
CONNECTION_STATE_CONNECTING = 2,
/**
- * 3: The SDK is connected to the Agora edge server. This state also indicates that the user
- * has joined a channel and can now publish or subscribe to a media stream in the channel.
- * If the connection to the Agora edge server is lost because, for example, the network is down
- * or switched, the SDK automatically tries to reconnect and triggers `onConnectionStateChanged`
- * that indicates the connection state switches to `CONNECTION_STATE_RECONNECTING`.
+ * 3: The SDK is connected to the Agora edge server. This state also indicates that the user has
+ * joined a channel and can now publish or subscribe to a media stream in the channel. If the
+ * connection to the channel is lost because, for example, if the network is down or switched, the
+ * SDK automatically tries to reconnect and triggers `onConnectionStateChanged` callback, notifying
+ * that the current network state becomes CONNECTION_STATE_RECONNECTING.
*/
CONNECTION_STATE_CONNECTED = 3,
/**
- * 4: The SDK keeps reconnecting to the Agora edge server. The SDK keeps rejoining the channel
- * after being disconnected from a joined channel because of network issues.
- * - If the SDK cannot rejoin the channel within 10 seconds, it triggers `onConnectionLost`,
- * stays in the `CONNECTION_STATE_RECONNECTING` state, and keeps rejoining the channel.
- * - If the SDK fails to rejoin the channel 20 minutes after being disconnected from the Agora
- * edge server, the SDK triggers the `onConnectionStateChanged` callback, switches to the
- * `CONNECTION_STATE_FAILED` state, and stops rejoining the channel.
+ * 4: The SDK keeps reconnecting to the Agora edge server. The SDK keeps rejoining the channel after
+ * being disconnected from a joined channel because of network issues.
+ * - If the SDK cannot rejoin the channel within 10 seconds, it triggers `onConnectionLost`, stays
+ * in the CONNECTION_STATE_RECONNECTING state, and keeps rejoining the channel.
+ * - If the SDK fails to rejoin the channel 20 minutes after being disconnected from the Agora edge
+ * server, the SDK triggers the `onConnectionStateChanged` callback, switches to the
+ * CONNECTION_STATE_FAILED state, and stops rejoining the channel.
*/
CONNECTION_STATE_RECONNECTING = 4,
/**
* 5: The SDK fails to connect to the Agora edge server or join the channel. This state indicates
- * that the SDK stops trying to rejoin the channel. You must call `leaveChannel` to leave the
+ * that the SDK stops trying to rejoin the channel. You must call `leaveChannel()` to leave the
* channel.
- * - You can call `joinChannel` to rejoin the channel.
- * - If the SDK is banned from joining the channel by the Agora edge server through the RESTful
- * API, the SDK triggers the `onConnectionStateChanged` callback.
+ * - You can call `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to rejoin the channel.
+ * - If the SDK is banned from joining the channel by the Agora edge server through the RESTful API,
+ * the SDK triggers the `onConnectionStateChanged` callback.
*/
CONNECTION_STATE_FAILED = 5,
};
/**
- * Transcoding configurations of each host.
+ * @brief Transcoding configurations of each host.
*/
struct TranscodingUser {
/**
@@ -4294,12 +4510,13 @@ struct TranscodingUser {
* The layer index number of the host's video. The value range is [0, 100].
* - 0: (Default) The host's video is the bottom layer.
* - 100: The host's video is the top layer.
- *
- * If the value is beyond this range, the SDK reports the error code `ERR_INVALID_ARGUMENT`.
+ * @note
+ * - If the value is less than 0 or greater than 100, `ERR_INVALID_ARGUMENT` error is returned.
+ * - Setting zOrder to 0 is supported.
*/
int zOrder;
/**
- * The transparency of the host's video. The value range is [0.0, 1.0].
+ * The transparency of the host's video. The value range is [0.0,1.0].
* - 0.0: Completely transparent.
* - 1.0: (Default) Opaque.
*/
@@ -4307,8 +4524,8 @@ struct TranscodingUser {
/**
* The audio channel used by the host's audio in the output audio. The default value is 0, and the
* value range is [0, 5].
- * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on
- * the upstream of the host.
+ * - `0`: (Recommended) The default setting, which supports dual channels at most and depends on the
+ * upstream of the host.
* - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio
* channels, the Agora server mixes them into mono first.
* - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio
@@ -4319,9 +4536,8 @@ struct TranscodingUser {
* channels, the Agora server mixes them into mono first.
* - `5`: The host's audio uses the BR audio channel. If the host's upstream uses multiple audio
* channels, the Agora server mixes them into mono first.
- * - `0xFF` or a value greater than 5: The host's audio is muted, and the Agora server removes the
+ * - `0xFF` or a value greater than `5`: The host's audio is muted, and the Agora server removes the
* host's audio.
- *
* @note If the value is not `0`, a special player is required.
*/
int audioChannel;
@@ -4331,108 +4547,125 @@ struct TranscodingUser {
};
/**
- * Transcoding configurations for Media Push.
+ * @brief Transcoding configurations for Media Push.
*/
struct LiveTranscoding {
- /** The width of the video in pixels. The default value is 360.
- * - When pushing video streams to the CDN, the value range of `width` is [64,1920].
- * If the value is less than 64, Agora server automatically adjusts it to 64; if the
- * value is greater than 1920, Agora server automatically adjusts it to 1920.
+ /**
+ * The width of the video in pixels. The default value is 360.
+ * - When pushing video streams to the CDN, the value range of `width` is [64,1920]. If the value is
+ * less than 64, Agora server automatically adjusts it to 64; if the value is greater than 1920,
+ * Agora server automatically adjusts it to 1920.
* - When pushing audio streams to the CDN, set `width` and `height` as 0.
*/
int width;
- /** The height of the video in pixels. The default value is 640.
- * - When pushing video streams to the CDN, the value range of `height` is [64,1080].
- * If the value is less than 64, Agora server automatically adjusts it to 64; if the
- * value is greater than 1080, Agora server automatically adjusts it to 1080.
+ /**
+ * The height of the video in pixels. The default value is 640.
+ * - When pushing video streams to the CDN, the value range of `height` is [64,1080]. If the value
+ * is less than 64, Agora server automatically adjusts it to 64; if the value is greater than 1080,
+ * Agora server automatically adjusts it to 1080.
* - When pushing audio streams to the CDN, set `width` and `height` as 0.
*/
int height;
- /** Bitrate of the CDN live output video stream. The default value is 400 Kbps.
-
- Set this parameter according to the Video Bitrate Table. If you set a bitrate beyond the proper
- range, the SDK automatically adapts it to a value within the range.
- */
+ /**
+ * The encoding bitrate (Kbps) of the video. This parameter does not need to be set; keeping the
+ * default value `STANDARD_BITRATE` is sufficient. The SDK automatically matches the most suitable
+ * bitrate based on the video resolution and frame rate you have set. For the correspondence between
+ * video resolution and frame rate, see `Video profile`.
+ */
int videoBitrate;
- /** Frame rate of the output video stream set for the CDN live streaming. The default value is 15
- fps, and the value range is (0,30].
-
- @note The Agora server adjusts any value over 30 to 30.
- */
+ /**
+ * Frame rate (fps) of the output video stream set for Media Push. The default value is 15. The
+ * value range is (0,30].
+ * @note The Agora server adjusts any value over 30 to 30.
+ */
int videoFramerate;
- /** **DEPRECATED** Latency mode:
-
- - true: Low latency with unassured quality.
- - false: (Default) High latency with assured quality.
+ /**
+ * Deprecated
+ * This member is deprecated.
+ * Latency mode:
+ * - `true`: Low latency with unassured quality.
+ * - `false`: (Default) High latency with assured quality.
*/
bool lowLatency;
- /** Video GOP in frames. The default value is 30 fps.
+ /**
+ * GOP (Group of Pictures) in fps of the video frames for Media Push. The default value is 30.
*/
int videoGop;
- /** Self-defined video codec profile: #VIDEO_CODEC_PROFILE_TYPE.
-
- @note If you set this parameter to other values, Agora adjusts it to the default value of 100.
- */
+ /**
+ * Video codec profile type for Media Push. Set it as 66, 77, or 100 (default). See
+ * `VIDEO_CODEC_PROFILE_TYPE` for details.
+ * @note If you set this parameter to any other value, Agora adjusts it to the default value.
+ */
VIDEO_CODEC_PROFILE_TYPE videoCodecProfile;
- /** The background color in RGB hex value. Value only. Do not include a preceeding #. For example,
+ /**
+ * The background color in RGB hex value. Value only. Do not include a preceding #. For example,
* 0xFFB6C1 (light pink). The default value is 0x000000 (black).
*/
unsigned int backgroundColor;
- /** Video codec profile types for Media Push. See VIDEO_CODEC_TYPE_FOR_STREAM. */
+ /**
+ * Video codec profile types for Media Push. See `VIDEO_CODEC_TYPE_FOR_STREAM`.
+ */
VIDEO_CODEC_TYPE_FOR_STREAM videoCodecType;
- /** The number of users in the live interactive streaming.
- * The value range is [0, 17].
+ /**
+ * The number of users in the Media Push. The value range is [0,17].
*/
unsigned int userCount;
- /** Manages the user layout configuration in the Media Push. Agora supports a maximum of 17
+ /**
+ * Manages the user layout configuration in the Media Push. Agora supports a maximum of 17
* transcoding users in a Media Push channel. See `TranscodingUser`.
*/
TranscodingUser* transcodingUsers;
- /** Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream
- to the CDN live client. Maximum length: 4096 Bytes.
-
- For more information on SEI frame, see [SEI-related questions](https://docs.agora.io/en/faq/sei).
+ /**
+ * Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream to
+ * the CDN live client. Maximum length: 4096 bytes. For more information on SEI, see SEI-related
+ * questions.
*/
const char* transcodingExtraInfo;
- /** **DEPRECATED** The metadata sent to the CDN live client.
+ /**
+ * Deprecated
+ * Obsolete and not recommended for use.
+ * The metadata sent to the CDN client.
*/
const char* metadata;
- /** The watermark on the live video. The image format needs to be PNG. See `RtcImage`.
-
- You can add one watermark, or add multiple watermarks using an array. This parameter is used with
- `watermarkCount`.
- */
+ /**
+ * The watermark on the live video. The image format needs to be PNG. See `RtcImage`.
+ * You can add one watermark, or add multiple watermarks using an array. This parameter is used with
+ * `watermarkCount`.
+ */
RtcImage* watermark;
/**
- * The number of watermarks on the live video. The total number of watermarks and background
- * images can range from 0 to 10. This parameter is used with `watermark`.
+ * The number of watermarks on the live video. The total number of watermarks and background images
+ * can range from 0 to 10. This parameter is used with `watermark`.
*/
unsigned int watermarkCount;
- /** The number of background images on the live video. The image format needs to be PNG. See
+ /**
+ * The number of background images on the live video. The image format needs to be PNG. See
* `RtcImage`.
- *
- * You can add a background image or use an array to add multiple background images. This
- * parameter is used with `backgroundImageCount`.
+ * You can add a background image or use an array to add multiple background images. This parameter
+ * is used with `backgroundImageCount`.
*/
RtcImage* backgroundImage;
/**
- * The number of background images on the live video. The total number of watermarks and
- * background images can range from 0 to 10. This parameter is used with `backgroundImage`.
+ * The number of background images on the live video. The total number of watermarks and background
+ * images can range from 0 to 10. This parameter is used with `backgroundImage`.
*/
unsigned int backgroundImageCount;
- /** The audio sampling rate (Hz) of the output media stream. See #AUDIO_SAMPLE_RATE_TYPE.
+ /**
+ * The audio sampling rate (Hz) of the output media stream. See `AUDIO_SAMPLE_RATE_TYPE`.
*/
AUDIO_SAMPLE_RATE_TYPE audioSampleRate;
- /** Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the
+ /**
+ * Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the
* highest value is 128.
*/
int audioBitrate;
- /** The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo)
+ /**
+ * The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo)
* audio channels. Special players are required if you choose 3, 4, or 5.
* - 1: (Default) Mono.
* - 2: Stereo.
@@ -4441,15 +4674,18 @@ struct LiveTranscoding {
* - 5: Five audio channels.
*/
int audioChannels;
- /** Audio codec profile type for Media Push. See #AUDIO_CODEC_PROFILE_TYPE.
+ /**
+ * Audio codec profile type for Media Push. See `AUDIO_CODEC_PROFILE_TYPE`.
*/
AUDIO_CODEC_PROFILE_TYPE audioCodecProfile;
- /** Advanced features of the RTMP or RTMPS streaming with transcoding. See
- * LiveStreamAdvancedFeature.
+ /**
+ * Advanced features of the Media Push with transcoding. See `LiveStreamAdvancedFeature`.
*/
LiveStreamAdvancedFeature* advancedFeatures;
- /** The number of enabled advanced features. The default value is 0. */
+ /**
+ * The number of enabled advanced features. The default value is 0.
+ */
unsigned int advancedFeatureCount;
LiveTranscoding()
@@ -4479,65 +4715,66 @@ struct LiveTranscoding {
};
/**
- * The video streams for the video mixing on the local client.
+ * @brief The video streams for local video mixing.
*/
struct TranscodingVideoStream {
/**
- * The source type of video for the video mixing on the local client. See #VIDEO_SOURCE_TYPE.
+ * The video source type for local video mixing. See `VIDEO_SOURCE_TYPE`.
*/
VIDEO_SOURCE_TYPE sourceType;
/**
- * The ID of the remote user.
- * @note Use this parameter only when the source type of the video for the video mixing on the
- * local client is `VIDEO_SOURCE_REMOTE`.
+ * The user ID of the remote user.
+ * @note Use this parameter only when the source type is `VIDEO_SOURCE_REMOTE` for local video
+ * mixing.
*/
uid_t remoteUserUid;
/**
- * The URL of the image.
- * @note Use this parameter only when the source type of the video for the video mixing on the
- * local client is `RTC_IMAGE`.
+ * The file path of local images.
+ * Examples:
+ * - Windows: `C:\\Users\\{username}\\Pictures\\image.png`
+ * @note Use this parameter only when the source type is the image for local video mixing.
*/
const char* imageUrl;
/**
- * MediaPlayer id if sourceType is MEDIA_PLAYER_SOURCE.
+ * (Optional) Media player ID. Use the parameter only when you set `sourceType` to
+ * `VIDEO_SOURCE_MEDIA_PLAYER`.
*/
int mediaPlayerId;
/**
- * The horizontal displacement of the top-left corner of the video for the video mixing on the
- * client relative to the top-left corner (origin) of the canvas for this video mixing.
+ * The relative lateral displacement of the top left corner of the video for local video mixing to
+ * the origin (the top left corner of the canvas).
*/
int x;
/**
- * The vertical displacement of the top-left corner of the video for the video mixing on the
- * client relative to the top-left corner (origin) of the canvas for this video mixing.
+ * The relative longitudinal displacement of the top left corner of the captured video to the origin
+ * (the top left corner of the canvas).
*/
int y;
/**
- * The width (px) of the video for the video mixing on the local client.
+ * The width (px) of the video for local video mixing on the canvas.
*/
int width;
/**
- * The height (px) of the video for the video mixing on the local client.
+ * The height (px) of the video for local video mixing on the canvas.
*/
int height;
/**
- * The number of the layer to which the video for the video mixing on the local client belongs.
- * The value range is [0,100].
+ * The number of the layer to which the video for the local video mixing belongs. The value range is
+ * [0, 100].
* - 0: (Default) The layer is at the bottom.
* - 100: The layer is at the top.
*/
int zOrder;
/**
- * The transparency of the video for the video mixing on the local client. The value range is
- * [0.0,1.0]. 0.0 means the transparency is completely transparent. 1.0 means the transparency is
- * opaque.
+ * The transparency of the video for local video mixing. The value range is [0.0, 1.0]. 0.0
+ * indicates that the video is completely transparent, and 1.0 indicates that it is opaque.
*/
double alpha;
/**
- * Whether to mirror the video for the video mixing on the local client.
- * - true: Mirroring.
- * - false: (Default) Do not mirror.
- * @note The paramter only works for videos with the source type `CAMERA`.
+ * Whether to mirror the video for the local video mixing.
+ * - `true`: Mirror the video for the local video mixing.
+ * - `false`: (Default) Do not mirror the video for the local video mixing.
+ * @note This parameter only takes effect on video source types that are cameras.
*/
bool mirror;
@@ -4555,7 +4792,7 @@ struct TranscodingVideoStream {
};
/**
- * The configuration of the video mixing on the local client.
+ * @brief The configuration of the video mixing on the local client.
*/
struct LocalTranscoderConfiguration {
/**
@@ -4563,12 +4800,12 @@ struct LocalTranscoderConfiguration {
*/
unsigned int streamCount;
/**
- * The video streams for the video mixing on the local client. See TranscodingVideoStream.
+ * The video streams for local video mixing. See `TranscodingVideoStream`.
*/
TranscodingVideoStream* videoInputStreams;
/**
- * The encoding configuration of the mixed video stream after the video mixing on the local
- * client. See VideoEncoderConfiguration.
+ * The encoding configuration of the mixed video stream after the local video mixing. See
+ * `VideoEncoderConfiguration`.
*/
VideoEncoderConfiguration videoOutputConfiguration;
/**
@@ -4588,55 +4825,71 @@ struct LocalTranscoderConfiguration {
syncWithPrimaryCamera(true) {}
};
+/**
+ * @brief The error code of the local video mixing failure.
+ */
enum VIDEO_TRANSCODER_ERROR {
/**
- * The video track of the video source is not started.
+ * 1: The selected video source has not started video capture. You need to create a video track for
+ * it and start video capture.
*/
VT_ERR_VIDEO_SOURCE_NOT_READY = 1,
/**
- * The video source type is not supported.
+ * 2: The video source type is invalid. You need to re-specify the supported video source type.
*/
VT_ERR_INVALID_VIDEO_SOURCE_TYPE = 2,
/**
- * The image url is not correctly of image source.
+ * 3: The image path is invalid. You need to re-specify the correct image path.
*/
VT_ERR_INVALID_IMAGE_PATH = 3,
/**
- * The image format not the type png/jpeg/gif of image source.
+ * 4: The image format is invalid. Make sure the image format is one of PNG, JPEG, or GIF.
*/
VT_ERR_UNSUPPORT_IMAGE_FORMAT = 4,
/**
- * The layout is invalid such as width is zero.
+ * 5: The video encoding resolution after video mixing is invalid.
*/
VT_ERR_INVALID_LAYOUT = 5,
/**
- * Internal error.
+ * 20: Unknown internal error.
*/
VT_ERR_INTERNAL = 20
};
/**
- * The audio streams for the video mixing on the local client.
+ * @brief The source of the audio streams that are mixed locally.
*/
struct MixedAudioStream {
/**
- * The source type of audio for the audio mixing on the local client. See #AUDIO_SOURCE_TYPE.
+ * The type of the audio source. See `AUDIO_SOURCE_TYPE`.
*/
AUDIO_SOURCE_TYPE sourceType;
/**
- * The ID of the remote user.
- * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`.
+ * The user ID of the remote user.
+ * @note Set this parameter if the source type of the locally mixed audio streams is
+ * AUDIO_SOURCE_REMOTE_USER.
*/
uid_t remoteUserUid;
/**
- * The channel ID of the remote user.
- * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`.
+ * The channel name. This parameter signifies the channel in which users engage in real-time audio
+ * and video interaction. Under the premise of the same App ID, users who fill in the same channel
+ * ID enter the same channel for audio and video interaction. The string length must be less than 64
+ * bytes. Supported characters (89 characters in total):
+ * - All lowercase English letters: a to z.
+ * - All uppercase English letters: A to Z.
+ * - All numeric characters: 0 to 9.
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
+ * @note Set this parameter if the source type of the locally mixed audio streams is
+ * AUDIO_SOURCE_REMOTE_CHANNEL or AUDIO_SOURCE_REMOTE_USER.
*/
const char* channelId;
/**
- * The track ID of the local track.
- * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`.
+ * The audio track ID. Set this parameter to the custom audio track ID returned in
+ * `createCustomAudioTrack`.
+ * @note Set this parameter if the source type of the locally mixed audio streams is
+ * AUDIO_SOURCE_CUSTOM.
*/
track_id_t trackId;
@@ -4664,22 +4917,24 @@ struct MixedAudioStream {
};
/**
- * The configuration of the audio mixing on the local client.
+ * @brief The configurations for mixing the local audio.
*/
struct LocalAudioMixerConfiguration {
/**
- * The number of the audio streams for the audio mixing on the local client.
+ * The number of the audio streams that are mixed locally.
*/
unsigned int streamCount;
/**
- * The source of the streams to mixed;
- */
+ * The source of the audio streams that are mixed locally. See `MixedAudioStream`.
+ */
MixedAudioStream* audioInputStreams;
/**
- * Whether to use the timestamp follow the local mic's audio frame.
- * - true: (Default) Use the timestamp of the captured audio frame as the timestamp of the mixed audio frame.
- * - false: Do not use the timestamp of the captured audio frame as the timestamp of the mixed audio frame. Instead, use the timestamp when the mixed audio frame is constructed.
+ * Whether the mixed audio stream uses the timestamp of the audio frames captured by the local
+ * microphone.
+ * - `true`: (Default) Yes. Set to this value if you want all locally captured audio streams
+ * synchronized.
+ * - `false`: No. The SDK uses the timestamp of the audio frames at the time when they are mixed.
*/
bool syncWithLocalMic;
@@ -4687,37 +4942,36 @@ struct LocalAudioMixerConfiguration {
};
/**
- * Configurations of the last-mile network test.
+ * @brief Configurations of the last-mile network test.
*/
struct LastmileProbeConfig {
/**
- * Determines whether to test the uplink network. Some users, for example,
- * the audience in a live broadcast channel, do not need such a test:
- * - true: Test.
- * - false: Do not test.
+ * Sets whether to test the uplink network. Some users, for example, the audience members in a
+ * LIVE_BROADCASTING channel, do not need such a test.
+ * - `true`: Test the uplink network.
+ * - `false`: Do not test the uplink network.
*/
bool probeUplink;
/**
- * Determines whether to test the downlink network:
- * - true: Test.
- * - false: Do not test.
+ * Sets whether to test the downlink network:
+ * - `true`: Test the downlink network.
+ * - `false`: Do not test the downlink network.
*/
bool probeDownlink;
/**
- * The expected maximum sending bitrate (bps) of the local user. The value range is [100000,
- * 5000000]. We recommend setting this parameter according to the bitrate value set by
- * `setVideoEncoderConfiguration`.
+ * The expected maximum uplink bitrate (bps) of the local user. The value range is [100000,
+ * 5000000]. Agora recommends referring to `setVideoEncoderConfiguration` to set the value.
*/
unsigned int expectedUplinkBitrate;
/**
- * The expected maximum receiving bitrate (bps) of the local user. The value range is
+ * The expected maximum downlink bitrate (bps) of the local user. The value range is
* [100000,5000000].
*/
unsigned int expectedDownlinkBitrate;
};
/**
- * The status of the last-mile network tests.
+ * @brief The status of the last-mile probe test.
*/
enum LASTMILE_PROBE_RESULT_STATE {
/**
@@ -4726,18 +4980,18 @@ enum LASTMILE_PROBE_RESULT_STATE {
LASTMILE_PROBE_RESULT_COMPLETE = 1,
/**
* 2: The last-mile network probe test is incomplete because the bandwidth estimation is not
- * available due to limited test resources.
+ * available due to limited test resources. One possible reason is that testing resources are
+ * temporarily limited.
*/
LASTMILE_PROBE_RESULT_INCOMPLETE_NO_BWE = 2,
/**
- * 3: The last-mile network probe test is not carried out, probably due to poor network
- * conditions.
+ * 3: The last-mile network probe test is not carried out, probably due to poor network conditions.
*/
LASTMILE_PROBE_RESULT_UNAVAILABLE = 3
};
/**
- * Results of the uplink or downlink last-mile network test.
+ * @brief Results of the uplink or downlink last-mile network test.
*/
struct LastmileProbeOneWayResult {
/**
@@ -4757,19 +5011,19 @@ struct LastmileProbeOneWayResult {
};
/**
- * Results of the uplink and downlink last-mile network tests.
+ * @brief Results of the uplink and downlink last-mile network tests.
*/
struct LastmileProbeResult {
/**
- * The status of the last-mile network tests. See #LASTMILE_PROBE_RESULT_STATE.
+ * The status of the last-mile network tests. See `LASTMILE_PROBE_RESULT_STATE`.
*/
LASTMILE_PROBE_RESULT_STATE state;
/**
- * Results of the uplink last-mile network test. For details, see LastmileProbeOneWayResult.
+ * Results of the uplink last-mile network test. See `LastmileProbeOneWayResult`.
*/
LastmileProbeOneWayResult uplinkReport;
/**
- * Results of the downlink last-mile network test. For details, see LastmileProbeOneWayResult.
+ * Results of the downlink last-mile network test. See `LastmileProbeOneWayResult`.
*/
LastmileProbeOneWayResult downlinkReport;
/**
@@ -4781,11 +5035,11 @@ struct LastmileProbeResult {
};
/**
- * Reasons causing the change of the connection state.
+ * @brief Reasons causing the change of the connection state.
*/
enum CONNECTION_CHANGED_REASON_TYPE {
/**
- * 0: The SDK is connecting to the server.
+ * 0: The SDK is connecting to the Agora edge server.
*/
CONNECTION_CHANGED_CONNECTING = 0,
/**
@@ -4793,17 +5047,18 @@ enum CONNECTION_CHANGED_REASON_TYPE {
*/
CONNECTION_CHANGED_JOIN_SUCCESS = 1,
/**
- * 2: The connection between the SDK and the server is interrupted.
+ * 2: The connection between the SDK and the Agora edge server is interrupted.
*/
CONNECTION_CHANGED_INTERRUPTED = 2,
/**
- * 3: The connection between the SDK and the server is banned by the server. This error occurs
- * when the user is kicked out of the channel by the server.
+ * 3: The connection between the SDK and the Agora edge server is banned by the Agora edge server.
+ * For example, when a user is kicked out of the channel, this status will be returned.
*/
CONNECTION_CHANGED_BANNED_BY_SERVER = 3,
/**
* 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20
- * minutes, this error occurs and the SDK stops reconnecting to the channel.
+ * minutes, this code will be returned and the SDK stops reconnecting to the channel. You need to
+ * prompt the user to try to switch to another network and rejoin the channel.
*/
CONNECTION_CHANGED_JOIN_FAILED = 4,
/**
@@ -4811,37 +5066,51 @@ enum CONNECTION_CHANGED_REASON_TYPE {
*/
CONNECTION_CHANGED_LEAVE_CHANNEL = 5,
/**
- * 6: The connection fails because the App ID is not valid.
+ * 6: The App ID is invalid. You need to rejoin the channel with a valid APP ID and make sure the
+ * App ID you are using is consistent with the one generated in the Agora Console.
*/
CONNECTION_CHANGED_INVALID_APP_ID = 6,
/**
- * 7: The connection fails because the channel name is not valid. Please rejoin the channel with a
- * valid channel name.
+ * 7: Invalid channel name. Rejoin the channel with a valid channel name. A valid channel name is a
+ * string of up to 64 bytes in length. Supported characters (89 characters in total):
+ * - All lowercase English letters: a to z.
+ * - All uppercase English letters: A to Z.
+ * - All numeric characters: 0 to 9.
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
*/
CONNECTION_CHANGED_INVALID_CHANNEL_NAME = 7,
/**
- * 8: The connection fails because the token is not valid. Typical reasons include:
- * - The App Certificate for the project is enabled in Agora Console, but you do not use a token
- * when joining the channel. If you enable the App Certificate, you must use a token to join the
- * channel.
- * - The `uid` specified when calling `joinChannel` to join the channel is inconsistent with the
- * `uid` passed in when generating the token.
+ * 8: Invalid token. Possible reasons are as follows:
+ * - The App Certificate for the project is enabled in Agora Console, but you do not pass in a token
+ * when joining a channel.
+ * - The uid specified when calling `joinChannel(const char* token, const char* channelId, uid_t
+ * uid, const ChannelMediaOptions& options)` to join the channel is inconsistent with the
+ * uid passed in when generating the token.
+ * - The generated token and the token used to join the channel are not consistent.
+ * Ensure the following:
+ * - When your project enables App Certificate, you need to pass in a token to join a channel.
+ * - The user ID specified when generating the token is consistent with the user ID used when
+ * joining the channel.
+ * - The generated token is the same as the token passed in to join the channel.
*/
CONNECTION_CHANGED_INVALID_TOKEN = 8,
/**
- * 9: The connection fails because the token has expired.
+ * 9: The token currently being used has expired. You need to generate a new token on your server
+ * and rejoin the channel with the new token.
*/
CONNECTION_CHANGED_TOKEN_EXPIRED = 9,
/**
- * 10: The connection is rejected by the server. Typical reasons include:
- * - The user is already in the channel and still calls a method, for example, `joinChannel`, to
- * join the channel. Stop calling this method to clear this error.
- * - The user tries to join the channel when conducting a pre-call test. The user needs to call
- * the channel after the call test ends.
+ * 10: The connection is rejected by server. Possible reasons are as follows:
+ * - The user is already in the channel and still calls a method, for example, `joinChannel(const
+ * char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)`,
+ * to join the channel. Stop calling this method to clear this error.
+ * - The user tries to join a channel while a test call is in progress. The user needs to join the
+ * channel after the call test ends.
*/
CONNECTION_CHANGED_REJECTED_BY_SERVER = 10,
/**
- * 11: The connection changes to reconnecting because the SDK has set a proxy server.
+ * 11: The connection state changed to reconnecting because the SDK has set a proxy server.
*/
CONNECTION_CHANGED_SETTING_PROXY_SERVER = 11,
/**
@@ -4849,17 +5118,17 @@ enum CONNECTION_CHANGED_REASON_TYPE {
*/
CONNECTION_CHANGED_RENEW_TOKEN = 12,
/**
- * 13: The IP address of the client has changed, possibly because the network type, IP address, or
- * port has been changed.
+ * 13: Client IP address changed. If you receive this code multiple times, you need to prompt the
+ * user to switch networks and try joining the channel again.
*/
CONNECTION_CHANGED_CLIENT_IP_ADDRESS_CHANGED = 13,
/**
* 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The
- * connection state changes to CONNECTION_STATE_RECONNECTING.
+ * SDK tries to reconnect to the server automatically.
*/
CONNECTION_CHANGED_KEEP_ALIVE_TIMEOUT = 14,
/**
- * 15: The SDK has rejoined the channel successfully.
+ * 15: The user has rejoined the channel successfully.
*/
CONNECTION_CHANGED_REJOIN_SUCCESS = 15,
/**
@@ -4867,19 +5136,19 @@ enum CONNECTION_CHANGED_REASON_TYPE {
*/
CONNECTION_CHANGED_LOST = 16,
/**
- * 17: The change of connection state is caused by echo test.
+ * 17: The connection state changes due to the echo test.
*/
CONNECTION_CHANGED_ECHO_TEST = 17,
/**
- * 18: The local IP Address is changed by user.
+ * 18: The local IP address was changed by the user.
*/
CONNECTION_CHANGED_CLIENT_IP_ADDRESS_CHANGED_BY_USER = 18,
/**
- * 19: The connection is failed due to join the same channel on another device with the same uid.
+ * 19: The user joined the same channel from different devices with the same UID.
*/
CONNECTION_CHANGED_SAME_UID_LOGIN = 19,
/**
- * 20: The connection is failed due to too many broadcasters in the channel.
+ * 20: The number of hosts in the channel has reached the upper limit.
*/
CONNECTION_CHANGED_TOO_MANY_BROADCASTERS = 20,
@@ -4902,31 +5171,37 @@ enum CONNECTION_CHANGED_REASON_TYPE {
};
/**
- * The reason of changing role's failure.
+ * @brief The reason for a user role switch failure.
*/
enum CLIENT_ROLE_CHANGE_FAILED_REASON {
/**
- * 1: Too many broadcasters in the channel.
+ * 1: The number of hosts in the channel exceeds the limit.
+ * @note This enumerator is reported only when the support for 128 users is enabled. The maximum
+ * number of hosts is based on the actual number of hosts configured when you enable the 128-user
+ * feature.
*/
CLIENT_ROLE_CHANGE_FAILED_TOO_MANY_BROADCASTERS = 1,
/**
- * 2: The operation of changing role is not authorized.
+ * 2: The request is rejected by the Agora server. Agora recommends you prompt the user to try to
+ * switch their user role again.
*/
CLIENT_ROLE_CHANGE_FAILED_NOT_AUTHORIZED = 2,
/**
- * 3: The operation of changing role is timeout.
+ * 3: The request is timed out. Agora recommends you prompt the user to check the network connection
+ * and try to switch their user role again.
* @deprecated This reason is deprecated.
*/
CLIENT_ROLE_CHANGE_FAILED_REQUEST_TIME_OUT __deprecated = 3,
/**
- * 4: The operation of changing role is interrupted since we lost connection with agora service.
+ * 4: The SDK is disconnected from the Agora edge server. You can troubleshoot the failure through
+ * the `reason` reported by `onConnectionStateChanged`.
* @deprecated This reason is deprecated.
*/
CLIENT_ROLE_CHANGE_FAILED_CONNECTION_FAILED __deprecated = 4,
};
/**
- * The network type.
+ * @brief Network type.
*/
enum NETWORK_TYPE {
/**
@@ -4964,91 +5239,95 @@ enum NETWORK_TYPE {
};
/**
- * The mode of setting up video views.
+ * @brief Setting mode of the view.
*/
enum VIDEO_VIEW_SETUP_MODE {
/**
- * 0: replace one view
+ * 0: (Default) Clear all added views and replace with a new view.
*/
VIDEO_VIEW_SETUP_REPLACE = 0,
/**
- * 1: add one view
+ * 1: Adds a view.
*/
VIDEO_VIEW_SETUP_ADD = 1,
/**
- * 2: remove one view
+ * 2: Deletes a view.
+ * @note When you no longer need to use a certain view, it is recommended to delete the view by
+ * setting `setupMode` to VIDEO_VIEW_SETUP_REMOVE, otherwise it may lead to leak of rendering
+ * resources.
*/
VIDEO_VIEW_SETUP_REMOVE = 2,
};
/**
- * Attributes of video canvas object.
+ * @brief Attributes of the video canvas object.
*/
struct VideoCanvas {
/**
- * The user id of local video.
+ * User ID that publishes the video source.
*/
uid_t uid;
/**
- * The uid of video stream composing the video stream from transcoder which will be drawn on this
- * video canvas.
+ * The ID of the user who publishes a specific sub-video stream within the mixed video stream.
*/
uid_t subviewUid;
/**
- * Video display window.
+ * The video display window.
+ * @note In one `VideoCanvas`, you can only choose to set either `view` or `surfaceTexture`. If both
+ * are set, only the settings in `view` take effect.
*/
view_t view;
/**
- * A RGBA value indicates background color of the render view. Defaults to 0x00000000.
+ * The background color of the video canvas in RGBA format. The default value is 0x00000000, which
+ * represents black.
*/
uint32_t backgroundColor;
/**
- * The video render mode. See \ref agora::media::base::RENDER_MODE_TYPE "RENDER_MODE_TYPE".
- * The default value is RENDER_MODE_HIDDEN.
+ * The rendering mode of the video. See `RENDER_MODE_TYPE`.
*/
media::base::RENDER_MODE_TYPE renderMode;
/**
- * The video mirror mode. See \ref VIDEO_MIRROR_MODE_TYPE "VIDEO_MIRROR_MODE_TYPE".
- * The default value is VIDEO_MIRROR_MODE_AUTO.
+ * The mirror mode of the view. See `VIDEO_MIRROR_MODE_TYPE`.
* @note
- * - For the mirror mode of the local video view:
- * If you use a front camera, the SDK enables the mirror mode by default;
- * if you use a rear camera, the SDK disables the mirror mode by default.
+ * - For the mirror mode of the local video view: If you use a front camera, the SDK enables the
+ * mirror mode by default; if you use a rear camera, the SDK disables the mirror mode by default.
* - For the remote user: The mirror mode is disabled by default.
*/
VIDEO_MIRROR_MODE_TYPE mirrorMode;
/**
- * The mode of setting up video view. See \ref VIDEO_VIEW_SETUP_MODE "VIDEO_VIEW_SETUP_MODE"
- * The default value is VIDEO_VIEW_SETUP_REPLACE.
+ * Setting mode of the view. See `VIDEO_VIEW_SETUP_MODE`.
*/
VIDEO_VIEW_SETUP_MODE setupMode;
/**
- * The video source type. See \ref VIDEO_SOURCE_TYPE "VIDEO_SOURCE_TYPE".
- * The default value is VIDEO_SOURCE_CAMERA_PRIMARY.
+ * The type of the video source. See `VIDEO_SOURCE_TYPE`.
*/
VIDEO_SOURCE_TYPE sourceType;
/**
- * The media player id of AgoraMediaPlayer. It should set this parameter when the
- * sourceType is VIDEO_SOURCE_MEDIA_PLAYER to show the video that AgoraMediaPlayer is playing.
- * You can get this value by calling the method \ref getMediaPlayerId().
+ * The ID of the media player. You can get the media player ID by calling `getMediaPlayerId`.
*/
int mediaPlayerId;
/**
- * If you want to display a certain part of a video frame, you can set
- * this value to crop the video frame to show.
- * The default value is empty(that is, if it has zero width or height), which means no cropping.
+ * (Optional) Display area of the video frame, see `Rectangle`. `width` and `height` represent the
+ * video pixel width and height of the area. The default value is null (width or height is 0), which
+ * means that the actual resolution of the video frame is displayed.
*/
Rectangle cropArea;
/**
- * Whether to apply alpha mask to the video frame if exsit:
- * true: Apply alpha mask to video frame.
- * false: (Default) Do not apply alpha mask to video frame.
+ * (Optional) Whether to enable alpha mask rendering:
+ * - `true`: Enable alpha mask rendering.
+ * - `false`: (Default) Disable alpha mask rendering.
+ * Alpha mask rendering can create images with transparent effects and extract portraits from
+ * videos. When used in combination with other methods, you can implement effects such as
+ * portrait-in-picture and watermarking.
+ * @note
+ * - The receiver can render alpha channel information only when the sender enables alpha
+ * transmission.
+ * - To enable alpha transmission, contact technical support.
*/
bool enableAlphaMask;
/**
- * The video frame position in pipeline. See \ref VIDEO_MODULE_POSITION "VIDEO_MODULE_POSITION".
- * The default value is POSITION_POST_CAPTURER.
+ * The observation position of the video frame in the video link. See `VIDEO_MODULE_POSITION`.
*/
media::base::VIDEO_MODULE_POSITION position;
@@ -5110,41 +5389,55 @@ struct VideoCanvas {
position(media::base::POSITION_POST_CAPTURER) {}
};
-/** Image enhancement options.
+/**
+ * @brief Image enhancement options.
*/
struct BeautyOptions {
- /** The contrast level.
+ /**
+ * @brief The contrast level.
*/
enum LIGHTENING_CONTRAST_LEVEL {
- /** Low contrast level. */
+ /**
+ * 0: Low contrast level.
+ */
LIGHTENING_CONTRAST_LOW = 0,
- /** (Default) Normal contrast level. */
+ /**
+ * 1: (Default) Normal contrast level.
+ */
LIGHTENING_CONTRAST_NORMAL = 1,
- /** High contrast level. */
+ /**
+ * 2: High contrast level.
+ */
LIGHTENING_CONTRAST_HIGH = 2,
};
- /** The contrast level, used with the `lighteningLevel` parameter. The larger the value, the
- * greater the contrast between light and dark. See #LIGHTENING_CONTRAST_LEVEL.
+ /**
+ * The contrast level, used with the `lighteningLevel` parameter. The larger the value, the greater
+ * the contrast between light and dark. See `LIGHTENING_CONTRAST_LEVEL`.
*/
LIGHTENING_CONTRAST_LEVEL lighteningContrastLevel;
- /** The brightness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0.
- * The greater the value, the greater the degree of whitening. */
+ /**
+ * The brightening level, in the range [0.0,1.0], where 0.0 means the original brightening. The
+ * default value is 0.0. The higher the value, the greater the degree of brightening.
+ */
float lighteningLevel;
- /** The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value,
- * the greater the degree of skin grinding.
+ /**
+ * The smoothness level, in the range [0.0,1.0], where 0.0 means the original smoothness. The
+ * default value is 0.0. The greater the value, the greater the smoothness level.
*/
float smoothnessLevel;
- /** The redness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The
- * larger the value, the greater the rosy degree.
+ /**
+ * The redness level, in the range [0.0,1.0], where 0.0 means the original redness. The default
+ * value is 0.0. The larger the value, the greater the redness level.
*/
float rednessLevel;
- /** The sharpness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0.
- * The larger the value, the greater the sharpening degree.
+ /**
+ * The sharpness level, in the range [0.0,1.0], where 0.0 means the original sharpness. The default
+ * value is 0.0. The larger the value, the greater the sharpness level.
*/
float sharpnessLevel;
@@ -5164,220 +5457,207 @@ struct BeautyOptions {
sharpnessLevel(0) {}
};
-/**
- * @brief Face shape area options. This structure defines options for facial adjustments on different facial areas.
+/**
+ * @brief Face shape area options.
*
* @since v4.4.0
*/
struct FaceShapeAreaOptions {
/**
- * @brief The specific facial area to be adjusted.
+ * @brief Chooses the specific facial areas that need to be adjusted.
*
* @since v4.4.0
*/
enum FACE_SHAPE_AREA {
- /** (Default) Invalid area. */
+ /**
+ * -1: (Default) Invalid area; facial enhancement effects do not take effect.
+ */
FACE_SHAPE_AREA_NONE = -1,
- /**
- * Head Scale, reduces the size of the head.
- * The value range is [0, 100]. The default value is 50.
- * The larger the value, the stronger the head reduction effect.
+ /**
+ * (100): Head, used to achieve a smaller head effect. The value range is 0 to 100, and the default
+ * value is 50. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_HEADSCALE = 100,
- /**
- * Forehead, adjusts the size of the forehead.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the forehead effect.
+ /**
+ * (101): Forehead, used to adjust the hairline height. The range is [0, 100], with a default value
+ * of 0. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_FOREHEAD = 101,
- /**
- * Face Contour, slims the facial contour.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the facial contour reduction effect.
+ /**
+ * (102): Face contour, used to achieve a slimmer face effect. The range is [0, 100], with a default
+ * value of 0. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_FACECONTOUR = 102,
- /**
- * Face Length, adjusts the length of the face.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the face length effect, negative values indicate the opposite direction.
+ /**
+ * (103): Face length, used to achieve a longer face effect. The range is [-100, 100], with a
+ * default value of 0. The greater the absolute value, the more noticeable the adjustment. Negative
+ * values indicate the opposite direction.
*/
FACE_SHAPE_AREA_FACELENGTH = 103,
- /**
- * Face Width, narrows the width of the face.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the face width reduction effect.
+ /**
+ * (104): Face width, used to achieve a narrower face effect. The range is [0, 100], with a default
+ * value of 0. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_FACEWIDTH = 104,
- /**
- * Cheekbone, adjusts the size of the cheekbone.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the cheekbone effect.
+ /**
+ * (105): Cheekbone, used to adjust cheekbone width. The range is [0, 100], with a default value of
+ * 0. The larger the value, the more noticeable the
+ * adjustment.
*/
FACE_SHAPE_AREA_CHEEKBONE = 105,
- /**
- * Cheek, adjusts the size of the cheek.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the cheek effect.
+ /**
+ * (106): Cheek, used to adjust cheek width. The range is [0, 100], with a default value of 0. The
+ * larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_CHEEK = 106,
- /**
- * Mandible, slims the mandible.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the mandible effect.
+ /**
+ * (107): Adjustment of the mandible. The range is [0, 100], with a default value of 0. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_MANDIBLE = 107,
- /**
- * Chin, adjusts the length of the chin.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the chin effect, negative values indicate the opposite direction.
- */
+ /**
+ * (108): Chin, used to adjust chin length. The range is [-100, 100], with a default value of 0. The
+ * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+ * opposite direction.
+ */
FACE_SHAPE_AREA_CHIN = 108,
- /**
- * Eye Scale, adjusts the size of the eyes.
- * The value range is [0, 100]. The default value is 50.
- * The larger the value, the stronger the eye size effect.
+ /**
+ * (200): Eyes, used to achieve a larger eye effect. The value range is 0 to 100, and the default
+ * value is 50. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_EYESCALE = 200,
- /**
- * Eye Distance, adjusts the distance between the two eyes.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eye distance effect, negative values indicate the opposite direction.
+ /**
+ * (201): Eye distance adjustment. The range is [-100, 100], with a default value of 0. The greater
+ * the absolute value, the more noticeable the adjustment. Negative values indicate the opposite
+ * direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEDISTANCE = 201,
- /**
- * Eye Position, adjusts the upper and lower position of the eyes.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eye position effect, negative values indicate the opposite direction.
+ /**
+ * (202): Eye position adjustment. The range is [-100, 100], with a default value of 0. The greater
+ * the absolute value, the more noticeable the adjustment. Negative values indicate the opposite
+ * direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEPOSITION = 202,
- /**
- * Lower Eyelid, adjusts the downward position of the eyelids.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the lower eyelid effect.
+ /**
+ * (203): Lower eyelid adjustment. The range is [0, 100], with a
+ * default value of 0. The larger the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_LOWEREYELID = 203,
- /**
- * Eye Pupils, adjusts the size of the pupils.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the eye pupils effect.
+ /**
+ * (204): Pupil size adjustment. The range is [0, 100], with a default value of 0. The larger the
+ * value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEPUPILS = 204,
- /**
- * Eye Inner Corner, adjusts the inner corners of the eyes.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eye inner corner effect, negative values indicate the opposite direction.
+ /**
+ * (205): Inner eye corner adjustment. The range is [-100, 100], with a default value of 0. The
+ * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+ * opposite direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEINNERCORNER = 205,
- /**
- * Eye Outer Corner, adjusts the outer corners of the eyes.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eye outer corner effect, negative values indicate the opposite direction.
+ /**
+ * (206): Outer eye corner adjustment. The range is [-100, 100], with a default value of 0. The
+ * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+ * opposite direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEOUTERCORNER = 206,
- /**
- * Nose Length, adjusts the length of the nose.
- * The value range is [-100, 100]. The default value is 0.
+ /**
+ * (300): Nose length, used to achieve a longer nose effect. The range is [-100, 100], with a
+ * default value of 0.
*/
FACE_SHAPE_AREA_NOSELENGTH = 300,
- /**
- * Nose Width, adjusts the width of the nose.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the nose width effect.
+ /**
+ * (301): Nose width, used to achieve a slimmer nose effect. The range is [0, 100], with a default
+ * value of 0. The larger the value, the more noticeable the effect of narrowing the nose.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEWIDTH = 301,
- /**
- * Nose Wing, adjusts the size of the nose wings.
- * The value range is [0, 100]. The default value is 10.
- * The larger the value, the stronger the nose wing effect.
+ /**
+ * (302): Nose wing adjustment. The value range is 0 to 100, and the default value is 10. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEWING = 302,
- /**
- * Nose Root, adjusts the size of the nose root.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the nose root effect.
+ /**
+ * (303): Nose root adjustment. The range is [0, 100], with a default value of 0. The larger the
+ * value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEROOT = 303,
- /**
- * Nose Bridge, adjusts the size of the nose bridge.
- * The value range is [0, 100]. The default value is 50.
- * The larger the value, the stronger the nose bridge effect.
+ /**
+ * (304): Nose bridge adjustment. The value range is 0 to 100, and the default value is 50. The
+ * larger the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEBRIDGE = 304,
- /**
- * Nose Tip, adjusts the size of the nose tip.
- * The value range is [0, 100]. The default value is 50.
- * The larger the value, the stronger the nose tip effect.
+ /**
+ * (305): Nose tip adjustment. The value range is 0 to 100, and the default value is 50. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSETIP = 305,
- /**
- * Nose General, adjusts the overall size of the nose.
- * The value range is [-100, 100]. The default value is 50.
- * The larger the absolute value, the stronger the nose general effect, negative values indicate the opposite direction.
+ /**
+ * (306): Overall nose adjustment. The range is [-100, 100], with a default value of 50. The greater
+ * the absolute value, the more noticeable the adjustment. Negative values indicate the opposite
+ * direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEGENERAL = 306,
- /**
- * Mouth Scale, adjusts the size of the mouth.
- * The value range is [-100, 100]. The default value is 20.
- * The larger the absolute value, the stronger the mouth size effect, negative values indicate the opposite direction.
+ /**
+ * (400): Mouth, used to achieve a larger mouth effect. The range is [-100, 100], with a default
+ * value of 20. The greater the absolute value, the more noticeable the adjustment. Negative values
+ * indicate the opposite direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_MOUTHSCALE = 400,
- /**
- * Mouth Position, adjusts the position of the mouth.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the mouth position effect.
+ /**
+ * (401): Mouth position adjustment. The range is [0, 100], with a default value of 0. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_MOUTHPOSITION = 401,
- /**
- * Mouth Smile, adjusts the degree of the mouth's smile.
- * The value range is [0, 100]. The default value is 30.
- * The larger the value, the stronger the mouth smile effect.
+ /**
+ * (402): Mouth smile adjustment. The range is [0, 100], with a default value of 30. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_MOUTHSMILE = 402,
- /**
- * Mouth Lip, adjusts the size of the lips.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the mouth lip effect.
+ /**
+ * (403): Lip shape adjustment. The range is [0, 100], with a default value of 0. The larger the
+ * value, the more noticeable the adjustment.
+ *
* @since v4.6.0
*/
FACE_SHAPE_AREA_MOUTHLIP = 403,
- /**
- * Eyebrow Position, adjusts the position of the eyebrows.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eyebrow position effect, negative values indicate the opposite direction.
+ /**
+ * (500): Eyebrow position adjustment. The range is [-100, 100], with a default value of 0. The
+ * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+ * opposite direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEBROWPOSITION = 500,
- /**
- * Eyebrow Thickness, adjusts the thickness of the eyebrows.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the value, the stronger the eyebrow thickness effect.
+ /**
+ * (501): Eyebrow thickness adjustment. The range is [-100, 100], with a default value of 0. The
+ * larger the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEBROWTHICKNESS = 501,
};
- /** The specific facial area to be adjusted, See #FACE_SHAPE_AREA.
- */
+ /**
+ * Facial enhancement areas: `FACE_SHAPE_AREA`
+ */
FACE_SHAPE_AREA shapeArea;
- /**
- * The intensity of the pinching effect applied to the specified facial area.
+ /**
+ * The intensity of the enhancement. The definition of enhancement intensity varies according to the
+ * different face areas, such as its orientation, range, and preset value. See `FACE_SHAPE_AREA`.
*/
int shapeIntensity;
@@ -5386,38 +5666,43 @@ struct FaceShapeAreaOptions {
FaceShapeAreaOptions() : shapeArea(FACE_SHAPE_AREA_NONE), shapeIntensity(0) {}
};
-/** @brief Face shape beauty options. This structure defines options for facial adjustments of different facial styles.
+/**
+ * @brief The facial enhancement style options.
*
* @since v4.4.0
*/
struct FaceShapeBeautyOptions {
/**
- * @brief The face shape beauty style options.
+ * @brief The facial enhancement style options.
*
* @since v4.4.0
*/
enum FACE_SHAPE_BEAUTY_STYLE {
/**
- * (Default) Female face shape style.
+ * 0: (Default) Feminine style.
*/
FACE_SHAPE_BEAUTY_STYLE_FEMALE = 0,
/**
- * Male face shape style.
+ * 1: Masculine style.
*/
FACE_SHAPE_BEAUTY_STYLE_MALE = 1,
/**
- * A natural-looking face shape style that applies minimal modification to facial features.
+ * 2: The natural style beauty effect only makes minimal adjustments to facial features.
* @since v4.6.0
*/
FACE_SHAPE_BEAUTY_STYLE_NATURAL = 2,
};
- /** The face shape style, See #FACE_SHAPE_BEAUTY_STYLE.
- */
+ /**
+ * Facial enhancement style options: `FACE_SHAPE_BEAUTY_STYLE`.
+ */
FACE_SHAPE_BEAUTY_STYLE shapeStyle;
- /** The intensity of the pinching effect applied to the specified facial style. The value ranges from 0 (original) to 100. The default value is 0. The greater the value, the stronger the intensity applied to face pinching.
- */
+ /**
+ * The intensity of the facial enhancement style, with a value range of [0,100]. The default
+ * value is 50. The higher the value, the more obvious the
+ * facial enhancement effect.
+ */
int styleIntensity;
FaceShapeBeautyOptions(FACE_SHAPE_BEAUTY_STYLE shapeStyle, int styleIntensity) : shapeStyle(shapeStyle), styleIntensity(styleIntensity) {}
@@ -5425,29 +5710,34 @@ struct FaceShapeBeautyOptions {
FaceShapeBeautyOptions() : shapeStyle(FACE_SHAPE_BEAUTY_STYLE_FEMALE), styleIntensity(50) {}
};
-/** Filter effect options. This structure defines options for filter effect.
+/**
+ * @brief Filter effect options.
*
* @since v4.4.1
*/
struct FilterEffectOptions {
/**
- * The local absolute path of the custom 3D Cube path. Only cube format is supported.
- * The cube file must strictly comply with the Cube LUT Specification; otherwise, the filter effects will not take effect.
- *
- * The following is an example of the Cube file format. The cube file starts with `LUT_3D_SIZE`, which indicates the cube size. In filter effects, the cube size is limited to 32.
-
+ * The absolute path to the local cube map texture file, which can be used to customize the filter
+ * effect. The specified .cube file should strictly follow the Cube LUT Format Specification;
+ * otherwise, the filter options do not take effect. The following is a sample of the .cube file:
+ * ```
* LUT_3D_SIZE 32
* 0.0039215689 0 0.0039215682
* 0.0086021447 0.0037950677 0
- * 0.0728652592 0.0039215689 0
* ...
- *
- * The SDK provides a built-in cube named `built_in_whiten.cube` for whitening. To use this cube, specify the path to `built_in_whiten_filter`
+ * 0.0728652592 0.0039215689 0
+ * ```
+ * @note
+ * - The identifier `LUT_3D_SIZE` on the first line of the cube map file represents the size of the
+ * three-dimensional lookup table. The LUT size for filter effect can only be set to 32.
+ * - The SDK provides a built-in `built_in_whiten_filter.cube` file. You can pass the absolute path
+ * of this file to get the whitening filter effect.
*/
const char * path;
/**
- * The intensity of specified filter effect. The value ranges from 0.0 to 1.0. The default value is 0.5. The greater the value, the stronger the intensity of the filter.
+ * The intensity of the filter effect, with a range value of [0.0,1.0], in which 0.0 represents no
+ * filter effect. The default value is 0.5. The higher the value, the stronger the filter effect.
*/
float strength;
@@ -5456,40 +5746,49 @@ struct FilterEffectOptions {
FilterEffectOptions() : path(OPTIONAL_NULLPTR), strength(0.5) {}
};
+/**
+ * @brief The low-light enhancement options.
+ */
struct LowlightEnhanceOptions {
/**
- * The low-light enhancement mode.
+ * @brief The low-light enhancement mode.
*/
enum LOW_LIGHT_ENHANCE_MODE {
- /** 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light
- enhancement feature according to the ambient light to compensate for the lighting level or
- prevent overexposure, as necessary. */
+ /**
+ * 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light enhancement
+ * feature according to the ambient light to compensate for the lighting level or prevent
+ * overexposure, as necessary.
+ */
LOW_LIGHT_ENHANCE_AUTO = 0,
- /** Manual mode. Users need to enable or disable the low-light enhancement feature manually. */
+ /**
+ * 1: Manual mode. Users need to enable or disable the low-light enhancement feature manually.
+ */
LOW_LIGHT_ENHANCE_MANUAL = 1,
};
/**
- * The low-light enhancement level.
+ * @brief The low-light enhancement level.
*/
enum LOW_LIGHT_ENHANCE_LEVEL {
/**
- * 0: (Default) Promotes video quality during low-light enhancement. It processes the
- * brightness, details, and noise of the video image. The performance consumption is moderate,
- * the processing speed is moderate, and the overall video quality is optimal.
+ * 0: (Default) Promotes video quality during low-light enhancement. It processes the brightness,
+ * details, and noise of the video image. The performance consumption is moderate, the processing
+ * speed is moderate, and the overall video quality is optimal.
*/
LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY = 0,
/**
- * Promotes performance during low-light enhancement. It processes the brightness and details of
+ * 1: Promotes performance during low-light enhancement. It processes the brightness and details of
* the video image. The processing speed is faster.
*/
LOW_LIGHT_ENHANCE_LEVEL_FAST = 1,
};
- /** The low-light enhancement mode. See #LOW_LIGHT_ENHANCE_MODE.
+ /**
+ * The low-light enhancement mode. See `LOW_LIGHT_ENHANCE_MODE`.
*/
LOW_LIGHT_ENHANCE_MODE mode;
- /** The low-light enhancement level. See #LOW_LIGHT_ENHANCE_LEVEL.
+ /**
+ * The low-light enhancement level. See `LOW_LIGHT_ENHANCE_LEVEL`.
*/
LOW_LIGHT_ENHANCE_LEVEL level;
@@ -5500,45 +5799,51 @@ struct LowlightEnhanceOptions {
: mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {}
};
/**
- * The video noise reduction options.
+ * @brief Video noise reduction options.
*
* @since v4.0.0
*/
struct VideoDenoiserOptions {
- /** The video noise reduction mode.
+ /**
+ * @brief Video noise reduction mode.
*/
enum VIDEO_DENOISER_MODE {
- /** 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise
- reduction feature according to the ambient light. */
+ /**
+ * 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise reduction
+ * feature according to the ambient light.
+ */
VIDEO_DENOISER_AUTO = 0,
- /** Manual mode. Users need to enable or disable the video noise reduction feature manually. */
+ /**
+ * 1: Manual mode. Users need to enable or disable the video noise reduction feature manually.
+ */
VIDEO_DENOISER_MANUAL = 1,
};
/**
- * The video noise reduction level.
+ * @brief Video noise reduction level.
*/
enum VIDEO_DENOISER_LEVEL {
/**
- * 0: (Default) Promotes video quality during video noise reduction. `HIGH_QUALITY` balances
- * performance consumption and video noise reduction quality. The performance consumption is
- * moderate, the video noise reduction speed is moderate, and the overall video quality is
- * optimal.
+ * 0: (Default) Promotes video quality during video noise reduction. It balances performance
+ * consumption and video noise reduction quality. The performance consumption is moderate, the video
+ * noise reduction speed is moderate, and the overall video quality is optimal.
*/
VIDEO_DENOISER_LEVEL_HIGH_QUALITY = 0,
/**
- * Promotes reducing performance consumption during video noise reduction. `FAST` prioritizes
- * reducing performance consumption over video noise reduction quality. The performance
- * consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable
- * shadowing effect (shadows trailing behind moving objects) in the processed video, Agora
- * recommends that you use `FAST` when the camera is fixed.
+ * 1: Promotes reducing performance consumption during video noise reduction. It prioritizes
+ * reducing performance consumption over video noise reduction quality. The performance consumption
+ * is lower, and the video noise reduction speed is faster. To avoid a noticeable shadowing effect
+ * (shadows trailing behind moving objects) in the processed video, Agora recommends that you use
+ * this setting when the camera is fixed.
*/
VIDEO_DENOISER_LEVEL_FAST = 1,
};
- /** The video noise reduction mode. See #VIDEO_DENOISER_MODE.
+ /**
+ * Video noise reduction mode. See `VIDEO_DENOISER_MODE`.
*/
VIDEO_DENOISER_MODE mode;
- /** The video noise reduction level. See #VIDEO_DENOISER_LEVEL.
+ /**
+ * Video noise reduction level. See `VIDEO_DENOISER_LEVEL`.
*/
VIDEO_DENOISER_LEVEL level;
@@ -5548,22 +5853,27 @@ struct VideoDenoiserOptions {
VideoDenoiserOptions() : mode(VIDEO_DENOISER_AUTO), level(VIDEO_DENOISER_LEVEL_HIGH_QUALITY) {}
};
-/** The color enhancement options.
+/**
+ * @brief The color enhancement options.
*
* @since v4.0.0
*/
struct ColorEnhanceOptions {
- /** The level of color enhancement. The value range is [0.0,1.0]. `0.0` is the default value,
- * which means no color enhancement is applied to the video. The higher the value, the higher the
- * level of color enhancement.
+ /**
+ * The level of color enhancement. The value range is [0.0, 1.0]. `0.0` is the default value, which
+ * means no color enhancement is applied to the video. The higher the value, the higher the level of
+ * color enhancement. The default value is `0.5`.
*/
float strengthLevel;
- /** The level of skin tone protection. The value range is [0.0,1.0]. `0.0` means no skin tone
- * protection. The higher the value, the higher the level of skin tone protection. The default
- * value is `1.0`. When the level of color enhancement is higher, the portrait skin tone can be
- * significantly distorted, so you need to set the level of skin tone protection; when the level
- * of skin tone protection is higher, the color enhancement effect can be slightly reduced.
+ /**
+ * The level of skin tone protection. The value range is [0.0, 1.0]. `0.0` means no skin tone
+ * protection. The higher the value, the higher the level of skin tone protection. The default value
+ * is `1.0`.
+ * - When the level of color enhancement is higher, the portrait skin tone can be significantly
+ * distorted, so you need to set the level of skin tone protection.
+ * - When the level of skin tone protection is higher, the color enhancement effect can be slightly
+ * reduced.
* Therefore, to get the best color enhancement effect, Agora recommends that you adjust
* `strengthLevel` and `skinProtectLevel` to get the most appropriate values.
*/
@@ -5576,76 +5886,90 @@ struct ColorEnhanceOptions {
};
/**
- * The custom background image.
+ * @brief The custom background.
*/
struct VirtualBackgroundSource {
- /** The type of the custom background source.
+ /**
+ * @brief The type of the custom background.
*/
enum BACKGROUND_SOURCE_TYPE {
/**
- * 0: Enable segementation with the captured video frame without replacing the background.
+ * 0: Process the background as alpha data without replacement, only separating the portrait and the
+ * background. After setting this value, you can call `startLocalVideoTranscoder` to implement the
+ * picture-in-picture effect.
*/
BACKGROUND_NONE = 0,
/**
- * 1: (Default) The background source is a solid color.
+ * 1: (Default) The background image is a solid color.
*/
BACKGROUND_COLOR = 1,
/**
- * The background source is a file in PNG or JPG format.
+ * 2: The background is an image in PNG or JPG format.
*/
BACKGROUND_IMG = 2,
/**
- * The background source is the blurred original video frame.
- * */
+ * 3: The background is a blurred version of the original background.
+ */
BACKGROUND_BLUR = 3,
/**
- * The background source is a file in MP4, AVI, MKV, FLV format.
- * */
+ * 4: The background is a local video in MP4, AVI, MKV, FLV, or other supported formats.
+ */
BACKGROUND_VIDEO = 4,
};
- /** The degree of blurring applied to the background source.
+ /**
+ * @brief The degree of blurring applied to the custom background image.
*/
enum BACKGROUND_BLUR_DEGREE {
- /** 1: The degree of blurring applied to the custom background image is low. The user can almost
- see the background clearly. */
+ /**
+ * 1: The degree of blurring applied to the custom background image is low. The user can almost see
+ * the background clearly.
+ */
BLUR_DEGREE_LOW = 1,
- /** 2: The degree of blurring applied to the custom background image is medium. It is difficult
- for the user to recognize details in the background. */
+ /**
+ * 2: The degree of blurring applied to the custom background image is medium. It is difficult for
+ * the user to recognize details in the background.
+ */
BLUR_DEGREE_MEDIUM = 2,
- /** 3: (Default) The degree of blurring applied to the custom background image is high. The user
- can barely see any distinguishing features in the background. */
+ /**
+ * 3: (Default) The degree of blurring applied to the custom background image is high. The user can
+ * barely see any distinguishing features in the background.
+ */
BLUR_DEGREE_HIGH = 3,
};
- /** The type of the custom background image. See #BACKGROUND_SOURCE_TYPE.
+ /**
+ * The custom background. See `BACKGROUND_SOURCE_TYPE`.
*/
BACKGROUND_SOURCE_TYPE background_source_type;
/**
- * The color of the custom background image. The format is a hexadecimal integer defined by RGB,
- * without the # sign, such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which
- * signifies white. The value range is [0x000000,0xFFFFFF]. If the value is invalid, the SDK
- * replaces the original background image with a white background image.
- *
- * @note This parameter takes effect only when the type of the custom background image is
- * `BACKGROUND_COLOR`.
+ * The color of the custom background image. The format is
+ * a hexadecimal integer defined by RGB, without the # sign, such as 0xFFB6C1 for light pink. The
+ * default value is 0xFFFFFF, which signifies white. The value range is [0x000000, 0xffffff]. If the
+ * value is invalid, the SDK replaces the original background image with a white background image.
+ * @note
+ * This parameter is only applicable to custom backgrounds of the following types:
+ * - BACKGROUND_COLOR: The background image is a solid-colored image of the color passed in by the
+ * parameter.
+ * - BACKGROUND_IMG: If the image in `source` has a transparent background, the transparent
+ * background will be filled with the color passed in by the parameter.
*/
unsigned int color;
/**
- * The local absolute path of the custom background image. PNG and JPG formats are supported. If
- * the path is invalid, the SDK replaces the original background image with a white background
- * image.
- *
+ * The local absolute path of the custom background image. Supports PNG, JPG, MP4, AVI, MKV, and FLV
+ * formats. If the path is invalid, the SDK will use either the original background image or the
+ * solid color image specified by `color`.
* @note This parameter takes effect only when the type of the custom background image is
- * `BACKGROUND_IMG`.
+ * BACKGROUND_IMG or BACKGROUND_VIDEO.
*/
const char* source;
- /** The degree of blurring applied to the custom background image. See BACKGROUND_BLUR_DEGREE.
+ /**
+ * The degree of blurring applied to the custom background image. See `BACKGROUND_BLUR_DEGREE`.
* @note This parameter takes effect only when the type of the custom background image is
- * `BACKGROUND_BLUR`.
+ * BACKGROUND_BLUR.
*/
BACKGROUND_BLUR_DEGREE blur_degree;
@@ -5656,28 +5980,67 @@ struct VirtualBackgroundSource {
blur_degree(BLUR_DEGREE_HIGH) {}
};
+/**
+ * @brief Processing properties for background images.
+ */
struct SegmentationProperty {
+ /**
+ * @brief The type of algorithms to use for background processing.
+ */
enum SEG_MODEL_TYPE {
+ /**
+ * 1: (Default) Use the algorithm suitable for all scenarios.
+ */
SEG_MODEL_AI = 1,
+ /**
+ * 2: Use the algorithm designed specifically for scenarios with a green screen background.
+ */
SEG_MODEL_GREEN = 2
};
+ /**
+ * @brief Screen color type.
+ */
enum SCREEN_COLOR_TYPE {
+ /**
+ * 0: Automatically selects the screen color.
+ */
SCREEN_COLOR_AUTO = 0,
+ /**
+ * 1: Green screen.
+ */
SCREEN_COLOR_GREEN = 1,
+ /**
+ * 2: Blue screen.
+ */
SCREEN_COLOR_BLUE = 2
};
+ /**
+ * The type of algorithms to use for background processing. See `SEG_MODEL_TYPE`.
+ */
SEG_MODEL_TYPE modelType;
+ /**
+ * The accuracy range for recognizing background colors in the image. The value range is [0,1], and
+ * the default value is 0.5. The larger the value, the wider the range of identifiable shades of
+ * pure color. When the value of this parameter is too large, the edge of the portrait and the pure
+ * color in the portrait range are also detected. Agora recommends that you dynamically adjust the
+ * value of this parameter according to the actual effect.
+ * @note This parameter only takes effect when `modelType` is set to `SEG_MODEL_GREEN`.
+ */
float greenCapacity;
+ /**
+ * The screen color. See `SCREEN_COLOR_TYPE`.
+ */
SCREEN_COLOR_TYPE screenColorType;
SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5), screenColorType(SCREEN_COLOR_AUTO) {}
};
-/** The type of custom audio track
+/**
+ * @brief The type of the audio track.
*/
enum AUDIO_TRACK_TYPE {
/**
@@ -5685,33 +6048,39 @@ enum AUDIO_TRACK_TYPE {
*/
AUDIO_TRACK_INVALID = -1,
/**
- * 0: Mixable audio track
- * You can push more than one mixable Audio tracks into one RTC connection(channel id + uid),
- * and SDK will mix these tracks into one audio track automatically.
- * However, compare to direct audio track, mixable track might cause extra 30ms+ delay.
+ * 0: Mixable audio tracks. This type of audio track supports mixing with other audio streams (such
+ * as audio streams captured by microphone) and playing locally or publishing to channels after
+ * mixing. The latency of mixable audio tracks is higher than that of direct audio tracks.
*/
AUDIO_TRACK_MIXABLE = 0,
/**
- * 1: Direct audio track
- * You can only push one direct (non-mixable) audio track into one RTC connection(channel id +
- * uid). Compare to mixable stream, you can have lower lantency using direct audio track.
+ * 1: Direct audio tracks. This type of audio track will replace the audio streams captured by the
+ * microphone and does not support mixing with other audio streams. The latency of direct audio
+ * tracks is lower than that of mixable audio tracks.
+ * @note If `AUDIO_TRACK_DIRECT` is specified for this parameter, you must set
+ * `publishMicrophoneTrack` to `false` in `ChannelMediaOptions` when calling `joinChannel(const
+ * char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)` to
+ * join the channel; otherwise, joining the channel fails and returns the error code -2.
*/
AUDIO_TRACK_DIRECT = 1,
};
-/** The configuration of custom audio track
+/**
+ * @brief The configuration of custom audio tracks.
*/
struct AudioTrackConfig {
/**
- * Enable local playback, enabled by default
- * true: (Default) Enable local playback
- * false: Do not enable local playback
+ * Whether to enable the local audio-playback device:
+ * - `true`: (Default) Enable the local audio-playback device.
+ * - `false`: Do not enable the local audio-playback device.
*/
bool enableLocalPlayback;
/**
- * Whether to enable APM (AEC/ANS/AGC) processing when the trackType is AUDIO_TRACK_DIRECT.
- * false: (Default) Do not enable APM processing.
- * true: Enable APM processing.
+ * Whether to enable audio processing module:
+ * - `true`: Enable the audio processing module to apply the Automatic Echo Cancellation (AEC),
+ * Automatic Noise Suppression (ANS), and Automatic Gain Control (AGC) effects.
+ * - `false`: (Default) Do not enable the audio processing module.
+ * @note This parameter only takes effect on AUDIO_TRACK_DIRECT in custom audio capturing.
*/
bool enableAudioProcessing;
@@ -5736,213 +6105,199 @@ struct AudioTrackConfig {
* | |--------------------|-----------------------------| | |
* | | 0x3: voice changer | 0x1: voice transform | | |
*/
-/** The options for SDK preset voice beautifier effects.
+/**
+ * @brief The options for SDK preset voice beautifier effects.
*/
enum VOICE_BEAUTIFIER_PRESET {
- /** Turn off voice beautifier effects and use the original voice.
+ /**
+ * Turn off voice beautifier effects and use the original voice.
*/
VOICE_BEAUTIFIER_OFF = 0x00000000,
- /** A more magnetic voice.
- *
- * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you
- * may experience vocal distortion.
+ /**
+ * A more magnetic voice.
+ * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may
+ * experience vocal distortion.
*/
CHAT_BEAUTIFIER_MAGNETIC = 0x01010100,
- /** A fresher voice.
- *
+ /**
+ * A fresher voice.
* @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you
* may experience vocal distortion.
*/
CHAT_BEAUTIFIER_FRESH = 0x01010200,
- /** A more vital voice.
- *
+ /**
+ * A more vital voice.
* @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you
* may experience vocal distortion.
*/
CHAT_BEAUTIFIER_VITALITY = 0x01010300,
/**
* Singing beautifier effect.
- * - If you call `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER), you can beautify a male-sounding
+ * - If you call `setVoiceBeautifierPreset` ( SINGING_BEAUTIFIER ), you can beautify a male-sounding
* voice and add a reverberation effect that sounds like singing in a small room. Agora recommends
- * not using `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER) to process a female-sounding voice;
- * otherwise, you may experience vocal distortion.
- * - If you call `setVoiceBeautifierParameters`(SINGING_BEAUTIFIER, param1, param2), you can
- * beautify a male- or female-sounding voice and add a reverberation effect.
+ * using this enumerator to process a male-sounding voice; otherwise, you might experience vocal
+ * distortion.
+ * - If you call `setVoiceBeautifierParameters` ( SINGING_BEAUTIFIER, param1, param2), you can
+ * beautify a male or female-sounding voice and add a reverberation effect.
*/
SINGING_BEAUTIFIER = 0x01020100,
- /** A more vigorous voice.
+ /**
+ * A more vigorous voice.
*/
TIMBRE_TRANSFORMATION_VIGOROUS = 0x01030100,
- /** A deeper voice.
+ /**
+ * A deep voice.
*/
TIMBRE_TRANSFORMATION_DEEP = 0x01030200,
- /** A mellower voice.
+ /**
+ * A mellower voice.
*/
TIMBRE_TRANSFORMATION_MELLOW = 0x01030300,
- /** A falsetto voice.
+ /**
+ * Falsetto.
*/
TIMBRE_TRANSFORMATION_FALSETTO = 0x01030400,
- /** A fuller voice.
+ /**
+ * A fuller voice.
*/
TIMBRE_TRANSFORMATION_FULL = 0x01030500,
- /** A clearer voice.
+ /**
+ * A clearer voice.
*/
TIMBRE_TRANSFORMATION_CLEAR = 0x01030600,
- /** A more resounding voice.
+ /**
+ * A more resounding voice.
*/
TIMBRE_TRANSFORMATION_RESOUNDING = 0x01030700,
- /** A more ringing voice.
+ /**
+ * A more ringing voice.
*/
TIMBRE_TRANSFORMATION_RINGING = 0x01030800,
/**
* A ultra-high quality voice, which makes the audio clearer and restores more details.
- * - To achieve better audio effect quality, Agora recommends that you call `setAudioProfile`
- * and set the `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or
- * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` and `scenario` to
- * `AUDIO_SCENARIO_HIGH_DEFINITION(6)` before calling `setVoiceBeautifierPreset`.
- * - If you have an audio capturing device that can already restore audio details to a high
- * degree, Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may
- * over-restore audio details, and you may not hear the anticipated voice effect.
+ * - To achieve better audio effect quality, Agora recommends that you set the `profile` of
+ * `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or
+ * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5) and `scenario` to `AUDIO_SCENARIO_GAME_STREAMING`
+ * (3) before calling `setVoiceBeautifierPreset`.
+ * - If you have an audio capturing device that can already restore audio details to a high degree,
+ * Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may over-restore
+ * audio details, and you may not hear the anticipated voice effect.
*/
ULTRA_HIGH_QUALITY_VOICE = 0x01040100
};
-/** Preset voice effects.
+/**
+ * @brief Preset audio effects.
*
- * For better voice effects, Agora recommends setting the `profile` parameter of `setAudioProfile`
- * to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` before using
- * the following presets:
+ * @details
+ * To get better audio effects, Agora recommends calling `setAudioProfile(AUDIO_PROFILE_TYPE profile, AUDIO_SCENARIO_TYPE scenario)` and setting the `profile` parameter as recommended below before using the preset audio effects.
+ * | Preset audio effects | `profile` |
+ * | ------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- |
+ * | - ROOM_ACOUSTICS_VIRTUAL_STEREO - ROOM_ACOUSTICS_3D_VOICE - ROOM_ACOUSTICS_VIRTUAL_SURROUND_SOUND | `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` or `AUDIO_PROFILE_MUSIC_STANDARD_STEREO` |
+ * | Other preset audio effects (except for `AUDIO_EFFECT_OFF` ) | `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` |
*
- * - `ROOM_ACOUSTICS_KTV`
- * - `ROOM_ACOUSTICS_VOCAL_CONCERT`
- * - `ROOM_ACOUSTICS_STUDIO`
- * - `ROOM_ACOUSTICS_PHONOGRAPH`
- * - `ROOM_ACOUSTICS_SPACIAL`
- * - `ROOM_ACOUSTICS_ETHEREAL`
- * - `ROOM_ACOUSTICS_CHORUS`
- * - `VOICE_CHANGER_EFFECT_UNCLE`
- * - `VOICE_CHANGER_EFFECT_OLDMAN`
- * - `VOICE_CHANGER_EFFECT_BOY`
- * - `VOICE_CHANGER_EFFECT_SISTER`
- * - `VOICE_CHANGER_EFFECT_GIRL`
- * - `VOICE_CHANGER_EFFECT_PIGKING`
- * - `VOICE_CHANGER_EFFECT_HULK`
- * - `PITCH_CORRECTION`
*/
enum AUDIO_EFFECT_PRESET {
- /** Turn off voice effects, that is, use the original voice.
+ /**
+ * Turn off voice effects, that is, use the original voice.
*/
AUDIO_EFFECT_OFF = 0x00000000,
- /** The voice effect typical of a KTV venue.
+ /**
+ * The voice effect typical of a KTV venue.
*/
ROOM_ACOUSTICS_KTV = 0x02010100,
- /** The voice effect typical of a concert hall.
+ /**
+ * The voice effect typical of a concert hall.
*/
ROOM_ACOUSTICS_VOCAL_CONCERT = 0x02010200,
- /** The voice effect typical of a recording studio.
+ /**
+ * The voice effect typical of a recording studio.
*/
ROOM_ACOUSTICS_STUDIO = 0x02010300,
- /** The voice effect typical of a vintage phonograph.
+ /**
+ * The voice effect typical of a vintage phonograph.
*/
ROOM_ACOUSTICS_PHONOGRAPH = 0x02010400,
- /** The virtual stereo effect, which renders monophonic audio as stereo audio.
- *
- * @note Before using this preset, set the `profile` parameter of `setAudioProfile`
- * to `AUDIO_PROFILE_MUSIC_STANDARD_STEREO(3)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)`;
- * otherwise, the preset setting is invalid.
+ /**
+ * The virtual stereo effect, which renders monophonic audio as stereo audio.
*/
ROOM_ACOUSTICS_VIRTUAL_STEREO = 0x02010500,
- /** A more spatial voice effect.
+ /**
+ * A more spatial voice effect.
*/
ROOM_ACOUSTICS_SPACIAL = 0x02010600,
- /** A more ethereal voice effect.
+ /**
+ * A more ethereal voice effect.
*/
ROOM_ACOUSTICS_ETHEREAL = 0x02010700,
- /** A 3D voice effect that makes the voice appear to be moving around the user. The default cycle
- * period of the 3D voice effect is 10 seconds. To change the cycle period, call
- * `setAudioEffectParameters` after this method.
- *
- * @note
- * - Before using this preset, set the `profile` parameter of `setAudioProfile` to
- * `AUDIO_PROFILE_MUSIC_STANDARD_STEREO` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO`; otherwise,
- * the preset setting is invalid.
- * - If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear
+ /**
+ * A 3D voice effect that makes the voice appear to be moving around the user. The default cycle
+ * period is 10 seconds. After setting this effect, you can call `setAudioEffectParameters` to
+ * modify the movement period.
+ * @note If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear
* the anticipated voice effect.
*/
ROOM_ACOUSTICS_3D_VOICE = 0x02010800,
- /** virtual suround sound.
- *
- * @note
- * - Agora recommends using this enumerator to process virtual suround sound; otherwise, you may
- * not hear the anticipated voice effect.
- * - To achieve better audio effect quality, Agora recommends calling \ref
- * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` parameter to
- * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before
- * setting this enumerator.
+ /**
+ * Virtual surround sound, that is, the SDK generates a simulated surround sound field on the basis
+ * of stereo channels, thereby creating a surround sound effect.
+ * @note If the virtual surround sound is enabled, users need to use stereo audio playback devices
+ * to hear the anticipated audio effect.
*/
ROOM_ACOUSTICS_VIRTUAL_SURROUND_SOUND = 0x02010900,
- /** The voice effect for chorus.
- *
- * @note: To achieve better audio effect quality, Agora recommends calling \ref
- * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` parameter to
- * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before
- * setting this enumerator.
+ /**
+ * The audio effect of chorus. Agora recommends using this effect in chorus scenarios to enhance the
+ * sense of depth and dimension in the vocals.
*/
ROOM_ACOUSTICS_CHORUS = 0x02010D00,
- /** A middle-aged man's voice.
- *
- * @note
- * Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may
- * not hear the anticipated voice effect.
+ /**
+ * A middle-aged man's voice.
+ * @note Agora recommends using this preset to process a male-sounding voice; otherwise, you may not
+ * hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_UNCLE = 0x02020100,
- /** A senior man's voice.
- *
- * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you
- * may not hear the anticipated voice effect.
+ /**
+ * An older man's voice.
+ * @note Agora recommends using this preset to process a male-sounding voice; otherwise, you may not
+ * hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_OLDMAN = 0x02020200,
- /** A boy's voice.
- *
- * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you
- * may not hear the anticipated voice effect.
+ /**
+ * A boy's voice.
+ * @note Agora recommends using this preset to process a male-sounding voice; otherwise, you may not
+ * hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_BOY = 0x02020300,
- /** A young woman's voice.
- *
- * @note
- * - Agora recommends using this enumerator to process a female-sounding voice; otherwise, you may
+ /**
+ * A young woman's voice.
+ * @note Agora recommends using this preset to process a female-sounding voice; otherwise, you may
* not hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_SISTER = 0x02020400,
- /** A girl's voice.
- *
- * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you
- * may not hear the anticipated voice effect.
+ /**
+ * A girl's voice.
+ * @note Agora recommends using this preset to process a female-sounding voice; otherwise, you may
+ * not hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_GIRL = 0x02020500,
- /** The voice of Pig King, a character in Journey to the West who has a voice like a growling
- * bear.
+ /**
+ * The voice of Pig King, a character in Journey to the West who has a voice like a growling bear.
*/
VOICE_CHANGER_EFFECT_PIGKING = 0x02020600,
- /** The Hulk's voice.
+ /**
+ * The Hulk's voice.
*/
VOICE_CHANGER_EFFECT_HULK = 0x02020700,
- /** An audio effect typical of R&B music.
- *
- * @note Before using this preset, set the `profile` parameter of `setAudioProfile` to
- - `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO`; otherwise,
- * the preset setting is invalid.
+ /**
+ * The voice effect typical of R&B music.
*/
STYLE_TRANSFORMATION_RNB = 0x02030100,
- /** The voice effect typical of popular music.
- *
- * @note Before using this preset, set the `profile` parameter of `setAudioProfile` to
- - `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO`; otherwise,
- * the preset setting is invalid.
+ /**
+ * The voice effect typical of popular music.
*/
STYLE_TRANSFORMATION_POPULAR = 0x02030200,
- /** A pitch correction effect that corrects the user's pitch based on the pitch of the natural C
+ /**
+ * A pitch correction effect that corrects the user's pitch based on the pitch of the natural C
* major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust
* the basic mode of tuning and the pitch of the main tone.
*/
@@ -5953,25 +6308,31 @@ enum AUDIO_EFFECT_PRESET {
*/
};
-/** The options for SDK preset voice conversion.
+/**
+ * @brief The options for SDK preset voice conversion effects.
*/
enum VOICE_CONVERSION_PRESET {
- /** Turn off voice conversion and use the original voice.
+ /**
+ * Turn off voice conversion effects and use the original voice.
*/
VOICE_CONVERSION_OFF = 0x00000000,
- /** A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to
- * process a female-sounding voice.
+ /**
+ * A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to process
+ * a female-sounding voice.
*/
VOICE_CHANGER_NEUTRAL = 0x03010100,
- /** A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a
+ /**
+ * A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a
* female-sounding voice.
*/
VOICE_CHANGER_SWEET = 0x03010200,
- /** A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a
+ /**
+ * A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a
* male-sounding voice.
*/
VOICE_CHANGER_SOLID = 0x03010300,
- /** A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a
+ /**
+ * A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a
* male-sounding voice.
*/
VOICE_CHANGER_BASS = 0x03010400,
@@ -6011,88 +6372,166 @@ enum VOICE_CONVERSION_PRESET {
};
-/** The options for SDK preset headphone equalizer.
+/**
+ * @brief Preset headphone equalizer types.
*/
enum HEADPHONE_EQUALIZER_PRESET {
- /** Turn off headphone EQ and use the original voice.
+ /**
+ * The headphone equalizer is disabled, and the original audio is heard.
*/
HEADPHONE_EQUALIZER_OFF = 0x00000000,
- /** For over-ear headphones.
+ /**
+ * An equalizer is used for over-ear headphones.
*/
HEADPHONE_EQUALIZER_OVEREAR = 0x04000001,
- /** For in-ear headphones.
+ /**
+ * An equalizer is used for in-ear headphones.
*/
HEADPHONE_EQUALIZER_INEAR = 0x04000002
};
-/** The options for SDK voice AI tuner.
+/**
+ * @brief Voice AI tuner sound types.
*/
enum VOICE_AI_TUNER_TYPE {
- /** Uncle, deep and magnetic male voice.
+ /**
+ * 0: Mature male voice. A deep and magnetic male voice.
*/
VOICE_AI_TUNER_MATURE_MALE,
- /** Fresh male, refreshing and sweet male voice.
+ /**
+ * 1: Fresh male voice. A fresh and slightly sweet male voice.
*/
VOICE_AI_TUNER_FRESH_MALE,
- /** Big sister, deep and charming female voice.
+ /**
+ * 2: Elegant female voice. A deep and charming female voice.
*/
VOICE_AI_TUNER_ELEGANT_FEMALE,
- /** Lolita, high-pitched and cute female voice.
+ /**
+ * 3: Sweet female voice. A high-pitched and cute female voice.
*/
VOICE_AI_TUNER_SWEET_FEMALE,
- /** Warm man singing, warm and melodic male voice that is suitable for male lyrical songs.
+ /**
+ * 4: Warm male singing. A warm and melodious male voice.
*/
VOICE_AI_TUNER_WARM_MALE_SINGING,
- /** Gentle female singing, soft and delicate female voice that is suitable for female lyrical songs.
+ /**
+ * 5: Gentle female singing. A soft and delicate female voice.
*/
VOICE_AI_TUNER_GENTLE_FEMALE_SINGING,
- /** Smoky uncle singing, unique husky male voice that is suitable for rock or blues songs.
+ /**
+ * 6: Husky male singing. A unique husky male voice.
*/
VOICE_AI_TUNER_HUSKY_MALE_SINGING,
- /** Warm big sister singing, warm and mature female voice that is suitable for emotionally powerful songs.
+ /**
+ * 7: Warm elegant female singing. A warm and mature female voice.
*/
VOICE_AI_TUNER_WARM_ELEGANT_FEMALE_SINGING,
- /** Forceful male singing, strong and powerful male voice that is suitable for passionate songs.
+ /**
+ * 8: Powerful male singing. A strong and powerful male voice.
*/
VOICE_AI_TUNER_POWERFUL_MALE_SINGING,
- /** Dreamy female singing, dreamlike and soft female voice that is suitable for airy and dream-like songs.
+ /**
+ * 9: Dreamy female singing. A dreamy and soft female voice.
*/
VOICE_AI_TUNER_DREAMY_FEMALE_SINGING,
};
/**
- * Screen sharing configurations.
+ * @brief The audio configuration for the shared screen stream.
+ *
+ * @details
+ * Only available when `captureAudio` is `true`.
+ *
+ */
+struct ScreenAudioParameters {
+ /**
+ * Audio sample rate (Hz).
+ */
+ int sampleRate;
+ /**
+ * The number of audio channels. The default value is 2, which means stereo.
+ */
+ int channels;
+ /**
+ * The volume of the captured system audio. The value range is [0, 100]. The default value is 100.
+ */
+ int captureSignalVolume;
+
+#if defined(__APPLE__) && !TARGET_OS_IOS
+ /**
+ * @technical preview
+ */
+ bool excludeCurrentProcessAudio = true;
+ ScreenAudioParameters(): sampleRate(48000), channels(2), captureSignalVolume(100) {}
+#else
+ ScreenAudioParameters(): sampleRate(16000), channels(2), captureSignalVolume(100) {}
+#endif
+};
+
+/**
+ * @brief Screen sharing configurations.
*/
struct ScreenCaptureParameters {
+
/**
- * On Windows and macOS, it represents the video encoding resolution of the shared screen stream.
- * See `VideoDimensions`. The default value is 1920 x 1080, that is, 2,073,600 pixels. Agora uses
- * the value of this parameter to calculate the charges.
+ * Determines whether to capture system audio during screen sharing:
+ * - `true`: Capture.
+ * - `false`: (Default) Do not capture.
*
- * If the aspect ratio is different between the encoding dimensions and screen dimensions, Agora
- * applies the following algorithms for encoding. Suppose dimensions are 1920 x 1080:
- * - If the value of the screen dimensions is lower than that of dimensions, for example,
- * 1000 x 1000 pixels, the SDK uses 1000 x 1000 pixels for encoding.
- * - If the value of the screen dimensions is higher than that of dimensions, for example,
- * 2000 x 1500, the SDK uses the maximum value under dimensions with the aspect ratio of
- * the screen dimension (4:3) for encoding, that is, 1440 x 1080.
+ * @note
+ * Due to system limitations, capturing system audio is only available for Android API level 29
+ * and later (that is, Android 10 and later).
+ */
+ bool captureAudio;
+ /**
+ * The audio configuration for the shared screen stream.
+ * @note This parameter only takes effect when `captureAudio` is `true`.
+ * See `ScreenAudioParameters`.
+ */
+ ScreenAudioParameters audioParams;
+
+ /**
+ * The video encoding resolution of the screen sharing stream. See `VideoDimensions`. The default
+ * value is 1920 × 1080, that is, 2,073,600 pixels. Agora uses the value of this parameter to
+ * calculate the charges.
+ * If the screen dimensions are different from the value of this parameter, Agora applies the
+ * following strategies for encoding. Suppose `dimensions` is set to 1920 × 1080:
+ * - If the value of the screen dimensions is lower than that of `dimensions`, for example, 1000 ×
+ * 1000 pixels, the SDK uses the screen dimensions, that is, 1000 × 1000 pixels, for encoding.
+ * - If the value of the screen dimensions is higher than that of `dimensions`, for example, 2000 ×
+ * 1500, the SDK uses the maximum value under `dimensions` with the aspect ratio of the screen
+ * dimension (4:3) for encoding, that is, 1440 × 1080.
+ * @note
+ * When setting the encoding resolution in the scenario of sharing documents (
+ * SCREEN_SCENARIO_DOCUMENT ), choose one of the following two methods:
+ * - If you require the best image quality, it is recommended to set the encoding resolution to be
+ * the same as the capture resolution.
+ * - If you wish to achieve a relative balance between image quality, bandwidth, and system
+ * performance, then:
+ * - When the capture resolution is greater than 1920 × 1080, it is recommended that the encoding
+ * resolution is not less than 1920 × 1080.
+ * - When the capture resolution is less than 1920 × 1080, it is recommended that the encoding
+ * resolution is not less than 1280 × 720.
*/
VideoDimensions dimensions;
/**
- * On Windows and macOS, it represents the video encoding frame rate (fps) of the shared screen
- * stream. The frame rate (fps) of the shared region. The default value is 5. We do not recommend
- * setting this to a value greater than 15.
+ * On Windows and macOS, this represents the video encoding frame rate (fps) of the screen sharing
+ * stream. The frame rate (fps) of the shared region. The default value is 5. Agora does not
+ * recommend setting this to a value greater than 15.
*/
int frameRate;
/**
- * On Windows and macOS, it represents the video encoding bitrate of the shared screen stream.
+ * On Windows and macOS, this represents the video encoding bitrate of the screen sharing stream.
* The bitrate (Kbps) of the shared region. The default value is 0 (the SDK works out a bitrate
* according to the dimensions of the current screen).
*/
int bitrate;
- /** Whether to capture the mouse in screen sharing:
+ /**
+ * Whether to capture the mouse in screen sharing:
* - `true`: (Default) Capture the mouse.
* - `false`: Do not capture the mouse.
+ * @note Due to macOS system restrictions, setting this parameter to `false` is ineffective during
+ * screen sharing (it has no impact when sharing a window).
*/
bool captureMouseCursor;
/**
@@ -6100,40 +6539,48 @@ struct ScreenCaptureParameters {
* to share it:
* - `true`: Bring the window to the front.
* - `false`: (Default) Do not bring the window to the front.
+ * @note Due to macOS system limitations, when setting this member to bring the window to the front,
+ * if the current app has multiple windows, only the main window will be brought to the front.
*/
bool windowFocus;
/**
- * A list of IDs of windows to be blocked. When calling `startScreenCaptureByDisplayId` to start
+ * The ID list of the windows to be blocked. When calling `startScreenCaptureByDisplayId` to start
* screen sharing, you can use this parameter to block a specified window. When calling
* `updateScreenCaptureParameters` to update screen sharing configurations, you can use this
- * parameter to dynamically block the specified windows during screen sharing.
+ * parameter to dynamically block a specified window.
*/
view_t* excludeWindowList;
/**
- * The number of windows to be blocked.
+ * The number of windows to be excluded.
+ * @note On the Windows platform, the maximum value of this parameter is 24; if this value is
+ * exceeded, excluding the window fails.
*/
int excludeWindowCount;
- /** The width (px) of the border. Defaults to 0, and the value range is [0,50].
- *
+ /**
+ * (For macOS and Windows only) The width (px) of the border. The default value is 5, and the value
+ * range is (0, 50].
+ * @note This parameter only takes effect when `highLighted` is set to `true`.
*/
int highLightWidth;
- /** The color of the border in RGBA format. The default value is 0xFF8CBF26.
- *
+ /**
+ * (For macOS and Windows only)
+ * - On Windows platforms, the color of the border in ARGB format. The default value is 0xFF8CBF26.
+ * - On macOS, `COLOR_CLASS` refers to `NSColor`.
*/
unsigned int highLightColor;
- /** Whether to place a border around the shared window or screen:
- * - true: Place a border.
- * - false: (Default) Do not place a border.
- *
+ /**
+ * (For macOS and Windows only) Whether to place a border around the shared window or screen:
+ * - `true`: Place a border.
+ * - `false`: (Default) Do not place a border.
* @note When you share a part of a window or screen, the SDK places a border around the entire
- * window or screen if you set `enableHighLight` as true.
- *
+ * window or screen if you set this parameter to `true`.
*/
bool enableHighLight;
ScreenCaptureParameters()
- : dimensions(1920, 1080),
+ : captureAudio(false),
+ dimensions(1920, 1080),
frameRate(5),
bitrate(STANDARD_BITRATE),
captureMouseCursor(true),
@@ -6144,7 +6591,7 @@ struct ScreenCaptureParameters {
highLightColor(0),
enableHighLight(false) {}
ScreenCaptureParameters(const VideoDimensions& d, int f, int b)
- : dimensions(d),
+ : captureAudio(false),dimensions(d),
frameRate(f),
bitrate(b),
captureMouseCursor(true),
@@ -6155,7 +6602,8 @@ struct ScreenCaptureParameters {
highLightColor(0),
enableHighLight(false) {}
ScreenCaptureParameters(int width, int height, int f, int b)
- : dimensions(width, height),
+ : captureAudio(false),
+ dimensions(width, height),
frameRate(f),
bitrate(b),
captureMouseCursor(true),
@@ -6166,7 +6614,8 @@ struct ScreenCaptureParameters {
highLightColor(0),
enableHighLight(false) {}
ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs)
- : dimensions(width, height),
+ : captureAudio(false),
+ dimensions(width, height),
frameRate(f),
bitrate(b),
captureMouseCursor(cur),
@@ -6177,7 +6626,8 @@ struct ScreenCaptureParameters {
highLightColor(0),
enableHighLight(false) {}
ScreenCaptureParameters(int width, int height, int f, int b, view_t* ex, int cnt)
- : dimensions(width, height),
+ : captureAudio(false),
+ dimensions(width, height),
frameRate(f),
bitrate(b),
captureMouseCursor(true),
@@ -6189,7 +6639,8 @@ struct ScreenCaptureParameters {
enableHighLight(false) {}
ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t* ex,
int cnt)
- : dimensions(width, height),
+ : captureAudio(false),
+ dimensions(width, height),
frameRate(f),
bitrate(b),
captureMouseCursor(cur),
@@ -6202,12 +6653,12 @@ struct ScreenCaptureParameters {
};
/**
- * Audio recording quality.
+ * @brief Recording quality.
*/
enum AUDIO_RECORDING_QUALITY_TYPE {
/**
- * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes
- * of recording.
+ * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes of
+ * recording.
*/
AUDIO_RECORDING_QUALITY_LOW = 0,
/**
@@ -6216,18 +6667,19 @@ enum AUDIO_RECORDING_QUALITY_TYPE {
*/
AUDIO_RECORDING_QUALITY_MEDIUM = 1,
/**
- * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10
- * minutes of recording.
+ * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 minutes
+ * of recording.
*/
AUDIO_RECORDING_QUALITY_HIGH = 2,
/**
- * 3: Ultra high audio recording quality.
+ * 3: Ultra high quality. The sample rate is 32 kHz, and the file size is around 7.5 MB after 10
+ * minutes of recording.
*/
AUDIO_RECORDING_QUALITY_ULTRA_HIGH = 3,
};
/**
- * Recording content. Set in `startAudioRecording`.
+ * @brief Recording content. Set in `startAudioRecording [3/3]`.
*/
enum AUDIO_FILE_RECORDING_TYPE {
/**
@@ -6245,7 +6697,7 @@ enum AUDIO_FILE_RECORDING_TYPE {
};
/**
- * Audio encoded frame observer position.
+ * @brief The position of the audio encoded frame observer.
*/
enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION {
/**
@@ -6263,12 +6715,12 @@ enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION {
};
/**
- * Recording configuration.
+ * @brief Recording configurations.
*/
struct AudioRecordingConfiguration {
/**
* The absolute path (including the filename extensions) of the recording file. For example:
- * `C:\music\audio.mp4`.
+ * `C:\music\audio.aac`.
* @note Ensure that the directory for the log files exists and is writable.
*/
const char* filePath;
@@ -6285,7 +6737,7 @@ struct AudioRecordingConfiguration {
* - 44100
* - 48000
* @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC
- * files with quality to be `AUDIO_RECORDING_QUALITY_MEDIUM` or `AUDIO_RECORDING_QUALITY_HIGH` for
+ * files with `quality` set as AUDIO_RECORDING_QUALITY_MEDIUM or AUDIO_RECORDING_QUALITY_HIGH for
* better recording quality.
*/
int sampleRate;
@@ -6300,9 +6752,17 @@ struct AudioRecordingConfiguration {
AUDIO_RECORDING_QUALITY_TYPE quality;
/**
- * Recording channel. The following values are supported:
- * - (Default) 1
- * - 2
+ * The audio channel of recording: The parameter supports the following values:
+ * - 1: (Default) Mono.
+ * - 2: Stereo.
+ * @note
+ * The actual recorded audio channel is related to the audio channel that you capture.
+ * - If the captured audio is mono and `recordingChannel` is `2`, the recorded audio is the
+ * dual-channel data that is copied from mono data, not stereo.
+ * - If the captured audio is dual channel and `recordingChannel` is `1`, the recorded audio is the
+ * mono data that is mixed by dual-channel data.
+ * The integration scheme also affects the final recorded audio channel. If you need to record in
+ * stereo, contact `technical support`.
*/
int recordingChannel;
@@ -6343,15 +6803,15 @@ struct AudioRecordingConfiguration {
};
/**
- * Observer settings for the encoded audio.
+ * @brief Observer settings for the encoded audio.
*/
struct AudioEncodedFrameObserverConfig {
/**
- * Audio profile. For details, see `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`.
+ * Audio profile. See `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`.
*/
AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType;
/**
- * Audio encoding type. For details, see `AUDIO_ENCODING_TYPE`.
+ * Audio encoding type. See `AUDIO_ENCODING_TYPE`.
*/
AUDIO_ENCODING_TYPE encodingType;
@@ -6365,46 +6825,49 @@ struct AudioEncodedFrameObserverConfig {
class IAudioEncodedFrameObserver {
public:
/**
- * Gets the encoded audio data of the local user.
+ * @brief Gets the encoded audio data of the local user.
*
+ * @details
* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as
- * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD`, you can get the encoded audio data of the local
+ * AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD, you can get the encoded audio data of the local
* user from this callback.
*
- * @param frameBuffer The pointer to the audio frame buffer.
- * @param length The data length (byte) of the audio frame.
- * @param audioEncodedFrameInfo Audio information after encoding. For details, see
- * `EncodedAudioFrameInfo`.
+ * @param frameBuffer The audio buffer.
+ * @param length The data length (byte).
+ * @param audioEncodedFrameInfo Audio information after encoding. See `EncodedAudioFrameInfo`.
+ *
*/
virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length,
const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0;
/**
- * Gets the encoded audio data of all remote users.
+ * @brief Gets the encoded audio data of all remote users.
*
+ * @details
* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as
- * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK`, you can get encoded audio data of all remote
+ * AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK, you can get encoded audio data of all remote
* users through this callback.
*
- * @param frameBuffer The pointer to the audio frame buffer.
- * @param length The data length (byte) of the audio frame.
- * @param audioEncodedFrameInfo Audio information after encoding. For details, see
- * `EncodedAudioFrameInfo`.
+ * @param frameBuffer The audio buffer.
+ * @param length The data length (byte).
+ * @param audioEncodedFrameInfo Audio information after encoding. See `EncodedAudioFrameInfo`.
+ *
*/
virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length,
const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0;
/**
- * Gets the mixed and encoded audio data of the local and all remote users.
+ * @brief Gets the mixed and encoded audio data of the local and all remote users.
*
+ * @details
* After calling `registerAudioEncodedFrameObserver` and setting the audio profile as
- * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED`, you can get the mixed and encoded audio data of
- * the local and all remote users through this callback.
+ * AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED, you can get the mixed and encoded audio data of the
+ * local and all remote users through this callback.
+ *
+ * @param frameBuffer The audio buffer.
+ * @param length The data length (byte).
+ * @param audioEncodedFrameInfo Audio information after encoding. See `EncodedAudioFrameInfo`.
*
- * @param frameBuffer The pointer to the audio frame buffer.
- * @param length The data length (byte) of the audio frame.
- * @param audioEncodedFrameInfo Audio information after encoding. For details, see
- * `EncodedAudioFrameInfo`.
*/
virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length,
const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0;
@@ -6412,7 +6875,9 @@ class IAudioEncodedFrameObserver {
virtual ~IAudioEncodedFrameObserver() {}
};
-/** The region for connection, which is the region where the server the SDK connects to is located.
+/**
+ * @brief The region for connection, which is the region where the server the SDK connects to is
+ * located.
*/
enum AREA_CODE {
/**
@@ -6440,7 +6905,7 @@ enum AREA_CODE {
*/
AREA_CODE_IN = 0x00000020,
/**
- * (Default) Global.
+ * Global.
*/
AREA_CODE_GLOB = (0xFFFFFFFF)
};
@@ -6485,83 +6950,102 @@ enum AREA_CODE_EX {
};
/**
- * The error code of the channel media replay.
+ * @brief The error code of the channel media relay.
*/
enum CHANNEL_MEDIA_RELAY_ERROR {
- /** 0: No error.
+ /**
+ * 0: No error.
*/
RELAY_OK = 0,
- /** 1: An error occurs in the server response.
+ /**
+ * 1: An error occurs in the server response.
*/
RELAY_ERROR_SERVER_ERROR_RESPONSE = 1,
- /** 2: No server response. You can call the `leaveChannel` method to leave the channel.
- *
- * This error can also occur if your project has not enabled co-host token authentication. You can
- * contact technical support to enable the service for cohosting across channels before starting a
- * channel media relay.
+ /**
+ * 2: No server response.
+ * This error may be caused by poor network connections. If this error occurs when initiating a
+ * channel media relay, you can try again later; if this error occurs during channel media relay,
+ * you can call `leaveChannel(const LeaveChannelOptions& options)` to leave the channel.
+ * This error can also occur if the channel media relay service is not enabled in the project. You
+ * can contact `technical support` to enable the service.
*/
RELAY_ERROR_SERVER_NO_RESPONSE = 2,
- /** 3: The SDK fails to access the service, probably due to limited resources of the server.
+ /**
+ * 3: The SDK fails to access the service, probably due to limited resources of the server.
*/
RELAY_ERROR_NO_RESOURCE_AVAILABLE = 3,
- /** 4: Fails to send the relay request.
+ /**
+ * 4: Fails to send the relay request.
*/
RELAY_ERROR_FAILED_JOIN_SRC = 4,
- /** 5: Fails to accept the relay request.
+ /**
+ * 5: Fails to accept the relay request.
*/
RELAY_ERROR_FAILED_JOIN_DEST = 5,
- /** 6: The server fails to receive the media stream.
+ /**
+ * 6: The server fails to receive the media stream.
*/
RELAY_ERROR_FAILED_PACKET_RECEIVED_FROM_SRC = 6,
- /** 7: The server fails to send the media stream.
+ /**
+ * 7: The server fails to send the media stream.
*/
RELAY_ERROR_FAILED_PACKET_SENT_TO_DEST = 7,
- /** 8: The SDK disconnects from the server due to poor network connections. You can call the
- * `leaveChannel` method to leave the channel.
+ /**
+ * 8: The SDK disconnects from the server due to poor network connections. You can call
+ * `leaveChannel(const LeaveChannelOptions& options)` to leave the channel.
*/
RELAY_ERROR_SERVER_CONNECTION_LOST = 8,
- /** 9: An internal error occurs in the server.
+ /**
+ * 9: An internal error occurs in the server.
*/
RELAY_ERROR_INTERNAL_ERROR = 9,
- /** 10: The token of the source channel has expired.
+ /**
+ * 10: The token of the source channel has expired.
*/
RELAY_ERROR_SRC_TOKEN_EXPIRED = 10,
- /** 11: The token of the destination channel has expired.
+ /**
+ * 11: The token of the destination channel has expired.
*/
RELAY_ERROR_DEST_TOKEN_EXPIRED = 11,
};
/**
- * The state code of the channel media relay.
+ * @brief The state code of the channel media relay.
*/
enum CHANNEL_MEDIA_RELAY_STATE {
- /** 0: The initial state. After you successfully stop the channel media relay by calling
+ /**
+ * 0: The initial state. After you successfully stop the channel media relay by calling
* `stopChannelMediaRelay`, the `onChannelMediaRelayStateChanged` callback returns this state.
*/
RELAY_STATE_IDLE = 0,
- /** 1: The SDK tries to relay the media stream to the destination channel.
+ /**
+ * 1: The SDK tries to relay the media stream to the destination channel.
*/
RELAY_STATE_CONNECTING = 1,
- /** 2: The SDK successfully relays the media stream to the destination channel.
+ /**
+ * 2: The SDK successfully relays the media stream to the destination channel.
*/
RELAY_STATE_RUNNING = 2,
- /** 3: An error occurs. See `code` in `onChannelMediaRelayStateChanged` for the error code.
+ /**
+ * 3: An error occurs. See `code` in `onChannelMediaRelayStateChanged` for the error code.
*/
RELAY_STATE_FAILURE = 3,
};
-/** The definition of ChannelMediaInfo.
+/**
+ * @brief Channel media information.
*/
struct ChannelMediaInfo {
- /** The user ID.
+ /**
+ * The user ID.
*/
uid_t uid;
- /** The channel name. The default value is NULL, which means that the SDK
- * applies the current channel name.
+ /**
+ * The channel name.
*/
const char* channelName;
- /** The token that enables the user to join the channel. The default value
- * is NULL, which means that the SDK applies the current token.
+ /**
+ * The token that enables the user to join the channel.
*/
const char* token;
@@ -6569,41 +7053,45 @@ struct ChannelMediaInfo {
ChannelMediaInfo(const char* c, const char* t, uid_t u) : uid(u), channelName(c), token(t) {}
};
-/** The definition of ChannelMediaRelayConfiguration.
+/**
+ * @brief Configuration of cross channel media relay.
*/
struct ChannelMediaRelayConfiguration {
- /** The information of the source channel `ChannelMediaInfo`. It contains the following members:
- * - `channelName`: The name of the source channel. The default value is `NULL`, which means the
- * SDK applies the name of the current channel.
- * - `uid`: The unique ID to identify the relay stream in the source channel. The default value is
- * 0, which means the SDK generates a random UID. You must set it as 0.
- * - `token`: The token for joining the source channel. It is generated with the `channelName` and
- * `uid` you set in `srcInfo`.
- * - If you have not enabled the App Certificate, set this parameter as the default value
- * `NULL`, which means the SDK applies the App ID.
- * - If you have enabled the App Certificate, you must use the token generated with the
+ /**
+ * The information of the source channel. See `ChannelMediaInfo`. It contains the following members:
+ * - `channelName`: The name of the source channel. The default value is `NULL`, which means the SDK
+ * applies the name of the current channel.
+ * - `token`: The `token` for joining the source channel. This token is generated with the
+ * `channelName` and `uid` you set in `srcInfo`.
+ * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`,
+ * which means the SDK applies the App ID.
+ * - If you have enabled the App Certificate, you must use the `token` generated with the
* `channelName` and `uid`, and the `uid` must be set as 0.
+ * - `uid`: The unique user ID to identify the relay stream in the source channel. Agora recommends
+ * leaving the default value of 0 unchanged.
*/
ChannelMediaInfo* srcInfo;
- /** The information of the destination channel `ChannelMediaInfo`. It contains the following
- * members:
- * - `channelName`: The name of the destination channel.
- * - `uid`: The unique ID to identify the relay stream in the destination channel. The value
- * ranges from 0 to (2^32-1). To avoid UID conflicts, this `UID` must be different from any
- * other `UID` in the destination channel. The default value is 0, which means the SDK generates
- * a random `UID`. Do not set this parameter as the `UID` of the host in the destination channel,
- * and ensure that this `UID` is different from any other `UID` in the channel.
- * - `token`: The token for joining the destination channel. It is generated with the
- * `channelName` and `uid` you set in `destInfos`.
- * - If you have not enabled the App Certificate, set this parameter as the default value NULL,
+ /**
+ * The information of the target channel `ChannelMediaInfo`. It contains the following members:
+ * - `channelName`: The name of the target channel.
+ * - `token`: The `token` for joining the target channel. It is generated with the `channelName` and
+ * `uid` you set in `destInfos`.
+ * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`,
* which means the SDK applies the App ID.
- * If you have enabled the App Certificate, you must use the token generated with the
+ * - If you have enabled the App Certificate, you must use the `token` generated with the
* `channelName` and `uid`.
+ * - `uid`: The unique user ID to identify the relay stream in the target channel. The value ranges
+ * from 0 to (2^32-1). To avoid user ID conflicts, this user ID must be different from any other
+ * user ID in the target channel. The default value is 0, which means the SDK generates a random
+ * UID.
+ * @note If the token of any target channel expires, the whole media relay stops; hence Agora
+ * recommends that you specify the same expiration time for the tokens of all the target channels.
*/
ChannelMediaInfo* destInfos;
- /** The number of destination channels. The default value is 0, and the value range is from 0 to
- * 6. Ensure that the value of this parameter corresponds to the number of `ChannelMediaInfo`
- * structs you define in `destInfo`.
+ /**
+ * The number of target channels. The default value is 0, and the value range is from 0 to 6. Ensure
+ * that the value of this parameter corresponds to the number of `ChannelMediaInfo` structs you
+ * define in `destInfo`.
*/
int destCount;
@@ -6612,7 +7100,7 @@ struct ChannelMediaRelayConfiguration {
};
/**
- * The uplink network information.
+ * @brief The uplink network information.
*/
struct UplinkNetworkInfo {
/**
@@ -6743,66 +7231,82 @@ struct DownlinkNetworkInfo {
};
/**
- * The built-in encryption mode.
+ * @brief The built-in encryption mode.
*
+ * @details
* Agora recommends using AES_128_GCM2 or AES_256_GCM2 encrypted mode. These two modes support the
* use of salt for higher security.
+ *
*/
enum ENCRYPTION_MODE {
- /** 1: 128-bit AES encryption, XTS mode.
+ /**
+ * 1: 128-bit AES encryption, XTS mode.
*/
AES_128_XTS = 1,
- /** 2: 128-bit AES encryption, ECB mode.
+ /**
+ * 2: 128-bit AES encryption, ECB mode.
*/
AES_128_ECB = 2,
- /** 3: 256-bit AES encryption, XTS mode.
+ /**
+ * 3: 256-bit AES encryption, XTS mode.
*/
AES_256_XTS = 3,
- /** 4: 128-bit SM4 encryption, ECB mode.
+ /**
+ * 4: 128-bit SM4 encryption, ECB mode.
*/
SM4_128_ECB = 4,
- /** 5: 128-bit AES encryption, GCM mode.
+ /**
+ * 5: 128-bit AES encryption, GCM mode.
*/
AES_128_GCM = 5,
- /** 6: 256-bit AES encryption, GCM mode.
+ /**
+ * 6: 256-bit AES encryption, GCM mode.
*/
AES_256_GCM = 6,
- /** 7: (Default) 128-bit AES encryption, GCM mode. This encryption mode requires the setting of
- * salt (`encryptionKdfSalt`).
+ /**
+ * 7: (Default) 128-bit AES encryption, GCM mode. This encryption mode requires the setting of salt
+ * (`encryptionKdfSalt`).
*/
AES_128_GCM2 = 7,
- /** 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt
+ /**
+ * 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt
* (`encryptionKdfSalt`).
*/
AES_256_GCM2 = 8,
- /** Enumerator boundary.
+ /**
+ * Enumerator boundary.
*/
MODE_END,
};
-/** Built-in encryption configurations. */
+/**
+ * @brief Built-in encryption configurations.
+ */
struct EncryptionConfig {
/**
- * The built-in encryption mode. See #ENCRYPTION_MODE. Agora recommends using `AES_128_GCM2`
- * or `AES_256_GCM2` encrypted mode. These two modes support the use of salt for higher security.
+ * The built-in encryption mode. See `ENCRYPTION_MODE`. Agora recommends using `AES_128_GCM2` or
+ * `AES_256_GCM2` encrypted mode. These two modes support the use of salt for higher security.
*/
ENCRYPTION_MODE encryptionMode;
/**
* Encryption key in string type with unlimited length. Agora recommends using a 32-byte key.
- *
- * @note If you do not set an encryption key or set it as NULL, you cannot use the built-in
- * encryption, and the SDK returns #ERR_INVALID_ARGUMENT (-2).
+ * @note If you do not set an encryption key or set it as `NULL`, you cannot use the built-in
+ * encryption, and the SDK returns `-2`.
*/
const char* encryptionKey;
/**
* Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server
- * side.
- *
- * @note This parameter takes effect only in `AES_128_GCM2` or `AES_256_GCM2` encrypted mode.
- * In this case, ensure that this parameter is not 0.
+ * side. See Media Stream Encryption for details.
+ * @note This parameter takes effect only in `AES_128_GCM2` or `AES_256_GCM2` encrypted mode. In
+ * this case, ensure that this parameter is not `0`.
*/
uint8_t encryptionKdfSalt[32];
+ /**
+ * Whether to enable data stream encryption:
+ * - `true`: Enable data stream encryption.
+ * - `false`: (Default) Disable data stream encryption.
+ */
bool datastreamEncryptionEnabled;
EncryptionConfig()
@@ -6839,7 +7343,8 @@ struct EncryptionConfig {
/// @endcond
};
-/** Encryption error type.
+/**
+ * @brief Encryption error type.
*/
enum ENCRYPTION_ERROR_TYPE {
/**
@@ -6847,21 +7352,21 @@ enum ENCRYPTION_ERROR_TYPE {
*/
ENCRYPTION_ERROR_INTERNAL_FAILURE = 0,
/**
- * 1: MediaStream decryption errors. Ensure that the receiver and the sender use the same
- * encryption mode and key.
+ * 1: Media stream decryption error. Ensure that the receiver and the sender use the same encryption
+ * mode and key.
*/
ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1,
/**
- * 2: MediaStream encryption errors.
+ * 2: Media stream encryption error.
*/
ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2,
/**
- * 3: DataStream decryption errors. Ensure that the receiver and the sender use the same
- * encryption mode and key.
+ * 3: Data stream decryption error. Ensure that the receiver and the sender use the same encryption
+ * mode and key.
*/
ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3,
/**
- * 4: DataStream encryption errors.
+ * 4: Data stream encryption error.
*/
ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4,
};
@@ -6873,47 +7378,53 @@ enum UPLOAD_ERROR_REASON {
};
/**
- * Error codes for renewing a token.
+ * @brief Represents the error codes after calling `renewToken`.
*
- * These error codes indicate the result of calling renewToken.
* @since 4.6.0
*/
enum RENEW_TOKEN_ERROR_CODE {
/**
- * 0: The token is renewed successfully.
+ * (0): Token updated successfully.
*/
RENEW_TOKEN_SUCCESS = 0,
/**
- * 1: It is recommended that the user generate a new token and retry renewToken.
+ * (1): Token update failed due to an unknown server error. It is recommended to check the
+ * parameters used to generate the Token, regenerate the Token, and retry `renewToken`.
*/
RENEW_TOKEN_FAILURE = 1,
/**
- * 2: The token renewal failed because the provided token has expired.
- * It is recommended that the user generate a new token with a longer expiration time and retry renewToken.
+ * (2): Token update failed because the provided Token has expired. It is recommended to generate a
+ * new Token with a longer expiration time and retry `renewToken`.
*/
RENEW_TOKEN_TOKEN_EXPIRED = 2,
/**
- * 3: The token renewal failed because the provided token is invalid.
- * It is recommended that the user check the token generation process, generate a new token, and retry renewToken.
+ * (3): Token update failed because the provided Token is invalid. Common reasons include: the
+ * project has enabled App Certificate in the Agora Console but did not use a Token when joining the
+ * channel; the uid specified in `joinChannel` is inconsistent with the uid used when generating the
+ * Token; the channel name specified in `joinChannel` is inconsistent with the one used when
+ * generating the Token. It is recommended to check the Token generation process, generate a new
+ * Token, and retry `renewToken`.
*/
RENEW_TOKEN_INVALID_TOKEN = 3,
/**
- * 4: The token renewal failed because the channel name in the token does not match the current channel.
- * It is recommended that the user check the channel name, generate a new token, and retry renewToken.
+ * (4): Token update failed because the channel name in the Token does not match the current
+ * channel. It is recommended to check the channel name, generate a new Token, and retry
+ * `renewToken`.
*/
RENEW_TOKEN_INVALID_CHANNEL_NAME = 4,
/**
- * 5: The token renewal failed because the app ID in the token does not match the current app ID.
- * It is recommended that the user check the app ID, generate a new token, and retry renewToken.
+ * (5): Token update failed because the App ID in the Token does not match the current App ID. It is
+ * recommended to check the App ID, generate a new Token, and retry `renewToken`.
*/
RENEW_TOKEN_INCONSISTENT_APPID = 5,
/**
- * 6: The token renewal was canceled because a new request was made, and the previous one was canceled.
+ * (6): The previous Token update request was canceled due to a new request being initiated.
*/
RENEW_TOKEN_CANCELED_BY_NEW_REQUEST = 6,
};
-/** The type of the device permission.
+/**
+ * @brief The type of the device permission.
*/
enum PERMISSION_TYPE {
/**
@@ -6925,29 +7436,34 @@ enum PERMISSION_TYPE {
*/
CAMERA = 1,
+ /**
+ * (For Android only) 2: Permission for screen sharing.
+ */
SCREEN_CAPTURE = 2,
};
/**
- * The subscribing state.
+ * @brief The subscribing state.
*/
enum STREAM_SUBSCRIBE_STATE {
/**
- * 0: The initial subscribing state after joining the channel.
+ * 0: The initial subscribing state after joining the channel.
*/
SUB_STATE_IDLE = 0,
/**
* 1: Fails to subscribe to the remote stream. Possible reasons:
* - The remote user:
- * - Calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending local
- * media stream.
- * - Calls `disableAudio` or `disableVideo `to disable the local audio or video module.
- * - Calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or
+ * - Calls `muteLocalAudioStream` (`true`) or `muteLocalVideoStream` (`true`) to stop sending
+ * local media stream.
+ * - Calls `disableAudio` or `disableVideo` to disable the local audio or video module.
+ * - Calls `enableLocalAudio` (`false`) or `enableLocalVideo` (`false`) to disable local audio or
* video capture.
* - The role of the remote user is audience.
* - The local user calls the following methods to stop receiving remote streams:
- * - Calls `muteRemoteAudioStream(true)`, `muteAllRemoteAudioStreams(true)` to stop receiving the remote audio streams.
- * - Calls `muteRemoteVideoStream(true)`, `muteAllRemoteVideoStreams(true)` to stop receiving the remote video streams.
+ * - Calls `muteRemoteAudioStream` (`true`) or `muteAllRemoteAudioStreams` (`true`) to stop
+ * receiving the remote audio stream.
+ * - Calls `muteRemoteVideoStream` (`true`) or `muteAllRemoteVideoStreams` (`true`) to stop
+ * receiving the remote video stream.
*/
SUB_STATE_NO_SUBSCRIBED = 1,
/**
@@ -6955,13 +7471,13 @@ enum STREAM_SUBSCRIBE_STATE {
*/
SUB_STATE_SUBSCRIBING = 2,
/**
- * 3: Subscribes to and receives the remote stream successfully.
+ * 3: The remote stream is received, and the subscription is successful.
*/
SUB_STATE_SUBSCRIBED = 3
};
/**
- * The publishing state.
+ * @brief The publishing state.
*/
enum STREAM_PUBLISH_STATE {
/**
@@ -6970,12 +7486,12 @@ enum STREAM_PUBLISH_STATE {
PUB_STATE_IDLE = 0,
/**
* 1: Fails to publish the local stream. Possible reasons:
- * - The local user calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop
- * sending the local media stream.
+ * - The local user calls `muteLocalAudioStream` (`true`) or `muteLocalVideoStream` (`true`) to stop
+ * sending local media streams.
* - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video
* module.
- * - The local user calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the
- * local audio or video capture.
+ * - The local user calls `enableLocalAudio` (`false`) or `enableLocalVideo` (`false`) to disable
+ * the local audio or video capture.
* - The role of the local user is audience.
*/
PUB_STATE_NO_PUBLISHED = 1,
@@ -6990,14 +7506,49 @@ enum STREAM_PUBLISH_STATE {
};
/**
- * The EchoTestConfiguration struct.
+ * @brief The configuration of the audio and video call loop test.
*/
struct EchoTestConfiguration {
+ /**
+ * The view used to render the local user's video. This parameter is only applicable to scenarios
+ * testing video devices, that is, when `enableVideo` is true.
+ */
view_t view;
+ /**
+ * Whether to enable the audio device for the loop test:
+ * - `true`: (Default) Enable the audio device. To test the audio device, set this parameter as
+ * true.
+ * - `false`: Disable the audio device.
+ */
bool enableAudio;
+ /**
+ * Whether to enable the video device for the loop test:
+ * - `true`: (Default) Enable the video device. To test the video device, set this parameter as
+ * true.
+ * - `false`: Disable the video device.
+ */
bool enableVideo;
+ /**
+ * The token used to secure the audio and video call loop test. If you do not enable App Certificate
+ * in Agora Console, you do not need to pass a value in this parameter; if you have enabled App
+ * Certificate in Agora Console, you must pass a token in this parameter; the `uid` used when you
+ * generate the token must be 0xFFFFFFFF, and the channel name used must be the channel name that
+ * identifies each audio and video call loop tested. For server-side token generation, see the
+ * documentation on secure authentication with tokens.
+ */
const char* token;
+ /**
+ * The channel name that identifies each audio and video call loop. To ensure proper loop test
+ * functionality, the channel name passed in to identify each loop test cannot be the same when
+ * users of the same project (App ID) perform audio and video call loop tests on different devices.
+ */
const char* channelId;
+ /**
+ * Set the time interval or delay for returning the results of the audio and video loop test. The
+ * value range is [2,10], in seconds, with the default value being 2 seconds.
+ * - For audio loop tests, the test results will be returned according to the time interval you set.
+ * - For video loop tests, the video will be displayed in a short time, after which the delay will
+ * gradually increase until it reaches the delay you set.
+ */
int intervalInSeconds;
EchoTestConfiguration(view_t v, bool ea, bool ev, const char* t, const char* c, const int is)
@@ -7013,7 +7564,7 @@ struct EchoTestConfiguration {
};
/**
- * The information of the user.
+ * @brief The information of the user.
*/
struct UserInfo {
/**
@@ -7021,7 +7572,7 @@ struct UserInfo {
*/
uid_t uid;
/**
- * The user account. The maximum data length is `MAX_USER_ACCOUNT_LENGTH_TYPE`.
+ * User account. The maximum data length is `MAX_USER_ACCOUNT_LENGTH_TYPE`.
*/
char userAccount[MAX_USER_ACCOUNT_LENGTH];
@@ -7029,26 +7580,26 @@ struct UserInfo {
};
/**
- * The audio filter of in-ear monitoring.
+ * @brief The audio filter types of in-ear monitoring.
*/
enum EAR_MONITORING_FILTER_TYPE {
/**
- * 1: Do not add an audio filter to the in-ear monitor.
+ * 1<<0: No audio filter added to in-ear monitoring.
*/
EAR_MONITORING_FILTER_NONE = (1 << 0),
/**
- * 2: Enable audio filters to the in-ear monitor. If you implement functions such as voice
- * beautifier and audio effect, users can hear the voice after adding these effects.
+ * 1<<1: Add vocal effects audio filter to in-ear monitoring. If you implement functions such as
+ * voice beautifier and audio effect, users can hear the voice after adding these effects.
*/
EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1 << 1),
/**
- * 4: Enable noise suppression to the in-ear monitor.
+ * 1<<2: Add noise suppression audio filter to in-ear monitoring.
*/
EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1 << 2),
/**
- * 32768: Enable audio filters by reuse post-processing filter to the in-ear monitor.
- * This bit is intended to be used in exclusive mode, which means, if this bit is set, all other
- * bits will be disregarded.
+ * 1<<15: Reuse the audio filter that has been processed on the sending end for in-ear monitoring.
+ * This enumerator reduces CPU usage while increasing in-ear monitoring latency, which is suitable
+ * for latency-tolerant scenarios requiring low CPU consumption.
*/
EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1 << 15),
};
@@ -7086,49 +7637,23 @@ enum THREAD_PRIORITY_TYPE {
#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__)
/**
- * The video configuration for the shared screen stream.
+ * @brief The video configuration for the shared screen stream.
*/
struct ScreenVideoParameters {
/**
- * The dimensions of the video encoding resolution. The default value is `1280` x `720`.
- * For recommended values, see [Recommended video
- * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles).
- * If the aspect ratio is different between width and height and the screen, the SDK adjusts the
- * video encoding resolution according to the following rules (using an example where `width` ×
- * `height` is 1280 × 720):
- * - When the width and height of the screen are both lower than `width` and `height`, the SDK
- * uses the resolution of the screen for video encoding. For example, if the screen is 640 ×
- * 360, The SDK uses 640 × 360 for video encoding.
- * - When either the width or height of the screen is higher than `width` or `height`, the SDK
- * uses the maximum values that do not exceed those of `width` and `height` while maintaining
- * the aspect ratio of the screen for video encoding. For example, if the screen is 2000 × 1500,
- * the SDK uses 960 × 720 for video encoding.
- *
- * @note
- * - The billing of the screen sharing stream is based on the values of width and height.
- * When you do not pass in these values, Agora bills you at 1280 × 720;
- * when you pass in these values, Agora bills you at those values.
- * For details, see [Pricing for Real-time
- * Communication](https://docs.agora.io/en/Interactive%20Broadcast/billing_rtc).
- * - This value does not indicate the orientation mode of the output ratio.
- * For how to set the video orientation, see `ORIENTATION_MODE`.
- * - Whether the SDK can support a resolution at 720P depends on the performance of the device.
- * If you set 720P but the device cannot support it, the video frame rate can be lower.
+ * The video encoding dimension. The default value is 1280 × 720.
*/
VideoDimensions dimensions;
/**
- * The video encoding frame rate (fps). The default value is `15`.
- * For recommended values, see [Recommended video
- * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles).
+ * The video encoding frame rate (fps). The default value is 15.
*/
int frameRate = 15;
/**
- * The video encoding bitrate (Kbps). For recommended values, see [Recommended video
- * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles).
+ * The video encoding bitrate (Kbps).
*/
int bitrate;
- /*
- * The content hint of the screen sharing:
+ /**
+ * The content hint for screen sharing. See `VIDEO_CONTENT_HINT`.
*/
VIDEO_CONTENT_HINT contentHint = VIDEO_CONTENT_HINT::CONTENT_HINT_MOTION;
@@ -7136,124 +7661,140 @@ struct ScreenVideoParameters {
};
/**
- * The audio configuration for the shared screen stream.
- */
-struct ScreenAudioParameters {
- /**
- * The audio sample rate (Hz). The default value is `16000`.
- */
- int sampleRate = 16000;
- /**
- * The number of audio channels. The default value is `2`, indicating dual channels.
- */
- int channels = 2;
- /**
- * The volume of the captured system audio. The value range is [0,100]. The default value is
- * `100`.
- */
- int captureSignalVolume = 100;
-};
-
-/**
- * The configuration of the screen sharing
+ * @brief Screen sharing configurations.
*/
struct ScreenCaptureParameters2 {
/**
* Determines whether to capture system audio during screen sharing:
- * - `true`: Capture.
- * - `false`: (Default) Do not capture.
- *
- * **Note**
- * Due to system limitations, capturing system audio is only available for Android API level 29
+ * - `true`: Capture system audio.
+ * - `false`: (Default) Do not capture system audio.
+ * @note
+ * - Due to system limitations, capturing system audio is only applicable to Android API level 29
* and later (that is, Android 10 and later).
+ * - To improve the success rate of capturing system audio during screen sharing, ensure that you
+ * have called the `setAudioScenario` method and set the audio scenario to
+ * `AUDIO_SCENARIO_GAME_STREAMING`.
*/
bool captureAudio = false;
/**
- * The audio configuration for the shared screen stream.
+ * The audio configuration for the shared screen stream. See `ScreenAudioParameters`.
+ * @note This parameter only takes effect when `captureAudio` is `true`.
*/
ScreenAudioParameters audioParams;
/**
- * Determines whether to capture the screen during screen sharing:
- * - `true`: (Default) Capture.
- * - `false`: Do not capture.
- *
- * **Note**
- * Due to system limitations, screen capture is only available for Android API level 21 and later
- * (that is, Android 5 and later).
+ * Whether to capture the screen when screen sharing:
+ * - `true`: (Default) Capture the screen.
+ * - `false`: Do not capture the screen.
+ * @note Due to system limitations, the capture screen is only applicable to Android API level 21
+ * and above, that is, Android 5 and above.
*/
bool captureVideo = true;
/**
- * The video configuration for the shared screen stream.
+ * The video configuration for the shared screen stream. See `ScreenVideoParameters`.
+ * @note This parameter only takes effect when `captureVideo` is `true`.
*/
ScreenVideoParameters videoParams;
};
#endif
/**
- * The tracing event of media rendering.
+ * @brief The rendering state of the media frame.
*/
enum MEDIA_TRACE_EVENT {
/**
- * 0: The media frame has been rendered.
+ * 0: The video frame has been rendered.
*/
MEDIA_TRACE_EVENT_VIDEO_RENDERED = 0,
/**
- * 1: The media frame has been decoded.
+ * 1: The video frame has been decoded.
*/
MEDIA_TRACE_EVENT_VIDEO_DECODED,
};
/**
- * The video rendering tracing result
+ * @brief Indicators during video frame rendering progress.
*/
struct VideoRenderingTracingInfo {
/**
- * Elapsed time from the start tracing time to the time when the tracing event occurred.
+ * The time interval (ms) from `startMediaRenderingTracing` to SDK triggering the
+ * `onVideoRenderingTracingResult` callback. Agora recommends you call `startMediaRenderingTracing`
+ * before joining a channel.
*/
int elapsedTime;
/**
- * Elapsed time from the start tracing time to the time when join channel.
- *
- * **Note**
- * If the start tracing time is behind the time when join channel, this value will be negative.
+ * The time interval (ms) from `startMediaRenderingTracing` to `joinChannel(const char* token, const
+ * char* channelId, const char* info, uid_t uid)` or `joinChannel(const char* token, const char*
+ * channelId, uid_t uid, const ChannelMediaOptions& options)`
+ * . A negative number indicates that `startMediaRenderingTracing` is called after calling
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)`.
*/
int start2JoinChannel;
/**
- * Elapsed time from joining channel to finishing joining channel.
+ * The time interval (ms) from `joinChannel(const char* token, const char* channelId, const char*
+ * info, uid_t uid)` or `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to successfully joining
+ * the channel.
*/
int join2JoinSuccess;
/**
- * Elapsed time from finishing joining channel to remote user joined.
- *
- * **Note**
- * If the start tracing time is after the time finishing join channel, this value will be
- * the elapsed time from the start tracing time to remote user joined. The minimum value is 0.
+ * - If the local user calls `startMediaRenderingTracing` before successfully joining the channel,
+ * this value is the time interval (ms) from the local user successfully joining the channel to the
+ * remote user joining the channel.
+ * - If the local user calls `startMediaRenderingTracing` after successfully joining the channel,
+ * the value is the time interval (ms) from `startMediaRenderingTracing` to when the remote user
+ * joins the channel.
+ * @note
+ * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel,
+ * the value is 0 and meaningless.
+ * - In order to reduce the time of rendering the first frame for remote users, Agora recommends
+ * that the local user joins the channel when the remote user is in the channel to reduce this
+ * value.
*/
int joinSuccess2RemoteJoined;
/**
- * Elapsed time from remote user joined to set the view.
- *
- * **Note**
- * If the start tracing time is after the time when remote user joined, this value will be
- * the elapsed time from the start tracing time to set the view. The minimum value is 0.
+ * - If the local user calls `startMediaRenderingTracing` before the remote user joins the channel,
+ * this value is the time interval (ms) from when the remote user joins the channel to when the
+ * local user sets the remote view.
+ * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel,
+ * this value is the time interval (ms) from calling `startMediaRenderingTracing` to setting the
+ * remote view.
+ * @note
+ * - If the local user calls `startMediaRenderingTracing` after setting the remote view, the value
+ * is 0 and has no effect.
+ * - In order to reduce the time of rendering the first frame for remote users, Agora recommends
+ * that the local user sets the remote view before the remote user joins the channel, or sets the
+ * remote view immediately after the remote user joins the channel to reduce this value.
*/
int remoteJoined2SetView;
/**
- * Elapsed time from remote user joined to the time subscribing remote video stream.
- *
- * **Note**
- * If the start tracing time is after the time when remote user joined, this value will be
- * the elapsed time from the start tracing time to the time subscribing remote video stream.
- * The minimum value is 0.
+ * - If the local user calls `startMediaRenderingTracing` before the remote user joins the channel,
+ * this value is the time interval (ms) from the remote user joining the channel to subscribing to
+ * the remote video stream.
+ * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel,
+ * this value is the time interval (ms) from `startMediaRenderingTracing` to subscribing to the
+ * remote video stream.
+ * @note
+ * - If the local user calls `startMediaRenderingTracing` after subscribing to the remote video
+ * stream, the value is 0 and has no effect.
+ * - In order to reduce the time of rendering the first frame for remote users, Agora recommends
+ * that after the remote user joins the channel, the local user immediately subscribes to the remote
+ * video stream to reduce this value.
*/
int remoteJoined2UnmuteVideo;
/**
- * Elapsed time from remote user joined to the remote video packet received.
- *
- * **Note**
- * If the start tracing time is after the time when remote user joined, this value will be
- * the elapsed time from the start tracing time to the time subscribing remote video stream.
- * The minimum value is 0.
+ * - If the local user calls `startMediaRenderingTracing` before the remote user joins the channel,
+ * this value is the time interval (ms) from when the remote user joins the channel to when the
+ * local user receives the remote video stream.
+ * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel,
+ * this value is the time interval (ms) from `startMediaRenderingTracing` to receiving the remote
+ * video stream.
+ * @note
+ * - If the local user calls `startMediaRenderingTracing` after receiving the remote video stream,
+ * the value is 0 and has no effect.
+ * - In order to reduce the time of rendering the first frame for remote users, Agora recommends
+ * that the remote user publishes video streams immediately after joining the channel, and the local
+ * user immediately subscribes to remote video streams to reduce this value.
*/
int remoteJoined2PacketReceived;
};
@@ -7269,29 +7810,41 @@ enum CONFIG_FETCH_TYPE {
CONFIG_FETCH_TYPE_JOIN_CHANNEL = 2,
};
-/** The local proxy mode type. */
+/**
+ * @brief Connection mode with the Agora Private Media Server.
+ */
enum LOCAL_PROXY_MODE {
- /** 0: Connect local proxy with high priority, if not connected to local proxy, fallback to sdrtn.
+ /**
+ * 0: The SDK first tries to connect to the specified Agora Private Media Server; if it fails, it
+ * connects to the Agora SD-RTN™.
*/
ConnectivityFirst = 0,
- /** 1: Only connect local proxy
+ /**
+ * 1: The SDK only tries to connect to the specified Agora Private Media Server.
*/
LocalOnly = 1,
};
+/**
+ * @brief Configuration information for the log server.
+ */
struct LogUploadServerInfo {
- /** Log upload server domain
+ /**
+ * Domain name of the log server.
*/
const char* serverDomain;
- /** Log upload server path
+ /**
+ * Storage path for logs on the server.
*/
const char* serverPath;
- /** Log upload server port
+ /**
+ * Port of the log server.
*/
int serverPort;
- /** Whether to use HTTPS request:
- - true: Use HTTPS request
- - fasle: Use HTTP request
+ /**
+ * Whether the log server uses HTTPS protocol:
+ * - `true`: Uses HTTPS.
+ * - `false`: Uses HTTP.
*/
bool serverHttps;
@@ -7301,34 +7854,55 @@ struct LogUploadServerInfo {
: serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {}
};
+/**
+ * @brief Advanced options for the Local Access Point.
+ */
struct AdvancedConfigInfo {
- /** Log upload server
+ /**
+ * Custom log upload server. By default, the SDK uploads logs to the Agora log server. You can use
+ * this parameter to change the log upload server. See `LogUploadServerInfo`.
*/
LogUploadServerInfo logUploadServer;
};
+/**
+ * @brief Configuration for the Local Access Point.
+ */
struct LocalAccessPointConfiguration {
- /** Local access point IP address list.
+ /**
+ * Internal IP address list of the Local Access Point. Either ipList or domainList must be
+ * specified.
*/
const char** ipList;
- /** The number of local access point IP address.
+ /**
+ * Number of internal IP addresses for the Local Access Point. This value must match the number of
+ * IP addresses you provide.
*/
int ipListSize;
- /** Local access point domain list.
+ /**
+ * Domain name list of the Local Access Point. The SDK resolves the IP addresses of the Local Access
+ * Point from the provided domain names. The DNS resolution timeout is 10 seconds. Either ipList or
+ * domainList must be specified. If you specify both IP addresses and domain names, the SDK merges
+ * and deduplicates the resolved IP addresses and the specified IP addresses, then randomly selects
+ * one for load balancing.
*/
const char** domainList;
- /** The number of local access point domain.
+ /**
+ * Number of domain names for the Local Access Point. This value must match the number of domain
+ * names you provide.
*/
int domainListSize;
- /** Certificate domain name installed on specific local access point. pass "" means using sni
- * domain on specific local access point SNI(Server Name Indication) is an extension to the TLS
- * protocol.
+ /**
+ * Domain name for internal certificate verification. If left empty, the SDK uses the default domain
+ * name `secure-edge.local` for certificate verification.
*/
const char* verifyDomainName;
- /** Local proxy connection mode, connectivity first or local only.
+ /**
+ * Connection mode. See `LOCAL_PROXY_MODE`.
*/
LOCAL_PROXY_MODE mode;
- /** Local proxy connection, advanced Config info.
+ /**
+ * Advanced options for the Local Access Point. See `AdvancedConfigInfo`.
*/
AdvancedConfigInfo advancedConfig;
/**
@@ -7347,21 +7921,30 @@ struct LocalAccessPointConfiguration {
disableAut(true) {}
};
+/**
+ * @brief Type of video stream to be recorded.
+ */
enum RecorderStreamType {
+ /**
+ * 0: (Default) Video stream in the channel.
+ */
RTC,
+ /**
+ * 1: Local preview video stream before joining the channel.
+ */
PREVIEW,
};
/**
- * The information about recorded media streams.
+ * @brief The information about the media streams to be recorded.
*/
struct RecorderStreamInfo {
/**
- * The channel ID of the audio/video stream needs to be recorded.
+ * The name of the channel in which the media streams publish.
*/
const char* channelId;
/**
- * The user ID.
+ * The ID of the user whose media streams you want to record.
*/
uid_t uid;
/**
@@ -7467,77 +8050,128 @@ class LicenseCallback {
} // namespace base
/**
- * Spatial audio parameters
+ * @brief The spatial audio parameters.
*/
struct SpatialAudioParams {
/**
- * Speaker azimuth in a spherical coordinate system centered on the listener.
+ * The azimuth angle of the remote user or media player relative to the local user. The value range
+ * is [0,360], and the unit is degrees. The values are as follows:
+ * - 0: (Default) 0 degrees, which means directly in front on the horizontal plane.
+ * - 90: 90 degrees, which means directly to the left on the horizontal plane.
+ * - 180: 180 degrees, which means directly behind on the horizontal plane.
+ * - 270: 270 degrees, which means directly to the right on the horizontal plane.
+ * - 360: 360 degrees, which means directly in front on the horizontal plane.
*/
Optional speaker_azimuth;
/**
- * Speaker elevation in a spherical coordinate system centered on the listener.
+ * The elevation angle of the remote user or media player relative to the local user. The value
+ * range is [-90,90], and the unit is degrees. The values are as follows:
+ * - 0: (Default) 0 degrees, which means that the horizontal plane is not rotated.
+ * - -90: -90 degrees, which means that the horizontal plane is rotated 90 degrees downwards.
+ * - 90: 90 degrees, which means that the horizontal plane is rotated 90 degrees upwards.
*/
Optional speaker_elevation;
/**
- * Distance between speaker and listener.
+ * The distance of the remote user or media player relative to the local user. The value range is
+ * [1,50], and the unit is meters. The default value is 1 meter.
*/
Optional speaker_distance;
/**
- * Speaker orientation [0-180], 0 degree is the same with listener orientation.
+ * The orientation of the remote user or media player relative to the local user. The value range is
+ * [0,180], and the unit is degrees. The values are as follows:
+ * - 0: (Default) 0 degrees, which means that the sound source and listener face the same direction.
+ * - 180: 180 degrees, which means that the sound source and listener face each other.
*/
Optional speaker_orientation;
/**
- * Enable blur or not for the speaker.
+ * Whether to enable audio blurring:
+ * - `true`: Enable audio blurring.
+ * - `false`: (Default) Disable audio blurring.
*/
Optional enable_blur;
/**
- * Enable air absorb or not for the speaker.
+ * Whether to enable air absorption, that is, to simulate the sound attenuation effect of sound
+ * transmitting in the air; under a certain transmission distance, the attenuation speed of
+ * high-frequency sound is fast, and the attenuation speed of low-frequency sound is slow.
+ * - `true`: (Default) Enable air absorption. Make sure that the value of `speaker_attenuation` is
+ * not `0`; otherwise, this setting does not take effect.
+ * - `false`: Disable air absorption.
*/
Optional enable_air_absorb;
/**
- * Speaker attenuation factor.
+ * The sound attenuation coefficient of the remote user or media player. The value range is [0,1].
+ * The values are as follows:
+ * - 0: Broadcast mode, where the volume and timbre are not attenuated with distance, and the volume
+ * and timbre heard by local users do not change regardless of distance.
+ * - (0,0.5): Weak attenuation mode, where the volume and timbre only have a weak attenuation during
+ * the propagation, and the sound can travel farther than that in a real environment.
+ * `enable_air_absorb` needs to be enabled at the same time.
+ * - 0.5: (Default) Simulates the attenuation of the volume in the real environment; the effect is
+ * equivalent to not setting the `speaker_attenuation` parameter.
+ * - (0.5,1]: Strong attenuation mode, where volume and timbre attenuate rapidly during the
+ * propagation. `enable_air_absorb` needs to be enabled at the same time.
*/
Optional speaker_attenuation;
/**
- * Enable doppler factor.
+ * Whether to enable the Doppler effect: When there is a relative displacement between the sound
+ * source and the receiver of the sound source, the tone heard by the receiver changes.
+ * - `true`: Enable the Doppler effect.
+ * - `false`: (Default) Disable the Doppler effect.
+ * @note
+ * - This parameter is suitable for scenarios where the sound source is moving at high speed (for
+ * example, racing games). It is not recommended for common audio and video interactive scenarios
+ * (for example, voice chat, co-streaming, or online KTV).
+ * - When this parameter is enabled, Agora recommends that you set a regular period (such as 30 ms),
+ * and then call the `updatePlayerPositionInfo`, `updateSelfPosition`, and `updateRemotePosition`
+ * methods to continuously update the relative distance between the sound source and the receiver.
+ * The following factors can cause the Doppler effect to be unpredictable or the sound to be
+ * jittery: the period of updating the distance is too long, the updating period is irregular, or
+ * the distance information is lost due to network packet loss or delay.
*/
Optional enable_doppler;
};
/**
- * Layout info of video stream which compose a transcoder video stream.
+ * @brief Layout information of a specific sub-video stream within the mixed stream.
*/
struct VideoLayout {
/**
- * Channel Id from which this video stream come from.
+ * The channel name to which the sub-video stream belongs.
*/
const char* channelId;
/**
- * User id of video stream.
+ * User ID who published this sub-video stream.
*/
rtc::uid_t uid;
/**
- * User account of video stream.
+ * Reserved for future use.
*/
user_id_t strUid;
/**
- * x coordinate of video stream on a transcoded video stream canvas.
+ * X-coordinate (px) of the sub-video stream on the mixing canvas. The relative lateral displacement
+ * of the top left corner of the video for video mixing to the origin (the top left corner of the
+ * canvas).
*/
uint32_t x;
/**
- * y coordinate of video stream on a transcoded video stream canvas.
+ * Y-coordinate (px) of the sub-video stream on the mixing canvas. The relative longitudinal
+ * displacement of the top left corner of the captured video to the origin (the top left corner of
+ * the canvas).
*/
uint32_t y;
/**
- * width of video stream on a transcoded video stream canvas.
+ * Width (px) of the sub-video stream.
*/
uint32_t width;
/**
- * height of video stream on a transcoded video stream canvas.
+ * Height (px) of the sub-video stream.
*/
uint32_t height;
/**
- * video state of video stream on a transcoded video stream canvas.
- * 0 for normal video , 1 for placeholder image showed , 2 for black image.
+ * Status of the sub-video stream on the video mixing canvas.
+ * - 0: Normal. The sub-video stream has been rendered onto the mixing canvas.
+ * - 1: Placeholder image. The sub-video stream has no video frames and is displayed as a
+ * placeholder on the mixing canvas.
+ * - 2: Black image. The sub-video stream is replaced by a black image.
*/
uint32_t videoState;
@@ -7606,7 +8240,7 @@ AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char* credential_
AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback* callback);
/**
- * @brief Get the LicenseCallback pointer if already setup,
+ * @brief Gets the LicenseCallback pointer if already setup,
* otherwise, return null.
*
* @return a pointer of agora::base::LicenseCallback
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaBase.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaBase.h
index 6da9d7931..8e25b24c5 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaBase.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaBase.h
@@ -36,26 +36,25 @@ static const unsigned int DUMMY_CONNECTION_ID = (std::numeric_limits\AppData\Local\Agora\\example.jpg`
- * - iOS: `/App Sandbox/Library/Caches/example.jpg`
+ * - iOS: `/App Sandbox/Library/Caches/example.jpg`
* - macOS: `~/Library/Logs/example.jpg`
- * - Android: `/storage/emulated/0/Android/data//files/example.jpg`
+ * - Android: `/storage/emulated/0/Android/data/<package name>/files/example.jpg`
+ * @note Ensure that the path you specify exists and is writable.
*/
const char* filePath;
- /**
- * The position of the video observation. See VIDEO_MODULE_POSITION.
- *
- * Allowed values vary depending on the `uid` parameter passed in `takeSnapshot` or `takeSnapshotEx`:
- * - uid = 0: Position 2, 4 and 8 are allowed.
- * - uid != 0: Only position 2 is allowed.
- *
+ /**
+ * The position of the snapshot video frame in the video pipeline. See `VIDEO_MODULE_POSITION`.
*/
media::base::VIDEO_MODULE_POSITION position;
SnapshotConfig() :filePath(NULL), position(media::base::POSITION_PRE_ENCODER) {}
@@ -1250,12 +1395,14 @@ struct SnapshotConfig {
class IAudioPcmFrameSink {
public:
/**
- * Occurs when each time the player receives an audio frame.
+ * @brief Occurs each time the player receives an audio frame.
+ *
+ * @details
+ * After registering the audio frame observer, the callback occurs every time the player receives an
+ * audio frame, reporting the detailed information of the audio frame.
+ *
+ * @param frame The audio frame information. See AudioPcmFrame.
*
- * After registering the audio frame observer,
- * the callback occurs when each time the player receives an audio frame,
- * reporting the detailed information of the audio frame.
- * @param frame The detailed information of the audio frame. See {@link AudioPcmFrame}.
*/
virtual void onFrame(agora::media::base::AudioPcmFrame* frame) = 0;
virtual ~IAudioPcmFrameSink() {}
@@ -1267,62 +1414,56 @@ class IAudioPcmFrameSink {
class IAudioFrameObserverBase {
public:
/**
- * Audio frame types.
+ * @brief Audio frame type.
*/
enum AUDIO_FRAME_TYPE {
/**
- * 0: 16-bit PCM.
+ * 0: PCM 16
*/
FRAME_TYPE_PCM16 = 0,
};
enum { MAX_HANDLE_TIME_CNT = 10 };
/**
- * The definition of the AudioFrame struct.
+ * @brief Raw audio data.
*/
struct AudioFrame {
/**
- * The audio frame type: #AUDIO_FRAME_TYPE.
+ * The type of the audio frame. See `AUDIO_FRAME_TYPE`.
*/
AUDIO_FRAME_TYPE type;
/**
- * The number of samples per channel in this frame.
+ * The number of samples per channel in the audio frame.
*/
int samplesPerChannel;
/**
- * The number of bytes per sample: #BYTES_PER_SAMPLE
+ * The number of bytes per sample. For PCM, this parameter is generally set to 16 bits (2 bytes).
*/
agora::rtc::BYTES_PER_SAMPLE bytesPerSample;
/**
- * The number of audio channels (data is interleaved, if stereo).
+ * The number of audio channels (the data are interleaved if it is stereo).
* - 1: Mono.
* - 2: Stereo.
*/
int channels;
/**
- * The sample rate
+ * The sample rate (Hz) of the audio frame.
*/
int samplesPerSec;
/**
- * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data
- * buffer is interleaved.
- *
- * Buffer data size: buffer = samplesPerChannel × channels × bytesPerSample.
+ * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data buffer
+ * is interleaved.
+ * The size of the data buffer is as follows: `buffer` = `samplesPerChannel` × `channels` × `bytesPerSample`.
*/
void* buffer;
/**
- * The timestamp to render the audio data.
- *
+ * The timestamp (ms) of the external audio frame.
* You can use this timestamp to restore the order of the captured audio frame, and synchronize
- * audio and video frames in video scenarios, including scenarios where external video sources
- * are used.
+ * audio and video frames in video scenarios, including scenarios where external video sources are
+ * used.
*/
int64_t renderTimeMs;
/**
- * A reserved parameter.
- *
- * You can use this presentationMs parameter to indicate the presenation milisecond timestamp,
- * this will then filled into audio4 extension part, the remote side could use this pts in av
- * sync process with video frame.
+ * Reserved for future use.
*/
int avsync_type;
/**
@@ -1374,30 +1515,52 @@ class IAudioFrameObserverBase {
AUDIO_FRAME_POSITION_EAR_MONITORING = 0x0010,
};
+ /**
+ * @brief Audio data format.
+ *
+ * @details
+ * You can pass the `AudioParams` object in the following APIs to set the audio data format for the
+ * corresponding callback:
+ * - `getRecordAudioParams`: Sets the audio data format for the `onRecordAudioFrame` callback.
+ * - `getPlaybackAudioParams`: Sets the audio data format for the `onPlaybackAudioFrame` callback.
+ * - `getMixedAudioParams`: Sets the audio data format for the `onMixedAudioFrame` callback.
+ * - `getEarMonitoringAudioParams`: Sets the audio data format for the `onEarMonitoringAudioFrame`
+ * callback.
+ *
+ * @note
+ * - The SDK calculates the sampling interval through the `samplesPerCall`, `sampleRate`, and
+ * `channel` parameters in `AudioParams`, and triggers the `onRecordAudioFrame`,
+ * `onPlaybackAudioFrame`, `onMixedAudioFrame`, and `onEarMonitoringAudioFrame` callbacks according
+ * to the sampling interval.
+ * - Sample interval (sec) = `samplesPerCall` / (`sampleRate` × `channel`).
+ * - Ensure that the sample interval ≥ 0.01 (s).
+ *
+ */
struct AudioParams {
- /** The audio sample rate (Hz), which can be set as one of the following values:
-
- - `8000`
- - `16000` (Default)
- - `32000`
- - `44100 `
- - `48000`
+ /**
+ * The audio sample rate (Hz), which can be set as one of the following values:
+ * - 8000.
+ * - (Default) 16000.
+ * - 32000.
+ * - 44100.
+ * - 48000.
*/
int sample_rate;
- /* The number of audio channels, which can be set as either of the following values:
-
- - `1`: Mono (Default)
- - `2`: Stereo
+ /**
+ * The number of audio channels, which can be set as either of the following values:
+ * - 1: (Default) Mono.
+ * - 2: Stereo.
*/
int channels;
- /* The use mode of the audio data. See AgoraAudioRawFrameOperationMode.
+ /**
+ * The use mode of the audio data. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`.
*/
rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE mode;
- /** The number of samples. For example, set it as 1024 for RTMP or RTMPS
- streaming.
+ /**
+ * The number of samples, such as 1024 for the media push.
*/
int samples_per_call;
@@ -1418,38 +1581,107 @@ class IAudioFrameObserverBase {
virtual ~IAudioFrameObserverBase() {}
/**
- * Occurs when the recorded audio frame is received.
- * @param channelId The channel name
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Gets the captured audio frame.
+ *
+ * @details
+ * To ensure that the format of the captured audio frame is as expected, you can choose one of the
+ * following two methods to set the audio data format:
+ * - Method 1: After calling `setRecordingAudioFrameParameters` to set the audio data format and
+ * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the
+ * sampling interval according to the parameters set in the methods, and triggers the
+ * `onRecordAudioFrame` callback according to the sampling interval.
+ * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer
+ * object, set the audio data format in the return value of the `getObservedAudioFramePosition`
+ * callback. The SDK then calculates the sampling interval according to the return value of the
+ * `getRecordAudioParams` callback, and triggers the `onRecordAudioFrame` callback according to the
+ * sampling interval.
+ *
+ * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the
+ * audio data format, the setting of method 2 is invalid.
+ *
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ * @param channelId The channel ID.
+ *
* @return
- * - true: The recorded audio frame is valid and is encoded and sent.
- * - false: The recorded audio frame is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onRecordAudioFrame(const char* channelId, AudioFrame& audioFrame) = 0;
/**
- * Occurs when the playback audio frame is received.
- * @param channelId The channel name
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Gets the raw audio frame for playback.
+ *
+ * @details
+ * To ensure that the data format of audio frame for playback is as expected, Agora recommends that
+ * you choose one of the following two methods to set the audio data format:
+ * - Method 1: After calling `setPlaybackAudioFrameParameters` to set the audio data format and
+ * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the
+ * sampling interval according to the parameters set in the methods, and triggers the
+ * `onPlaybackAudioFrame` callback according to the sampling interval.
+ * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer
+ * object, set the audio data format in the return value of the `getObservedAudioFramePosition`
+ * callback. The SDK then calculates the sampling interval according to the return value of the
+ * `getPlaybackAudioParams` callback, and triggers the `onPlaybackAudioFrame` callback according to
+ * the sampling interval.
+ *
+ * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the
+ * audio data format, the setting of method 2 is invalid.
+ *
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ * @param channelId The channel ID.
+ *
* @return
- * - true: The playback audio frame is valid and is encoded and sent.
- * - false: The playback audio frame is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onPlaybackAudioFrame(const char* channelId, AudioFrame& audioFrame) = 0;
/**
- * Occurs when the mixed audio data is received.
- * @param channelId The channel name
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Retrieves the mixed captured and playback audio frame.
+ *
+ * @details
+ * To ensure that the data format of mixed captured and playback audio frame meets the expectations,
+ * Agora recommends that you choose one of the following two ways to set the data format:
+ * - Method 1: After calling `setMixedAudioFrameParameters` to set the audio data format and
+ * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the
+ * sampling interval according to the parameters set in the methods, and triggers the
+ * `onMixedAudioFrame` callback according to the sampling interval.
+ * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer
+ * object, set the audio data format in the return value of the `getObservedAudioFramePosition`
+ * callback. The SDK then calculates the sampling interval according to the return value of the
+ * `getMixedAudioParams` callback, and triggers the `onMixedAudioFrame` callback according to the
+ * sampling interval.
+ *
+ * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the
+ * audio data format, the setting of method 2 is invalid.
+ *
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ * @param channelId The channel ID.
+ *
* @return
- * - true: The mixed audio data is valid and is encoded and sent.
- * - false: The mixed audio data is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onMixedAudioFrame(const char* channelId, AudioFrame& audioFrame) = 0;
/**
- * Occurs when the ear monitoring audio frame is received.
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Gets the in-ear monitoring audio frame.
+ *
+ * @details
+ * In order to ensure that the obtained in-ear audio data meets the expectations, Agora recommends
+ * that you choose one of the following two methods to set the in-ear monitoring audio data
+ * format:
+ * - Method 1: After calling `setEarMonitoringAudioFrameParameters` to set the audio data format and
+ * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the
+ * sampling interval according to the parameters set in the methods, and triggers the
+ * `onEarMonitoringAudioFrame` callback according to the sampling interval.
+ * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer
+ * object, set the audio data format in the return value of the `getObservedAudioFramePosition`
+ * callback. The SDK then calculates the sampling interval according to the return value of the
+ * `getEarMonitoringAudioParams` callback, and triggers the `onEarMonitoringAudioFrame` callback
+ * according to the sampling interval.
+ *
+ * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the
+ * audio data format, the setting of method 2 is invalid.
+ *
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ *
* @return
- * - true: The ear monitoring audio data is valid and is encoded and sent.
- * - false: The ear monitoring audio data is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onEarMonitoringAudioFrame(AudioFrame& audioFrame) = 0;
/**
@@ -1470,75 +1702,109 @@ class IAudioFrameObserverBase {
}
/**
- * Sets the frame position for the audio observer.
- * @return A bit mask that controls the frame position of the audio observer.
- * @note - Use '|' (the OR operator) to observe multiple frame positions.
- *
- * After you successfully register the audio observer, the SDK triggers this callback each time it
- * receives a audio frame. You can determine which position to observe by setting the return
- * value. The SDK provides 4 positions for observer. Each position corresponds to a callback
- * function:
- * - `AUDIO_FRAME_POSITION_PLAYBACK (1 << 0)`: The position for playback audio frame is received,
- * which corresponds to the \ref onPlaybackFrame "onPlaybackFrame" callback.
- * - `AUDIO_FRAME_POSITION_RECORD (1 << 1)`: The position for record audio frame is received,
- * which corresponds to the \ref onRecordFrame "onRecordFrame" callback.
- * - `AUDIO_FRAME_POSITION_MIXED (1 << 2)`: The position for mixed audio frame is received, which
- * corresponds to the \ref onMixedFrame "onMixedFrame" callback.
- * - `AUDIO_FRAME_POSITION_BEFORE_MIXING (1 << 3)`: The position for playback audio frame before
- * mixing is received, which corresponds to the \ref onPlaybackFrameBeforeMixing
- * "onPlaybackFrameBeforeMixing" callback.
- * @return The bit mask that controls the audio observation positions.
- * See AUDIO_FRAME_POSITION.
+ * @brief Sets the frame position for the audio observer.
+ *
+ * @details
+ * After successfully registering the audio data observer, the SDK uses this callback for each
+ * specific audio frame processing node to determine whether to trigger the following callbacks:
+ * - `onRecordAudioFrame`
+ * - `onPlaybackAudioFrame`
+ * - `onPlaybackAudioFrameBeforeMixing`
+ * - `onMixedAudioFrame`
+ * - `onEarMonitoringAudioFrame`
+ * You can set one or more positions you need to observe by modifying the return value of
+ * `getObservedAudioFramePosition` based on your scenario requirements:
+ * When observing multiple positions, use | (the OR operator) to combine them. To conserve
+ * system resources, you can reduce the number of frame positions that you want to observe.
+ *
+ * @return
+ * a bitmask that sets the observation position, with the following values:
+ * - AUDIO_FRAME_POSITION_PLAYBACK (0x0001): This position can observe the playback audio mixed by
+ * all remote users, corresponding to the `onPlaybackAudioFrame` callback.
+ * - AUDIO_FRAME_POSITION_RECORD (0x0002): This position can observe the collected local user's
+ * audio, corresponding to the `onRecordAudioFrame` callback.
+ * - AUDIO_FRAME_POSITION_MIXED (0x0004): This position can observe the playback audio mixed by the
+ * local user and all remote users, corresponding to the `onMixedAudioFrame` callback.
+ * - AUDIO_FRAME_POSITION_BEFORE_MIXING (0x0008): This position can observe the audio of a single
+ * remote user before mixing, corresponding to the `onPlaybackAudioFrameBeforeMixing` callback.
+ * - AUDIO_FRAME_POSITION_EAR_MONITORING (0x0010): This position can observe the in-ear monitoring
+ * audio of the local user, corresponding to the `onEarMonitoringAudioFrame` callback.
*/
-
virtual int getObservedAudioFramePosition() = 0;
- /** Sets the audio playback format
- **Note**:
-
- - The SDK calculates the sample interval according to the `AudioParams`
- you set in the return value of this callback and triggers the
- `onPlaybackAudioFrame` callback at the calculated sample interval.
- Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
- Ensure that the value of sample interval is equal to or greater than 0.01.
-
- @return Sets the audio format. See AgoraAudioParams.
+ /**
+ * @brief Sets the audio format for the `onPlaybackAudioFrame` callback.
+ *
+ * @details
+ * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+ * successfully register the audio observer, the SDK triggers this callback, and you can set the
+ * audio format in the return value of this callback.
+ *
+ * @note
+ * The SDK triggers the `onPlaybackAudioFrame` callback with the `AudioParams` calculated sampling
+ * interval you set in the return value. The calculation formula is Sample interval (sec) =
+ * `samplesPerCall`/(`sampleRate` × `channel`).
+ * Ensure that the sample interval ≥ 0.01 (s).
+ *
+ * @return
+ * The audio data for playback, see `AudioParams`.
*/
virtual AudioParams getPlaybackAudioParams() = 0;
- /** Sets the audio recording format
- **Note**:
- - The SDK calculates the sample interval according to the `AudioParams`
- you set in the return value of this callback and triggers the
- `onRecordAudioFrame` callback at the calculated sample interval.
- Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
- Ensure that the value of sample interval is equal to or greater than 0.01.
-
- @return Sets the audio format. See AgoraAudioParams.
+ /**
+ * @brief Sets the audio format for the `onRecordAudioFrame` callback.
+ *
+ * @details
+ * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+ * successfully register the audio observer, the SDK triggers this callback, and you can set the
+ * audio format in the return value of this callback.
+ *
+ * @note
+ * The SDK triggers the `onRecordAudioFrame` callback with the `AudioParams` calculated sampling
+ * interval you set in the return value. The calculation formula is Sample interval (sec) =
+ * `samplesPerCall`/(`sampleRate` × `channel`).
+ * Ensure that the sample interval ≥ 0.01 (s).
+ *
+ * @return
+ * The captured audio data, see `AudioParams`.
*/
virtual AudioParams getRecordAudioParams() = 0;
- /** Sets the audio mixing format
- **Note**:
- - The SDK calculates the sample interval according to the `AudioParams`
- you set in the return value of this callback and triggers the
- `onMixedAudioFrame` callback at the calculated sample interval.
- Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
- Ensure that the value of sample interval is equal to or greater than 0.01.
-
- @return Sets the audio format. See AgoraAudioParams.
+ /**
+ * @brief Sets the audio format for the `onMixedAudioFrame` callback.
+ *
+ * @details
+ * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+ * successfully register the audio observer, the SDK triggers this callback, and you can set the
+ * audio format in the return value of this callback.
+ *
+ * @note
+ * The SDK triggers the `onMixedAudioFrame` callback with the `AudioParams` calculated sampling
+ * interval you set in the return value. The calculation formula is Sample interval (sec) =
+ * `samplesPerCall`/(`sampleRate` × `channel`).
+ * Ensure that the sample interval ≥ 0.01 (s).
+ *
+ * @return
+ * The mixed captured and playback audio data. See `AudioParams`.
*/
virtual AudioParams getMixedAudioParams() = 0;
- /** Sets the ear monitoring audio format
- **Note**:
- - The SDK calculates the sample interval according to the `AudioParams`
- you set in the return value of this callback and triggers the
- `onEarMonitoringAudioFrame` callback at the calculated sample interval.
- Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
- Ensure that the value of sample interval is equal to or greater than 0.01.
-
- @return Sets the audio format. See AgoraAudioParams.
+ /**
+ * @brief Sets the audio format for the `onEarMonitoringAudioFrame` callback.
+ *
+ * @details
+ * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+ * successfully register the audio observer, the SDK triggers this callback, and you can set the
+ * audio format in the return value of this callback.
+ *
+ * @note
+ * The SDK triggers the `onEarMonitoringAudioFrame` callback with the `AudioParams` calculated
+ * sampling interval you set in the return value. The calculation formula is Sample interval (sec) =
+ * `samplesPerCall`/(`sampleRate` × `channel`).
+ * Ensure that the sample interval ≥ 0.01 (s).
+ *
+ * @return
+ * The audio data of in-ear monitoring, see `AudioParams`.
*/
virtual AudioParams getEarMonitoringAudioParams() = 0;
};
@@ -1550,25 +1816,31 @@ class IAudioFrameObserver : public IAudioFrameObserverBase {
public:
using IAudioFrameObserverBase::onPlaybackAudioFrameBeforeMixing;
/**
- * Occurs when the before-mixing playback audio frame is received.
- * @param channelId The channel name
- * @param uid ID of the remote user.
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Retrieves the audio frame before mixing of subscribed remote users.
+ *
+ * @param channelId The channel ID.
+ * @param uid The ID of subscribed remote users.
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ *
* @return
- * - true: The before-mixing playback audio frame is valid and is encoded and sent.
- * - false: The before-mixing playback audio frame is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid,
AudioFrame& audioFrame) = 0;
};
+/**
+ * @brief The audio spectrum data.
+ */
struct AudioSpectrumData {
/**
- * The audio spectrum data of audio.
+ * The audio spectrum data. Agora divides the audio frequency into 256 frequency domains, and
+ * reports the energy value of each frequency domain through this parameter. The value range of each
+ * energy type is [-300, 1] and the unit is dBFS.
*/
const float* audioSpectrumData;
/**
- * The data length of audio spectrum data.
+ * The audio spectrum data length is 256.
*/
int dataLength;
@@ -1576,13 +1848,16 @@ struct AudioSpectrumData {
AudioSpectrumData(const float* data, int length) : audioSpectrumData(data), dataLength(length) {}
};
+/**
+ * @brief Audio spectrum information of the remote user.
+ */
struct UserAudioSpectrumInfo {
/**
- * User ID of the speaker.
+ * The user ID of the remote user.
*/
agora::rtc::uid_t uid;
/**
- * The audio spectrum data of audio.
+ * Audio spectrum information of the remote user. See `AudioSpectrumData`.
*/
struct AudioSpectrumData spectrumData;
@@ -1600,37 +1875,40 @@ class IAudioSpectrumObserver {
virtual ~IAudioSpectrumObserver() {}
/**
- * Reports the audio spectrum of local audio.
+ * @brief Gets the statistics of a local audio spectrum.
*
- * This callback reports the audio spectrum data of the local audio at the moment
- * in the channel.
+ * @details
+ * After successfully calling `registerAudioSpectrumObserver` to implement the
+ * `onLocalAudioSpectrum` callback in `IAudioSpectrumObserver` and calling
+ * `enableAudioSpectrumMonitor` to enable audio spectrum monitoring, the SDK triggers this callback
+ * at the time interval you set to report the local audio data spectrum before encoding.
*
- * You can set the time interval of this callback using \ref
- * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor".
+ * @param data The audio spectrum data of the local user. See `AudioSpectrumData`.
*
- * @param data The audio spectrum data of local audio.
- * - true: Processed.
- * - false: Not processed.
+ * @return
+ * Whether the spectrum data is received:
+ * - `true`: Spectrum data is received.
+ * - `false`: No spectrum data is received.
*/
virtual bool onLocalAudioSpectrum(const AudioSpectrumData& data) = 0;
/**
- * Reports the audio spectrum of remote user.
+ * @brief Gets the remote audio spectrum.
*
- * This callback reports the IDs and audio spectrum data of the loudest speakers at the moment
- * in the channel.
+ * @details
+ * After successfully calling `registerAudioSpectrumObserver` to implement the
+ * `onRemoteAudioSpectrum` callback in the `IAudioSpectrumObserver` and calling
+ * `enableAudioSpectrumMonitor` to enable audio spectrum monitoring, the SDK will trigger the
+ * callback at the time interval you set to report the received remote audio data spectrum.
*
- * You can set the time interval of this callback using \ref
- * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor".
+ * @param spectrums The audio spectrum information of the remote user. See `UserAudioSpectrumInfo`.
+ * The number of arrays is the number of remote users monitored by the SDK. If the array is null, it
+ * means that no audio spectrum of remote users is detected.
+ * @param spectrumNumber The number of remote users.
*
- * @param spectrums The pointer to \ref agora::media::UserAudioSpectrumInfo
- * "UserAudioSpectrumInfo", which is an array containing the user ID and audio spectrum data for
- * each speaker.
- * - This array contains the following members:
- * - `uid`, which is the UID of each remote speaker
- * - `spectrumData`, which reports the audio spectrum of each remote speaker.
- * @param spectrumNumber The array length of the spectrums.
- * - true: Processed.
- * - false: Not processed.
+ * @return
+ * Whether the spectrum data is received:
+ * - `true`: Spectrum data is received.
+ * - `false`: No spectrum data is received.
*/
virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums,
unsigned int spectrumNumber) = 0;
@@ -1642,17 +1920,26 @@ class IAudioSpectrumObserver {
class IVideoEncodedFrameObserver {
public:
/**
- * Occurs each time the SDK receives an encoded video image.
- * @param uid The user id of remote user.
- * @param imageBuffer The pointer to the video image buffer.
+ * @brief Reports that the receiver has received the to-be-decoded video frame sent by the remote
+ * end.
+ *
+ * @details
+ * If you call the `setRemoteVideoSubscriptionOptions` method and set `encodedFrameOnly` to `true`,
+ * the SDK triggers this callback locally to report the received encoded video frame information.
+ *
+ * @since 4.6.0
+ * @param channelId The channel name.
+ * @param uid The user ID of the remote user.
+ * @param imageBuffer The encoded video image buffer.
* @param length The data length of the video image.
- * @param videoEncodedFrameInfo The information of the encoded video frame: EncodedVideoFrameInfo.
- * @return Determines whether to accept encoded video image.
- * - true: Accept.
- * - false: Do not accept.
+ * @param videoEncodedFrameInfo For the information of the encoded video frame, see
+ * `EncodedVideoFrameInfo`.
+ *
+ * @return
+ * Without practical meaning.
*/
virtual bool onEncodedVideoFrameReceived(
- rtc::uid_t uid, const uint8_t* imageBuffer, size_t length,
+ const char* channelId, rtc::uid_t uid, const uint8_t* imageBuffer, size_t length,
const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0;
virtual ~IVideoEncodedFrameObserver() {}
@@ -1665,19 +1952,17 @@ class IVideoFrameObserver {
public:
typedef media::base::VideoFrame VideoFrame;
/**
- * The process mode of the video frame:
+ * @brief The process mode of the video frame:
*/
enum VIDEO_FRAME_PROCESS_MODE {
/**
* Read-only mode.
- *
* In this mode, you do not modify the video frame. The video frame observer is a renderer.
*/
PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original
// frame.
/**
* Read and write mode.
- *
* In this mode, you modify the video frame. The video frame observer is a video filter.
*/
PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and
@@ -1688,52 +1973,87 @@ class IVideoFrameObserver {
virtual ~IVideoFrameObserver() {}
/**
- * Occurs each time the SDK receives a video frame captured by the local camera.
- *
- * After you successfully register the video frame observer, the SDK triggers this callback each
- * time a video frame is received. In this callback, you can get the video data captured by the
- * local camera. You can then pre-process the data according to your scenarios.
+ * @brief Occurs each time the SDK receives a video frame captured by local devices.
*
- * After pre-processing, you can send the processed video data back to the SDK by setting the
- * `videoFrame` parameter in this callback.
+ * @details
+ * You can get raw video data collected by the local device through this callback and preprocess it
+ * as needed. Once the preprocessing is complete, you can directly modify `videoFrame` in this
+ * callback, and set the return value to `true` to send the modified video data to the SDK.
+ * If you need to send the preprocessed data to the SDK, you need to call `getVideoFrameProcessMode`
+ * first to set the video processing mode to read and write mode ( PROCESS_MODE_READ_WRITE ).
+ * Applicable scenarios: - Preprocess the locally collected video data before it is processed by the
+ * SDK. For example, get video data through this callback and process it with filters, watermarks,
+ * cropping, rotation, etc.
+ * - Get information about the locally collected video data before it is processed by the SDK. For
+ * example, the original width, height, frame rate of the video frame, etc.
+ * Call timing: After the successful registration of the video data observer, each time the SDK
+ * captures a video frame.
*
* @note
- * - If you get the video data in RGBA color encoding format, Agora does not support using this
- * callback to send the processed data in RGBA color encoding format back to the SDK.
- * - The video data that this callback gets has not been pre-processed, such as watermarking,
- * cropping content, rotating, or image enhancement.
+ * - If the video data type you get is RGBA, the SDK does not support processing the data of the
+ * alpha channel.
+ * - It is recommended that you ensure the modified parameters in `videoFrame` are consistent with
+ * the actual situation of the video frames in the video frame buffer. Otherwise, it may cause
+ * unexpected rotation, distortion, and other issues in the local preview and remote video display.
+ * The default video format that you get from this callback is YUV420. If you need other formats,
+ * you can set the expected data format in the getVideoFormatPreference callback.
*
- * @param videoFrame A pointer to the video frame: VideoFrame
- * @param sourceType source type of video frame. See #VIDEO_SOURCE_TYPE.
- * @return Determines whether to ignore the current video frame if the pre-processing fails:
- * - true: Do not ignore.
- * - false: Ignore, in which case this method does not sent the current video frame to the SDK.
+ * @param sourceType Video source types, including cameras, screens, or media player. See
+ * `VIDEO_SOURCE_TYPE`.
+ * @param videoFrame The video frame. See `VideoFrame`.Note: The default value of the video frame
+ * data format obtained through this callback is as follows:
+ * - Android: I420 or RGB (GLES20.GL_TEXTURE_2D)
+ * - iOS: I420 or CVPixelBufferRef
+ * - macOS: I420 or CVPixelBufferRef
+ * - Windows: YUV420
+ *
+ * @return
+ * - When the video processing mode is `PROCESS_MODE_READ_ONLY`:
+ * - `true`: Reserved for future use.
+ * - `false`: Reserved for future use.
+ * - When the video processing mode is `PROCESS_MODE_READ_WRITE`:
+ * - `true`: Sets the SDK to receive the video frame.
+ * - `false`: Sets the SDK to discard the video frame.
*/
virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType,
VideoFrame& videoFrame) = 0;
/**
- * Occurs each time the SDK receives a video frame before encoding.
+ * @brief Occurs each time the SDK receives a video frame before encoding.
*
+ * @details
* After you successfully register the video frame observer, the SDK triggers this callback each
- * time when it receives a video frame. In this callback, you can get the video data before
- * encoding. You can then process the data according to your particular scenarios.
- *
- * After processing, you can send the processed video data back to the SDK by setting the
- * `videoFrame` parameter in this callback.
+ * time it receives a video frame. In this callback, you can get the video data before encoding and
+ * then process the data according to your particular scenarios.
+ * After processing, you can send the processed video data back to the SDK in this callback.
*
* @note
- * - To get the video data captured from the second screen before encoding, you need to set (1 <<
- * 2) as a frame position through `getObservedFramePosition`.
- * - The video data that this callback gets has been pre-processed, such as watermarking, cropping
- * content, rotating, or image enhancement.
- * - This callback does not support sending processed RGBA video data back to the SDK.
+ * - If you need to send the preprocessed data to the SDK, you need to call
+ * `getVideoFrameProcessMode` first to set the video processing mode to read and write mode (
+ * PROCESS_MODE_READ_WRITE ).
+ * - To get the video data captured from the second screen before encoding, you need to set
+ * `POSITION_PRE_ENCODER` (1 << 2) as a frame position through `getObservedFramePosition`.
+ * - The video data that this callback gets has been preprocessed, with its content cropped and
+ * rotated, and the image enhanced.
+ * - It is recommended that you ensure the modified parameters in `videoFrame` are consistent with
+ * the actual situation of the video frames in the video frame buffer. Otherwise, it may cause
+ * unexpected rotation, distortion, and other issues in the local preview and remote video display.
*
- * @param videoFrame A pointer to the video frame: VideoFrame
- * @param sourceType source type of video frame. See #VIDEO_SOURCE_TYPE.
- * @return Determines whether to ignore the current video frame if the pre-processing fails:
- * - true: Do not ignore.
- * - false: Ignore, in which case this method does not sent the current video frame to the SDK.
+ * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ * @param videoFrame The video frame. See `VideoFrame`.Note: The default value of the video frame
+ * data format obtained through this callback is as follows:
+ * - Android: I420 or RGB (GLES20.GL_TEXTURE_2D)
+ * - iOS: I420 or CVPixelBufferRef
+ * - macOS: I420 or CVPixelBufferRef
+ * - Windows: YUV420
+ *
+ * @return
+ * - When the video processing mode is `PROCESS_MODE_READ_ONLY`:
+ * - `true`: Reserved for future use.
+ * - `false`: Reserved for future use.
+ * - When the video processing mode is `PROCESS_MODE_READ_WRITE`:
+ * - `true`: Sets the SDK to receive the video frame.
+ * - `false`: Sets the SDK to discard the video frame.
*/
virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType,
VideoFrame& videoFrame) = 0;
@@ -1764,23 +2084,41 @@ class IVideoFrameObserver {
virtual bool onMediaPlayerVideoFrame(VideoFrame& videoFrame, int mediaPlayerId) = 0;
/**
- * Occurs each time the SDK receives a video frame sent by the remote user.
+ * @brief Occurs each time the SDK receives a video frame sent by the remote user.
*
+ * @details
* After you successfully register the video frame observer, the SDK triggers this callback each
- * time a video frame is received. In this callback, you can get the video data sent by the remote
- * user. You can then post-process the data according to your scenarios.
+ * time it receives a video frame. In this callback, you can get the video data sent from the remote
+ * end before rendering, and then process it according to the particular scenarios.
+ * The default video format that you get from this callback is YUV420. If you need other formats,
+ * you can set the expected data format in the `getVideoFormatPreference` callback.
*
- * After post-processing, you can send the processed data back to the SDK by setting the
- * `videoFrame` parameter in this callback.
+ * @note
+ * - If you need to send the preprocessed data to the SDK, you need to call
+ * `getVideoFrameProcessMode` first to set the video processing mode to read and write mode (
+ * PROCESS_MODE_READ_WRITE ).
+ * - If the video data type you get is RGBA, the SDK does not support processing the data of the
+ * alpha channel.
+ * - It is recommended that you ensure the modified parameters in `videoFrame` are consistent with
+ * the actual situation of the video frames in the video frame buffer. Otherwise, it may cause
+ * unexpected rotation, distortion, and other issues in the local preview and remote video display.
*
- * @note This callback does not support sending processed RGBA video data back to the SDK.
+ * @param remoteUid The user ID of the remote user who sends the current video frame.
+ * @param videoFrame The video frame. See `VideoFrame`.Note: The default value of the video frame
+ * data format obtained through this callback is as follows:
+ * - Android: I420 or RGB (GLES20.GL_TEXTURE_2D)
+ * - iOS: I420 or CVPixelBufferRef
+ * - macOS: I420 or CVPixelBufferRef
+ * - Windows: YUV420
+ * @param channelId The channel ID.
*
- * @param channelId The channel name
- * @param remoteUid ID of the remote user who sends the current video frame.
- * @param videoFrame A pointer to the video frame: VideoFrame
- * @return Determines whether to ignore the current video frame if the post-processing fails:
- * - true: Do not ignore.
- * - false: Ignore, in which case this method does not sent the current video frame to the SDK.
+ * @return
+ * - When the video processing mode is `PROCESS_MODE_READ_ONLY`:
+ * - `true`: Reserved for future use.
+ * - `false`: Reserved for future use.
+ * - When the video processing mode is `PROCESS_MODE_READ_WRITE`:
+ * - `true`: Sets the SDK to receive the video frame.
+ * - `false`: Sets the SDK to discard the video frame.
*/
virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid,
VideoFrame& videoFrame) = 0;
@@ -1788,13 +2126,16 @@ class IVideoFrameObserver {
virtual bool onTranscodedVideoFrame(VideoFrame& videoFrame) = 0;
/**
- * Occurs each time the SDK receives a video frame and prompts you to set the process mode of the
- * video frame.
+ * @brief Occurs each time the SDK receives a video frame and prompts you to set the process mode of
+ * the video frame.
*
+ * @details
* After you successfully register the video frame observer, the SDK triggers this callback each
* time it receives a video frame. You need to set your preferred process mode in the return value
* of this callback.
- * @return VIDEO_FRAME_PROCESS_MODE.
+ *
+ * @return
+ * See `VIDEO_FRAME_PROCESS_MODE`.
*/
virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { return PROCESS_MODE_READ_ONLY; }
@@ -1816,61 +2157,90 @@ class IVideoFrameObserver {
virtual base::VIDEO_PIXEL_FORMAT getVideoFormatPreference() { return base::VIDEO_PIXEL_DEFAULT; }
/**
- * Occurs each time the SDK receives a video frame, and prompts you whether to rotate the captured
- * video.
+ * @brief Occurs each time the SDK receives a video frame, and prompts you whether to rotate the
+ * captured video.
*
- * If you want to rotate the captured video according to the rotation member in the `VideoFrame`
- * class, register this callback by calling `registerVideoFrameObserver`. After you successfully
- * register the video frame observer, the SDK triggers this callback each time it receives a video
- * frame. You need to set whether to rotate the video frame in the return value of this callback.
+ * @details
+ * If you want to rotate the captured video according to the `rotation` member in the `VideoFrame`
+ * class, ensure that you register this callback when calling `registerVideoFrameObserver`. After
+ * you successfully register the video frame observer, the SDK triggers this callback each time it
+ * receives a video frame. You need to set whether to rotate the video frame in the return value of
+ * this callback.
*
- * @note This function only supports video data in RGBA or YUV420.
+ * @note
+ * - On the Android platform, the supported video data formats for this callback are: I420, RGBA,
+ * and Texture.
+ * - On the Windows platform, the supported video data formats for this callback are: I420, RGBA,
+ * and TextureBuffer.
+ * - On the iOS platform, the supported video data formats for this callback are: I420, RGBA, and
+ * CVPixelBuffer.
+ * - On the macOS platform, the supported video data formats for this callback are: I420 and RGBA.
*
- * @return Determines whether to rotate.
+ * @return
+ * Sets whether to rotate the captured video:
* - `true`: Rotate the captured video.
* - `false`: (Default) Do not rotate the captured video.
*/
virtual bool getRotationApplied() { return false; }
/**
- * Occurs each time the SDK receives a video frame and prompts you whether or not to mirror the
- * captured video.
+ * @brief Occurs each time the SDK receives a video frame and prompts you whether or not to mirror
+ * the captured video.
*
+ * @details
* If the video data you want to obtain is a mirror image of the original video, you need to
- * register this callback when calling `registerVideoFrameObserver`. After you successfully
- * register the video frame observer, the SDK triggers this callback each time it receives a video
- * frame. You need to set whether or not to mirror the video frame in the return value of this
- * callback.
+ * register this callback when calling `registerVideoFrameObserver`. After you successfully register
+ * the video frame observer, the SDK triggers this callback each time it receives a video frame. You
+ * need to set whether or not to mirror the video frame in the return value of this callback.
*
- * @note This function only supports video data in RGBA and YUV420 formats.
+ * @note
+ * - On the Android platform, the supported video data formats for this callback are: I420, RGBA,
+ * and Texture.
+ * - On the Windows platform, the supported video data formats for this callback are: I420, RGBA,
+ * and TextureBuffer.
+ * - On the iOS platform, the supported video data formats for this callback are: I420, RGBA, and
+ * CVPixelBuffer.
+ * - On the macOS platform, the supported video data formats for this callback are: I420 and RGBA.
+ * - Both this method and the `setVideoEncoderConfiguration` method support setting the mirroring
+ * effect. Agora recommends that you only choose one method to set it up. Using both methods at the
+ * same time causes the mirroring effect to overlap, and the mirroring settings fail.
*
- * @return Determines whether to mirror.
+ * @return
+ * Sets whether or not to mirror the captured video:
* - `true`: Mirror the captured video.
* - `false`: (Default) Do not mirror the captured video.
*/
virtual bool getMirrorApplied() { return false; }
/**
- * Sets the frame position for the video observer.
- *
- * After you successfully register the video observer, the SDK triggers this callback each time it
- * receives a video frame. You can determine which position to observe by setting the return
- * value. The SDK provides 3 positions for observer. Each position corresponds to a callback
- * function:
+ * @brief Sets the frame position for the video observer.
*
- * POSITION_POST_CAPTURER(1 << 0): The position after capturing the video data, which corresponds
- * to the onCaptureVideoFrame callback. POSITION_PRE_RENDERER(1 << 1): The position before
- * receiving the remote video data, which corresponds to the onRenderVideoFrame callback.
- * POSITION_PRE_ENCODER(1 << 2): The position before encoding the video data, which corresponds to
- * the onPreEncodeVideoFrame callback.
+ * @details
+ * After successfully registering the video data observer, the SDK uses this callback to determine
+ * whether to trigger `onCaptureVideoFrame`, `onRenderVideoFrame` and `onPreEncodeVideoFrame`
+ * callback at each specific video frame processing position, so that you can observe the locally
+ * collected video data, the video data sent by the remote end, and the video data before encoding.
+ * You can set one or more positions you need to observe by modifying the return value according to
+ * your scenario:
+ * - `POSITION_POST_CAPTURER` (1 << 0): The position after capturing the video data, which
+ * corresponds to the `onCaptureVideoFrame` callback.
+ * - `POSITION_PRE_RENDERER` (1 << 1): The position of the received remote video data before
+ * rendering, which corresponds to the `onRenderVideoFrame` callback.
+ * - `POSITION_PRE_ENCODER` (1 << 2): The position before encoding the video data, which corresponds
+ * to the `onPreEncodeVideoFrame` callback.
*
- * To observe multiple frame positions, use '|' (the OR operator).
- * This callback observes POSITION_POST_CAPTURER(1 << 0) and POSITION_PRE_RENDERER(1 << 1) by
- * default. To conserve the system consumption, you can reduce the number of frame positions that
- * you want to observe.
+ * @note
+ * - Use '|' (the OR operator) to observe multiple frame positions.
+ * - This callback observes `POSITION_POST_CAPTURER` (1 << 0) and `POSITION_PRE_RENDERER` (1 << 1)
+ * by default.
+ * - To conserve system resources, you can reduce the number of frame positions that you want to
+ * observe.
+ * - When the video processing mode is `PROCESS_MODE_READ_WRITE` and the observation position is set
+ * to `POSITION_PRE_ENCODER` | `POSITION_POST_CAPTURER`, the `getMirrorApplied` does not take
+ * effect; you need to modify the video processing mode or the position of the observer.
*
- * @return A bit mask that controls the frame position of the video observer:
- * VIDEO_OBSERVER_POSITION.
+ * @return
+ * A bit mask that controls the frame position of the video observer. See `VIDEO_MODULE_POSITION`.
*/
virtual uint32_t getObservedFramePosition() {
return base::POSITION_POST_CAPTURER | base::POSITION_PRE_RENDERER;
@@ -1887,21 +2257,21 @@ class IVideoFrameObserver {
};
/**
- * The external video source type.
+ * @brief The external video frame encoding type.
*/
enum EXTERNAL_VIDEO_SOURCE_TYPE {
/**
- * 0: non-encoded video frame.
+ * 0: The video frame is not encoded.
*/
VIDEO_FRAME = 0,
/**
- * 1: encoded video frame.
+ * 1: The video frame is encoded.
*/
ENCODED_VIDEO_FRAME,
};
/**
- * The format of the recording file.
+ * @brief Format of the recording file.
*
* @since v3.5.2
*/
@@ -1912,7 +2282,7 @@ enum MediaRecorderContainerFormat {
FORMAT_MP4 = 1,
};
/**
- * The recording content.
+ * @brief The recording content.
*
* @since v3.5.2
*/
@@ -1931,32 +2301,32 @@ enum MediaRecorderStreamType {
STREAM_TYPE_BOTH = STREAM_TYPE_AUDIO | STREAM_TYPE_VIDEO,
};
/**
- * The current recording state.
+ * @brief The current recording state.
*
* @since v3.5.2
*/
enum RecorderState {
/**
- * -1: An error occurs during the recording. See RecorderReasonCode for the reason.
+ * -1: An error occurs during the recording. See `RecorderReasonCode` for the reason.
*/
RECORDER_STATE_ERROR = -1,
/**
- * 2: The audio and video recording is started.
+ * 2: The audio and video recording starts.
*/
RECORDER_STATE_START = 2,
/**
- * 3: The audio and video recording is stopped.
+ * 3: The audio and video recording stops.
*/
RECORDER_STATE_STOP = 3,
};
/**
- * The reason for the state change
+ * @brief The reason for the state change.
*
* @since v3.5.2
*/
enum RecorderReasonCode {
/**
- * 0: No error occurs.
+ * 0: No error.
*/
RECORDER_REASON_NONE = 0,
/**
@@ -1964,8 +2334,8 @@ enum RecorderReasonCode {
*/
RECORDER_REASON_WRITE_FAILED = 1,
/**
- * 2: The SDK does not detect audio and video streams to be recorded, or audio and video streams
- * are interrupted for more than five seconds during recording.
+ * 2: The SDK does not detect any audio and video streams, or audio and video streams are
+ * interrupted for more than five seconds during recording.
*/
RECORDER_REASON_NO_STREAM = 2,
/**
@@ -1978,62 +2348,77 @@ enum RecorderReasonCode {
RECORDER_REASON_CONFIG_CHANGED = 4,
};
/**
- * Configurations for the local audio and video recording.
+ * @brief Configuration for audio and video stream recording.
*
* @since v3.5.2
*/
struct MediaRecorderConfiguration {
/**
- * The absolute path (including the filename extensions) of the recording file.
- * For example, `C:\Users\\AppData\Local\Agora\\example.mp4` on Windows,
- * `/App Sandbox/Library/Caches/example.mp4` on iOS, `/Library/Logs/example.mp4` on macOS, and
- * `/storage/emulated/0/Android/data//files/example.mp4` on Android.
- *
- * @note Ensure that the specified path exists and is writable.
+ * The absolute path where the recording file will be saved locally, including the file name and
+ * format. For example:
+ * - Windows: `C:\Users\\AppData\Local\Agora\\example.mp4`
+ * - iOS: `/App Sandbox/Library/Caches/example.mp4`
+ * - macOS: `/Library/Logs/example.mp4`
+ * - Android: `/storage/emulated/0/Android/data//files/example.mp4`
+ * @note Make sure the specified path exists and is writable.
*/
const char* storagePath;
/**
- * The format of the recording file. See \ref agora::rtc::MediaRecorderContainerFormat
- * "MediaRecorderContainerFormat".
+ * The format of the recording file. See `MediaRecorderContainerFormat`.
*/
MediaRecorderContainerFormat containerFormat;
/**
- * The recording content. See \ref agora::rtc::MediaRecorderStreamType "MediaRecorderStreamType".
+ * The content to record. See `MediaRecorderStreamType`.
*/
MediaRecorderStreamType streamType;
/**
- * The maximum recording duration, in milliseconds. The default value is 120000.
+ * Maximum recording duration in milliseconds. Default is 120000.
*/
int maxDurationMs;
/**
- * The interval (ms) of updating the recording information. The value range is
- * [1000,10000]. Based on the set value of `recorderInfoUpdateInterval`, the
- * SDK triggers the \ref IMediaRecorderObserver::onRecorderInfoUpdated "onRecorderInfoUpdated"
- * callback to report the updated recording information.
+ * Interval for recording information updates, in milliseconds. The valid range is [1000,10000]. The
+ * SDK triggers the `onRecorderInfoUpdated` callback based on this value to report updated recording
+ * information.
*/
int recorderInfoUpdateInterval;
/**
- * The video width
+ * Width (px) of the recorded video. The maximum value for width × height must not exceed 3840 ×
+ * 2160.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int width;
/**
- * The video height
+ * Height (px) of the recorded video. The maximum value for width × height must not exceed 3840 ×
+ * 2160.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int height;
/**
- * The video fps
+ * Frame rate of the recorded video. The maximum is 30. For example: 5, 10, 15, 24, 30.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int fps;
/**
- * The audio sample rate
+ * Sample rate (Hz) of the recorded audio. Supported values: 16000, 32000, 44100, or 48000.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int sample_rate;
/**
- * The audio channel nums
+ * Number of audio channels to record:
+ * - 1: Mono
+ * - 2: Stereo
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int channel_num;
/**
- * The video source just for out channel recoder
+ * Type of video source to record. See `VIDEO_SOURCE_TYPE`.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
agora::rtc::VIDEO_SOURCE_TYPE videoSourceType;
@@ -2067,11 +2452,46 @@ struct MediaRecorderConfiguration {
class IFaceInfoObserver {
public:
/**
- * Occurs when the face info is received.
- * @param outFaceInfo The output face info.
+ * @brief Occurs when the facial information processed by speech driven extension is received.
+ *
+ * @param outFaceInfo Output parameter, the JSON string of the facial information processed by the
+ * voice driver plugin, including the following fields:
+ * - faces: Object sequence. The collection of facial information, with each face corresponding to
+ * an object.
+ * - blendshapes: Object. The collection of face capture coefficients, named according to ARKit
+ * standards, with each key-value pair representing a blendshape coefficient. The blendshape
+ * coefficient is a floating point number with a range of [0.0, 1.0].
+ * - rotation: Object sequence. The rotation of the head, which includes the following three
+ * key-value pairs, with values as floating point numbers ranging from -180.0 to 180.0:
+ * - pitch: Head pitch angle. A positive value means looking down, while a negative value means
+ * looking up.
+ * - yaw: Head yaw angle. A positive value means turning left, while a negative value means turning
+ * right.
+ * - roll: Head roll angle. A positive value means tilting to the right, while a negative value
+ * means tilting to the left.
+ * - timestamp: String. The timestamp of the output result, in milliseconds.
+ * Here is an example of JSON:
+ * ```json
+ * { "faces":[{ "blendshapes":{ "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0,
+ * "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0, "eyeSquintLeft":0.0, "eyeWideLeft":0.0,
+ * "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0, "eyeLookOutRight":0.0,
+ * "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0, "jawLeft":0.0,
+ * "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0,
+ * "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0,
+ * "mouthFrownLeft":0.0, "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0,
+ * "mouthStretchLeft":0.0, "mouthStretchRight":0.0, "mouthRollLower":0.0, "mouthRollUpper":0.0,
+ * "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0, "mouthPressRight":0.0,
+ * "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0,
+ * "mouthUpperUpRight":0.0, "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0,
+ * "browOuterUpLeft":0.0, "browOuterUpRight":0.0, "cheekPuff":0.0, "cheekSquintLeft":0.0,
+ * "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0, "tongueOut":0.0 },
+ * "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5},
+ * }], "timestamp":"654879876546" }
+ * ```
+ *
* @return
- * - true: The face info is valid.
- * - false: The face info is invalid.
+ * - `true`: Facial information JSON parsing successful.
+ * - `false`: Facial information JSON parsing failed.
*/
virtual bool onFaceInfo(const char* outFaceInfo) = 0;
@@ -2079,21 +2499,21 @@ class IFaceInfoObserver {
};
/**
- * Information for the recording file.
+ * @brief Information about the recording file.
*
* @since v3.5.2
*/
struct RecorderInfo {
/**
- * The absolute path of the recording file.
+ * Absolute storage path of the recording file.
*/
const char* fileName;
/**
- * The recording duration, in milliseconds.
+ * Duration of the recording file in milliseconds.
*/
unsigned int durationMs;
/**
- * The size in bytes of the recording file.
+ * Size of the recording file in bytes.
*/
unsigned int fileSize;
@@ -2105,35 +2525,36 @@ struct RecorderInfo {
class IMediaRecorderObserver {
public:
/**
- * Occurs when the recording state changes.
+ * @brief Callback when the recording state changes.
*
* @since v4.0.0
*
- * When the local audio and video recording state changes, the SDK triggers this callback to
+ * @details
+ * When the recording state of the audio and video stream changes, the SDK triggers this callback to
* report the current recording state and the reason for the change.
*
- * @param channelId The channel name.
- * @param uid ID of the user.
- * @param state The current recording state. See \ref agora::media::RecorderState "RecorderState".
- * @param reason The reason for the state change. See \ref agora::media::RecorderReasonCode
- * "RecorderReasonCode".
+ * @param channelId Channel name.
+ * @param uid User ID.
+ * @param state Current recording state. See `RecorderState`.
+ * @param reason Reason for the recording state change. See `RecorderReasonCode`.
+ *
*/
virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state,
RecorderReasonCode reason) = 0;
/**
- * Occurs when the recording information is updated.
+ * @brief Callback for recording information updates.
*
* @since v4.0.0
*
- * After you successfully register this callback and enable the local audio and video recording,
- * the SDK periodically triggers the `onRecorderInfoUpdated` callback based on the set value of
- * `recorderInfoUpdateInterval`. This callback reports the filename, duration, and size of the
- * current recording file.
+ * @details
+ * After successfully registering this callback and starting audio and video stream recording, the
+ * SDK periodically triggers this callback based on the value of `recorderInfoUpdateInterval` set in
+ * `MediaRecorderConfiguration`,
+ * reporting the current recording file's name, duration, and size.
*
- * @param channelId The channel name.
- * @param uid ID of the user.
- * @param info Information about the recording file. See \ref agora::media::RecorderInfo
- * "RecorderInfo".
+ * @param channelId Channel name.
+ * @param uid User ID.
+ * @param info Recording file information. See `RecorderInfo`.
*
*/
virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid,
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaPlayerTypes.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaPlayerTypes.h
index d55d1d9e0..ceb0642ea 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaPlayerTypes.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaPlayerTypes.h
@@ -49,31 +49,39 @@ namespace base {
static const uint8_t kMaxCharBufferLength = 50;
/**
* @brief The playback state.
- *
*/
enum MEDIA_PLAYER_STATE {
- /** Default state.
+ /**
+ * 0: The default state. The media player returns this state code before you open the media resource
+ * or after you stop the playback.
*/
PLAYER_STATE_IDLE = 0,
- /** Opening the media file.
+ /**
+ * 1: Opening the media resource.
*/
PLAYER_STATE_OPENING,
- /** The media file is opened successfully.
+ /**
+ * 2: Opens the media resource successfully.
*/
PLAYER_STATE_OPEN_COMPLETED,
- /** Playing the media file.
+ /**
+ * 3: The media resource is playing.
*/
PLAYER_STATE_PLAYING,
- /** The playback is paused.
+ /**
+ * 4: Pauses the playback.
*/
PLAYER_STATE_PAUSED,
- /** The playback is completed.
+ /**
+ * 5: The playback is complete.
*/
PLAYER_STATE_PLAYBACK_COMPLETED,
- /** All loops are completed.
+ /**
+ * 6: The loop is complete.
*/
PLAYER_STATE_PLAYBACK_ALL_LOOPS_COMPLETED,
- /** The playback is stopped.
+ /**
+ * 7: The playback stops.
*/
PLAYER_STATE_STOPPED,
/** Player pausing (internal)
@@ -97,58 +105,73 @@ enum MEDIA_PLAYER_STATE {
/** Player set track state (internal)
*/
PLAYER_STATE_SET_TRACK_INTERNAL,
- /** The playback fails.
+ /**
+ * 100: The media player fails to play the media resource.
*/
PLAYER_STATE_FAILED = 100,
};
/**
- * @brief Player error code
- *
+ * @brief Reasons for the changes in the media player status.
*/
enum MEDIA_PLAYER_REASON {
- /** No error.
+ /**
+ * 0: No error.
*/
PLAYER_REASON_NONE = 0,
- /** The parameter is invalid.
+ /**
+ * -1: Invalid arguments.
*/
PLAYER_REASON_INVALID_ARGUMENTS = -1,
- /** Internel error.
+ /**
+ * -2: Internal error.
*/
PLAYER_REASON_INTERNAL = -2,
- /** No resource.
+ /**
+ * -3: No resource.
*/
PLAYER_REASON_NO_RESOURCE = -3,
- /** Invalid media source.
+ /**
+ * -4: Invalid media resource.
*/
PLAYER_REASON_INVALID_MEDIA_SOURCE = -4,
- /** The type of the media stream is unknown.
+ /**
+ * -5: The media stream type is unknown.
*/
PLAYER_REASON_UNKNOWN_STREAM_TYPE = -5,
- /** The object is not initialized.
+ /**
+ * -6: The object is not initialized.
*/
PLAYER_REASON_OBJ_NOT_INITIALIZED = -6,
- /** The codec is not supported.
+ /**
+ * -7: The codec is not supported.
*/
PLAYER_REASON_CODEC_NOT_SUPPORTED = -7,
- /** Invalid renderer.
+ /**
+ * -8: Invalid renderer.
*/
PLAYER_REASON_VIDEO_RENDER_FAILED = -8,
- /** An error occurs in the internal state of the player.
+ /**
+ * -9: An error with the internal state of the player occurs.
*/
PLAYER_REASON_INVALID_STATE = -9,
- /** The URL of the media file cannot be found.
+ /**
+ * -10: The URL of the media resource cannot be found.
*/
PLAYER_REASON_URL_NOT_FOUND = -10,
- /** Invalid connection between the player and the Agora server.
+ /**
+ * -11: Invalid connection between the player and the Agora Server.
*/
PLAYER_REASON_INVALID_CONNECTION_STATE = -11,
- /** The playback buffer is insufficient.
+ /**
+ * -12: The playback buffer is insufficient.
*/
PLAYER_REASON_SRC_BUFFER_UNDERFLOW = -12,
- /** The audio mixing file playback is interrupted.
+ /**
+ * -13: The playback is interrupted.
*/
PLAYER_REASON_INTERRUPTED = -13,
- /** The SDK does not support this function.
+ /**
+ * -14: The SDK does not support the method being called.
*/
PLAYER_REASON_NOT_SUPPORTED = -14,
/** The token has expired.
@@ -157,75 +180,92 @@ enum MEDIA_PLAYER_REASON {
/** The ip has expired.
*/
PLAYER_REASON_IP_EXPIRED = -16,
- /** An unknown error occurs.
+ /**
+ * -17: An unknown error.
*/
PLAYER_REASON_UNKNOWN = -17,
};
/**
* @brief The type of the media stream.
- *
*/
enum MEDIA_STREAM_TYPE {
- /** The type is unknown.
+ /**
+ * 0: The type is unknown.
*/
STREAM_TYPE_UNKNOWN = 0,
- /** The video stream.
+ /**
+ * 1: The video stream.
*/
STREAM_TYPE_VIDEO = 1,
- /** The audio stream.
+ /**
+ * 2: The audio stream.
*/
STREAM_TYPE_AUDIO = 2,
- /** The subtitle stream.
+ /**
+ * 3: The subtitle stream.
*/
STREAM_TYPE_SUBTITLE = 3,
};
/**
- * @brief The playback event.
- *
+ * @brief Media player events.
*/
enum MEDIA_PLAYER_EVENT {
- /** The player begins to seek to the new playback position.
+ /**
+ * 0: The player begins to seek to a new playback position.
*/
PLAYER_EVENT_SEEK_BEGIN = 0,
- /** The seek operation completes.
+ /**
+ * 1: The player finishes seeking to a new playback position.
*/
PLAYER_EVENT_SEEK_COMPLETE = 1,
- /** An error occurs during the seek operation.
+ /**
+ * 2: An error occurs when seeking to a new playback position.
*/
PLAYER_EVENT_SEEK_ERROR = 2,
- /** The player changes the audio track for playback.
+ /**
+ * 5: The audio track used by the player has been changed.
*/
PLAYER_EVENT_AUDIO_TRACK_CHANGED = 5,
- /** player buffer low
+ /**
+ * 6: The currently buffered data is not enough to support playback.
*/
PLAYER_EVENT_BUFFER_LOW = 6,
- /** player buffer recover
+ /**
+ * 7: The currently buffered data is just enough to support playback.
*/
PLAYER_EVENT_BUFFER_RECOVER = 7,
- /** The video or audio is interrupted
+ /**
+ * 8: The audio or video playback freezes.
*/
PLAYER_EVENT_FREEZE_START = 8,
- /** Interrupt at the end of the video or audio
+ /**
+ * 9: The audio or video playback resumes without freezing.
*/
PLAYER_EVENT_FREEZE_STOP = 9,
- /** switch source begin
- */
+ /**
+ * 10: The player starts switching the media resource.
+ */
PLAYER_EVENT_SWITCH_BEGIN = 10,
- /** switch source complete
- */
+ /**
+ * 11: Media resource switching is complete.
+ */
PLAYER_EVENT_SWITCH_COMPLETE = 11,
- /** switch source error
- */
+ /**
+ * 12: Media resource switching error.
+ */
PLAYER_EVENT_SWITCH_ERROR = 12,
- /** An application can render the video to less than a second
+ /**
+ * 13: The first video frame is rendered.
*/
PLAYER_EVENT_FIRST_DISPLAYED = 13,
- /** cache resources exceed the maximum file count
+ /**
+ * 14: The cached media files reach the limit in number.
*/
PLAYER_EVENT_REACH_CACHE_FILE_MAX_COUNT = 14,
- /** cache resources exceed the maximum file size
+ /**
+ * 15: The cached media files reach the limit in aggregate storage space.
*/
PLAYER_EVENT_REACH_CACHE_FILE_MAX_SIZE = 15,
/** Triggered when a retry is required to open the media
@@ -244,63 +284,91 @@ enum MEDIA_PLAYER_EVENT {
};
/**
- * @brief The play preload another source event.
- *
+ * @brief Events that occur when media resources are preloaded.
*/
enum PLAYER_PRELOAD_EVENT {
- /** preload source begin
- */
+ /**
+ * 0: Starts preloading media resources.
+ */
PLAYER_PRELOAD_EVENT_BEGIN = 0,
- /** preload source complete
- */
+ /**
+ * 1: Preloading media resources is complete.
+ */
PLAYER_PRELOAD_EVENT_COMPLETE = 1,
- /** preload source error
- */
+ /**
+ * 2: An error occurs when preloading media resources.
+ */
PLAYER_PRELOAD_EVENT_ERROR = 2,
};
/**
- * @brief The information of the media stream object.
- *
+ * @brief The detailed information of the media stream.
*/
struct PlayerStreamInfo {
- /** The index of the media stream. */
+ /**
+ * The index of the media stream.
+ */
int streamIndex;
- /** The type of the media stream. See {@link MEDIA_STREAM_TYPE}. */
+ /**
+ * The type of the media stream. See `MEDIA_STREAM_TYPE`.
+ */
MEDIA_STREAM_TYPE streamType;
- /** The codec of the media stream. */
+ /**
+ * The codec of the media stream.
+ */
char codecName[kMaxCharBufferLength];
- /** The language of the media stream. */
+ /**
+ * The language of the media stream.
+ */
char language[kMaxCharBufferLength];
- /** The frame rate (fps) if the stream is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video frame rate (fps).
+ */
int videoFrameRate;
- /** The video bitrate (bps) if the stream is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video bitrate (bps).
+ */
int videoBitRate;
- /** The video width (pixel) if the stream is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video width (pixel).
+ */
int videoWidth;
- /** The video height (pixel) if the stream is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video height (pixel).
+ */
int videoHeight;
- /** The rotation angle if the steam is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video rotation angle.
+ */
int videoRotation;
- /** The sample rate if the stream is audio. */
+ /**
+ * This parameter only takes effect for audio streams, and indicates the audio sample rate (Hz).
+ */
int audioSampleRate;
- /** The number of audio channels if the stream is audio. */
+ /**
+ * This parameter only takes effect for audio streams, and indicates the audio channel number.
+ */
int audioChannels;
- /** The number of bits per sample if the stream is audio. */
+ /**
+ * This parameter only takes effect for audio streams, and indicates the bit number of each audio
+ * sample.
+ */
int audioBitsPerSample;
- /** The total duration (millisecond) of the media stream. */
+ /**
+ * The total duration (ms) of the media stream.
+ */
int64_t duration;
PlayerStreamInfo() : streamIndex(0),
@@ -320,90 +388,104 @@ struct PlayerStreamInfo {
};
/**
- * @brief The information of the media stream object.
- *
+ * @brief Information about the video bitrate of the media resource being played.
*/
struct SrcInfo {
- /** The bitrate of the media stream. The unit of the number is kbps.
- *
+ /**
+ * The video bitrate (Kbps) of the media resource being played.
*/
int bitrateInKbps;
- /** The name of the media stream.
- *
- */
+ /**
+ * The name of the media resource.
+ */
const char* name;
};
/**
- * @brief The type of the media metadata.
- *
+ * @brief The type of media metadata.
*/
enum MEDIA_PLAYER_METADATA_TYPE {
- /** The type is unknown.
+ /**
+ * 0: The type is unknown.
*/
PLAYER_METADATA_TYPE_UNKNOWN = 0,
- /** The type is SEI.
+ /**
+ * 1: The type is SEI.
*/
PLAYER_METADATA_TYPE_SEI = 1,
};
+/**
+ * @brief Statistics about the media files being cached.
+ */
struct CacheStatistics {
- /** total data size of uri
+ /**
+ * The size (bytes) of the media file being played.
*/
int64_t fileSize;
- /** data of uri has cached
+ /**
+ * The size (bytes) of the media file that you want to cache.
*/
int64_t cacheSize;
- /** data of uri has downloaded
+ /**
+ * The size (bytes) of the media file that has been downloaded.
*/
int64_t downloadSize;
};
/**
- * @brief The real time statistics of the media stream being played.
- *
+ * @brief The information of the media file being played.
*/
struct PlayerPlaybackStats {
- /** Video fps.
+ /**
+ * The frame rate (fps) of the video.
*/
int videoFps;
- /** Video bitrate (Kbps).
+ /**
+ * The bitrate (kbps) of the video.
*/
int videoBitrateInKbps;
- /** Audio bitrate (Kbps).
+ /**
+ * The bitrate (kbps) of the audio.
*/
int audioBitrateInKbps;
- /** Total bitrate (Kbps).
+ /**
+ * The total bitrate (kbps) of the media stream.
*/
int totalBitrateInKbps;
};
/**
- * @brief The updated information of media player.
- *
+ * @brief Information related to the media player.
*/
struct PlayerUpdatedInfo {
/** @technical preview
*/
const char* internalPlayerUuid;
- /** The device ID of the playback device.
+ /**
+ * The ID of a device.
*/
const char* deviceId;
- /** Video height.
+ /**
+ * Height (pixel) of the video.
*/
int videoHeight;
- /** Video width.
+ /**
+ * Width (pixel) of the video.
*/
int videoWidth;
- /** Audio sample rate.
+ /**
+ * Audio sample rate (Hz).
*/
int audioSampleRate;
- /** The audio channel number.
+ /**
+ * The number of audio channels.
*/
int audioChannels;
- /** The bit number of each audio sample.
+ /**
+ * The number of bits per audio sample point.
*/
int audioBitsPerSample;
@@ -424,89 +506,132 @@ class IMediaPlayerCustomDataProvider {
public:
/**
- * @brief The player requests to read the data callback, you need to fill the specified length of data into the buffer
- * @param buffer the buffer pointer that you need to fill data.
- * @param bufferSize the bufferSize need to fill of the buffer pointer.
- * @return you need return offset value if succeed. return 0 if failed.
+ * @brief Occurs when the SDK reads the media resource data.
+ *
+ * @details
+ * When you call the `openWithMediaSource` method to open a media resource, the SDK triggers this
+ * callback and request you to pass in the buffer of the media resource data.
+ *
+ * @param buffer An input parameter. Data buffer (bytes). Write the `bufferSize` data reported by
+ * the SDK into this parameter.
+ * @param bufferSize The length of the data buffer (bytes).
+ *
+ * @return
+ * - If the data is read successfully, pass in the length of the data (bytes) you actually read in
+ * the return value.
+ * - If reading the data fails, pass in 0 in the return value.
*/
virtual int onReadData(unsigned char *buffer, int bufferSize) = 0;
/**
- * @brief The Player seek event callback, you need to operate the corresponding stream seek operation, You can refer to the definition of lseek() at https://man7.org/linux/man-pages/man2/lseek.2.html
- * @param offset the value of seek offset.
- * @param whence the postion of start seeking, the directive whence as follows:
- * 0 - SEEK_SET : The file offset is set to offset bytes.
- * 1 - SEEK_CUR : The file offset is set to its current location plus offset bytes.
- * 2 - SEEK_END : The file offset is set to the size of the file plus offset bytes.
- * 65536 - AVSEEK_SIZE : Optional. Passing this as the "whence" parameter to a seek function causes it to return the filesize without seeking anywhere.
+ * @brief Occurs when the SDK seeks the media resource data.
+ *
+ * @details
+ * When you call the `openWithMediaSource` or `open` method to open a custom media resource, the SDK
+ * triggers this callback to request the specified location in the media resource.
+ *
+ * @param offset An input parameter. The offset of the target position relative to the starting
+ * point, in bytes. The value can be positive or negative.
+ * @param whence An input parameter. The starting point. You can set it as one of the following
+ * values:
+ * - 0: The starting point is the head of the data, and the actual data offset after seeking is
+ * `offset`.
+ * - 1: The starting point is the current position, and the actual data offset after seeking is the
+ * current position plus `offset`.
+ * - 2: The starting point is the end of the data, and the actual data offset after seeking is the
+ * whole data length plus `offset`.
+ * - 65536: Do not perform position seeking, return the file size. Agora recommends that you use
+ * this parameter value when playing pure audio files such as MP3 and WAV.
+ *
* @return
- * whence == 65536, return filesize if you need.
- * whence >= 0 && whence < 3 , return offset value if succeed. return -1 if failed.
+ * - When `whence` is `65536`, the media file size is returned.
+ * - When `whence` is `0`, `1`, or `2`, the actual data offset after the seeking is returned.
+ * - -1: Seeking failed.
*/
virtual int64_t onSeek(int64_t offset, int whence) = 0;
virtual ~IMediaPlayerCustomDataProvider() {}
};
+/**
+ * @brief Information related to the media file to be played and the playback scenario
+ * configurations.
+ */
struct MediaSource {
/**
- * The URL of the media file that you want to play.
+ * The URL of the media file to be played.
+ * @note If you open a common media resource, pass in the value to `url`. If you open a custom media
+ * resource, pass in the value to `provider`. Agora recommends that you do not pass in values to
+ * both parameters in one call; otherwise, this call may fail.
*/
const char* url;
/**
- * The URI of the media file
- *
- * When caching is enabled, if the url cannot distinguish the cache file name,
- * the uri must be able to ensure that the cache file name corresponding to the url is unique.
+ * The URI (Uniform Resource Identifier) of the media file.
*/
const char* uri;
/**
- * Set the starting position for playback, in ms.
+ * The starting position (ms) for playback. The default value is 0.
*/
int64_t startPos;
/**
- * Determines whether to autoplay after opening a media resource.
- * - true: (Default) Autoplay after opening a media resource.
- * - false: Do not autoplay after opening a media resource.
+ * Whether to enable autoplay once the media file is opened:
+ * - `true`: (Default) Yes.
+ * - `false`: No.
+ * @note If autoplay is disabled, you need to call the `play` method to play a media file after it
+ * is opened.
*/
bool autoPlay;
/**
- * Determines whether to enable cache streaming to local files. If enable cached, the media player will
- * use the url or uri as the cache index.
- *
+ * Whether to cache the media file when it is being played:
+ * - `true`: Enables caching.
+ * - `false`: (Default) Disables caching.
* @note
- * The local cache function only supports on-demand video/audio streams and does not support live streams.
- * Caching video and audio files based on the HLS protocol (m3u8) to your local device is not supported.
- *
- * - true: Enable cache.
- * - false: (Default) Disable cache.
+ * - Agora only supports caching on-demand audio and video streams that are not transmitted in HLS
+ * protocol.
+ * - If you need to enable caching, pass in a value to `uri`; otherwise, caching is based on the
+ * `url` of the media file.
+ * - If you enable this function, the Media Player caches part of the media file being played on
+ * your local device, and you can play the cached media file without internet connection. The
+ * statistics about the media file being cached are updated every second after the media file is
+ * played. See `CacheStatistics`.
*/
bool enableCache;
/**
- * Determines whether to enable multi-track audio stream decoding.
- * Then you can select multi audio track of the media file for playback or publish to channel
- *
- * @note
- * If you use the selectMultiAudioTrack API, you must set enableMultiAudioTrack to true.
- *
- * - true: Enable MultiAudioTrack;.
- * - false: (Default) Disable MultiAudioTrack;.
+ * Whether to allow the selection of different audio tracks when playing this media file:
+ * - `true`: Allow the selection of different audio tracks.
+ * - `false`: (Default) Do not allow the selection of different audio tracks.
+ * If you need to set different audio tracks for local playback and publishing to the channel, you
+ * need to set this parameter to `true`, and then call the `selectMultiAudioTrack` method to select
+ * the audio track.
*/
bool enableMultiAudioTrack;
/**
- * Determines whether the opened media resource is a stream through the Agora Broadcast Streaming Network(CDN).
- * - true: It is a stream through the Agora Broadcast Streaming Network.
- * - false: (Default) It is not a stream through the Agora Broadcast Streaming Network.
+ * Whether the media resource to be opened is a live stream or on-demand video distributed through
+ * Media Broadcast service:
+ * - `true`: The media resource to be played is a live or on-demand video distributed through Media
+ * Broadcast service.
+ * - `false`: (Default) The media resource is not a live stream or on-demand video distributed
+ * through Media Broadcast service.
+ * @note If you need to open a live stream or on-demand video distributed through Media
+ * Broadcast service, pass in the URL of the media resource to `url`, and set `isAgoraSource` as
+ * `true`; otherwise, you don't need to set the `isAgoraSource` parameter.
*/
Optional isAgoraSource;
/**
- * Determines whether the opened media resource is a live stream. If is a live stream, it can speed up the opening of media resources.
- * - true: It is a live stream.
- * - false: (Default) It is not is a live stream.
+ * Whether the media resource to be opened is a live stream:
+ * - `true`: The media resource is a live stream.
+ * - `false`: (Default) The media resource is not a live stream.
+ * If the media resource you want to open is a live stream, Agora recommends that you set this
+ * parameter as `true` so that the live stream can be loaded more quickly.
+ * @note If the media resource you open is not a live stream, but you set `isLiveSource` as `true`,
+ * the media resource will not be loaded more quickly.
*/
Optional isLiveSource;
/**
- * External custom data source object
+ * The callback for custom media resource files. See `IMediaPlayerCustomDataProvider`.
+ * @note If you open a custom media resource, pass in the value to `provider`. If you open a common
+ * media resource, pass in the value to `url`. Agora recommends that you do not pass in values to
+ * both `url` and `provider` in one call; otherwise, this call may fail.
*/
IMediaPlayerCustomDataProvider* provider;
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraLog.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraLog.h
index 20b6416ef..4bc586442 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraLog.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraLog.h
@@ -28,13 +28,29 @@ namespace agora {
namespace commons {
/**
- * Supported logging severities of SDK
+ * @brief The output log level of the SDK.
*/
OPTIONAL_ENUM_CLASS LOG_LEVEL {
+ /**
+ * 0: Do not output any log information.
+ */
LOG_LEVEL_NONE = 0x0000,
+ /**
+ * 0x0001: (Default) Output `FATAL`, `ERROR`, `WARN`, and `INFO` level log information. We recommend
+ * setting your log filter to this level.
+ */
LOG_LEVEL_INFO = 0x0001,
+ /**
+ * 0x0002: Output `FATAL`, `ERROR`, and `WARN` level log information.
+ */
LOG_LEVEL_WARN = 0x0002,
+ /**
+ * 0x0004: Output `FATAL` and `ERROR` level log information.
+ */
LOG_LEVEL_ERROR = 0x0004,
+ /**
+ * 0x0008: Output `FATAL` level log information.
+ */
LOG_LEVEL_FATAL = 0x0008,
LOG_LEVEL_API_CALL = 0x0010,
LOG_LEVEL_DEBUG = 0x0020,
@@ -62,12 +78,36 @@ class ILogWriter {
virtual ~ILogWriter() {}
};
+/**
+ * @brief The output log level of the SDK.
+ */
enum LOG_FILTER_TYPE {
+ /**
+ * 0: Do not output any log information.
+ */
LOG_FILTER_OFF = 0,
+ /**
+ * 0x080f: Output all log information. Set your log filter to this level if you want to get the most
+ * complete log file.
+ */
LOG_FILTER_DEBUG = 0x080f,
+ /**
+ * 0x000f: Output `LOG_FILTER_CRITICAL`, `LOG_FILTER_ERROR`, `LOG_FILTER_WARN`, and
+ * `LOG_FILTER_INFO` level log information. We recommend setting your log filter to this level.
+ */
LOG_FILTER_INFO = 0x000f,
+ /**
+ * 0x000e: Output `LOG_FILTER_CRITICAL`, `LOG_FILTER_ERROR`, and `LOG_FILTER_WARN` level log
+ * information.
+ */
LOG_FILTER_WARN = 0x000e,
+ /**
+ * 0x000c: Output `LOG_FILTER_CRITICAL` and `LOG_FILTER_ERROR` level log information.
+ */
LOG_FILTER_ERROR = 0x000c,
+ /**
+ * 0x0008: Output `LOG_FILTER_CRITICAL` level log information.
+ */
LOG_FILTER_CRITICAL = 0x0008,
LOG_FILTER_MASK = 0x80f,
};
@@ -78,16 +118,34 @@ const uint32_t MIN_LOG_SIZE = 128 * 1024; // 128KB
*/
const uint32_t DEFAULT_LOG_SIZE_IN_KB = 2048;
-/** Definition of LogConfiguration
+/**
+ * @brief Configuration of Agora SDK log files.
*/
struct LogConfig {
- /**The log file path, default is NULL for default log path
+ /**
+ * The complete path of the log files. Agora recommends using the default log directory. If you need
+ * to modify the default directory, ensure that the directory you specify exists and is writable.
+ * The default log directory is:
+ * - Android: /storage/emulated/0/Android/data/&lt;package name&gt;/files/agorasdk.log.
+ * - iOS: App Sandbox/Library/caches/agorasdk.log.
+ * - macOS:
+ * - If Sandbox is enabled: App Sandbox/Library/Logs/agorasdk.log. For example,
+ * /Users/&lt;user name&gt;/Library/Containers/&lt;app bundle id&gt;/Data/Library/Logs/agorasdk.log.
+ * - If Sandbox is disabled: ~/Library/Logs/agorasdk.log
+ * - Windows: C:\Users\&lt;user name&gt;\AppData\Local\Agora\&lt;process name&gt;\agorasdk.log.
*/
const char* filePath;
- /** The log file size, KB , set 2048KB to use default log size
+ /**
+ * The size (KB) of an `agorasdk.log` file. The value range is [128,20480]. The default value is
+ * 2,048 KB. If you set `fileSizeInKByte` smaller than 128 KB, the SDK automatically adjusts it to
+ * 128 KB; if you set `fileSizeInKByte` greater than 20,480 KB, the SDK automatically adjusts it to
+ * 20,480 KB.
*/
uint32_t fileSizeInKB;
- /** The log level, set LOG_LEVEL_INFO to use default log level
+ /**
+ * The output level of the SDK log file. See `LOG_LEVEL`.
+ * For example, if you set the log level to WARN, the SDK outputs the logs within levels FATAL,
+ * ERROR, and WARN.
*/
LOG_LEVEL level;
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaPlayerSource.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaPlayerSource.h
index 4cd8206ca..563497881 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaPlayerSource.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaPlayerSource.h
@@ -403,58 +403,83 @@ class IMediaPlayerSourceObserver {
virtual ~IMediaPlayerSourceObserver() {}
/**
- * @brief Reports the playback state change.
+ * @brief Reports the changes of playback state.
+ *
+ * @details
+ * When the state of the media player changes, the SDK triggers this callback to report the current
+ * playback state.
+ *
+ * @param state The playback state. See `MEDIA_PLAYER_STATE`.
+ * @param reason The reason for the changes in the media player status. See `MEDIA_PLAYER_REASON`.
*
- * When the state of the playback changes, the SDK triggers this callback to report the new playback state and the reason or error for the change.
- * @param state The new playback state after change. See {@link media::base::MEDIA_PLAYER_STATE MEDIA_PLAYER_STATE}.
- * @param reason The player's error code. See {@link media::base::MEDIA_PLAYER_REASON MEDIA_PLAYER_REASON}.
*/
virtual void onPlayerSourceStateChanged(media::base::MEDIA_PLAYER_STATE state,
media::base::MEDIA_PLAYER_REASON reason) = 0;
/**
- * @brief Reports current playback progress.
+ * @brief Reports the playback progress of the media file.
+ *
+ * @details
+ * When playing media files, the SDK triggers this callback every two seconds to report current
+ * playback progress.
+ *
+ * @param positionMs The playback position (ms) of media files.
+ * @param timestampMs The NTP timestamp (ms) of the current playback progress.
*
- * The callback occurs once every one second during the playback and reports the current playback progress.
- * @param positionMs Current playback progress (milisecond).
- * @param timestampMs Current NTP(Network Time Protocol) time (milisecond).
*/
virtual void onPositionChanged(int64_t positionMs, int64_t timestampMs) = 0;
/**
- * @brief Reports the playback event.
+ * @brief Reports the player events.
*
- * - After calling the `seek` method, the SDK triggers the callback to report the results of the seek operation.
- * - After calling the `selectAudioTrack` method, the SDK triggers the callback to report that the audio track changes.
+ * @details
+ * - After calling the `seek` method, the SDK triggers the callback to report the results of the
+ * seek operation.
+ *
+ * @param eventCode The player event. See `MEDIA_PLAYER_EVENT`.
+ * @param elapsedTime The time (ms) when the event occurs.
+ * @param message Information about the event.
*
- * @param eventCode The playback event. See {@link media::base::MEDIA_PLAYER_EVENT MEDIA_PLAYER_EVENT}.
- * @param elapsedTime The playback elapsed time.
- * @param message The playback message.
*/
virtual void onPlayerEvent(media::base::MEDIA_PLAYER_EVENT eventCode, int64_t elapsedTime, const char* message) = 0;
/**
- * @brief Occurs when the metadata is received.
+ * @brief Occurs when the media metadata is received.
+ *
+ * @details
+ * The callback occurs when the player receives the media metadata and reports the detailed
+ * information of the media metadata.
*
- * The callback occurs when the player receives the media metadata and reports the detailed information of the media metadata.
* @param data The detailed data of the media metadata.
* @param length The data length (bytes).
+ *
*/
virtual void onMetaData(const void* data, int length) = 0;
/**
- * @brief Triggered when play buffer updated, once every 1 second
+ * @brief Reports the playback duration that the buffered data can support.
+ *
+ * @details
+ * When playing online media resources, the SDK triggers this callback every two seconds to report
+ * the playback duration that the currently buffered data can support.
+ * - When the playback duration supported by the buffered data is less than the threshold (0 by
+ * default), the SDK returns `PLAYER_EVENT_BUFFER_LOW` (6).
+ * - When the playback duration supported by the buffered data is greater than the threshold (0 by
+ * default), the SDK returns `PLAYER_EVENT_BUFFER_RECOVER` (7).
+ *
+ * @param playCachedBuffer The playback duration (ms) that the buffered data can support.
*
- * @param int cached buffer during playing, in milliseconds
*/
virtual void onPlayBufferUpdated(int64_t playCachedBuffer) = 0;
/**
- * @brief Triggered when the player preloadSrc
+ * @brief Reports the events of preloaded media resources.
+ *
+ * @param src The URL of the media resource.
+ * @param event Events that occur when media resources are preloaded. See `PLAYER_PRELOAD_EVENT`.
*
- * @param event
*/
virtual void onPreloadEvent(const char* src, media::base::PLAYER_PRELOAD_EVENT event) = 0;
@@ -472,43 +497,65 @@ class IMediaPlayerSourceObserver {
virtual void onAgoraCDNTokenWillExpire() = 0;
/**
- * @brief Reports current playback source bitrate changed.
- * @brief Reports current playback source info changed.
+ * @brief Occurs when the video bitrate of the media resource changes.
+ *
+ * @param from Information about the video bitrate of the media resource being played. See
+ * `SrcInfo`.
+ * @param to Information about the changed video bitrate of media resource being played. See
+ * `SrcInfo`.
*
- * @param from Streaming media information before the change.
- * @param to Streaming media information after the change.
*/
virtual void onPlayerSrcInfoChanged(const media::base::SrcInfo& from, const media::base::SrcInfo& to) = 0;
- /**
- * @brief Triggered when media player information updated.
+ /**
+ * @brief Occurs when information related to the media player changes.
+ *
+ * @details
+ * When the information about the media player changes, the SDK triggers this callback. You can use
+ * this callback for troubleshooting.
+ *
+ * @param info Information related to the media player. See `PlayerUpdatedInfo`.
*
- * @param info Include information of media player.
*/
virtual void onPlayerInfoUpdated(const media::base::PlayerUpdatedInfo& info) = 0;
- /**
- * @brief Triggered every 1 second, reports the statistics of the files being cached.
- *
- * @param stats Cached file statistics.
+ /**
+ * @brief Reports the statistics of the media file being cached.
+ *
+ * @details
+ * After you call the `openWithMediaSource` method and set `enableCache` as `true`, the SDK triggers
+ * this callback once per second to report the statistics of the media file being cached.
+ *
+ * @param stats The statistics of the media file being cached. See `CacheStatistics`.
+ *
*/
virtual void onPlayerCacheStats(const media::base::CacheStatistics& stats) {
(void)stats;
}
- /**
- * @brief Triggered every 1 second, reports the statistics of the media stream being played.
- *
- * @param stats The statistics of the media stream.
+ /**
+ * @brief Reports the statistics of the media file being played.
+ *
+ * @details
+ * The SDK triggers this callback once per second to report the statistics of the media file being
+ * played.
+ *
+ * @param stats The statistics of the media file. See `PlayerPlaybackStats`.
+ *
*/
virtual void onPlayerPlaybackStats(const media::base::PlayerPlaybackStats& stats) {
(void)stats;
}
/**
- * @brief Triggered every 200 millisecond ,update player current volume range [0,255]
+ * @brief Reports the volume of the media player.
+ *
+ * @details
+ * The SDK triggers this callback every 200 milliseconds to report the current volume of the media
+ * player.
+ *
+ * @param volume The volume of the media player. The value ranges from 0 to 255.
*
- * @param volume volume of current player.
*/
virtual void onAudioVolumeIndication(int volume) = 0;
};
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaRecorder.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaRecorder.h
index 79a8db35e..b10ee2b37 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaRecorder.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaRecorder.h
@@ -17,70 +17,82 @@ class IMediaRecorder : public RefCountInterface {
public:
/**
- * Registers the IMediaRecorderObserver object.
+ * @brief Registers the `IMediaRecorderObserver` observer.
*
* @since v4.0.0
*
- * @note Call this method before the startRecording method.
+ * @details
+ * This method sets the callback for audio and video recording, so the app can be notified of
+ * recording status and information during the recording process.
+ * Before calling this method, make sure that:
+ * - The `IRtcEngine` object has been created and initialized.
+ * - The media recorder object has been created using `createMediaRecorder`.
*
- * @param callback The callbacks for recording audio and video streams. See \ref IMediaRecorderObserver.
+ * @param callback Callback for audio and video stream recording. See `IMediaRecorderObserver`.
*
* @return
- * - 0(ERR_OK): Success.
- * - < 0: Failure:
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int setMediaRecorderObserver(media::IMediaRecorderObserver* callback) = 0;
/**
- * Starts recording the local or remote audio and video.
+ * @brief Starts audio and video stream recording.
*
* @since v4.0.0
*
- * After successfully calling \ref IRtcEngine::createMediaRecorder "createMediaRecorder" to get the media recorder object
- * , you can call this method to enable the recording of the local audio and video.
- *
- * This method can record the following content:
- * - The audio captured by the local microphone and encoded in AAC format.
- * - The video captured by the local camera and encoded by the SDK.
- * - The audio received from remote users and encoded in AAC format.
- * - The video received from remote users.
- *
- * The SDK can generate a recording file only when it detects the recordable audio and video streams; when there are
- * no audio and video streams to be recorded or the audio and video streams are interrupted for more than five
- * seconds, the SDK stops recording and triggers the
- * \ref IMediaRecorderObserver::onRecorderStateChanged "onRecorderStateChanged" (RECORDER_STATE_ERROR, RECORDER_ERROR_NO_STREAM)
- * callback.
+ * @details
+ * This method starts recording audio and video streams. The Agora SDK supports recording both local
+ * and remote users' audio and video streams simultaneously.
+ * Before starting the recording, make sure that:
+ * - You have created the media recorder object using `createMediaRecorder`.
+ * - You have registered a recorder observer using `setMediaRecorderObserver` to listen for
+ * recording callbacks.
+ * - You have joined a channel.
+ * This method supports recording the following data:
+ * - Audio captured from the microphone in AAC encoding format.
+ * - Video captured from the camera in H.264 or H.265 encoding format.
+ * After recording starts, if the video resolution changes during recording, the SDK stops the
+ * recording. If the audio sample rate or number of channels changes, the SDK continues recording
+ * and generates a single MP4 file.
+ * A recording file is only successfully generated when a recordable audio or video stream is
+ * detected. If there is no recordable stream, or if the stream is interrupted for more than 5
+ * seconds during recording, the SDK stops the recording and triggers the
+ * `onRecorderStateChanged` (`RECORDER_STATE_ERROR, RECORDER_REASON_NO_STREAM`) callback.
*
- * @note Call this method after joining the channel.
+ * @note
+ * - If you want to record local audio and video streams, make sure the local user role is set to
+ * broadcaster before starting recording.
+ * - If you want to record remote audio and video streams, make sure you have subscribed to the
+ * remote user's streams before starting recording.
*
- * @param config The recording configurations. See MediaRecorderConfiguration.
+ * @param config Audio and video stream recording configuration. See `MediaRecorderConfiguration`.
*
* @return
- * - 0(ERR_OK): Success.
- * - < 0: Failure:
- * - `-1(ERR_FAILED)`: IRtcEngine does not support the request because the remote user did not subscribe to the target channel or the media streams published by the local user during remote recording.
- * - `-2(ERR_INVALID_ARGUMENT)`: The parameter is invalid. Ensure the following:
- * - The specified path of the recording file exists and is writable.
- * - The specified format of the recording file is supported.
- * - The maximum recording duration is correctly set.
- * - During remote recording, ensure the user whose media streams you want record did join the channel.
- * - `-4(ERR_NOT_SUPPORTED)`: IRtcEngine does not support the request due to one of the following reasons:
- * - The recording is ongoing.
- * - The recording stops because an error occurs.
- * - No \ref IMediaRecorderObserver object is registered.
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
+ * - -2: Invalid parameter. Please ensure that:
+ * - The specified recording file path is correct and writable.
+ * - The specified recording file format is correct.
+ * - The maximum recording duration is set correctly.
+ * - -4: `IRtcEngine` is in a state that does not support this operation. This may be because a
+ * recording is already in progress or has stopped due to an error.
+ * - -7: `IRtcEngine` is not initialized when this method is called. Please make sure the
+ * `IMediaRecorder` object has been created before calling this method.
*/
virtual int startRecording(const media::MediaRecorderConfiguration& config) = 0;
/**
- * Stops recording the audio and video.
+ * @brief Stops audio and video stream recording.
*
* @since v4.0.0
*
- * @note After calling \ref IMediaRecorder::startRecording "startRecording", if you want to stop the recording,
- * you must call `stopRecording`; otherwise, the generated recording files might not be playable.
- *
+ * @note After calling `startRecording`, you must call this method to stop the recording; otherwise,
+ * the generated recording file may not play properly.
*
* @return
- * - 0(ERR_OK): Success.
+ * - 0: Success.
* - < 0: Failure:
+ * - -7: `IRtcEngine` is not initialized when this method is called. Please make sure the
+ * `IMediaRecorder` object has been created before calling this method.
*/
virtual int stopRecording() = 0;
};
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraParameter.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraParameter.h
index f50afe9b5..a463de955 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraParameter.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraParameter.h
@@ -144,6 +144,10 @@ typedef CopyableAutoPtr AString;
namespace base {
+/**
+ * @brief The interface class of Agora RTC SDK, which provides JSON configuration information of the
+ * SDK.
+ */
class IAgoraParameter : public RefCountInterface {
public:
/**
@@ -291,10 +295,17 @@ class IAgoraParameter : public RefCountInterface {
virtual int getArray(const char* key, const char* args, agora::util::AString& value) = 0;
/**
- * set parameters of the sdk or engine
- * @param [in] parameters
- * the parameters
- * @return return 0 if success or an error code
+ * @brief Provides the technical preview functionalities or special customizations by configuring
+ * the SDK with JSON options.
+ *
+ * @details
+ * Contact `technical support` to get the JSON configuration method.
+ *
+ * @param parameters Pointer to the set parameters in a JSON string.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setParameters(const char* parameters) = 0;
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraService.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraService.h
index edfee1800..a886b598f 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraService.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraService.h
@@ -21,6 +21,7 @@ class IRtmpConnection;
class ILocalUser;
class IMediaDeviceManager;
class INGAudioDeviceManager;
+class INGVideoDeviceManager;
struct TConnectionInfo;
struct RtcConnectionConfiguration;
struct RtmpConnectionConfiguration;
@@ -380,6 +381,23 @@ class IServiceObserver {
* @param configContent The config fetched from server.
*/
virtual void onFetchConfigResult(int code, rtc::CONFIG_FETCH_TYPE configType, const char* configContent) {}
+
+#if defined(__ANDROID__)
+ /**
+ * Reports the permission granted.
+ * @param permission {@link PERMISSION}
+ */
+ virtual void onPermissionGranted(agora::rtc::PERMISSION_TYPE permissionType) {}
+#endif
+
+ /**
+ * Occurs when the local user registers a user account.
+ *
+ * @param uid The ID of the local user.
+ * @param userAccount The user account of the local user.
+ */
+ virtual void onLocalUserRegistered(rtc::uid_t uid, const char* userAccount) {}
+
};
/**
@@ -669,6 +687,16 @@ class IAgoraService {
*/
virtual agora_refptr createAudioDeviceManager() = 0;
+
+ /**
+ * Creates a video device manager object and returns the pointer.
+ *
+ * @return
+ * - The pointer to \ref rtc::INGVideoDeviceManager "INGVideoDeviceManager": Success.
+ * - A null pointer: Failure.
+ */
+ virtual agora_refptr createVideoDeviceManager() = 0;
+
/**
* Creates a media node factory object and returns the pointer.
*
@@ -794,6 +822,21 @@ class IAgoraService {
*/
virtual agora_refptr createScreenCaptureVideoTrack(
agora_refptr screen, const char* id = OPTIONAL_NULLPTR) = 0;
+
+
+/**
+ * Creates a local audio track object with a screen capture source extension and returns the pointer.
+ *
+ * Once created, this track can be used to work with the screen capture extension.
+ *
+ * @param screen The pointer to the screen capture source.
+ *
+ * @return
+ * - The pointer to \ref rtc::ILocalAudioTrack "ILocalAudioTrack": Success.
+ * - A null pointer: Failure.
+ */
+ virtual agora_refptr createScreenCaptureAudioTrack(
+ agora_refptr screen) = 0;
#endif
/// @cond (!Linux)
@@ -1019,4 +1062,4 @@ class IAgoraService {
* - A null pointer: Failure.
*/
AGORA_API agora::base::IAgoraService* AGORA_CALL createAgoraService();
-/** @} */
\ No newline at end of file
+/** @} */
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraCameraCapturer.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraCameraCapturer.h
index 022a6c181..ebb5e6502 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraCameraCapturer.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraCameraCapturer.h
@@ -52,66 +52,6 @@ class ICameraCapturer : public RefCountInterface {
CAMERA_STOPPED,
};
- // Interface for receiving information about available camera devices.
- /**
- * The IDeviceInfo class, which manages the information of available cameras.
- */
- class IDeviceInfo {
- public:
- virtual ~IDeviceInfo() {}
-
- /**
- * Releases the device.
- */
- virtual void release() = 0;
-
- /**
- * Gets the number of all available cameras.
- * @return The number of all available cameras.
- */
- virtual uint32_t NumberOfDevices() = 0;
-
- /**
- * Gets the name of a specified camera.
- * @param deviceNumber The index number of the device.
- * @param deviceNameUTF8 The name of the device.
- * @param deviceNameLength The length of the device name.
- * @param deviceUniqueIdUTF8 The unique ID of the device.
- * @param deviceUniqueIdLength The length of the device ID.
- * @param productUniqueIdUTF8 The unique ID of the product.
- * @param productUniqueIdLength The length of the product ID.
- * @param deviceTypeUTF8 The camera type of the device.
- * @param deviceTypeLength The length of the camera type.
- * @return
- * The name of the device in the UTF8 format: Success.
- */
- virtual int32_t GetDeviceName(uint32_t deviceNumber, char* deviceNameUTF8,
- uint32_t deviceNameLength, char* deviceUniqueIdUTF8,
- uint32_t deviceUniqueIdLength, char* productUniqueIdUTF8 = 0,
- uint32_t productUniqueIdLength = 0,
- char* deviceTypeUTF8 = 0, uint32_t deviceTypeLength = 0) = 0;
-
- /**
- * Sets the capability number for a specified device.
- * @param deviceUniqueIdUTF8 The pointer to the ID of the device in the UTF8 format.
- * @return
- * The capability number of the device.
- */
- virtual int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8) = 0;
-
- /**
- * Gets the capability of a specified device.
- * @param deviceUniqueIdUTF8 The pointer to the ID of the device in the UTF8 format.
- * @param deviceCapabilityNumber The capability number of the device.
- * @param capability The reference to the video capability. See {@link VideoFormat}.
- * @return
- * The capability number of the device.
- */
- virtual int32_t GetCapability(const char* deviceUniqueIdUTF8,
- const uint32_t deviceCapabilityNumber,
- VideoFormat& capability) = 0;
- };
-
public:
#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IPHONE) || defined (__OHOS__)
/**
@@ -385,16 +325,6 @@ class ICameraCapturer : public RefCountInterface {
#elif defined(_WIN32) || (defined(__linux__) && !defined(__ANDROID__) && !defined (__OHOS__)) || \
(defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE)
- /**
- * Creates a DeviceInfo object.
- *
- * @note
- * This method applies to Windows, macOS, and Linux only.
- * @return
- * - The pointer to \ref agora::rtc::ICameraCapturer::IDeviceInfo "IDeviceInfo": Success.
- * - An empty pointer NULL: Failure.
- */
- virtual IDeviceInfo* createDeviceInfo() = 0;
/**
* Initializes the device with the device ID.
*
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraRtcConnection.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraRtcConnection.h
index e96bc304e..d7f150a53 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraRtcConnection.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraRtcConnection.h
@@ -531,6 +531,12 @@ class IRtcConnection : public RefCountInterface {
* When enabled, the SDK can use multiple network paths for data transmission,
* which can improve the reliability and performance of the connection.
*
+ * @note
+ * - Permissions and system requirements:
+ * - Android: Android 7.0 or later (API level 24 or later), with the ACCESS_NETWORK_STATE and CHANGE_NETWORK_STATE permissions.
+ * - iOS: iOS 12.0 or later
+ * - macOS: macOS 10.14 or later
+ * - Windows: Windows Vista or later
* @param enable A boolean value indicating whether to enable (true) or disable (false) multipath.
* @param ares A reference for asynchronous operations, defaulting to AOSL_REF_INVALID.
* @return
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraSyncClient.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraSyncClient.h
index 4b85bd44b..8c01b017d 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraSyncClient.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraSyncClient.h
@@ -69,6 +69,7 @@ class ISyncClientObserver {
const CollectionEvent* events, int eventSize) = 0;
virtual void onDatabaseEvent(const char* databaseName, SyncClientError error) = 0;
virtual void onDataException(const char* databaseName, const char* collectionName) = 0;
+ virtual void onCollectionSyncCompleted() = 0;
virtual ~ISyncClientObserver() {};
};
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraVideoTrack.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraVideoTrack.h
index 22874c28e..7abd8f6b6 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraVideoTrack.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraVideoTrack.h
@@ -326,6 +326,7 @@ struct LocalVideoTrackStats {
int height;
uint32_t encoder_type;
uint32_t hw_encoder_accelerating;
+ uint32_t encoder_frame_depth;
/*
* encoder vender id, VideoCodecVenderId
*/
@@ -349,8 +350,6 @@ struct LocalVideoTrackStats {
SimulcastStreamProfile simulcast_stream_profile[STREAM_LAYER_COUNT_MAX];
- uint8_t hdr_stream_encoder;
-
LocalVideoTrackStats() : number_of_streams(0),
bytes_major_stream(0),
bytes_minor_stream(0),
@@ -373,12 +372,12 @@ struct LocalVideoTrackStats {
height(0),
encoder_type(0),
hw_encoder_accelerating(0),
+ encoder_frame_depth(8),
encoder_vender_id(0),
uplink_cost_time_ms(0),
quality_adapt_indication(ADAPT_NONE),
txPacketLossRate(0),
- capture_brightness_level(CAPTURE_BRIGHTNESS_LEVEL_INVALID),
- hdr_stream_encoder(0) {}
+ capture_brightness_level(CAPTURE_BRIGHTNESS_LEVEL_INVALID) {}
};
/**
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_types.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_types.h
index 71e2b0152..706bad712 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_types.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_types.h
@@ -54,6 +54,33 @@ typedef void (*aosl_argv_f) (uintptr_t argc, uintptr_t argv []);
typedef aosl_argv_f aosl_obj_dtor_t;
+/**
+ * The common customized internal object constructor callback function.
+ * Parameters:
+ * dst: the destination address internal for constructing the object;
+ * src: the source address for constructing the internal object;
+ * Return value:
+ * <0: indicate some error occurs;
+ * >=0: successful;
+ * Remarks:
+ * Construct the customized internal object according to the source,
+ * such as invoking the C++ constructor etc for C++ API.
+ **/
+typedef int (*aosl_ctor_t) (void *dst, void *src);
+
+/**
+ * The common customized internal object destructor callback function.
+ * Parameters:
+ * obj: the address of the customized internal object;
+ * Return value:
+ * N/A.
+ * Remarks:
+ * Destruct the customized internal object in this callback function,
+ * such as invoking the C++ destructor etc for C++ API.
+ **/
+typedef void (*aosl_dtor_t) (void *obj);
+
+
#if !defined (_WIN32) && !defined (__kspreadtrum__)
typedef int aosl_fd_t;
#define AOSL_INVALID_FD ((aosl_fd_t)-1)
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ref_class.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ref_class.h
index f85bdcca8..16a53ea4e 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ref_class.h
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ref_class.h
@@ -1302,7 +1302,7 @@ class aosl_ref_class {
aosl_ref_magic_t magic () const
{
- return refoop->magic ();
+ return refmagic;
}
int hold (aosl_ref_func_t f, uintptr_t argc, ...)
diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/CMakeLists.txt b/Android/APIExample/agora-simple-filter/src/main/cpp/CMakeLists.txt
index b052242a5..b6cf19282 100644
--- a/Android/APIExample/agora-simple-filter/src/main/cpp/CMakeLists.txt
+++ b/Android/APIExample/agora-simple-filter/src/main/cpp/CMakeLists.txt
@@ -3,7 +3,7 @@
# Sets the minimum version of CMake required to build the native library.
-cmake_minimum_required(VERSION 3.4.1)
+cmake_minimum_required(VERSION 3.22.1)
project(agora-simple-filter)
diff --git a/Android/APIExample/agora-stream-encrypt/build.gradle b/Android/APIExample/agora-stream-encrypt/build.gradle
index 68ae77f33..cb6d4082b 100644
--- a/Android/APIExample/agora-stream-encrypt/build.gradle
+++ b/Android/APIExample/agora-stream-encrypt/build.gradle
@@ -35,7 +35,7 @@ android {
externalNativeBuild {
cmake {
path "src/main/cpp/CMakeLists.txt"
- version "3.10.2"
+ version "3.22.1"
}
}
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/CMakeLists.txt b/Android/APIExample/agora-stream-encrypt/src/main/cpp/CMakeLists.txt
index 4547b6c56..977c28ca8 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/CMakeLists.txt
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/CMakeLists.txt
@@ -3,7 +3,7 @@
# Sets the minimum version of CMake required to build the native library.
-cmake_minimum_required(VERSION 3.4.1)
+cmake_minimum_required(VERSION 3.22.1)
project(agora-stream-encrypt)
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraBase.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraBase.h
index 537fd5fae..99397d4fe 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraBase.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraBase.h
@@ -262,19 +262,15 @@ class AList {
} // namespace util
/**
- * The channel profile.
+ * @brief The channel profile.
*/
enum CHANNEL_PROFILE_TYPE {
/**
- * 0: Communication.
- *
- * This profile prioritizes smoothness and applies to the one-to-one scenario.
+ * 0: Communication. Use this profile when there are only two users in the channel.
*/
CHANNEL_PROFILE_COMMUNICATION = 0,
/**
- * 1: (Default) Live Broadcast.
- *
- * This profile prioritizes supporting a large audience in a live broadcast channel.
+ * 1: Live streaming. Use this profile when there are more than two users in the channel.
*/
CHANNEL_PROFILE_LIVE_BROADCASTING = 1,
/**
@@ -283,8 +279,8 @@ enum CHANNEL_PROFILE_TYPE {
*/
CHANNEL_PROFILE_GAME __deprecated = 2,
/**
- * 3: Cloud Gaming.
- *
+ * 3: Cloud gaming. The scenario is optimized for latency. Use this profile if the use case requires
+ * frequent interactions between users.
* @deprecated This profile is deprecated.
*/
CHANNEL_PROFILE_CLOUD_GAMING __deprecated = 3,
@@ -451,59 +447,75 @@ enum WARN_CODE_TYPE {
};
/**
- * The error codes.
+ * @brief Error codes.
+ *
+ * @details
+ * An error code indicates that the SDK encountered an unrecoverable error that requires application
+ * intervention. For example, an error is returned when the camera fails to open, and the app needs
+ * to inform the user that the camera cannot be used.
+ *
*/
enum ERROR_CODE_TYPE {
/**
- * 0: No error occurs.
+ * 0: No error.
*/
ERR_OK = 0,
// 1~1000
/**
- * 1: A general error occurs (no specified reason).
+ * 1: General error with no classified reason. Try calling the method again.
*/
ERR_FAILED = 1,
/**
- * 2: The argument is invalid. For example, the specific channel name
- * includes illegal characters.
+ * 2: An invalid parameter is used. For example, the specified channel name includes illegal
+ * characters. Reset the parameter.
*/
ERR_INVALID_ARGUMENT = 2,
/**
- * 3: The SDK module is not ready. Choose one of the following solutions:
- * - Check the audio device.
- * - Check the completeness of the app.
- * - Reinitialize the RTC engine.
+ * 3: The SDK is not ready. Possible reasons include the following:
+ * - The initialization of `IRtcEngine` fails. Reinitialize the `IRtcEngine`.
+ * - No user has joined the channel when the method is called. Check the code logic.
+ * - The user has not left the channel when the `rate` or `complain` method is called. Check the
+ * code logic.
+ * - The audio module is disabled.
+ * - The program is not complete.
*/
ERR_NOT_READY = 3,
/**
- * 4: The SDK does not support this function.
+ * 4: The `IRtcEngine` does not support the request. Possible reasons include the following:
+ * - The built-in encryption mode is incorrect, or the SDK fails to load the external encryption
+ * library. Check the encryption mode setting, or reload the external encryption library.
*/
ERR_NOT_SUPPORTED = 4,
/**
- * 5: The request is rejected.
+ * 5: The request is rejected. Possible reasons include the following:
+ * - The `IRtcEngine` initialization fails. Reinitialize the `IRtcEngine`.
+ * - The channel name is set as the empty string `""` when joining the channel. Reset the channel
+ * name.
+ * - When the `joinChannelEx` method is called to join multiple channels, the specified channel name
+ * is already in use. Reset the channel name.
*/
ERR_REFUSED = 5,
/**
- * 6: The buffer size is not big enough to store the returned data.
+ * 6: The buffer size is insufficient to store the returned data.
*/
ERR_BUFFER_TOO_SMALL = 6,
/**
- * 7: The SDK is not initialized before calling this method.
+ * 7: A method is called before the initialization of `IRtcEngine`. Ensure that the `IRtcEngine`
+ * object is initialized before using this method.
*/
ERR_NOT_INITIALIZED = 7,
/**
- * 8: The state is invalid.
+ * 8: Invalid state.
*/
ERR_INVALID_STATE = 8,
/**
- * 9: No permission. This is for internal use only, and does
- * not return to the app through any method or callback.
+ * 9: Permission to access is not granted. Check whether your app has access to the audio and video
+ * device.
*/
ERR_NO_PERMISSION = 9,
/**
- * 10: An API timeout occurs. Some API methods require the SDK to return the
- * execution result, and this error occurs if the request takes too long
- * (more than 10 seconds) for the SDK to process.
+ * 10: A timeout occurs. Some API calls require the SDK to return the execution result. This error
+ * occurs if the SDK takes too long (more than 10 seconds) to return the result.
*/
ERR_TIMEDOUT = 10,
/**
@@ -529,126 +541,112 @@ enum ERROR_CODE_TYPE {
*/
ERR_NET_DOWN = 14,
/**
- * 17: The request to join the channel is rejected. This error usually occurs
- * when the user is already in the channel, and still calls the method to join
- * the channel, for example, \ref agora::rtc::IRtcEngine::joinChannel "joinChannel()".
+ * 17: The request to join the channel is rejected. Possible reasons include the following:
+ * - The user is already in the channel. Agora recommends that you use the
+ * `onConnectionStateChanged` callback to see whether the user is in the channel. Do not call this
+ * method to join the channel unless you receive the `CONNECTION_STATE_DISCONNECTED` (1) state.
+ * - After calling `startEchoTest` for the call test, the user tries to join the channel without
+ * calling `stopEchoTest` to end the current test. To join a channel, the call test must be ended by
+ * calling `stopEchoTest`.
*/
ERR_JOIN_CHANNEL_REJECTED = 17,
/**
- * 18: The request to leave the channel is rejected. This error usually
- * occurs when the user has already left the channel, and still calls the
- * method to leave the channel, for example, \ref agora::rtc::IRtcEngine::leaveChannel
- * "leaveChannel".
+ * 18: Fails to leave the channel. Possible reasons include the following:
+ * - The user has left the channel before calling the `leaveChannel(const LeaveChannelOptions&
+ * options)` method. Stop calling this
+ * method to clear this error.
+ * - The user calls the `leaveChannel(const LeaveChannelOptions& options)` method to leave the
+ * channel before joining the channel.
+ * In this case, no extra operation is needed.
*/
ERR_LEAVE_CHANNEL_REJECTED = 18,
/**
- * 19: The resources have been occupied and cannot be reused.
+ * 19: Resources are already in use.
*/
ERR_ALREADY_IN_USE = 19,
/**
- * 20: The SDK gives up the request due to too many requests. This is for
- * internal use only, and does not return to the app through any method or callback.
+ * 20: The request is abandoned by the SDK, possibly because the request has been sent too
+ * frequently.
*/
ERR_ABORTED = 20,
/**
- * 21: On Windows, specific firewall settings can cause the SDK to fail to
- * initialize and crash.
+ * 21: The `IRtcEngine` fails to initialize and has crashed because of specific Windows firewall
+ * settings.
*/
ERR_INIT_NET_ENGINE = 21,
/**
- * 22: The app uses too much of the system resource and the SDK
- * fails to allocate any resource.
+ * 22: The SDK fails to allocate resources because your app uses too many system resources or system
+ * resources are insufficient.
*/
ERR_RESOURCE_LIMITED = 22,
/**
- * 101: The App ID is invalid, usually because the data format of the App ID is incorrect.
- *
- * Solution: Check the data format of your App ID. Ensure that you use the correct App ID to
- * initialize the Agora service.
+ * 23: The function is prohibited. Please allow it in the console, or contact the Agora technical support.
+ * @technical preview
+ */
+ ERR_FUNC_IS_PROHIBITED = 23,
+ /**
+ * 101: The specified App ID is invalid. Rejoin the channel with a valid App ID.
*/
ERR_INVALID_APP_ID = 101,
/**
- * 102: The specified channel name is invalid. Please try to rejoin the
- * channel with a valid channel name.
+ * 102: The specified channel name is invalid. A possible reason is that the parameter's data type
+ * is incorrect. Rejoin the channel with a valid channel name.
*/
ERR_INVALID_CHANNEL_NAME = 102,
/**
- * 103: Fails to get server resources in the specified region. Please try to
- * specify another region when calling \ref agora::rtc::IRtcEngine::initialize
- * "initialize".
+ * 103: Fails to get server resources in the specified region. Try another region when initializing
+ * `IRtcEngine`.
*/
ERR_NO_SERVER_RESOURCES = 103,
/**
- * 109: The token has expired, usually for the following reasons:
- * - Timeout for token authorization: Once a token is generated, you must use it to access the
- * Agora service within 24 hours. Otherwise, the token times out and you can no longer use it.
- * - The token privilege expires: To generate a token, you need to set a timestamp for the token
- * privilege to expire. For example, If you set it as seven days, the token expires seven days
- * after its usage. In that case, you can no longer access the Agora service. The users cannot
- * make calls, or are kicked out of the channel.
- *
- * Solution: Regardless of whether token authorization times out or the token privilege expires,
- * you need to generate a new token on your server, and try to join the channel.
+ * 109: The current token has expired. Apply for a new token on the server and call `renewToken`.
*/
ERR_TOKEN_EXPIRED = 109,
/**
- * 110: The token is invalid, usually for one of the following reasons:
- * - Did not provide a token when joining a channel in a situation where the project has enabled
- * the App Certificate.
- * - Tried to join a channel with a token in a situation where the project has not enabled the App
- * Certificate.
- * - The App ID, user ID and channel name that you use to generate the token on the server do not
- * match those that you use when joining a channel.
- *
- * Solution:
- * - Before joining a channel, check whether your project has enabled the App certificate. If yes,
- * you must provide a token when joining a channel; if no, join a channel without a token.
- * - When using a token to join a channel, ensure that the App ID, user ID, and channel name that
- * you use to generate the token is the same as the App ID that you use to initialize the Agora
- * service, and the user ID and channel name that you use to join the channel.
+ * 110: Invalid token. Typical reasons include the following:
+ * - App Certificate is enabled in Agora Console, but the code still uses App ID for authentication.
+ * Once App Certificate is enabled for a project, you must use token-based authentication.
+ * - The `uid` used to generate the token is not the same as the `uid` used to join the channel.
*/
ERR_INVALID_TOKEN = 110,
/**
- * 111: The internet connection is interrupted. This applies to the Agora Web
- * SDK only.
+ * 111: The network connection is interrupted. The SDK triggers this callback when it loses
+ * connection with the server for more than four seconds after the connection is established.
*/
ERR_CONNECTION_INTERRUPTED = 111, // only used in web sdk
/**
- * 112: The internet connection is lost. This applies to the Agora Web SDK
- * only.
+ * 112: The network connection is lost. Occurs when the SDK cannot reconnect to Agora's edge server
+ * 10 seconds after its connection to the server is interrupted.
*/
ERR_CONNECTION_LOST = 112, // only used in web sdk
/**
- * 113: The user is not in the channel when calling the
- * \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage()" method.
+ * 113: The user is not in the channel when calling the `sendStreamMessage` method.
*/
ERR_NOT_IN_CHANNEL = 113,
/**
- * 114: The data size is over 1024 bytes when the user calls the
- * \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage()" method.
+ * 114: The data size exceeds 1 KB when calling the `sendStreamMessage` method.
*/
ERR_SIZE_TOO_LARGE = 114,
/**
- * 115: The bitrate of the sent data exceeds the limit of 6 Kbps when the
- * user calls the \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage()".
+ * 115: The data bitrate exceeds 6 KB/s when calling the `sendStreamMessage` method.
*/
ERR_BITRATE_LIMIT = 115,
/**
- * 116: Too many data streams (over 5) are created when the user
- * calls the \ref agora::rtc::IRtcEngine::createDataStream "createDataStream()" method.
+ * 116: More than five data streams are created when calling the `createDataStream(int* streamId,
+ * const DataStreamConfig& config)` method.
*/
ERR_TOO_MANY_DATA_STREAMS = 116,
/**
- * 117: A timeout occurs for the data stream transmission.
+ * 117: The data stream transmission times out.
*/
ERR_STREAM_MESSAGE_TIMEOUT = 117,
/**
- * 119: Switching the user role fails. Please try to rejoin the channel.
+ * 119: Switching roles fails, try rejoining the channel.
*/
ERR_SET_CLIENT_ROLE_NOT_AUTHORIZED = 119,
/**
- * 120: MediaStream decryption fails. The user may have tried to join the channel with a wrong
- * password. Check your settings or try rejoining the channel.
+ * 120: Media streams decryption fails. The user might use an incorrect password to join the
+ * channel. Check the entered password, or tell the user to try rejoining the channel.
*/
ERR_DECRYPTION_FAILED = 120,
/**
@@ -656,18 +654,16 @@ enum ERROR_CODE_TYPE {
*/
ERR_INVALID_USER_ID = 121,
/**
- * 122: DataStream decryption fails. The peer may have tried to join the channel with a wrong
- * password, or did't enable datastream encryption
+ * 122: Data streams decryption fails. The user might use an incorrect password to join the channel.
+ * Check the entered password, or tell the user to try rejoining the channel.
*/
ERR_DATASTREAM_DECRYPTION_FAILED = 122,
/**
- * 123: The app is banned by the server.
+ * 123: The user is banned from the server.
*/
ERR_CLIENT_IS_BANNED_BY_SERVER = 123,
/**
- * 130: Encryption is enabled when the user calls the
- * \ref agora::rtc::IRtcEngine::addPublishStreamUrl "addPublishStreamUrl()" method
- * (CDN live streaming does not support encrypted streams).
+ * 130: The SDK does not support pushing encrypted streams to CDN.
*/
ERR_ENCRYPTED_STREAM_NOT_ALLOWED_PUBLISH = 130,
@@ -677,8 +673,7 @@ enum ERROR_CODE_TYPE {
ERR_LICENSE_CREDENTIAL_INVALID = 131,
/**
- * 134: The user account is invalid, usually because the data format of the user account is
- * incorrect.
+ * 134: The user account is invalid, possibly because it contains invalid parameters.
*/
ERR_INVALID_USER_ACCOUNT = 134,
@@ -705,7 +700,13 @@ enum ERROR_CODE_TYPE {
ERR_CERT_REQUEST = 168,
// PcmSend Error num
+ /**
+ * 200: Unsupported PCM format.
+ */
ERR_PCMSEND_FORMAT = 200, // unsupport pcm format
+ /**
+ * 201: Buffer overflow; the PCM data is sent too quickly.
+ */
ERR_PCMSEND_BUFFEROVERFLOW = 201, // buffer overflow, the pcm send rate too quickly
/// @cond
@@ -749,43 +750,43 @@ enum ERROR_CODE_TYPE {
/// @endcond
// 1001~2000
/**
- * 1001: Fails to load the media engine.
+ * 1001: The SDK fails to load the media engine.
*/
ERR_LOAD_MEDIA_ENGINE = 1001,
/**
- * 1005: Audio device module: A general error occurs in the Audio Device Module (no specified
- * reason). Check if the audio device is used by another app, or try
- * rejoining the channel.
+ * 1005: A general error occurs (no specified reason). Check whether the audio device is already in
+ * use by another app, or try rejoining the channel.
*/
ERR_ADM_GENERAL_ERROR = 1005,
/**
- * 1008: Audio Device Module: An error occurs in initializing the playback
- * device.
+ * 1008: An error occurs when initializing the playback device. Check whether the playback device is
+ * already in use by another app, or try rejoining the channel.
*/
ERR_ADM_INIT_PLAYOUT = 1008,
/**
- * 1009: Audio Device Module: An error occurs in starting the playback device.
+ * 1009: An error occurs when starting the playback device. Check the playback device.
*/
ERR_ADM_START_PLAYOUT = 1009,
/**
- * 1010: Audio Device Module: An error occurs in stopping the playback device.
+ * 1010: An error occurs when stopping the playback device.
*/
ERR_ADM_STOP_PLAYOUT = 1010,
/**
- * 1011: Audio Device Module: An error occurs in initializing the recording
- * device.
+ * 1011: An error occurs when initializing the recording device. Check the recording device, or try
+ * rejoining the channel.
*/
ERR_ADM_INIT_RECORDING = 1011,
/**
- * 1012: Audio Device Module: An error occurs in starting the recording device.
+ * 1012: An error occurs when starting the recording device. Check the recording device.
*/
ERR_ADM_START_RECORDING = 1012,
/**
- * 1013: Audio Device Module: An error occurs in stopping the recording device.
+ * 1013: An error occurs when stopping the recording device.
*/
ERR_ADM_STOP_RECORDING = 1013,
/**
- * 1501: Video Device Module: The camera is not authorized.
+ * 1501: Permission to access the camera is not granted. Check whether permission to access the
+ * camera is granted.
*/
ERR_VDM_CAMERA_NOT_AUTHORIZED = 1501,
};
@@ -818,11 +819,11 @@ enum LICENSE_ERROR_TYPE {
};
/**
- * The operational permission of the SDK on the audio session.
+ * @brief The operation permissions of the SDK on the audio session.
*/
enum AUDIO_SESSION_OPERATION_RESTRICTION {
/**
- * 0: No restriction; the SDK can change the audio session.
+ * 0: No restriction, the SDK can change the audio session.
*/
AUDIO_SESSION_OPERATION_RESTRICTION_NONE = 0,
/**
@@ -834,13 +835,13 @@ enum AUDIO_SESSION_OPERATION_RESTRICTION {
*/
AUDIO_SESSION_OPERATION_RESTRICTION_CONFIGURE_SESSION = 1 << 1,
/**
- * 4: The SDK keeps the audio session active when the user leaves the
- * channel, for example, to play an audio file in the background.
+ * 4: The SDK keeps the audio session active when the user leaves the channel, for example, to play
+ * an audio file in the background.
*/
AUDIO_SESSION_OPERATION_RESTRICTION_DEACTIVATE_SESSION = 1 << 2,
/**
- * 128: Completely restricts the operational permission of the SDK on the
- * audio session; the SDK cannot change the audio session.
+ * 128: Completely restricts the operation permissions of the SDK on the audio session; the SDK
+ * cannot change the audio session.
*/
AUDIO_SESSION_OPERATION_RESTRICTION_ALL = 1 << 7,
};
@@ -849,7 +850,7 @@ typedef const char* user_id_t;
typedef void* view_t;
/**
- * The definition of the UserInfo struct.
+ * @brief The information of the user.
*/
struct UserInfo {
/**
@@ -878,17 +879,18 @@ typedef util::AList UserList;
namespace rtc {
/**
- * Reasons for a user being offline.
+ * @brief Reasons for a user being offline.
*/
enum USER_OFFLINE_REASON_TYPE {
/**
- * 0: The user leaves the current channel.
+ * 0: The user quits the call.
*/
USER_OFFLINE_QUIT = 0,
/**
- * 1: The SDK times out and the user drops offline because no data packet was received within a
- * certain period of time. If a user quits the call and the message is not passed to the SDK (due
- * to an unreliable channel), the SDK assumes that the user drops offline.
+ * 1: The SDK times out and the user drops offline because no data packet is received within a
+ * certain period of time.
+ * @note If the user quits the call and the message is not passed to the SDK (due to an unreliable
+ * channel), the SDK assumes the user dropped offline.
*/
USER_OFFLINE_DROPPED = 1,
/**
@@ -897,14 +899,32 @@ enum USER_OFFLINE_REASON_TYPE {
USER_OFFLINE_BECOME_AUDIENCE = 2,
};
+/**
+ * @brief The interface class.
+ */
enum INTERFACE_ID_TYPE {
+ /**
+ * 1: The `IAudioDeviceManager` interface class.
+ */
AGORA_IID_AUDIO_DEVICE_MANAGER = 1,
+ /**
+ * 2: The `IVideoDeviceManager` interface class.
+ */
AGORA_IID_VIDEO_DEVICE_MANAGER = 2,
+ /**
+ * This interface class is deprecated.
+ */
AGORA_IID_PARAMETER_ENGINE = 3,
+ /**
+ * 4: The `IMediaEngine` interface class.
+ */
AGORA_IID_MEDIA_ENGINE = 4,
AGORA_IID_AUDIO_ENGINE = 5,
AGORA_IID_VIDEO_ENGINE = 6,
AGORA_IID_RTC_CONNECTION = 7,
+ /**
+ * This interface class is deprecated.
+ */
AGORA_IID_SIGNALING_ENGINE = 8,
AGORA_IID_MEDIA_ENGINE_REGULATOR = 9,
AGORA_IID_LOCAL_SPATIAL_AUDIO = 11,
@@ -915,7 +935,7 @@ enum INTERFACE_ID_TYPE {
};
/**
- * The network quality types.
+ * @brief Network quality types.
*/
enum QUALITY_TYPE {
/**
@@ -924,16 +944,15 @@ enum QUALITY_TYPE {
*/
QUALITY_UNKNOWN __deprecated = 0,
/**
- * 1: The quality is excellent.
+ * 1: The network quality is excellent.
*/
QUALITY_EXCELLENT = 1,
/**
- * 2: The quality is quite good, but the bitrate may be slightly
- * lower than excellent.
+ * 2: The network quality is quite good, but the bitrate may be slightly lower than excellent.
*/
QUALITY_GOOD = 2,
/**
- * 3: Users can feel the communication slightly impaired.
+ * 3: Users can feel the communication is slightly impaired.
*/
QUALITY_POOR = 3,
/**
@@ -941,11 +960,11 @@ enum QUALITY_TYPE {
*/
QUALITY_BAD = 4,
/**
- * 5: Users can barely communicate.
+ * 5: The quality is so bad that users can barely communicate.
*/
QUALITY_VBAD = 5,
/**
- * 6: Users cannot communicate at all.
+ * 6: The network is down and users cannot communicate at all.
*/
QUALITY_DOWN = 6,
/**
@@ -953,7 +972,7 @@ enum QUALITY_TYPE {
*/
QUALITY_UNSUPPORTED = 7,
/**
- * 8: Detecting the network quality.
+ * 8: The last-mile network probe test is in progress.
*/
QUALITY_DETECTING = 8,
};
@@ -977,29 +996,29 @@ enum FIT_MODE_TYPE {
};
/**
- * The rotation information.
+ * @brief The clockwise rotation of the video.
*/
enum VIDEO_ORIENTATION {
/**
- * 0: Rotate the video by 0 degree clockwise.
+ * 0: (Default) No rotation.
*/
VIDEO_ORIENTATION_0 = 0,
/**
- * 90: Rotate the video by 90 degrees clockwise.
+ * 90: 90 degrees.
*/
VIDEO_ORIENTATION_90 = 90,
/**
- * 180: Rotate the video by 180 degrees clockwise.
+ * 180: 180 degrees.
*/
VIDEO_ORIENTATION_180 = 180,
/**
- * 270: Rotate the video by 270 degrees clockwise.
+ * 270: 270 degrees.
*/
VIDEO_ORIENTATION_270 = 270
};
/**
- * The video frame rate.
+ * @brief The video frame rate.
*/
enum FRAME_RATE {
/**
@@ -1027,7 +1046,8 @@ enum FRAME_RATE {
*/
FRAME_RATE_FPS_30 = 30,
/**
- * 60: 60 fps. Applies to Windows and macOS only.
+ * 60: 60 fps.
+ * @note For Windows and macOS only.
*/
FRAME_RATE_FPS_60 = 60,
};
@@ -1041,85 +1061,97 @@ enum FRAME_HEIGHT {
};
/**
- * Types of the video frame.
+ * @brief The video frame type.
*/
enum VIDEO_FRAME_TYPE {
- /** 0: A black frame. */
+ /**
+ * 0: A black frame.
+ */
VIDEO_FRAME_TYPE_BLANK_FRAME = 0,
- /** 3: Key frame. */
+ /**
+ * 3: Key frame.
+ */
VIDEO_FRAME_TYPE_KEY_FRAME = 3,
- /** 4: Delta frame. */
+ /**
+ * 4: Delta frame.
+ */
VIDEO_FRAME_TYPE_DELTA_FRAME = 4,
- /** 5: The B frame.*/
+ /**
+ * 5: The B frame.
+ */
VIDEO_FRAME_TYPE_B_FRAME = 5,
- /** 6: A discarded frame. */
+ /**
+ * 6: A discarded frame.
+ */
VIDEO_FRAME_TYPE_DROPPABLE_FRAME = 6,
- /** Unknown frame. */
+ /**
+ * Unknown frame.
+ */
VIDEO_FRAME_TYPE_UNKNOW
};
/**
- * Video output orientation modes.
+ * @brief Video output orientation mode.
*/
enum ORIENTATION_MODE {
/**
- * 0: The output video always follows the orientation of the captured video. The receiver takes
- * the rotational information passed on from the video encoder. This mode applies to scenarios
- * where video orientation can be adjusted on the receiver:
+ * 0: (Default) The output video always follows the orientation of the captured video. The receiver
+ * takes the rotational information passed on from the video encoder. This mode applies to scenarios
+ * where video orientation can be adjusted on the receiver.
* - If the captured video is in landscape mode, the output video is in landscape mode.
* - If the captured video is in portrait mode, the output video is in portrait mode.
*/
ORIENTATION_MODE_ADAPTIVE = 0,
/**
- * 1: Landscape mode. In this mode, the SDK always outputs videos in landscape (horizontal) mode.
- * If the captured video is in portrait mode, the video encoder crops it to fit the output.
- * Applies to situations where the receiving end cannot process the rotational information. For
- * example, CDN live streaming.
+ * 1: In this mode, the SDK always outputs videos in landscape (horizontal) mode. If the captured
+ * video is in portrait mode, the video encoder crops it to fit the output. Applies to situations
+ * where the receiving end cannot process the rotational information. For example, CDN live
+ * streaming.
*/
ORIENTATION_MODE_FIXED_LANDSCAPE = 1,
/**
- * 2: Portrait mode. In this mode, the SDK always outputs video in portrait (portrait) mode. If
- * the captured video is in landscape mode, the video encoder crops it to fit the output. Applies
- * to situations where the receiving end cannot process the rotational information. For example,
- * CDN live streaming.
+ * 2: In this mode, the SDK always outputs video in portrait (portrait) mode. If the captured video
+ * is in landscape mode, the video encoder crops it to fit the output. Applies to situations where
+ * the receiving end cannot process the rotational information. For example, CDN live streaming.
*/
ORIENTATION_MODE_FIXED_PORTRAIT = 2,
};
/**
- * (For future use) Video degradation preferences under limited bandwidth.
+ * @brief Video degradation preferences when the bandwidth is a constraint.
*/
enum DEGRADATION_PREFERENCE {
/**
- * -1: (Default) SDK uses degradation preference according to setVideoScenario API settings, real-time network state and other relevant data information.
- * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then MAINTAIN_BALANCED is used. If not, then MAINTAIN_RESOLUTION is used.
- * Also if network state has changed, SDK may change this parameter between MAINTAIN_FRAMERATE、MAINTAIN_BALANCED and MAINTAIN_RESOLUTION automatically to get the best QOE.
- * We recommend using this option.
- */
+ * -1: (Default) Automatic mode. The SDK will automatically select MAINTAIN_FRAMERATE,
+ * MAINTAIN_BALANCED or MAINTAIN_RESOLUTION based on the video scenario you set, in order to achieve
+ * the best overall quality of experience (QoE).
+ */
MAINTAIN_AUTO = -1,
/**
- * 0: (Deprecated) Prefers to reduce the video frame rate while maintaining video quality during
- * video encoding under limited bandwidth. This degradation preference is suitable for scenarios
- * where video quality is prioritized.
- * @note In the COMMUNICATION channel profile, the resolution of the video sent may change, so
- * remote users need to handle this issue.
+ * 0: Prefers to reduce the video frame rate while maintaining video resolution during video
+ * encoding under limited bandwidth. This degradation preference is suitable for scenarios where
+ * video quality is prioritized.
*/
MAINTAIN_QUALITY = 0,
/**
- * 1: Prefers to reduce the video quality while maintaining the video frame rate during video
- * encoding under limited bandwidth. This degradation preference is suitable for scenarios where
- * smoothness is prioritized and video quality is allowed to be reduced.
+ * 1: Reduces the video resolution while maintaining the video frame rate during video encoding
+ * under limited bandwidth. This degradation preference is suitable for scenarios where smoothness
+ * is prioritized and video quality is allowed to be reduced.
*/
MAINTAIN_FRAMERATE = 1,
/**
- * 2: Reduces the video frame rate and video quality simultaneously during video encoding under
- * limited bandwidth. MAINTAIN_BALANCED has a lower reduction than MAINTAIN_RESOLUTION and
- * MAINTAIN_FRAMERATE, and this preference is suitable for scenarios where both smoothness and
- * video quality are a priority.
+ * 2: Reduces the video frame rate and video resolution simultaneously during video encoding under
+ * limited bandwidth. MAINTAIN_BALANCED has a lower reduction than MAINTAIN_QUALITY and
+ * MAINTAIN_FRAMERATE, and this preference is suitable for scenarios where both smoothness and video
+ * quality are a priority.
+ * @note The resolution of the video sent may change, so remote users need to handle this issue. See
+ * `onVideoSizeChanged`.
*/
MAINTAIN_BALANCED = 2,
/**
- * 3: Degrade framerate in order to maintain resolution.
+ * 3: Reduces the video frame rate while maintaining the video resolution during video encoding
+ * under limited bandwidth. This degradation preference is suitable for scenarios where video
+ * quality is prioritized.
*/
MAINTAIN_RESOLUTION = 3,
/**
@@ -1129,15 +1161,15 @@ enum DEGRADATION_PREFERENCE {
};
/**
- * The definition of the VideoDimensions struct.
+ * @brief The video dimension.
*/
struct VideoDimensions {
/**
- * The width of the video, in pixels.
+ * The width (pixels) of the video.
*/
int width;
/**
- * The height of the video, in pixels.
+ * The height (pixels) of the video.
*/
int height;
VideoDimensions() : width(640), height(480) {}
@@ -1174,38 +1206,57 @@ const int DEFAULT_MIN_BITRATE = -1;
const int DEFAULT_MIN_BITRATE_EQUAL_TO_TARGET_BITRATE = -2;
/**
- * screen sharing supported capability level.
+ * @brief The highest frame rate supported by the screen sharing device.
*/
enum SCREEN_CAPTURE_FRAMERATE_CAPABILITY {
+ /**
+ * 0: The device supports the frame rate of up to 15 fps.
+ */
SCREEN_CAPTURE_FRAMERATE_CAPABILITY_15_FPS = 0,
+ /**
+ * 1: The device supports the frame rate of up to 30 fps.
+ */
SCREEN_CAPTURE_FRAMERATE_CAPABILITY_30_FPS = 1,
+ /**
+ * 2: The device supports the frame rate of up to 60 fps.
+ */
SCREEN_CAPTURE_FRAMERATE_CAPABILITY_60_FPS = 2,
};
/**
- * Video codec capability levels.
+ * @brief The level of the codec capability.
*/
enum VIDEO_CODEC_CAPABILITY_LEVEL {
- /** No specified level */
+ /**
+ * -1: Unsupported video type. Currently, only H.264 and H.265 formats are supported. If the video
+ * is in another format, this value will be returned.
+ */
CODEC_CAPABILITY_LEVEL_UNSPECIFIED = -1,
- /** Only provide basic support for the codec type */
+ /**
+ * 5: Supports encoding and decoding videos up to 1080p and 30 fps.
+ */
CODEC_CAPABILITY_LEVEL_BASIC_SUPPORT = 5,
- /** Can process 1080p video at a rate of approximately 30 fps. */
+ /**
+ * 10: Supports encoding and decoding videos up to 1080p and 30 fps.
+ */
CODEC_CAPABILITY_LEVEL_1080P30FPS = 10,
- /** Can process 1080p video at a rate of approximately 60 fps. */
+ /**
+ * 20: Supports encoding and decoding videos up to 1080p and 60 fps.
+ */
CODEC_CAPABILITY_LEVEL_1080P60FPS = 20,
- /** Can process 4k video at a rate of approximately 30 fps. */
+ /**
+ * 30: Supports encoding and decoding videos up to 4K and 30 fps.
+ */
CODEC_CAPABILITY_LEVEL_4K60FPS = 30,
};
/**
- * The video codec types.
+ * @brief Video codec types.
*/
enum VIDEO_CODEC_TYPE {
/**
- * 0: (Default) SDK will automatically adjust the codec type according to country and region or real-time network state and other relevant data information.
- * Also if network state is changed, SDK may change codec automatically to get the best QOE.
- * We recommend use this option.
+ * 0: (Default) Unspecified codec format. The SDK automatically matches the appropriate codec format
+ * based on the current video stream's resolution and device performance.
*/
VIDEO_CODEC_NONE = 0,
/**
@@ -1247,23 +1298,26 @@ enum VIDEO_CODEC_TYPE {
};
/**
- * Camera focal length type.
+ * @brief The camera focal length types.
+ *
+ * @note This enumeration class applies to Android and iOS only.
+ *
*/
enum CAMERA_FOCAL_LENGTH_TYPE {
/**
- * By default, there are no wide-angle and ultra-wide-angle properties.
+ * 0: (Default) Standard lens.
*/
CAMERA_FOCAL_LENGTH_DEFAULT = 0,
/**
- * Lens with focal length from 24mm to 35mm.
+ * 1: Wide-angle lens.
*/
CAMERA_FOCAL_LENGTH_WIDE_ANGLE = 1,
/**
- * Lens with focal length of less than 24mm.
+ * 2: Ultra-wide-angle lens.
*/
CAMERA_FOCAL_LENGTH_ULTRA_WIDE = 2,
/**
- * Telephoto lens.
+ * 3: (For iOS only) Telephoto lens.
*/
CAMERA_FOCAL_LENGTH_TELEPHOTO = 3,
};
@@ -1361,7 +1415,7 @@ struct SenderOptions {
};
/**
- * Audio codec types.
+ * @brief The codec type of audio.
*/
enum AUDIO_CODEC_TYPE {
/**
@@ -1385,11 +1439,11 @@ enum AUDIO_CODEC_TYPE {
/** 7: AAC. */
// AUDIO_CODEC_AAC = 7,
/**
- * 8: AAC LC.
+ * 8: LC-AAC.
*/
AUDIO_CODEC_AACLC = 8,
/**
- * 9: HE AAC.
+ * 9: HE-AAC.
*/
AUDIO_CODEC_HEAAC = 9,
/**
@@ -1411,77 +1465,77 @@ enum AUDIO_CODEC_TYPE {
};
/**
- * Audio encoding types of the audio encoded frame observer.
+ * @brief Audio encoding type.
*/
enum AUDIO_ENCODING_TYPE {
/**
- * AAC encoding format, 16000 Hz sampling rate, bass quality. A file with an audio duration of 10
- * minutes is approximately 1.2 MB after encoding.
+ * 0x010101: AAC encoding format, 16000 Hz sampling rate, bass quality. A file with an audio
+ * duration of 10 minutes is approximately 1.2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_16000_LOW = 0x010101,
/**
- * AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio
+ * 0x010102: AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio
* duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_16000_MEDIUM = 0x010102,
/**
- * AAC encoding format, 32000 Hz sampling rate, bass quality. A file with an audio duration of 10
- * minutes is approximately 1.2 MB after encoding.
+ * 0x010201: AAC encoding format, 32000 Hz sampling rate, bass quality. A file with an audio
+ * duration of 10 minutes is approximately 1.2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_32000_LOW = 0x010201,
/**
- * AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio
+ * 0x010202: AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio
* duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_32000_MEDIUM = 0x010202,
/**
- * AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration
- * of 10 minutes is approximately 3.5 MB after encoding.
+ * 0x010203: AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio
+ * duration of 10 minutes is approximately 3.5 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_32000_HIGH = 0x010203,
/**
- * AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio
+ * 0x010302: AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio
* duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_48000_MEDIUM = 0x010302,
/**
- * AAC encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration
- * of 10 minutes is approximately 3.5 MB after encoding.
+ * 0x010303: AAC encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio
+ * duration of 10 minutes is approximately 3.5 MB after encoding.
*/
AUDIO_ENCODING_TYPE_AAC_48000_HIGH = 0x010303,
/**
- * OPUS encoding format, 16000 Hz sampling rate, bass quality. A file with an audio duration of 10
- * minutes is approximately 2 MB after encoding.
+ * 0x020101: OPUS encoding format, 16000 Hz sampling rate, bass quality. A file with an audio
+ * duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_OPUS_16000_LOW = 0x020101,
/**
- * OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio
- * duration of 10 minutes is approximately 2 MB after encoding.
+ * 0x020102: OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an
+ * audio duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_OPUS_16000_MEDIUM = 0x020102,
/**
- * OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio
- * duration of 10 minutes is approximately 2 MB after encoding.
+ * 0x020302: OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an
+ * audio duration of 10 minutes is approximately 2 MB after encoding.
*/
AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM = 0x020302,
/**
- * OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration
- * of 10 minutes is approximately 3.5 MB after encoding.
+ * 0x020303: OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio
+ * duration of 10 minutes is approximately 3.5 MB after encoding.
*/
AUDIO_ENCODING_TYPE_OPUS_48000_HIGH = 0x020303,
};
/**
- * The adaptation mode of the watermark.
+ * @brief The adaptation mode of the watermark.
*/
enum WATERMARK_FIT_MODE {
/**
- * Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in
- * #WatermarkOptions. The settings in `WatermarkRatio` are invalid.
+ * 0: Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in
+ * `WatermarkOptions`. The settings in `WatermarkRatio` are invalid.
*/
FIT_MODE_COVER_POSITION = 0,
/**
- * Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and
+ * 1: Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and
* `positionInPortraitMode` in `WatermarkOptions` are invalid.
*/
FIT_MODE_USE_IMAGE_RATIO = 1,
@@ -1508,7 +1562,7 @@ struct EncodedAudioFrameAdvancedSettings {
};
/**
- * The definition of the EncodedAudioFrameInfo struct.
+ * @brief Audio information after encoding.
*/
struct EncodedAudioFrameInfo {
EncodedAudioFrameInfo()
@@ -1526,25 +1580,23 @@ struct EncodedAudioFrameInfo {
advancedSettings(rhs.advancedSettings),
captureTimeMs(rhs.captureTimeMs) {}
/**
- * The audio codec: #AUDIO_CODEC_TYPE.
+ * Audio Codec type: `AUDIO_CODEC_TYPE`.
*/
AUDIO_CODEC_TYPE codec;
/**
- * The sample rate (Hz) of the audio frame.
+ * Audio sample rate (Hz).
*/
int sampleRateHz;
/**
- * The number of samples per audio channel.
- *
- * If this value is not set, it is 1024 for AAC, or 960 for OPUS by default.
+ * The number of audio samples per channel.
*/
int samplesPerChannel;
/**
- * The number of audio channels of the audio frame.
+ * The number of audio channels.
*/
int numberOfChannels;
/**
- * The advanced settings of the audio frame.
+ * This function is currently not supported.
*/
EncodedAudioFrameAdvancedSettings advancedSettings;
@@ -1603,78 +1655,86 @@ enum H264PacketizeMode {
};
/**
- * Video stream types.
+ * @brief The type of video streams.
*/
enum VIDEO_STREAM_TYPE {
/**
- * 0: The high-quality video stream, which has the highest resolution and bitrate.
+ * 0: High-quality video stream, that is, a video stream with the highest resolution and bitrate.
*/
VIDEO_STREAM_HIGH = 0,
/**
- * 1: The low-quality video stream, which has the lowest resolution and bitrate.
+ * 1: Low-quality video stream, that is, a video stream with the lowest resolution and bitrate.
*/
VIDEO_STREAM_LOW = 1,
/**
- * 4: The video stream of layer_1, which has a lower resolution and bitrate than VIDEO_STREAM_HIGH.
+ * 4: Video stream layer 1. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_HIGH.
*/
VIDEO_STREAM_LAYER_1 = 4,
/**
- * 5: The video stream of layer_2, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_1.
+ * 5: Video stream layer 2. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_1.
*/
VIDEO_STREAM_LAYER_2 = 5,
/**
- * 6: The video stream of layer_3, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_2.
+ * 6: Video stream layer 3. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_2.
*/
VIDEO_STREAM_LAYER_3 = 6,
/**
- * 7: The video stream of layer_4, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_3.
+ * 7: Video stream layer 4. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_3.
*/
VIDEO_STREAM_LAYER_4 = 7,
/**
- * 8: The video stream of layer_5, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_4.
+ * 8: Video stream layer 5. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_4.
*/
VIDEO_STREAM_LAYER_5 = 8,
/**
- * 9: The video stream of layer_6, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_5.
+ * 9: Video stream layer 6. The resolution of this quality level is only lower than that of
+ * VIDEO_STREAM_LAYER_5.
*/
VIDEO_STREAM_LAYER_6 = 9,
};
+/**
+ * @brief Video subscription options.
+ */
struct VideoSubscriptionOptions {
/**
- * The type of the video stream to subscribe to.
- *
- * The default value is `VIDEO_STREAM_HIGH`, which means the high-quality
- * video stream.
+ * The video stream type that you want to subscribe to. The default value is VIDEO_STREAM_HIGH,
+ * indicating that the high-quality video streams are subscribed. See `VIDEO_STREAM_TYPE`.
*/
Optional type;
/**
- * Whether to subscribe to encoded video data only:
- * - `true`: Subscribe to encoded video data only.
- * - `false`: (Default) Subscribe to decoded video data.
+ * Whether to subscribe to encoded video frames only:
+ * - `true`: Subscribe to the encoded video data (structured data) only; the SDK does not decode or
+ * render raw video data.
+ * - `false`: (Default) Subscribe to both raw video data and encoded video data.
*/
Optional encodedFrameOnly;
VideoSubscriptionOptions() {}
};
-/** The maximum length of the user account.
+/**
+ * @brief The maximum length of the user account.
*/
enum MAX_USER_ACCOUNT_LENGTH_TYPE {
- /** The maximum length of the user account is 256 bytes.
+ /**
+ * The maximum length of the user account is 256 bytes.
*/
MAX_USER_ACCOUNT_LENGTH = 256
};
/**
- * The definition of the EncodedVideoFrameInfo struct, which contains the information of the
- * external encoded video frame.
+ * @brief Information about externally encoded video frames.
*/
struct EncodedVideoFrameInfo {
EncodedVideoFrameInfo()
- : uid(0),
- codecType(VIDEO_CODEC_H264),
+ : codecType(VIDEO_CODEC_H264),
width(0),
height(0),
framesPerSecond(0),
@@ -1687,8 +1747,7 @@ struct EncodedVideoFrameInfo {
presentationMs(-1) {}
EncodedVideoFrameInfo(const EncodedVideoFrameInfo& rhs)
- : uid(rhs.uid),
- codecType(rhs.codecType),
+ : codecType(rhs.codecType),
width(rhs.width),
height(rhs.height),
framesPerSecond(rhs.framesPerSecond),
@@ -1702,7 +1761,6 @@ struct EncodedVideoFrameInfo {
EncodedVideoFrameInfo& operator=(const EncodedVideoFrameInfo& rhs) {
if (this == &rhs) return *this;
- uid = rhs.uid;
codecType = rhs.codecType;
width = rhs.width;
height = rhs.height;
@@ -1718,51 +1776,47 @@ struct EncodedVideoFrameInfo {
}
/**
- * ID of the user that pushes the the external encoded video frame..
- */
- uid_t uid;
- /**
- * The codec type of the local video stream. See #VIDEO_CODEC_TYPE. The default value is
- * `VIDEO_CODEC_H265 (3)`.
+ * The codec type of the local video stream. See `VIDEO_CODEC_TYPE`. The default value is
+ * `VIDEO_CODEC_H264 (2)`.
*/
VIDEO_CODEC_TYPE codecType;
/**
- * The width (px) of the video frame.
+ * Width (pixel) of the video frame.
*/
int width;
/**
- * The height (px) of the video frame.
+ * Height (pixel) of the video frame.
*/
int height;
/**
* The number of video frames per second.
- * When this parameter is not 0, you can use it to calculate the Unix timestamp of the external
+ * When this parameter is not `0`, you can use it to calculate the Unix timestamp of externally
* encoded video frames.
*/
int framesPerSecond;
/**
- * The video frame type: #VIDEO_FRAME_TYPE.
+ * The video frame type. See `VIDEO_FRAME_TYPE`.
*/
VIDEO_FRAME_TYPE frameType;
/**
- * The rotation information of the video frame: #VIDEO_ORIENTATION.
+ * The rotation information of the video frame. See `VIDEO_ORIENTATION`.
*/
VIDEO_ORIENTATION rotation;
/**
- * The track ID of the video frame.
+ * Reserved for future use.
*/
int trackId; // This can be reserved for multiple video tracks, we need to create different ssrc
// and additional payload for later implementation.
/**
- * This is a input parameter which means the timestamp for capturing the video.
+ * The Unix timestamp (ms) for capturing the external encoded video frames.
*/
int64_t captureTimeMs;
/**
- * The timestamp for decoding the video.
+ * The Unix timestamp (ms) for decoding the external encoded video frames.
*/
int64_t decodeTimeMs;
/**
- * The stream type of video frame.
+ * The type of video streams. See `VIDEO_STREAM_TYPE`.
*/
VIDEO_STREAM_TYPE streamType;
@@ -1771,62 +1825,67 @@ struct EncodedVideoFrameInfo {
};
/**
- * Video compression preference.
+ * @brief Compression preference for video encoding.
*/
enum COMPRESSION_PREFERENCE {
/**
- * (Default) SDK uses compression preference according to setVideoScenario API settings, real-time network state and other relevant data information.
- * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then PREFER_QUALITY is used. If not, then PREFER_LOW_LATENCY is used.
- * Also if network state has changed, SDK may change this parameter between PREFER_QUALITY and PREFER_LOW_LATENCY automatically to get the best QOE.
- * We recommend using this option.
- */
+ * -1: (Default) Automatic mode. The SDK will automatically select PREFER_LOW_LATENCY or
+ * PREFER_QUALITY based on the video scenario you set to achieve the best user experience.
+ */
PREFER_COMPRESSION_AUTO = -1,
/**
- * Prefer low latency, usually used in real-time communication where low latency is the number one priority.
- */
+ * 0: Low latency preference. The SDK compresses video frames to reduce latency. This preference is
+ * suitable for scenarios where smoothness is prioritized and reduced video quality is acceptable.
+ */
PREFER_LOW_LATENCY = 0,
/**
- * Prefer quality in sacrifice of a degree of latency, usually around 30ms ~ 150ms, depends target fps
- */
+ * 1: High quality preference. The SDK compresses video frames while maintaining video quality. This
+ * preference is suitable for scenarios where video quality is prioritized.
+ */
PREFER_QUALITY = 1,
};
/**
- * The video encoder type preference.
+ * @brief Video encoder preference.
*/
enum ENCODING_PREFERENCE {
/**
- *Default .
+ * -1: Adaptive preference. The SDK automatically selects the optimal encoding type for encoding
+ * based on factors such as platform and device type.
*/
PREFER_AUTO = -1,
/**
- * Software encoding.
+ * 0: Software coding preference. The SDK prefers software encoders for video encoding.
*/
PREFER_SOFTWARE = 0,
/**
- * Hardware encoding
+ * 1: Hardware encoding preference. The SDK prefers a hardware encoder for video encoding. When the
+ * device does not support hardware encoding, the SDK automatically uses software encoding and
+ * reports the currently used video encoder type through `hwEncoderAccelerating` in the
+ * `onLocalVideoStats` callback.
*/
PREFER_HARDWARE = 1,
};
/**
- * The definition of the AdvanceOptions struct.
+ * @brief Advanced options for video encoding.
*/
struct AdvanceOptions {
/**
- * The video encoder type preference..
+ * Video encoder preference. See `ENCODING_PREFERENCE`.
*/
ENCODING_PREFERENCE encodingPreference;
/**
- * Video compression preference.
+ * Compression preference for video encoding. See `COMPRESSION_PREFERENCE`.
*/
COMPRESSION_PREFERENCE compressionPreference;
/**
- * Whether to encode and send the alpha data to the remote when alpha data is present.
- * The default value is false.
- */
+ * Whether to encode and send the Alpha data present in the video frame to the remote end:
+ * - `true`: Encode and send Alpha data.
+ * - `false`: (Default) Do not encode and send Alpha data.
+ */
bool encodeAlpha;
AdvanceOptions() : encodingPreference(PREFER_AUTO),
@@ -1848,19 +1907,22 @@ struct AdvanceOptions {
};
/**
- * Video mirror mode types.
+ * @brief Video mirror mode.
*/
enum VIDEO_MIRROR_MODE_TYPE {
/**
- * 0: The mirror mode determined by the SDK.
+ * 0: The SDK determines the mirror mode.
+ * - For the mirror mode of the local video view: If you use a front camera, the SDK enables the
+ * mirror mode by default; if you use a rear camera, the SDK disables the mirror mode by default.
+ * - For the remote user: The mirror mode is disabled by default.
*/
VIDEO_MIRROR_MODE_AUTO = 0,
/**
- * 1: Enable the mirror mode.
+ * 1: Enable mirror mode.
*/
VIDEO_MIRROR_MODE_ENABLED = 1,
/**
- * 2: Disable the mirror mode.
+ * 2: Disable mirror mode.
*/
VIDEO_MIRROR_MODE_DISABLED = 2,
};
@@ -1901,26 +1963,49 @@ enum HDR_CAPABILITY {
HDR_CAPABILITY_SUPPORTED = 1,
};
-/** Supported codec type bit mask. */
+/**
+ * @brief The bit mask of the codec type.
+ */
enum CODEC_CAP_MASK {
- /** 0: No codec support. */
+ /**
+ * (0): The device does not support encoding or decoding.
+ */
CODEC_CAP_MASK_NONE = 0,
- /** bit 1: Hardware decoder support flag. */
+ /**
+ * (1 << 0): The device supports hardware decoding.
+ */
CODEC_CAP_MASK_HW_DEC = 1 << 0,
- /** bit 2: Hardware encoder support flag. */
+ /**
+ * (1 << 1): The device supports hardware encoding.
+ */
CODEC_CAP_MASK_HW_ENC = 1 << 1,
- /** bit 3: Software decoder support flag. */
+ /**
+ * (1 << 2): The device supports software decoding.
+ */
CODEC_CAP_MASK_SW_DEC = 1 << 2,
- /** bit 4: Software encoder support flag. */
+ /**
+ * (1 << 3): The device supports software encoding.
+ */
CODEC_CAP_MASK_SW_ENC = 1 << 3,
};
+/**
+ * @brief The level of the codec capability.
+ */
struct CodecCapLevels {
+ /**
+ * Hardware decoding capability level, which represents the device's ability to perform hardware
+ * decoding on videos of different quality. See `VIDEO_CODEC_CAPABILITY_LEVEL`.
+ */
VIDEO_CODEC_CAPABILITY_LEVEL hwDecodingLevel;
+ /**
+ * Software decoding capability level, which represents the device's ability to perform software
+ * decoding on videos of different quality. See `VIDEO_CODEC_CAPABILITY_LEVEL`.
+ */
VIDEO_CODEC_CAPABILITY_LEVEL swDecodingLevel;
CodecCapLevels()
@@ -1928,138 +2013,103 @@ struct CodecCapLevels {
swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {}
};
-/** The codec support information. */
+/**
+ * @brief The codec capability of the SDK.
+ */
struct CodecCapInfo {
- /** The codec type: #VIDEO_CODEC_TYPE. */
+ /**
+ * The video codec types. See `VIDEO_CODEC_TYPE`.
+ */
VIDEO_CODEC_TYPE codecType;
- /** The codec support flag. */
+ /**
+ * Bit mask of the codec types in SDK. See `CODEC_CAP_MASK`.
+ */
int codecCapMask;
- /** The codec capability level, estimated based on the device hardware.*/
+ /**
+ * Codec capability of the SDK. See `CodecCapLevels`.
+ */
CodecCapLevels codecLevels;
CodecCapInfo() : codecType(VIDEO_CODEC_NONE), codecCapMask(0) {}
};
-/** FocalLengthInfo contains the IDs of the front and rear cameras, along with the wide-angle types.
+/**
+ * @brief Focal length information supported by the camera, including the camera direction and focal
+ * length type.
+ *
+ * @note This struct applies to Android and iOS only.
+ *
*/
struct FocalLengthInfo {
- /** The camera direction. */
+ /**
+ * The camera direction. See `CAMERA_DIRECTION`.
+ */
int cameraDirection;
- /** Camera focal segment type. */
+ /**
+ * The focal length type. See `CAMERA_FOCAL_LENGTH_TYPE`.
+ */
CAMERA_FOCAL_LENGTH_TYPE focalLengthType;
};
/**
- * The definition of the VideoEncoderConfiguration struct.
+ * @brief Video encoder configurations.
*/
struct VideoEncoderConfiguration {
/**
- * The video encoder code type: #VIDEO_CODEC_TYPE.
+ * The codec type of the local video stream. See `VIDEO_CODEC_TYPE`.
*/
VIDEO_CODEC_TYPE codecType;
/**
- * The video dimension: VideoDimensions.
+ * The dimensions of the encoded video (px). See `VideoDimensions`. This parameter measures the
+ * video encoding quality in the format of length × width. The default value is 960 × 540. You can
+ * set a custom value.
*/
VideoDimensions dimensions;
/**
- * The frame rate of the video. You can set it manually, or choose one from #FRAME_RATE.
+ * The frame rate (fps) of the encoding video frame. The default value is 15. See `FRAME_RATE`.
*/
int frameRate;
/**
- * The bitrate (Kbps) of the video.
- *
- * Refer to the **Video Bitrate Table** below and set your bitrate. If you set a bitrate beyond
- * the proper range, the SDK automatically adjusts it to a value within the range. You can also
- * choose from the following options:
- *
- * - #STANDARD_BITRATE: (Recommended) Standard bitrate mode. In this mode, the bitrates differ
- * between the Live Broadcast and Communication profiles:
- * - In the Communication profile, the video bitrate is the same as the base bitrate.
- * - In the Live Broadcast profile, the video bitrate is twice the base bitrate.
- * - #COMPATIBLE_BITRATE: Compatible bitrate mode. The compatible bitrate mode. In this mode, the
- * bitrate stays the same regardless of the profile. If you choose this mode for the Live
- * Broadcast profile, the video frame rate may be lower than the set value.
- *
- * Agora uses different video codecs for different profiles to optimize the user experience. For
- * example, the communication profile prioritizes the smoothness while the live-broadcast profile
- * prioritizes the video quality (a higher bitrate). Therefore, We recommend setting this
- * parameter as #STANDARD_BITRATE.
- *
- * | Resolution | Frame Rate (fps) | Maximum Bitrate (Kbps) |
- * |------------------------|------------------|------------------------|
- * | 120 * 120 | 15 | 150 |
- * | 120 * 160 | 15 | 186 |
- * | 180 * 180 | 15 | 270 |
- * | 180 * 240 | 15 | 336 |
- * | 180 * 320 | 15 | 420 |
- * | 240 * 240 | 15 | 420 |
- * | 240 * 320 | 15 | 522 |
- * | 240 * 424 | 15 | 648 |
- * | 360 * 360 | 15 | 774 |
- * | 360 * 360 | 30 | 1162 |
- * | 360 * 480 | 15 | 966 |
- * | 360 * 480 | 30 | 1407 |
- * | 360 * 640 | 15 | 1200 |
- * | 360 * 640 | 30 | 1696 |
- * | 480 * 480 | 15 | 1200 |
- * | 480 * 480 | 30 | 1696 |
- * | 480 * 640 | 10 | 1164 |
- * | 480 * 640 | 15 | 1445 |
- * | 480 * 640 | 30 | 2041 |
- * | 480 * 848 | 15 | 1735 |
- * | 480 * 848 | 30 | 2445 |
- * | 540 * 960 | 15 | 2029 |
- * | 540 * 960 | 30 | 2852 |
- * | 720 * 960 | 15 | 2443 |
- * | 720 * 960 | 30 | 3434 |
- * | 720 * 1280 | 15 | 2938 |
- * | 720 * 1280 | 30 | 4113 |
- * | 1080 * 1920 | 15 | 4914 |
- * | 1080 * 1920 | 30 | 6819 |
- * | 1080 * 1920 | 60 | 9380 |
- * | 2560 * 1440 | 15 | 7040 |
- * | 2560 * 1440 | 30 | 9700 |
- * | 2560 * 1440 | 60 | 13230 |
- * | 3840 * 2160 | 15 | 11550 |
- * | 3840 * 2160 | 30 | 15726 |
- * | 3840 * 2160 | 60 | 21133 |
+ * The encoding bitrate (Kbps) of the video. This parameter does not need to be set; keeping the
+ * default value `STANDARD_BITRATE` is sufficient. The SDK automatically matches the most suitable
+ * bitrate based on the video resolution and frame rate you have set. For the correspondence between
+ * video resolution and frame rate, see `Video profile`.
+ * - STANDARD_BITRATE (0): (Recommended) Standard bitrate mode.
+ * - COMPATIBLE_BITRATE (-1): Adaptive bitrate mode. In general, Agora suggests that you do not use
+ * this value.
*/
int bitrate;
/**
- * The minimum encoding bitrate (Kbps).
- *
- * The Agora SDK automatically adjusts the encoding bitrate to adapt to the
- * network conditions.
- *
- * Using a value greater than the default value forces the video encoder to
- * output high-quality images but may cause more packet loss and hence
- * sacrifice the smoothness of the video transmission. That said, unless you
- * have special requirements for image quality, Agora does not recommend
- * changing this value.
- *
- * @note
- * This parameter applies to the live-broadcast profile only.
+ * The minimum encoding bitrate (Kbps) of the video.
+ * The SDK automatically adjusts the encoding bitrate to adapt to the network conditions. Using a
+ * value greater than the default value forces the video encoder to output high-quality images but
+ * may cause more packet loss and sacrifice the smoothness of the video transmission. Unless you
+ * have special requirements for image quality, Agora does not recommend changing this value.
+ * @note This parameter only applies to the interactive streaming profile.
*/
int minBitrate;
/**
- * The video orientation mode: #ORIENTATION_MODE.
+ * The orientation mode of the encoded video. See `ORIENTATION_MODE`.
*/
ORIENTATION_MODE orientationMode;
/**
- * The video degradation preference under limited bandwidth: #DEGRADATION_PREFERENCE.
+ * Video degradation preference under limited bandwidth. See `DEGRADATION_PREFERENCE`.
+ * @note When this parameter is set to MAINTAIN_FRAMERATE (1) or MAINTAIN_BALANCED (2),
+ * `orientationMode` needs to be set to ORIENTATION_MODE_ADAPTIVE (0) at the same time, otherwise
+ * the setting will not take effect.
*/
DEGRADATION_PREFERENCE degradationPreference;
/**
- * The mirror mode is disabled by default
- * If mirror_type is set to VIDEO_MIRROR_MODE_ENABLED, then the video frame would be mirrored
- * before encoding.
+ * Sets the mirror mode of the published local video stream. It only affects the video that the
+ * remote user sees. See `VIDEO_MIRROR_MODE_TYPE`.
+ * @note By default, the video is not mirrored.
*/
VIDEO_MIRROR_MODE_TYPE mirrorMode;
/**
- * The advanced options for the video encoder configuration. See AdvanceOptions.
+ * Advanced options for video encoding. See `AdvanceOptions`.
*/
AdvanceOptions advanceOptions;
@@ -2120,26 +2170,34 @@ struct VideoEncoderConfiguration {
};
/**
- * The configurations for the data stream.
+ * @brief The configurations for the data stream.
+ *
+ * @details
+ * The following table shows the SDK behaviors under different parameter settings:
+ * | `syncWithAudio` | `ordered` | SDK behaviors |
+ * | --------------- | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+ * | `false` | `false` | The SDK triggers the `onStreamMessage` callback immediately after the receiver receives a data packet. |
+ * | `true` | `false` | If the data packet delay is within the audio delay, the SDK triggers the `onStreamMessage` callback when the synchronized audio packet is played out. If the data packet delay exceeds the audio delay, the SDK triggers the `onStreamMessage` callback as soon as the data packet is received. |
+ * | `false` | `true` | If the delay of a data packet is less than five seconds, the SDK corrects the order of the data packet. If the delay of a data packet exceeds five seconds, the SDK discards the data packet. |
+ * | `true` | `true` | If the delay of the data packet is within the range of the audio delay, the SDK corrects the order of the data packet. If the delay of a data packet exceeds the audio delay, the SDK discards this data packet. |
+ *
*/
struct DataStreamConfig {
/**
* Whether to synchronize the data packet with the published audio packet.
- * - `true`: Synchronize the data packet with the audio packet.
- * - `false`: Do not synchronize the data packet with the audio packet.
- *
+ * - `true`: Synchronize the data packet with the audio packet. This setting is suitable for special
+ * scenarios such as lyrics synchronization.
+ * - `false`: Do not synchronize the data packet with the audio packet. This setting is suitable for
+ * scenarios where data packets need to arrive at the receiving end immediately.
* When you set the data packet to synchronize with the audio, then if the data packet delay is
* within the audio delay, the SDK triggers the `onStreamMessage` callback when the synchronized
- * audio packet is played out. Do not set this parameter as true if you need the receiver to
- * receive the data packet immediately. Agora recommends that you set this parameter to `true`
- * only when you need to implement specific functions, for example lyric synchronization.
+ * audio packet is played out.
*/
bool syncWithAudio;
/**
* Whether the SDK guarantees that the receiver receives the data in the sent order.
* - `true`: Guarantee that the receiver receives the data in the sent order.
* - `false`: Do not guarantee that the receiver receives the data in the sent order.
- *
* Do not set this parameter as `true` if you need the receiver to receive the data packet
* immediately.
*/
@@ -2147,38 +2205,42 @@ struct DataStreamConfig {
};
/**
- * The definition of SIMULCAST_STREAM_MODE
+ * @brief The mode in which the video stream is sent.
*/
enum SIMULCAST_STREAM_MODE {
- /*
- * disable simulcast stream until receive request for enable simulcast stream by other broadcaster
+ /**
+ * -1: By default, do not send the low-quality video stream until a subscription request for the
+ * low-quality video stream is received from the receiving end, then automatically start sending
+ * low-quality video stream.
*/
AUTO_SIMULCAST_STREAM = -1,
- /*
- * disable simulcast stream
+ /**
+ * 0: Never send low-quality video stream.
*/
DISABLE_SIMULCAST_STREAM = 0,
- /*
- * always enable simulcast stream
+ /**
+ * 1: Always send low-quality video stream.
*/
ENABLE_SIMULCAST_STREAM = 1,
};
/**
- * The configuration of the low-quality video stream.
+ * @brief The configuration of the low-quality video stream.
*/
struct SimulcastStreamConfig {
/**
- * The video frame dimension: VideoDimensions. The default value is 160 × 120.
+ * The video dimension. See `VideoDimensions`. The default value is 50% of the high-quality video
+ * stream.
*/
VideoDimensions dimensions;
/**
- * The video bitrate (Kbps), represented by an instantaneous value. The default value of the log
- * level is 5.
+ * Video bitrate (Kbps). The default value is -1. This parameter does not need to be set. The SDK
+ * automatically matches the most suitable bitrate based on the video resolution and frame rate you
+ * set.
*/
int kBitrate;
/**
- * The capture frame rate (fps) of the local video. The default value is 5.
+ * The frame rate (fps) of the local video. The default value is 5.
*/
int framerate;
SimulcastStreamConfig() : dimensions(160, 120), kBitrate(65), framerate(5) {}
@@ -2189,97 +2251,93 @@ struct SimulcastStreamConfig {
};
/**
- * The configuration of the multi-layer video stream.
+ * @brief Configure video streams of different quality levels.
+ *
* @since v4.6.0
*/
struct SimulcastConfig {
/**
- * The index of multi-layer video stream
+ * @brief Index of video streams of different quality levels.
*/
enum StreamLayerIndex {
/**
- * 0: The video stream of layer_1, which has a lower resolution and bitrate than STREAM_HIGH.
+ * (0): Video stream layer_1, with lower resolution and bitrate than VIDEO_STREAM_HIGH.
*/
STREAM_LAYER_1 = 0,
/**
- * 1: The video stream of layer_2, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_1.
+ * (1): Video stream layer_2, with lower resolution and bitrate than VIDEO_STREAM_LAYER_1.
*/
STREAM_LAYER_2 = 1,
/**
- * 2: The video stream of layer_3, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_2.
+ * (2): Video stream layer_3, with lower resolution and bitrate than VIDEO_STREAM_LAYER_2.
*/
STREAM_LAYER_3 = 2,
/**
- * 3: The video stream of layer_4, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_3.
+ * (3): Video stream layer_4, with lower resolution and bitrate than VIDEO_STREAM_LAYER_3.
*/
STREAM_LAYER_4 = 3,
/**
- * 4: The video stream of layer_5, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_4.
+ * (4): Video stream layer_5, with lower resolution and bitrate than VIDEO_STREAM_LAYER_4.
*/
STREAM_LAYER_5 = 4,
/**
- * 5: The video stream of layer_6, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_5.
+ * (5): Video stream layer_6, with lower resolution and bitrate than VIDEO_STREAM_LAYER_5.
*/
STREAM_LAYER_6 = 5,
/**
- * 6: The low-quality video stream, which has the lowest resolution and bitrate.
+ * (6): Low-quality video stream, with the lowest resolution and bitrate.
*/
STREAM_LOW = 6,
/**
- * 7: Max count of video stream layers
+ * (7): Maximum number of video stream layers.
*/
STREAM_LAYER_COUNT_MAX = 7
};
/**
- * The configuration of a specific layer in the multi-layer video stream.
+ * @brief Configures the parameters of a specific layer in multi-quality video streams.
+ *
+ * @details
+ * Used to configure the resolution, frame rate, and enable status of a specific layer in
+ * multi-quality video streams.
+ *
*/
struct StreamLayerConfig {
/**
- * The video frame dimension. The default value is 0.
+ * Video frame size. Default is 0. See `VideoDimensions`.
*/
VideoDimensions dimensions;
/**
- * The capture frame rate (fps) of the local video. The default value is 0.
+ * Frame rate (fps) of the local video capture. Default is 0.
*/
int framerate;
/**
- * Whether to enable the corresponding layer of video stream. The default value is false.
- * - true: Enable the corresponding layer of video stream
- * - false: (Default) Disable the corresponding layer of video stream
+ * Whether to enable the video stream for the corresponding layer. Default is false.
+ * - `true`: Enables the video stream for the corresponding layer.
+ * - `false`: (Default) Disables the video stream for the corresponding layer.
*/
bool enable;
StreamLayerConfig() : dimensions(0, 0), framerate(0), enable(false) {}
};
/**
- * The array of StreamLayerConfig, which contains STREAM_LAYER_COUNT_MAX layers of video stream at most.
+ * Configurations for multi-layer streaming: `StreamLayerConfig`.
*/
StreamLayerConfig configs[STREAM_LAYER_COUNT_MAX];
/**
- * Whether to enable fallback publishing. When set to true, it allows dynamic disabling of multiple streams when the performance or network of the publishing end is poor. The order of disabling is layer1->layer6.
- * - true: Enable fallback publishing.
- * - false: (Default) Disable fallback publishing.
- *
- * @details The system guarantees that even under poor network conditions or limited
- * device capabilities, at least the major stream and lowest-resolution minor stream
- * will be maintained for basic video continuity.
- *
+ * Whether to enable fallback publishing:
+ * - `true`: Enable fallback publishing. When the device performance or network is poor at the
+ * publishing end, the SDK will dynamically disable multiple video streams of different quality
+ * levels, from layer1 to layer6. At least the video streams of the highest and lowest quality are
+ * retained to maintain basic video continuity.
+ * - `false`: (Default) Disable fallback publishing.
*/
bool publish_fallback_enable;
- /**
- * Whether to enable on-demand publishing. When set to true, a simulcast layer will only be published
- * when there are subscribers requesting that layer.
- * - true: (Default) Enable on-demand publishing.
- * - false: Disable on-demand publishing. All enabled simulcast layers will be published regardless
- * of subscription status.
- */
- bool publish_on_demand;
- SimulcastConfig(): publish_fallback_enable(false), publish_on_demand(true) {}
+ SimulcastConfig(): publish_fallback_enable(false) {}
};
/**
- * The location of the target area relative to the screen or window. If you do not set this parameter,
- * the SDK selects the whole screen or window.
+ * @brief The location of the target area relative to the screen or window. If you do not set this
+ * parameter, the SDK selects the whole screen or window.
*/
struct Rectangle {
/**
@@ -2291,11 +2349,11 @@ struct Rectangle {
*/
int y;
/**
- * The width of the region.
+ * The width of the target area.
*/
int width;
/**
- * The height of the region.
+ * The height of the target area.
*/
int height;
@@ -2304,26 +2362,28 @@ struct Rectangle {
};
/**
- * The position and size of the watermark on the screen.
+ * @brief The position and size of the watermark on the screen.
*
+ * @details
* The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and
* `widthRatio`:
- * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark, which
- * determines the distance from the upper left corner of the watermark to the upper left corner of
- * the screen. The `widthRatio` determines the width of the watermark.
+ * - ( `xRatio`, `yRatio` ) refers to the coordinates of the upper left corner of the watermark,
+ * which determines the distance from the upper left corner of the watermark to the upper left
+ * corner of the screen.
+ * - The `widthRatio` determines the width of the watermark.
+ *
*/
struct WatermarkRatio {
/**
* The x-coordinate of the upper left corner of the watermark. The horizontal position relative to
- * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is
- * the upper left corner of the watermark. The value range is [0.0,1.0], and the default value is
- * 0.
+ * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is the
+ * upper left corner of the watermark. The value range is [0.0,1.0], and the default value is 0.
*/
float xRatio;
/**
- * The y-coordinate of the upper left corner of the watermark. The vertical position relative to
- * the origin, where the upper left corner of the screen is the origin, and the y-coordinate is
- * the upper left corner of the screen. The value range is [0.0,1.0], and the default value is 0.
+ * The y-coordinate of the upper left corner of the watermark. The vertical position relative to the
+ * origin, where the upper left corner of the screen is the origin, and the y-coordinate is the
+ * upper left corner of the screen. The value range is [0.0,1.0], and the default value is 0.
*/
float yRatio;
/**
@@ -2339,36 +2399,40 @@ struct WatermarkRatio {
};
/**
- * Configurations of the watermark image.
+ * @brief Watermark image configurations.
+ *
+ * @details
+ * Configuration options for setting the watermark image to be added.
+ *
*/
struct WatermarkOptions {
/**
- * Whether or not the watermark image is visible in the local video preview:
- * - true: (Default) The watermark image is visible in preview.
- * - false: The watermark image is not visible in preview.
+ * Whether the watermark is visible in the local preview view:
+ * - `true`: (Default) The watermark is visible in the local preview view.
+ * - `false`: The watermark is not visible in the local preview view.
*/
bool visibleInPreview;
/**
- * When the adaptation mode of the watermark is `FIT_MODE_COVER_POSITION`, it is used to set the
- * area of the watermark image in landscape mode. See #FIT_MODE_COVER_POSITION for details.
+ * When the adaptation mode of the watermark is FIT_MODE_COVER_POSITION, it is used to set the area
+ * of the watermark image in landscape mode. See `Rectangle`.
*/
Rectangle positionInLandscapeMode;
/**
- * When the adaptation mode of the watermark is `FIT_MODE_COVER_POSITION`, it is used to set the
- * area of the watermark image in portrait mode. See #FIT_MODE_COVER_POSITION for details.
+ * When the adaptation mode of the watermark is FIT_MODE_COVER_POSITION, it is used to set the area
+ * of the watermark image in portrait mode. See `Rectangle`.
*/
Rectangle positionInPortraitMode;
/**
- * When the watermark adaptation mode is `FIT_MODE_USE_IMAGE_RATIO`, this parameter is used to set
- * the watermark coordinates. See WatermarkRatio for details.
+ * When the watermark adaptation mode is FIT_MODE_USE_IMAGE_RATIO, this parameter is used to set the
+ * watermark coordinates. See `WatermarkRatio`.
*/
WatermarkRatio watermarkRatio;
/**
- * The adaptation mode of the watermark. See #WATERMARK_FIT_MODE for details.
+ * The adaptation mode of the watermark. See `WATERMARK_FIT_MODE`.
*/
WATERMARK_FIT_MODE mode;
/**
- * The z-order of the watermark image. The default value is 0.
+ * Layer order of the watermark image. The default value is 0.
*/
int zOrder;
@@ -2381,17 +2445,17 @@ struct WatermarkOptions {
};
/**
- * @brief The source type of the watermark.
- *
+ * @brief Type of watermark source.
+ *
* @since 4.6.0
*/
enum WATERMARK_SOURCE_TYPE {
/**
- * 0: The watermark source is an image.
+ * (0): The watermark source is an image.
*/
IMAGE = 0,
/**
- * 1: The watermark source is a buffer.
+ * (1): The watermark source is a buffer.
*/
BUFFER = 1,
/**
@@ -2476,34 +2540,38 @@ struct WatermarkLiteral {
};
/**
- * @brief Defines the configuration for a buffer watermark.
+ * @brief Configures the format, size, and pixel buffer of the watermark image.
*
* @since 4.6.0
+ *
+ * @details
+ * Defines the buffer data structure of the watermark image, including image width, height, format,
+ * length, and image data buffer.
+ *
*/
struct WatermarkBuffer {
/**
- * The width of the watermark buffer.
+ * Width of the watermark buffer, in pixels.
*/
int width;
/**
- * The height of the watermark buffer.
+ * Height of the watermark buffer, in pixels.
*/
int height;
/**
- * The length of the watermark buffer.
+ * Length of the watermark buffer, in bytes.
*/
int length;
/**
- * The format of the watermark buffer. The default value is #VIDEO_PIXEL_I420.
- * Currently supports: #VIDEO_PIXEL_I420, #VIDEO_PIXEL_RGBA, #VIDEO_PIXEL_BGRA, and #VIDEO_PIXEL_NV21.
+ * Format of the watermark buffer. See `VIDEO_PIXEL_FORMAT`. Default is VIDEO_PIXEL_I420. Currently
+ * supported formats include: VIDEO_PIXEL_I420, VIDEO_PIXEL_RGBA, VIDEO_PIXEL_BGRA, and
+ * VIDEO_PIXEL_NV21.
*/
media::base::VIDEO_PIXEL_FORMAT format;
/**
- * The buffer data of the watermark.
- *
- * @note If used asynchronously, copy the buffer to memory that will not be released.
+ * Buffer data of the watermark.
*/
const uint8_t* buffer;
@@ -2511,22 +2579,22 @@ struct WatermarkBuffer {
};
/**
- * @brief Defines the configuration for a watermark.
+ * @brief Used to configure watermark-related information.
*
* @since 4.6.0
*/
struct WatermarkConfig {
/**
- * The unique identifier of the watermark. It is recommended to use a UUID.
+ * Unique identifier for the watermark. It is recommended to use a UUID.
*/
const char* id;
/**
- * The watermark source type. See #WATERMARK_SOURCE_TYPE for details.
+ * Type of the watermark. See `WATERMARK_SOURCE_TYPE`.
*/
WATERMARK_SOURCE_TYPE type;
union {
/**
- * The watermark buffer. See WatermarkBuffer.
+ * Buffer of the watermark. See `WatermarkBuffer`.
*/
WatermarkBuffer buffer;
/**
@@ -2542,15 +2610,13 @@ struct WatermarkConfig {
*/
WatermarkLiteral literal;
/**
- * The URL of the image file for the watermark. The default value is NULL.
- *
- * @note If used asynchronously, copy the URL to memory that will not be released.
+ * URL of the watermark image file. Default value is NULL.
*/
const char* imageUrl;
};
/**
- * The options of the watermark. See WatermarkOptions.
+ * Options for the watermark. See `WatermarkOptions`.
*/
WatermarkOptions options;
@@ -2558,7 +2624,7 @@ struct WatermarkConfig {
};
/**
- * @brief Defines how data is transmitted across multiple network paths.
+ * @brief The transmission mode of data over multiple network paths.
*
* @since 4.6.0
*/
@@ -2568,51 +2634,52 @@ enum MultipathMode {
*/
Duplicate= 0,
/**
- * Dynamic mode, the data is transmitted only over the path that the internal algorithm determines to be optimal for transmission quality.
- */
+ * (1): Dynamic transmission mode. The SDK dynamically selects the optimal path for data
+ * transmission based on the current network conditions to improve transmission performance.
+ */
Dynamic
};
/**
- * @brief Defines the types of network paths used in multipath transmission.
+ * @brief Network path types used in multipath transmission.
*
* @since 4.6.0
- */
+ */
enum MultipathType {
/**
- * The local area network (LAN) path.
+ * (0): Local Area Network (LAN) path.
*/
LAN = 0,
/**
- * The Wi-Fi path.
+ * (1): Wi-Fi path.
*/
WIFI,
/**
- * The mobile network path.
+ * (2): Mobile network path.
*/
Mobile,
/**
- * An unknown or unspecified network path.
+ * (99): Unknown or unspecified network path.
*/
Unknown = 99
};
/**
- * @brief Contains statistics for a specific network path in multipath transmission.
+ * @brief Statistical information about a specific network path.
*
* @since 4.6.0
*/
struct PathStats {
/**
- * The type of the path.
+ * Types of network path. See `MultipathType`.
*/
MultipathType type;
/**
- * The transmission bitrate of the path.
+ * The transmission bitrate of the path in Kbps.
*/
int txKBitRate;
/**
- * The receiving bitrate of the path.
+ * The receiving bitrate of the path in Kbps.
*/
int rxKBitRate;
PathStats() : type(Unknown), txKBitRate(0), rxKBitRate(0) {}
@@ -2620,41 +2687,41 @@ struct PathStats {
};
/**
- * @brief Aggregates statistics for all network paths used in multipath transmission.
+ * @brief Aggregates statistics of each network path in multipath transmission.
*
* @since 4.6.0
*/
struct MultipathStats {
/**
- * The number of bytes transmitted over the LAN path.
+ * The total number of bytes sent over the LAN path.
*/
uint32_t lanTxBytes;
/**
- * The number of bytes received over the LAN path.
+ * The total number of bytes received over the LAN path.
*/
uint32_t lanRxBytes;
/**
- * The number of bytes transmitted over the Wi-Fi path.
+ * The total number of bytes sent over the Wi-Fi path.
*/
uint32_t wifiTxBytes;
/**
- * The number of bytes received over the Wi-Fi path.
+ * The total number of bytes received over the Wi-Fi path.
*/
uint32_t wifiRxBytes;
/**
- * The number of bytes transmitted over the mobile network path.
+ * The total number of bytes sent over the mobile network path.
*/
uint32_t mobileTxBytes;
/**
- * The number of bytes received over the mobile network path.
+ * The total number of bytes received over the mobile network path.
*/
uint32_t mobileRxBytes;
/**
- * The number of active paths.
+ * The number of active transmission paths.
*/
int activePathNum;
/**
- * “An array of statistics for each active path.
+ * An array of statistics for each active transmission path. See `PathStats`.
*/
const PathStats* pathStats;
MultipathStats()
@@ -2669,63 +2736,63 @@ struct MultipathStats {
};
/**
- * The definition of the RtcStats struct.
+ * @brief Statistics of a call session.
*/
struct RtcStats {
/**
- * The call duration (s), represented by an aggregate value.
+ * Call duration of the local user in seconds, represented by an aggregate value.
*/
unsigned int duration;
/**
- * The total number of bytes transmitted, represented by an aggregate value.
+ * The number of bytes sent.
*/
unsigned int txBytes;
/**
- * The total number of bytes received, represented by an aggregate value.
+ * The number of bytes received.
*/
unsigned int rxBytes;
/**
- * The total number of audio bytes sent (bytes), represented by an aggregate value.
+ * The total number of audio bytes sent, represented by an aggregate value.
*/
unsigned int txAudioBytes;
/**
- * The total number of video bytes sent (bytes), represented by an aggregate value.
+ * The total number of video bytes sent, represented by an aggregate value.
*/
unsigned int txVideoBytes;
/**
- * The total number of audio bytes received (bytes), represented by an aggregate value.
+ * The total number of audio bytes received, represented by an aggregate value.
*/
unsigned int rxAudioBytes;
/**
- * The total number of video bytes received (bytes), represented by an aggregate value.
+ * The total number of video bytes received, represented by an aggregate value.
*/
unsigned int rxVideoBytes;
/**
- * The transmission bitrate (Kbps), represented by an instantaneous value.
+ * The actual bitrate (Kbps) while sending the local video stream.
*/
unsigned short txKBitRate;
/**
- * The receiving bitrate (Kbps), represented by an instantaneous value.
+ * The receiving bitrate (Kbps).
*/
unsigned short rxKBitRate;
/**
- * Audio receiving bitrate (Kbps), represented by an instantaneous value.
+ * The bitrate (Kbps) of receiving the audio.
*/
unsigned short rxAudioKBitRate;
/**
- * The audio transmission bitrate (Kbps), represented by an instantaneous value.
+ * The bitrate (Kbps) of sending the audio packet.
*/
unsigned short txAudioKBitRate;
/**
- * The video receive bitrate (Kbps), represented by an instantaneous value.
+ * The bitrate (Kbps) of receiving the video.
*/
unsigned short rxVideoKBitRate;
/**
- * The video transmission bitrate (Kbps), represented by an instantaneous value.
+ * The bitrate (Kbps) of sending the video.
*/
unsigned short txVideoKBitRate;
/**
- * The VOS client-server latency (ms).
+ * The client-to-server delay (milliseconds).
*/
unsigned short lastmileDelay;
/**
@@ -2733,49 +2800,49 @@ struct RtcStats {
*/
unsigned int userCount;
/**
- * The app CPU usage (%).
+ * Application CPU usage (%).
* @note
* - The value of `cpuAppUsage` is always reported as 0 in the `onLeaveChannel` callback.
- * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system
- * limitations.
+ * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations.
*/
double cpuAppUsage;
/**
* The system CPU usage (%).
- *
* For Windows, in the multi-kernel environment, this member represents the average CPU usage. The
* value = (100 - System Idle Progress in Task Manager)/100.
* @note
* - The value of `cpuTotalUsage` is always reported as 0 in the `onLeaveChannel` callback.
- * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system
- * limitations.
+ * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations.
*/
double cpuTotalUsage;
/**
- * The round-trip time delay from the client to the local router.
- * @note On Android, to get `gatewayRtt`, ensure that you add the
- * `android.permission.ACCESS_WIFI_STATE` permission after `` in the
- * `AndroidManifest.xml` file in your project.
+ * The round-trip time delay (ms) from the client to the local router.
+ * @note
+ * This property is disabled on devices running iOS 14 or later, and enabled on devices running
+ * versions earlier than iOS 14 by default.
+ * To enable this property on devices running iOS 14 or later, contact `technical support`.
+ * On Android, to get `gatewayRtt`, ensure that you add the `android.permission.ACCESS_WIFI_STATE`
+ * permission after `</application>` in the `AndroidManifest.xml` file in your project.
*/
int gatewayRtt;
/**
- * The memory usage ratio of the app (%).
+ * The memory ratio occupied by the app (%).
* @note This value is for reference only. Due to system limitations, you may not get this value.
*/
double memoryAppUsageRatio;
/**
- * The memory usage ratio of the system (%).
+ * The memory occupied by the system (%).
* @note This value is for reference only. Due to system limitations, you may not get this value.
*/
double memoryTotalUsageRatio;
/**
- * The memory usage of the app (KB).
+ * The memory size occupied by the app (KB).
* @note This value is for reference only. Due to system limitations, you may not get this value.
*/
int memoryAppUsageInKbytes;
/**
- * The time elapsed from the when the app starts connecting to an Agora channel
- * to when the connection is established. 0 indicates that this member does not apply.
+ * The duration (ms) from when the SDK starts connecting to when the connection is established. A
+ * reported value of 0 means the value is invalid.
*/
int connectTimeMs;
/**
@@ -2824,11 +2891,13 @@ struct RtcStats {
*/
int firstVideoKeyFrameRenderedDurationAfterUnmute;
/**
- * The packet loss rate of sender(broadcaster).
+ * The packet loss rate (%) from the client to the Agora server before applying the anti-packet-loss
+ * algorithm.
*/
int txPacketLossRate;
/**
- * The packet loss rate of receiver(audience).
+ * The packet loss rate (%) from the Agora server to the client before using the anti-packet-loss
+ * method.
*/
int rxPacketLossRate;
/**
@@ -2876,41 +2945,41 @@ struct RtcStats {
};
/**
- * User role types.
+ * @brief The user role in the interactive live streaming.
*/
enum CLIENT_ROLE_TYPE {
/**
- * 1: Broadcaster. A broadcaster can both send and receive streams.
+ * 1: Host. A host can both send and receive streams.
*/
CLIENT_ROLE_BROADCASTER = 1,
/**
- * 2: Audience. An audience member can only receive streams.
+ * 2: (Default) Audience. An audience member can only receive streams.
*/
CLIENT_ROLE_AUDIENCE = 2,
};
/**
- * Quality change of the local video in terms of target frame rate and target bit rate since last
- * count.
+ * @brief Quality change of the local video in terms of target frame rate and target bit rate since
+ * last count.
*/
enum QUALITY_ADAPT_INDICATION {
/**
- * 0: The quality of the local video stays the same.
+ * 0: The local video quality stays the same.
*/
ADAPT_NONE = 0,
/**
- * 1: The quality improves because the network bandwidth increases.
+ * 1: The local video quality improves because the network bandwidth increases.
*/
ADAPT_UP_BANDWIDTH = 1,
/**
- * 2: The quality worsens because the network bandwidth decreases.
+ * 2: The local video quality deteriorates because the network bandwidth decreases.
*/
ADAPT_DOWN_BANDWIDTH = 2,
};
/**
- * The latency level of an audience member in interactive live streaming. This enum takes effect
- * only when the user role is set to `CLIENT_ROLE_AUDIENCE`.
+ * @brief The latency level of an audience member in interactive live streaming. This enum takes
+ * effect only when the user role is set to `CLIENT_ROLE_AUDIENCE`.
*/
enum AUDIENCE_LATENCY_LEVEL_TYPE {
/**
@@ -2918,13 +2987,13 @@ enum AUDIENCE_LATENCY_LEVEL_TYPE {
*/
AUDIENCE_LATENCY_LEVEL_LOW_LATENCY = 1,
/**
- * 2: Ultra low latency.
+ * 2: (Default) Ultra low latency.
*/
AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY = 2,
};
/**
- * The detailed options of a user.
+ * @brief Setting of user role properties.
*/
struct ClientRoleOptions {
/**
@@ -2937,21 +3006,25 @@ struct ClientRoleOptions {
};
/**
- * Quality of experience (QoE) of the local user when receiving a remote audio stream.
+ * @brief The Quality of Experience (QoE) of the local user when receiving a remote audio stream.
*/
enum EXPERIENCE_QUALITY_TYPE {
- /** 0: QoE of the local user is good. */
+ /**
+ * 0: The QoE of the local user is good.
+ */
EXPERIENCE_QUALITY_GOOD = 0,
- /** 1: QoE of the local user is poor. */
+ /**
+ * 1: The QoE of the local user is poor.
+ */
EXPERIENCE_QUALITY_BAD = 1,
};
/**
- * Reasons why the QoE of the local user when receiving a remote audio stream is poor.
+ * @brief Reasons why the QoE of the local user when receiving a remote audio stream is poor.
*/
enum EXPERIENCE_POOR_REASON {
/**
- * 0: No reason, indicating good QoE of the local user.
+ * 0: No reason, indicating a good QoE of the local user.
*/
EXPERIENCE_REASON_NONE = 0,
/**
@@ -2967,42 +3040,47 @@ enum EXPERIENCE_POOR_REASON {
*/
WIRELESS_SIGNAL_POOR = 4,
/**
- * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each
- * other. As a result, audio transmission quality is undermined.
+ * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each other.
+ * As a result, audio transmission quality is undermined.
*/
WIFI_BLUETOOTH_COEXIST = 8,
};
/**
- * Audio AINS mode
+ * @brief AI noise suppression modes.
*/
enum AUDIO_AINS_MODE {
/**
- * AINS mode with soft suppression level.
+ * 0: (Default) Balance mode. This mode allows for a balanced performance on noise suppression and
+ * time delay.
*/
AINS_MODE_BALANCED = 0,
/**
- * AINS mode with high suppression level.
+ * 1: Aggressive mode. In scenarios where high performance on noise suppression is required, such as
+ * live streaming outdoor events, this mode reduces noise more dramatically, but may sometimes
+ * affect the original character of the audio.
*/
AINS_MODE_AGGRESSIVE = 1,
/**
- * AINS mode with high suppression level and ultra-low-latency
+ * 2: Aggressive mode with low latency. The noise suppression delay of this mode is about only half
+ * of that of the balance and aggressive modes. It is suitable for scenarios that have high
+ * requirements on noise suppression with low latency, such as singing together online in real time.
*/
AINS_MODE_ULTRALOWLATENCY = 2
};
/**
- * Audio profile types.
+ * @brief The audio profile.
*/
enum AUDIO_PROFILE_TYPE {
/**
* 0: The default audio profile.
- * - For the Communication profile:
+ * - For the interactive streaming profile: A sample rate of 48 kHz, music encoding, mono, and a
+ * bitrate of up to 64 Kbps.
+ * - For the communication profile:
* - Windows: A sample rate of 16 kHz, audio encoding, mono, and a bitrate of up to 16 Kbps.
* - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18
- * Kbps. of up to 16 Kbps.
- * - For the Live-broadcast profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate
- * of up to 64 Kbps.
+ * Kbps.
*/
AUDIO_PROFILE_DEFAULT = 0,
/**
@@ -3014,10 +3092,9 @@ enum AUDIO_PROFILE_TYPE {
*/
AUDIO_PROFILE_MUSIC_STANDARD = 2,
/**
- * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps.
- *
- * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set
- * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`.
+ * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps. To implement
+ * stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels`
+ * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`.
*/
AUDIO_PROFILE_MUSIC_STANDARD_STEREO = 3,
/**
@@ -3025,38 +3102,37 @@ enum AUDIO_PROFILE_TYPE {
*/
AUDIO_PROFILE_MUSIC_HIGH_QUALITY = 4,
/**
- * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps.
- *
- * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set
- * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`.
+ * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps. To implement
+ * stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels`
+ * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`.
*/
AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO = 5,
/**
- * 6: A sample rate of 16 kHz, audio encoding, mono, and Acoustic Echo Cancellation (AES) enabled.
+ * 6: A sample rate of 16 kHz, audio encoding, mono, and Acoustic Echo Cancellation (AEC) enabled.
*/
AUDIO_PROFILE_IOT = 6,
+ /**
+ * Enumerator boundary.
+ */
AUDIO_PROFILE_NUM = 7
};
/**
- * The audio scenario.
+ * @brief The audio scenarios.
*/
enum AUDIO_SCENARIO_TYPE {
/**
- * 0: Automatic scenario, where the SDK chooses the appropriate audio quality according to the
- * user role and audio route.
+ * 0: (Default) Automatic scenario match, where the SDK chooses the appropriate audio quality
+ * according to the user role and audio route.
*/
AUDIO_SCENARIO_DEFAULT = 0,
/**
- * 3: (Recommended) The live gaming scenario, which needs to enable gaming
- * audio effects in the speaker. Choose this scenario to achieve high-fidelity
- * music playback.
+ * 3: High-quality audio scenario, where users mainly play music. For example, instrument tutoring.
*/
AUDIO_SCENARIO_GAME_STREAMING = 3,
/**
- * 5: The chatroom scenario, which needs to keep recording when setClientRole to audience.
- * Normally, app developer can also use mute api to achieve the same result,
- * and we implement this 'non-orthogonal' behavior only to make API backward compatible.
+ * 5: Chatroom scenario, where users need to frequently switch the user role or mute and unmute the
+ * microphone. For example, education scenarios.
*/
AUDIO_SCENARIO_CHATROOM = 5,
/**
@@ -3065,7 +3141,7 @@ enum AUDIO_SCENARIO_TYPE {
*/
AUDIO_SCENARIO_CHORUS = 7,
/**
- * 8: Meeting
+ * 8: Meeting scenario that mainly contains the human voice.
*/
AUDIO_SCENARIO_MEETING = 8,
/**
@@ -3074,17 +3150,18 @@ enum AUDIO_SCENARIO_TYPE {
*/
AUDIO_SCENARIO_AI_SERVER = 9,
/**
- * 10: AI Client.
+ * 10: AI conversation scenario, which is only applicable to scenarios where the user interacts with
+ * the conversational AI agent created by `Conversational AI Engine`.
*/
AUDIO_SCENARIO_AI_CLIENT = 10,
/**
- * 11: The number of enumerations.
+ * The number of enumerations.
*/
AUDIO_SCENARIO_NUM = 11,
};
/**
- * The format of the video frame.
+ * @brief The format of the video frame.
*/
struct VideoFormat {
OPTIONAL_ENUM_SIZE_T{
@@ -3097,15 +3174,15 @@ struct VideoFormat {
};
/**
- * The width (px) of the video.
+ * The width (px) of the video frame. The default value is 960.
*/
int width; // Number of pixels.
/**
- * The height (px) of the video.
+ * The height (px) of the video frame. The default value is 540.
*/
int height; // Number of pixels.
/**
- * The video frame rate (fps).
+ * The video frame rate (fps). The default value is 15.
*/
int fps;
VideoFormat() : width(FRAME_WIDTH_960), height(FRAME_HEIGHT_540), fps(FRAME_RATE_FPS_15) {}
@@ -3127,73 +3204,114 @@ struct VideoFormat {
};
/**
- * Video content hints.
+ * @brief The content hint for screen sharing.
*/
enum VIDEO_CONTENT_HINT {
/**
- * (Default) No content hint. In this case, the SDK balances smoothness with sharpness.
+ * (Default) No content hint.
*/
CONTENT_HINT_NONE,
/**
- * Choose this option if you prefer smoothness or when
- * you are sharing motion-intensive content such as a video clip, movie, or video game.
- *
- *
+ * Motion-intensive content. Choose this option if you prefer smoothness or when you are sharing a
+ * video clip, movie, or video game.
*/
CONTENT_HINT_MOTION,
/**
- * Choose this option if you prefer sharpness or when you are
- * sharing montionless content such as a picture, PowerPoint slide, ot text.
- *
+ * Motionless content. Choose this option if you prefer sharpness or when you are sharing a picture,
+ * PowerPoint slides, or texts.
*/
CONTENT_HINT_DETAILS
};
/**
- * The screen sharing scenario.
+ * @brief The screen sharing scenario.
*/
enum SCREEN_SCENARIO_TYPE {
/**
- * 1: Document. This scenario prioritizes the video quality of screen sharing and reduces the
- * latency of the shared video for the receiver. If you share documents, slides, and tables,
- * you can set this scenario.
+ * 1: (Default) Document. This scenario prioritizes the video quality of screen sharing and reduces
+ * the latency of the shared video for the receiver. If you share documents, slides, and tables, you
+ * can set this scenario.
*/
SCREEN_SCENARIO_DOCUMENT = 1,
/**
- * 2: Game. This scenario prioritizes the smoothness of screen sharing. If you share games, you
- * can set this scenario.
+ * 2: Game. This scenario prioritizes the smoothness of screen sharing. If you share games, you can
+ * set this scenario.
*/
SCREEN_SCENARIO_GAMING = 2,
/**
- * 3: Video. This scenario prioritizes the smoothness of screen sharing. If you share movies or
- * live videos, you can set this scenario.
+ * 3: Video. This scenario prioritizes the smoothness of screen sharing. If you share movies or live
+ * videos, you can set this scenario.
*/
SCREEN_SCENARIO_VIDEO = 3,
/**
- * 4: Remote control. This scenario prioritizes the video quality of screen sharing and reduces
- * the latency of the shared video for the receiver. If you share the device desktop being
- * remotely controlled, you can set this scenario.
+ * 4: Remote control. This scenario prioritizes the video quality of screen sharing and reduces the
+ * latency of the shared video for the receiver. If you share the device desktop being remotely
+ * controlled, you can set this scenario.
*/
SCREEN_SCENARIO_RDC = 4,
};
/**
- * The video application scenario type.
+ * @brief The video application scenarios.
*/
enum VIDEO_APPLICATION_SCENARIO_TYPE {
/**
- * 0: Default Scenario.
+ * 0: (Default) The general scenario.
*/
APPLICATION_SCENARIO_GENERAL = 0,
/**
- * 1: Meeting Scenario. This scenario is the best QoE practice of meeting application.
+ * 1: The meeting scenario.
+ * `APPLICATION_SCENARIO_MEETING` (1) is suitable for meeting scenarios. The SDK automatically
+ * enables the following strategies:
+ * - In meeting scenarios where low-quality video streams are required to have a high bitrate, the
+ * SDK automatically enables multiple technologies used to deal with network congestions, to enhance
+ * the performance of the low-quality streams and to ensure the smooth reception by subscribers.
+ * - The SDK monitors the number of subscribers to the high-quality video stream in real time and
+ * dynamically adjusts its configuration based on the number of subscribers.
+ * - If nobody subscribes to the high-quality stream, the SDK automatically reduces its bitrate
+ * and frame rate to save upstream bandwidth.
+ * - If someone subscribes to the high-quality stream, the SDK resets the high-quality stream to
+ * the `VideoEncoderConfiguration` configuration used in the most recent calling of
+ * `setVideoEncoderConfiguration`. If no configuration has been set by the user previously, the
+ * following values are used:
+ * - Resolution: (Windows and macOS) 1280 × 720; (Android and iOS) 960 × 540
+ * - Frame rate: 15 fps
+ * - Bitrate: (Windows and macOS) 1600 Kbps; (Android and iOS) 1000 Kbps
+ * - The SDK monitors the number of subscribers to the low-quality video stream in real time and
+ * dynamically enables or disables it based on the number of subscribers.
+ * - If nobody subscribes to the low-quality stream, the SDK automatically disables it to save
+ * upstream bandwidth.
+ * - If someone subscribes to the low-quality stream, the SDK enables the low-quality stream and
+ * resets it to the `SimulcastStreamConfig` configuration used in the most recent calling of
+ * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`. If no
+ * configuration has been set by the user previously, the following
+ * values are used:
+ * - Resolution: 480 × 272
+ * - Frame rate: 15 fps
+ * - Bitrate: 500 Kbps
+ * @note If the user has called `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const
+ * SimulcastStreamConfig& streamConfig)` to set that never send low-quality video
+ * stream ( `DISABLE_SIMULCAST_STREAM` ), the dynamic adjustment of the low-quality stream in
+ * meeting scenarios will not take effect.
*/
APPLICATION_SCENARIO_MEETING = 1,
/**
- * 2: Video Call Scenario. This scenario is used to optimize the video experience in video application, like 1v1 video call.
+ * 2: 1v1 video call scenario.
+ * `APPLICATION_SCENARIO_1V1` (2) This is applicable to the `one to one live` scenario. To meet the
+ * requirements for low latency and high-quality video in this scenario, the SDK optimizes its
+ * strategies, improving performance in terms of video quality, first frame rendering, latency on
+ * mid-to-low-end devices, and smoothness under weak network conditions.
+ * @note This enumeration value is only applicable to the broadcaster vs. broadcaster scenario.
*/
APPLICATION_SCENARIO_1V1 = 2,
/**
- * 3: Live Show Scenario. This scenario is used to optimize the video experience in video live show.
+ * 3: Live show scenario.
+ * `APPLICATION_SCENARIO_LIVESHOW` (3) This is applicable to the `show room` scenario. In this
+ * scenario, fast video rendering and high image quality are crucial. The SDK implements several
+ * performance optimizations, including automatically enabling accelerated audio and video frame
+ * rendering to minimize first-frame latency (no need to call `enableInstantMediaRendering` ), and
+ * B-frame encoding to achieve better image quality and bandwidth efficiency. The SDK also provides
+ * enhanced video quality and smooth playback, even in poor network conditions or on lower-end
+ * devices.
*/
APPLICATION_SCENARIO_LIVESHOW = 3,
};
@@ -3222,39 +3340,57 @@ enum VIDEO_QOE_PREFERENCE_TYPE {
};
/**
- * The brightness level of the video image captured by the local camera.
+ * @brief The brightness level of the video image captured by the local camera.
*/
enum CAPTURE_BRIGHTNESS_LEVEL_TYPE {
- /** -1: The SDK does not detect the brightness level of the video image.
- * Wait a few seconds to get the brightness level from `CAPTURE_BRIGHTNESS_LEVEL_TYPE` in the next
- * callback.
+ /**
+ * -1: The SDK does not detect the brightness level of the video image. Wait a few seconds to get
+ * the brightness level from `captureBrightnessLevel` in the next callback.
*/
CAPTURE_BRIGHTNESS_LEVEL_INVALID = -1,
- /** 0: The brightness level of the video image is normal.
+ /**
+ * 0: The brightness level of the video image is normal.
*/
CAPTURE_BRIGHTNESS_LEVEL_NORMAL = 0,
- /** 1: The brightness level of the video image is too bright.
+ /**
+ * 1: The brightness level of the video image is too bright.
*/
CAPTURE_BRIGHTNESS_LEVEL_BRIGHT = 1,
- /** 2: The brightness level of the video image is too dark.
+ /**
+ * 2: The brightness level of the video image is too dark.
*/
CAPTURE_BRIGHTNESS_LEVEL_DARK = 2,
};
+/**
+ * @brief Camera stabilization modes.
+ *
+ * @details
+ * The camera stabilization effect increases in the order of 1 < 2 < 3, and the latency will also
+ * increase accordingly.
+ *
+ */
enum CAMERA_STABILIZATION_MODE {
- /** The camera stabilization mode is disabled.
+ /**
+ * -1: (Default) Camera stabilization mode off.
*/
CAMERA_STABILIZATION_MODE_OFF = -1,
- /** device choose stabilization mode automatically.
+ /**
+ * 0: Automatic camera stabilization. The system automatically selects a stabilization mode based on
+ * the status of the camera. However, the latency is relatively high in this mode, so it is
+ * recommended not to use this enumeration.
*/
CAMERA_STABILIZATION_MODE_AUTO = 0,
- /** stabilization mode level 1.
+ /**
+ * 1: (Recommended) Level 1 camera stabilization.
*/
CAMERA_STABILIZATION_MODE_LEVEL_1 = 1,
- /** stabilization mode level 2.
+ /**
+ * 2: Level 2 camera stabilization.
*/
CAMERA_STABILIZATION_MODE_LEVEL_2 = 2,
- /** stabilization mode level 3.
+ /**
+ * 3: Level 3 camera stabilization.
*/
CAMERA_STABILIZATION_MODE_LEVEL_3 = 3,
/** The maximum level of the camera stabilization mode.
@@ -3263,7 +3399,7 @@ enum CAMERA_STABILIZATION_MODE {
};
/**
- * Local audio states.
+ * @brief The state of the local audio.
*/
enum LOCAL_AUDIO_STREAM_STATE {
/**
@@ -3271,7 +3407,7 @@ enum LOCAL_AUDIO_STREAM_STATE {
*/
LOCAL_AUDIO_STREAM_STATE_STOPPED = 0,
/**
- * 1: The capturing device starts successfully.
+ * 1: The local audio capturing device starts successfully.
*/
LOCAL_AUDIO_STREAM_STATE_RECORDING = 1,
/**
@@ -3285,7 +3421,7 @@ enum LOCAL_AUDIO_STREAM_STATE {
};
/**
- * Local audio state error codes.
+ * @brief Reasons for local audio state changes.
*/
enum LOCAL_AUDIO_STREAM_REASON {
/**
@@ -3298,43 +3434,56 @@ enum LOCAL_AUDIO_STREAM_REASON {
*/
LOCAL_AUDIO_STREAM_REASON_FAILURE = 1,
/**
- * 2: No permission to use the local audio device. Remind your users to grant permission.
+ * 2: No permission to use the local audio capturing device. Remind your users to grant permission.
*/
LOCAL_AUDIO_STREAM_REASON_DEVICE_NO_PERMISSION = 2,
/**
- * 3: (Android and iOS only) The local audio capture device is used. Remind your users to check
- * whether another application occupies the microphone. Local audio capture automatically resume
- * after the microphone is idle for about five seconds. You can also try to rejoin the channel
- * after the microphone is idle.
+ * 3: (Android and iOS only) The local audio capture device is already in use. Remind your users to
+ * check whether another application occupies the microphone. Local audio capture automatically
+ * resumes after the microphone is idle for about five seconds. You can also try to rejoin the
+ * channel after the microphone is idle.
*/
LOCAL_AUDIO_STREAM_REASON_DEVICE_BUSY = 3,
/**
- * 4: The local audio capture failed.
+ * 4: The local audio capture fails.
*/
LOCAL_AUDIO_STREAM_REASON_RECORD_FAILURE = 4,
/**
- * 5: The local audio encoding failed.
+ * 5: The local audio encoding fails.
*/
LOCAL_AUDIO_STREAM_REASON_ENCODE_FAILURE = 5,
- /** 6: The SDK cannot find the local audio recording device.
+ /**
+ * 6: (Windows and macOS only) No local audio capture device. Remind your users to check whether the
+ * microphone is connected to the device properly in the control panel of the device or if the
+ * microphone is working properly.
*/
LOCAL_AUDIO_STREAM_REASON_NO_RECORDING_DEVICE = 6,
- /** 7: The SDK cannot find the local audio playback device.
+ /**
+ * 7: (Windows and macOS only) No local audio playback device. Remind your users to check whether the
+ * speaker is connected to the device properly in the control panel of the device or if the speaker
+ * is working properly.
*/
LOCAL_AUDIO_STREAM_REASON_NO_PLAYOUT_DEVICE = 7,
/**
- * 8: The local audio capturing is interrupted by the system call.
+ * 8: (Android and iOS only) The local audio capture is interrupted by a system call, smart
+ * assistants, or alarm clock. Prompt your users to end the phone call, smart assistants, or alarm
+ * clock if the local audio capture is required.
*/
LOCAL_AUDIO_STREAM_REASON_INTERRUPTED = 8,
- /** 9: An invalid audio capture device ID.
+ /**
+ * 9: (Windows only) The ID of the local audio-capture device is invalid. Prompt the user to check
+ * the audio capture device ID.
*/
LOCAL_AUDIO_STREAM_REASON_RECORD_INVALID_ID = 9,
- /** 10: An invalid audio playback device ID.
+ /**
+ * 10: (Windows only) The ID of the local audio-playback device is invalid. Prompt the user to check
+ * the audio playback device ID.
*/
LOCAL_AUDIO_STREAM_REASON_PLAYOUT_INVALID_ID = 10,
};
-/** Local video state types.
+/**
+ * @brief Local video state types.
*/
enum LOCAL_VIDEO_STREAM_STATE {
/**
@@ -3342,8 +3491,8 @@ enum LOCAL_VIDEO_STREAM_STATE {
*/
LOCAL_VIDEO_STREAM_STATE_STOPPED = 0,
/**
- * 1: The local video capturing device starts successfully. The SDK also reports this state when
- * you call `startScreenCaptureByWindowId` to share a maximized window.
+ * 1: The local video capturing device starts successfully. The SDK also reports this state when you
+ * call `startScreenCaptureByWindowId` to share a maximized window.
*/
LOCAL_VIDEO_STREAM_STATE_CAPTURING = 1,
/**
@@ -3357,7 +3506,30 @@ enum LOCAL_VIDEO_STREAM_STATE {
};
/**
- * Local video state error codes.
+ * @brief The local video event type.
+ * @since v4.6.1
+ */
+enum LOCAL_VIDEO_EVENT_TYPE {
+ /**
+ * 1: (Android only) The screen capture window is hidden.
+ */
+ LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_WINDOW_HIDDEN = 1,
+ /**
+ * 2: (Android only) The screen capture window is recovered from hidden.
+ */
+ LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN = 2,
+ /**
+ * 3: (Android only) The screen capture is stopped by user.
+ */
+ LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_STOPPED_BY_USER = 3,
+ /**
+ * 4: (Android only) An internal error occurs during the screen capture.
+ */
+ LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_SYSTEM_INTERNAL_ERROR = 4,
+};
+
+/**
+ * @brief Reasons for local video state changes.
*/
enum LOCAL_VIDEO_STREAM_REASON {
/**
@@ -3369,35 +3541,34 @@ enum LOCAL_VIDEO_STREAM_REASON {
*/
LOCAL_VIDEO_STREAM_REASON_FAILURE = 1,
/**
- * 2: No permission to use the local video capturing device. Remind the user to grant permission
+ * 2: No permission to use the local video capturing device. Prompt the user to grant permissions
* and rejoin the channel.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_NO_PERMISSION = 2,
/**
- * 3: The local video capturing device is in use. Remind the user to check whether another
- * application occupies the camera.
+ * 3: The local video capturing device is in use. Prompt the user to check if the camera is being
+ * used by another app, or try to rejoin the channel.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_BUSY = 3,
/**
- * 4: The local video capture fails. Remind the user to check whether the video capture device
- * is working properly or the camera is occupied by another application, and then to rejoin the
- * channel.
+ * 4: The local video capture fails. Prompt the user to check whether the video capture device is
+ * working properly, whether the camera is used by another app, or try to rejoin the channel.
*/
LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE = 4,
/**
- * 5: The local video encoder is not supported.
+ * 5: The local video encoding fails.
*/
LOCAL_VIDEO_STREAM_REASON_CODEC_NOT_SUPPORT = 5,
/**
- * 6: (iOS only) The app is in the background. Remind the user that video capture cannot be
+ * 6: (iOS only) The app is in the background. Prompt the user that video capture cannot be
* performed normally when the app is in the background.
*/
LOCAL_VIDEO_STREAM_REASON_CAPTURE_INBACKGROUND = 6,
/**
- * 7: (iOS only) The current application window is running in Slide Over, Split View, or Picture
- * in Picture mode, and another app is occupying the camera. Remind the user that the application
- * cannot capture video properly when the app is running in Slide Over, Split View, or Picture in
- * Picture mode and another app is occupying the camera.
+ * 7: (iOS only) The current app window is running in Slide Over, Split View, or Picture in Picture
+ * mode, and another app is occupying the camera. Prompt the user that the app cannot capture video
+ * properly when it is running in Slide Over, Split View, or Picture in Picture mode and another app
+ * is occupying the camera.
*/
LOCAL_VIDEO_STREAM_REASON_CAPTURE_MULTIPLE_FOREGROUND_APPS = 7,
/**
@@ -3407,23 +3578,28 @@ enum LOCAL_VIDEO_STREAM_REASON {
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_NOT_FOUND = 8,
/**
- * 9: (macOS and Windows only) The video capture device currently in use is disconnected (such as being
- * unplugged).
+ * 9: (macOS and Windows only) The video capture device currently in use is disconnected (such as
+ * being unplugged).
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_DISCONNECTED = 9,
/**
- * 10: (macOS and Windows only) The SDK cannot find the video device in the video device list.
- * Check whether the ID of the video device is valid.
+ * 10: (macOS and Windows only) The SDK cannot find the video device in the video device list. Check
+ * whether the ID of the video device is valid.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_INVALID_ID = 10,
/**
- * 14: (Android only) Video capture was interrupted, possibly due to the camera being occupied
- * or some policy reasons such as background termination.
+ * 14: (Android only) Video capture is interrupted. Possible reasons include the following:
+ * - The camera is being used by another app. Prompt the user to check if the camera is being used
+ * by another app.
+ * - The current app has been switched to the background. You can use foreground services to notify
+ * the operating system and ensure that the app can still collect video when it switches to the
+ * background.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_INTERRUPT = 14,
/**
- * 15: (Android only) The device may need to be shut down and restarted to restore camera
- * function, or there may be a persistent hardware problem.
+ * 15: (Android only) The video capture device encounters an error. Prompt the user to close and
+ * restart the camera to restore functionality. If this operation does not solve the problem, check
+ * if the camera has a hardware failure.
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_FATAL_ERROR = 15,
/**
@@ -3431,58 +3607,78 @@ enum LOCAL_VIDEO_STREAM_REASON {
*/
LOCAL_VIDEO_STREAM_REASON_DEVICE_SYSTEM_PRESSURE = 101,
/**
- * 11: (macOS only) The shared window is minimized when you call `startScreenCaptureByWindowId`
- * to share a window. The SDK cannot share a minimized window. You can cancel the minimization
- * of this window at the application layer, for example by maximizing this window.
+ * 11: (macOS and Windows only) The shared window is minimized when you call the
+ * `startScreenCaptureByWindowId` method to share a window. The SDK cannot share a minimized window.
+ * Please prompt the user to unminimize the shared window.
*/
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_MINIMIZED = 11,
/**
- * 12: (macOS and Windows only) The error code indicates that a window shared by the window ID
- * has been closed or a full-screen window shared by the window ID has exited full-screen mode.
- * After exiting full-screen mode, remote users cannot see the shared window. To prevent remote
- * users from seeing a black screen, Agora recommends that you immediately stop screen sharing.
- *
- * Common scenarios for reporting this error code:
- * - When the local user closes the shared window, the SDK reports this error code.
- * - The local user shows some slides in full-screen mode first, and then shares the windows of
- * the slides. After the user exits full-screen mode, the SDK reports this error code.
- * - The local user watches a web video or reads a web document in full-screen mode first, and
- * then shares the window of the web video or document. After the user exits full-screen mode,
- * the SDK reports this error code.
+ * 12: (macOS and Windows only) The error code indicates that a window shared by the window ID has
+ * been closed or a full-screen window shared by the window ID has exited full-screen mode. After
+ * exiting full-screen mode, remote users cannot see the shared window. To prevent remote users from
+ * seeing a black screen, Agora recommends that you immediately stop screen sharing.
+ * Common scenarios reporting this error code:
+ * - The local user closes the shared window.
+ * - The local user shows some slides in full-screen mode first, and then shares the windows of the
+ * slides. After the user exits full-screen mode, the SDK reports this error code.
+ * - The local user watches a web video or reads a web document in full-screen mode first, and then
+ * shares the window of the web video or document. After the user exits full-screen mode, the SDK
+ * reports this error code.
*/
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_CLOSED = 12,
- /** 13: The local screen capture window is occluded. */
+ /**
+ * 13: (Windows only) The window being shared is overlapped by another window, so the overlapped
+ * area is blacked out by the SDK during window sharing.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_OCCLUDED = 13,
/** 20: The local screen capture window is not supported. */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_NOT_SUPPORTED = 20,
- /** 21: The screen capture fails. */
+ /**
+ * 21: (Windows and Android only) The currently captured window has no data.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_FAILURE = 21,
- /** 22: No permision to capture screen. */
+ /**
+ * 22: (Windows and macOS only) No permission for screen capture.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_NO_PERMISSION = 22,
/**
- * 24: (Windows Only) An unexpected error (possibly due to window block failure) occurs during the
- * screen sharing process, resulting in performance degradation. However, the screen sharing
- * process itself is functioning normally.
+ * 24: (Windows only) An unexpected error occurred during screen sharing (possibly due to window
+ * blocking failure), resulting in decreased performance, but the screen sharing process itself was
+ * not affected.
+ * @note During screen sharing, if blocking a specific window fails due to device driver issues, the
+ * SDK will report this event and automatically fall back to sharing the entire screen. If your use
+ * case requires masking specific windows to protect privacy, we recommend listening for this event
+ * and implementing additional privacy protection mechanisms when it is triggered.
*/
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_AUTO_FALLBACK = 24,
- /** 25: (Windows only) The local screen capture window is currently hidden and not visible on the
- desktop. */
+ /**
+ * 25: (Windows only) The window for the current screen capture is hidden and not visible on the
+ * current screen.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_HIDDEN = 25,
- /** 26: (Windows only) The local screen capture window is recovered from its hidden state. */
+ /**
+ * 26: (Windows only) The window for screen capture has been restored from hidden state.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN = 26,
- /** 27: (Windows and macOS only) The window is recovered from miniminzed */
+ /**
+ * 27: (macOS and Windows only) The window for screen capture has been restored from the minimized
+ * state.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_MINIMIZED = 27,
/**
- * 28: The screen capture paused.
- *
- * Common scenarios for reporting this error code:
- * - When the desktop switch to the secure desktop such as UAC dialog or the Winlogon desktop on
- * Windows platform, the SDK reports this error code.
+ * 28: (Windows only) Screen capture has been paused. Common scenarios reporting this error code:
+ * The current screen may have been switched to a secure desktop, such as a UAC dialog box or
+ * Winlogon desktop.
*/
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_PAUSED = 28,
- /** 29: The screen capture is resumed. */
+ /**
+ * 29: (Windows only) Screen capture has resumed from paused state.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_RESUMED = 29,
- /** 30: The shared display has been disconnected */
+ /**
+ * 30: (Windows and macOS only) The displayer used for screen capture is disconnected. The current
+ * screen sharing has been paused. Prompt the user to restart the screen sharing.
+ */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_DISPLAY_DISCONNECTED = 30,
/* 30: (HMOS only) ScreenCapture stopped by user */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_STOPPED_BY_USER = 31,
@@ -3490,18 +3686,18 @@ enum LOCAL_VIDEO_STREAM_REASON {
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_INTERRUPTED_BY_OTHER = 32,
/* 32: (HMOS only) ScreenCapture stopped by SIM call */
LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_STOPPED_BY_CALL = 33,
- /* 34: HDR Video Source fallback to SDR */
- LOCAL_AUDIO_STREAM_REASON_VIDEO_SOURCE_HDR_TO_SDR = 34,
+ /** 34: (Windows only) Some windows of the exclude window list failed to be excluded from the screen capture. */
+ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_EXCLUDE_WINDOW_FAILED = 34,
};
/**
- * Remote audio states.
+ * @brief Remote audio states.
*/
enum REMOTE_AUDIO_STATE {
/**
- * 0: The remote audio is in the default state. The SDK reports this state in the case of
- * `REMOTE_AUDIO_REASON_LOCAL_MUTED(3)`, `REMOTE_AUDIO_REASON_REMOTE_MUTED(5)`, or
- * `REMOTE_AUDIO_REASON_REMOTE_OFFLINE(7)`.
+ * 0: The local audio is in the initial state. The SDK reports this state in the case of
+ * `REMOTE_AUDIO_REASON_LOCAL_MUTED`, `REMOTE_AUDIO_REASON_REMOTE_MUTED` or
+ * `REMOTE_AUDIO_REASON_REMOTE_OFFLINE`.
*/
REMOTE_AUDIO_STATE_STOPPED =
0, // Default state, audio is started or remote user disabled/muted audio stream
@@ -3510,30 +3706,30 @@ enum REMOTE_AUDIO_STATE {
*/
REMOTE_AUDIO_STATE_STARTING = 1, // The first audio frame packet has been received
/**
- * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the
- * case of `REMOTE_AUDIO_REASON_NETWORK_RECOVERY(2)`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED(4)`, or
- * `REMOTE_AUDIO_REASON_REMOTE_UNMUTED(6)`.
+ * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the case
+ * of `REMOTE_AUDIO_REASON_NETWORK_RECOVERY`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED` or
+ * `REMOTE_AUDIO_REASON_REMOTE_UNMUTED`.
*/
REMOTE_AUDIO_STATE_DECODING =
2, // The first remote audio frame has been decoded or fronzen state ends
/**
* 3: The remote audio is frozen. The SDK reports this state in the case of
- * `REMOTE_AUDIO_REASON_NETWORK_CONGESTION(1)`.
+ * `REMOTE_AUDIO_REASON_NETWORK_CONGESTION`.
*/
REMOTE_AUDIO_STATE_FROZEN = 3, // Remote audio is frozen, probably due to network issue
/**
* 4: The remote audio fails to start. The SDK reports this state in the case of
- * `REMOTE_AUDIO_REASON_INTERNAL(0)`.
+ * `REMOTE_AUDIO_REASON_INTERNAL`.
*/
REMOTE_AUDIO_STATE_FAILED = 4, // Remote audio play failed
};
/**
- * Reasons for the remote audio state change.
+ * @brief The reason for the remote audio state change.
*/
enum REMOTE_AUDIO_STATE_REASON {
/**
- * 0: The SDK reports this reason when the video state changes.
+ * 0: The SDK reports this reason when the audio state changes.
*/
REMOTE_AUDIO_REASON_INTERNAL = 0,
/**
@@ -3545,23 +3741,19 @@ enum REMOTE_AUDIO_STATE_REASON {
*/
REMOTE_AUDIO_REASON_NETWORK_RECOVERY = 2,
/**
- * 3: The local user stops receiving the remote audio stream or
- * disables the audio module.
+ * 3: The local user stops receiving the remote audio stream or disables the audio module.
*/
REMOTE_AUDIO_REASON_LOCAL_MUTED = 3,
/**
- * 4: The local user resumes receiving the remote audio stream or
- * enables the audio module.
+ * 4: The local user resumes receiving the remote audio stream or enables the audio module.
*/
REMOTE_AUDIO_REASON_LOCAL_UNMUTED = 4,
/**
- * 5: The remote user stops sending the audio stream or disables the
- * audio module.
+ * 5: The remote user stops sending the audio stream or disables the audio module.
*/
REMOTE_AUDIO_REASON_REMOTE_MUTED = 5,
/**
- * 6: The remote user resumes sending the audio stream or enables the
- * audio module.
+ * 6: The remote user resumes sending the audio stream or enables the audio module.
*/
REMOTE_AUDIO_REASON_REMOTE_UNMUTED = 6,
/**
@@ -3579,14 +3771,13 @@ enum REMOTE_AUDIO_STATE_REASON {
};
/**
- * The state of the remote video.
+ * @brief The state of the remote video stream.
*/
enum REMOTE_VIDEO_STATE {
/**
- * 0: The remote video is in the default state. The SDK reports this state in the case of
- * `REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED (3)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED (5)`,
- * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE (7)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK
- * (8)`.
+ * 0: The remote video is in the initial state. The SDK reports this state in the case of
+ * `REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED`, `REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED`, or
+ * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE`.
*/
REMOTE_VIDEO_STATE_STOPPED = 0,
/**
@@ -3594,23 +3785,25 @@ enum REMOTE_VIDEO_STATE {
*/
REMOTE_VIDEO_STATE_STARTING = 1,
/**
- * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the
- * case of `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY (2)`,
- * `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED (4)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED (6)`,
- * or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY (9)`.
+ * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the case
+ * of `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY`, `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED`,
+ * `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED`, or
+ * `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY`.
*/
REMOTE_VIDEO_STATE_DECODING = 2,
- /** 3: The remote video is frozen, probably due to
- * #REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION (1).
+ /**
+ * 3: The remote video is frozen. The SDK reports this state in the case of
+ * `REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION` or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK`.
*/
REMOTE_VIDEO_STATE_FROZEN = 3,
- /** 4: The remote video fails to start. The SDK reports this state in the case of
- * `REMOTE_VIDEO_STATE_REASON_INTERNAL (0)`.
+ /**
+ * 4: The remote video fails to start. The SDK reports this state in the case of
+ * `REMOTE_VIDEO_STATE_REASON_INTERNAL`.
*/
REMOTE_VIDEO_STATE_FAILED = 4,
};
/**
- * The reason for the remote video state change.
+ * @brief The reason for the remote video state change.
*/
enum REMOTE_VIDEO_STATE_REASON {
/**
@@ -3622,7 +3815,7 @@ enum REMOTE_VIDEO_STATE_REASON {
*/
REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION = 1,
/**
- * 2: Network recovery.
+ * 2: Network is recovered.
*/
REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY = 2,
/**
@@ -3645,12 +3838,14 @@ enum REMOTE_VIDEO_STATE_REASON {
* 7: The remote user leaves the channel.
*/
REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE = 7,
- /** 8: The remote audio-and-video stream falls back to the audio-only stream
- * due to poor network conditions.
+ /**
+ * 8: The remote audio-and-video stream falls back to the audio-only stream due to poor network
+ * conditions.
*/
REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK = 8,
- /** 9: The remote audio-only stream switches back to the audio-and-video
- * stream after the network conditions improve.
+ /**
+ * 9: The remote audio-only stream switches back to the audio-and-video stream after the network
+ * conditions improve.
*/
REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY = 9,
/** (Internal use only) 10: The remote video stream type change to low stream type
@@ -3659,11 +3854,13 @@ enum REMOTE_VIDEO_STATE_REASON {
/** (Internal use only) 11: The remote video stream type change to high stream type
*/
REMOTE_VIDEO_STATE_REASON_VIDEO_STREAM_TYPE_CHANGE_TO_HIGH = 11,
- /** (iOS only) 12: The app of the remote user is in background.
+ /**
+ * 12: (iOS only) The remote user's app has switched to the background.
*/
REMOTE_VIDEO_STATE_REASON_SDK_IN_BACKGROUND = 12,
- /** 13: The remote video stream is not supported by the decoder
+ /**
+ * 13: The local video decoder does not support decoding the remote video stream.
*/
REMOTE_VIDEO_STATE_REASON_CODEC_NOT_SUPPORT = 13,
@@ -3771,19 +3968,22 @@ enum REMOTE_VIDEO_DOWNSCALE_LEVEL {
};
/**
- * The volume information of users.
+ * @brief The volume information of users.
*/
struct AudioVolumeInfo {
/**
- * User ID of the speaker.
- * - In the local user's callback, `uid` = 0.
- * - In the remote users' callback, `uid` is the user ID of a remote user whose instantaneous
- * volume is one of the three highest.
+ * The user ID.
+ * - In the local user's callback, `uid` is 0.
+ * - In the remote users' callback, `uid` is the user ID of a remote user whose instantaneous volume
+ * is the highest.
*/
uid_t uid;
/**
* The volume of the user. The value ranges between 0 (the lowest volume) and 255 (the highest
- * volume). If the user calls `startAudioMixing`, the value of volume is the volume after audio
+ * volume). If the local user enables audio capturing and calls `muteLocalAudioStream` with
+ * `true` to mute, the value of `volume` indicates the volume of the locally captured audio signal. If
+ * the user calls `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)`,
+ * the value of `volume` indicates the volume after audio
* mixing.
*/
unsigned int volume; // [0,255]
@@ -3794,14 +3994,14 @@ struct AudioVolumeInfo {
* @note
* - The `vad` parameter does not report the voice activity status of remote users. In a remote
* user's callback, the value of `vad` is always 1.
- * - To use this parameter, you must set `reportVad` to true when calling
+ * - To use this parameter, you must set `reportVad` to `true` when calling
* `enableAudioVolumeIndication`.
*/
unsigned int vad;
/**
- * The voice pitch (Hz) of the local user. The value ranges between 0.0 and 4000.0.
- * @note The `voicePitch` parameter does not report the voice pitch of remote users. In the
- * remote users' callback, the value of `voicePitch` is always 0.0.
+ * The voice pitch of the local user. The value ranges between 0.0 and 4000.0.
+ * @note The `voicePitch` parameter does not report the voice pitch of remote users. In the remote
+ * users' callback, the value of `voicePitch` is always 0.0.
*/
double voicePitch;
@@ -3809,10 +4009,13 @@ struct AudioVolumeInfo {
};
/**
- * The audio device information.
+ * @brief The audio device information.
+ *
+ * @note This class is for Android only.
+ *
*/
struct DeviceInfo {
- /*
+ /**
* Whether the audio device supports ultra-low-latency capture and playback:
* - `true`: The device supports ultra-low-latency capture and playback.
* - `false`: The device does not support ultra-low-latency capture and playback.
@@ -3829,13 +4032,13 @@ class IPacketObserver {
public:
virtual ~IPacketObserver() {}
/**
- * The definition of the Packet struct.
+ * @brief Configurations for the `Packet` instance.
*/
struct Packet {
/**
* The buffer address of the sent or received data.
- * @note Agora recommends setting `buffer` to a value larger than 2048 bytes. Otherwise, you
- * may encounter undefined behaviors (such as crashes).
+ * @note Agora recommends setting `buffer` to a value larger than 2048 bytes. Otherwise, you may
+ * encounter undefined behaviors (such as crashes).
*/
const unsigned char* buffer;
/**
@@ -3846,62 +4049,70 @@ class IPacketObserver {
Packet() : buffer(OPTIONAL_NULLPTR), size(0) {}
};
/**
- * Occurs when the SDK is ready to send the audio packet.
- * @param packet The audio packet to be sent: Packet.
- * @return Whether to send the audio packet:
- * - true: Send the packet.
- * - false: Do not send the packet, in which case the audio packet will be discarded.
+ * @brief Occurs when the local user sends an audio packet.
+ *
+ * @param packet The sent audio packet, see `Packet`.
+ *
+ * @return
+ * - `true`: The audio packet is sent successfully.
+ * - `false`: The audio packet is discarded.
*/
virtual bool onSendAudioPacket(Packet& packet) = 0;
/**
- * Occurs when the SDK is ready to send the video packet.
- * @param packet The video packet to be sent: Packet.
- * @return Whether to send the video packet:
- * - true: Send the packet.
- * - false: Do not send the packet, in which case the audio packet will be discarded.
+ * @brief Occurs when the local user sends a video packet.
+ *
+ * @param packet The sent video packet, see `Packet`.
+ *
+ * @return
+ * - `true`: The video packet is sent successfully.
+ * - `false`: The video packet is discarded.
*/
virtual bool onSendVideoPacket(Packet& packet) = 0;
/**
- * Occurs when the audio packet is received.
- * @param packet The received audio packet: Packet.
- * @return Whether to process the audio packet:
- * - true: Process the packet.
- * - false: Do not process the packet, in which case the audio packet will be discarded.
+ * @brief Occurs when the local user receives an audio packet.
+ *
+ * @param packet The received audio packet, see `Packet`.
+ *
+ * @return
+ * - `true`: The audio packet is received successfully.
+ * - `false`: The audio packet is discarded.
*/
virtual bool onReceiveAudioPacket(Packet& packet) = 0;
/**
- * Occurs when the video packet is received.
- * @param packet The received video packet: Packet.
- * @return Whether to process the audio packet:
- * - true: Process the packet.
- * - false: Do not process the packet, in which case the video packet will be discarded.
+ * @brief Occurs when the local user receives a video packet.
+ *
+ * @param packet The received video packet, see `Packet`.
+ *
+ * @return
+ * - `true`: The video packet is received successfully.
+ * - `false`: The video packet is discarded.
*/
virtual bool onReceiveVideoPacket(Packet& packet) = 0;
};
/**
- * Audio sample rate types.
+ * @brief The audio sampling rate of the stream to be pushed to the CDN.
*/
enum AUDIO_SAMPLE_RATE_TYPE {
/**
- * 32000: 32 KHz.
+ * 32000: 32 kHz
*/
AUDIO_SAMPLE_RATE_32000 = 32000,
/**
- * 44100: 44.1 KHz.
+ * 44100: 44.1 kHz
*/
AUDIO_SAMPLE_RATE_44100 = 44100,
/**
- * 48000: 48 KHz.
+ * 48000: (Default) 48 kHz
*/
AUDIO_SAMPLE_RATE_48000 = 48000,
};
/**
- * The codec type of the output video.
+ * @brief The codec type of the output video.
*/
enum VIDEO_CODEC_TYPE_FOR_STREAM {
/**
- * 1: H.264.
+ * 1: (Default) H.264.
*/
VIDEO_CODEC_H264_FOR_STREAM = 1,
/**
@@ -3911,30 +4122,31 @@ enum VIDEO_CODEC_TYPE_FOR_STREAM {
};
/**
- * Video codec profile types.
+ * @brief Video codec profile types.
*/
enum VIDEO_CODEC_PROFILE_TYPE {
/**
- * 66: Baseline video codec profile. Generally used in video calls on mobile phones.
+ * 66: Baseline video codec profile; generally used for video calls on mobile phones.
*/
VIDEO_CODEC_PROFILE_BASELINE = 66,
/**
- * 77: Main video codec profile. Generally used in mainstream electronics, such as MP4 players,
+ * 77: Main video codec profile; generally used in mainstream electronics such as MP4 players,
* portable video players, PSP, and iPads.
*/
VIDEO_CODEC_PROFILE_MAIN = 77,
/**
- * 100: High video codec profile. Generally used in high-resolution broadcasts or television.
+ * 100: (Default) High video codec profile; generally used in high-resolution live streaming or
+ * television.
*/
VIDEO_CODEC_PROFILE_HIGH = 100,
};
/**
- * Self-defined audio codec profile.
+ * @brief Self-defined audio codec profile.
*/
enum AUDIO_CODEC_PROFILE_TYPE {
/**
- * 0: LC-AAC.
+ * 0: (Default) LC-AAC.
*/
AUDIO_CODEC_PROFILE_LC_AAC = 0,
/**
@@ -3942,13 +4154,13 @@ enum AUDIO_CODEC_PROFILE_TYPE {
*/
AUDIO_CODEC_PROFILE_HE_AAC = 1,
/**
- * 2: HE-AAC v2.
+ * 2: HE-AAC v2.
*/
AUDIO_CODEC_PROFILE_HE_AAC_V2 = 2,
};
/**
- * Local audio statistics.
+ * @brief Local audio statistics.
*/
struct LocalAudioStats {
/**
@@ -3973,7 +4185,7 @@ struct LocalAudioStats {
*/
unsigned short txPacketLossRate;
/**
- * The audio delay of the device, contains record and playout delay
+ * The audio device module delay (ms) when playing or recording audio.
*/
int audioDeviceDelay;
/**
@@ -3981,27 +4193,26 @@ struct LocalAudioStats {
*/
int audioPlayoutDelay;
/**
- * The signal delay estimated from audio in-ear monitoring (ms).
+ * The ear monitor delay (ms), which is the delay from microphone input to headphone output.
*/
int earMonitorDelay;
/**
- * The signal delay estimated during the AEC process from nearin and farin (ms).
+ * Acoustic echo cancellation (AEC) module estimated delay (ms), which is the signal delay from
+ * when audio is played locally to when it is locally captured.
*/
int aecEstimatedDelay;
};
/**
- * States of the Media Push.
+ * @brief States of the Media Push.
*/
enum RTMP_STREAM_PUBLISH_STATE {
/**
- * 0: The Media Push has not started or has ended. This state is also triggered after you remove a
- * RTMP or RTMPS stream from the CDN by calling `removePublishStreamUrl`.
+ * 0: The Media Push has not started or has ended.
*/
RTMP_STREAM_PUBLISH_STATE_IDLE = 0,
/**
- * 1: The SDK is connecting to Agora's streaming server and the CDN server. This state is
- * triggered after you call the `addPublishStreamUrl` method.
+ * 1: The streaming server and CDN server are being connected.
*/
RTMP_STREAM_PUBLISH_STATE_CONNECTING = 1,
/**
@@ -4010,42 +4221,37 @@ enum RTMP_STREAM_PUBLISH_STATE {
*/
RTMP_STREAM_PUBLISH_STATE_RUNNING = 2,
/**
- * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the
- * streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this
- * state.
- * - If the SDK successfully resumes the streaming, #RTMP_STREAM_PUBLISH_STATE_RUNNING (2)
- * returns.
+ * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the streaming
+ * is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this state.
+ * - If the SDK successfully resumes the streaming, RTMP_STREAM_PUBLISH_STATE_RUNNING (2) returns.
* - If the streaming does not resume within 60 seconds or server errors occur,
- * #RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. You can also reconnect to the server by calling
- * the `removePublishStreamUrl` and `addPublishStreamUrl` methods.
+ * RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. If you feel that 60 seconds is too long, you can
+ * also actively try to reconnect.
*/
RTMP_STREAM_PUBLISH_STATE_RECOVERING = 3,
/**
- * 4: The RTMP or RTMPS streaming fails. See the `errCode` parameter for the detailed error
- * information. You can also call the `addPublishStreamUrl` method to publish the RTMP or RTMPS
- * streaming again.
+ * 4: The RTMP or RTMPS streaming fails. After a failure, you can troubleshoot the cause of the
+ * error through the returned error code.
*/
RTMP_STREAM_PUBLISH_STATE_FAILURE = 4,
/**
- * 5: The SDK is disconnecting to Agora's streaming server and the CDN server. This state is
- * triggered after you call the `removePublishStreamUrl` method.
+ * 5: The SDK is disconnecting from the Agora streaming server and CDN. When you call
+ * `stopRtmpStream` to stop the Media Push normally, the SDK reports the Media Push state as
+ * `RTMP_STREAM_PUBLISH_STATE_DISCONNECTING` and `RTMP_STREAM_PUBLISH_STATE_IDLE` in sequence.
*/
RTMP_STREAM_PUBLISH_STATE_DISCONNECTING = 5,
};
/**
- * Error codes of the RTMP or RTMPS streaming.
+ * @brief Reasons for changes in the status of RTMP or RTMPS streaming.
*/
enum RTMP_STREAM_PUBLISH_REASON {
/**
- * 0: The RTMP or RTMPS streaming publishes successfully.
+ * 0: The RTMP or RTMPS streaming has not started or has ended.
*/
RTMP_STREAM_PUBLISH_REASON_OK = 0,
/**
- * 1: Invalid argument used. If, for example, you do not call the `setLiveTranscoding` method to
- * configure the LiveTranscoding parameters before calling the addPublishStreamUrl method, the SDK
- * returns this error. Check whether you set the parameters in the `setLiveTranscoding` method
- * properly.
+ * 1: Invalid argument used. Check the parameter setting.
*/
RTMP_STREAM_PUBLISH_REASON_INVALID_ARGUMENT = 1,
/**
@@ -4053,13 +4259,11 @@ enum RTMP_STREAM_PUBLISH_REASON {
*/
RTMP_STREAM_PUBLISH_REASON_ENCRYPTED_STREAM_NOT_ALLOWED = 2,
/**
- * 3: Timeout for the RTMP or RTMPS streaming. Call the `addPublishStreamUrl` method to publish
- * the streaming again.
+ * 3: Timeout for the RTMP or RTMPS streaming.
*/
RTMP_STREAM_PUBLISH_REASON_CONNECTION_TIMEOUT = 3,
/**
- * 4: An error occurs in Agora's streaming server. Call the `addPublishStreamUrl` method to
- * publish the streaming again.
+ * 4: An error occurs in Agora's streaming server.
*/
RTMP_STREAM_PUBLISH_REASON_INTERNAL_SERVER_ERROR = 4,
/**
@@ -4075,7 +4279,8 @@ enum RTMP_STREAM_PUBLISH_REASON {
*/
RTMP_STREAM_PUBLISH_REASON_REACH_LIMIT = 7,
/**
- * 8: The host manipulates other hosts' URLs. Check your app logic.
+ * 8: The host manipulates other hosts' URLs. For example, the host updates or stops other hosts'
+ * streams. Check your app logic.
*/
RTMP_STREAM_PUBLISH_REASON_NOT_AUTHORIZED = 8,
/**
@@ -4083,20 +4288,19 @@ enum RTMP_STREAM_PUBLISH_REASON {
*/
RTMP_STREAM_PUBLISH_REASON_STREAM_NOT_FOUND = 9,
/**
- * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL
- * format is correct.
+ * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL format
+ * is correct.
*/
RTMP_STREAM_PUBLISH_REASON_FORMAT_NOT_SUPPORTED = 10,
/**
- * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check
- * your application code logic.
+ * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check your
+ * application code logic.
*/
RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER =
11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h
/**
- * 13: The `updateRtmpTranscoding` or `setLiveTranscoding` method is called to update the
- * transcoding configuration in a scenario where there is streaming without transcoding. Check
- * your application code logic.
+ * 13: The `updateRtmpTranscoding` method is called to update the transcoding configuration in a
+ * scenario where there is streaming without transcoding. Check your application code logic.
*/
RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM =
13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h
@@ -4109,25 +4313,28 @@ enum RTMP_STREAM_PUBLISH_REASON {
*/
RTMP_STREAM_PUBLISH_REASON_INVALID_APPID =
15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h
- /** invalid privilege. */
+ /**
+ * 16: Your project does not have permission to use streaming services.
+ */
RTMP_STREAM_PUBLISH_REASON_INVALID_PRIVILEGE = 16,
/**
- * 100: The streaming has been stopped normally. After you call `removePublishStreamUrl` to stop
- * streaming, the SDK returns this value.
+ * 100: The streaming has been stopped normally. After you stop the Media Push, the SDK returns this
+ * value.
*/
RTMP_STREAM_UNPUBLISH_REASON_OK = 100,
};
-/** Events during the RTMP or RTMPS streaming. */
+/**
+ * @brief Events during the Media Push.
+ */
enum RTMP_STREAMING_EVENT {
/**
- * 1: An error occurs when you add a background image or a watermark image to the RTMP or RTMPS
- * stream.
+ * 1: An error occurs when you add a background image or a watermark image in the Media Push.
*/
RTMP_STREAMING_EVENT_FAILED_LOAD_IMAGE = 1,
/**
- * 2: The streaming URL is already being used for CDN live streaming. If you want to start new
- * streaming, use a new streaming URL.
+ * 2: The streaming URL is already being used for Media Push. If you want to start new streaming,
+ * use a new streaming URL.
*/
RTMP_STREAMING_EVENT_URL_ALREADY_IN_USE = 2,
/**
@@ -4135,48 +4342,52 @@ enum RTMP_STREAMING_EVENT {
*/
RTMP_STREAMING_EVENT_ADVANCED_FEATURE_NOT_SUPPORT = 3,
/**
- * 4: Client request too frequently.
+ * 4: Reserved.
*/
RTMP_STREAMING_EVENT_REQUEST_TOO_OFTEN = 4,
};
/**
- * Image properties.
+ * @brief Image properties.
+ *
+ * @details
+ * This class sets the properties of the watermark and background images in the live video.
+ *
*/
typedef struct RtcImage {
/**
- *The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter
- *is 1024 bytes.
+ * The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter
+ * is 1024 bytes.
*/
const char* url;
/**
- * The x coordinate (pixel) of the image on the video frame (taking the upper left corner of the
- * video frame as the origin).
+ * The x-coordinate (px) of the image on the video frame (taking the upper left corner of the video
+ * frame as the origin).
*/
int x;
/**
- * The y coordinate (pixel) of the image on the video frame (taking the upper left corner of the
- * video frame as the origin).
+ * The y-coordinate (px) of the image on the video frame (taking the upper left corner of the video
+ * frame as the origin).
*/
int y;
/**
- * The width (pixel) of the image on the video frame.
+ * The width (px) of the image on the video frame.
*/
int width;
/**
- * The height (pixel) of the image on the video frame.
+ * The height (px) of the image on the video frame.
*/
int height;
/**
- * The layer index of the watermark or background image. When you use the watermark array to add
- * a watermark or multiple watermarks, you must pass a value to `zOrder` in the range [1,255];
- * otherwise, the SDK reports an error. In other cases, zOrder can optionally be passed in the
+ * The layer index of the watermark or background image. When you use the watermark array to add a
+ * watermark or multiple watermarks, you must pass a value to `zOrder` in the range [1,255];
+ * otherwise, the SDK reports an error. In other cases, `zOrder` can optionally be passed in the
* range [0,255], with 0 being the default value. 0 means the bottom layer and 255 means the top
* layer.
*/
int zOrder;
- /** The transparency level of the image. The value ranges between 0.0 and 1.0:
- *
+ /**
+ * The transparency of the watermark or background image. The range of the value is [0.0,1.0]:
* - 0.0: Completely transparent.
* - 1.0: (Default) Opaque.
*/
@@ -4185,10 +4396,12 @@ typedef struct RtcImage {
RtcImage() : url(OPTIONAL_NULLPTR), x(0), y(0), width(0), height(0), zOrder(0), alpha(1.0) {}
} RtcImage;
/**
- * The configuration for advanced features of the RTMP or RTMPS streaming with transcoding.
+ * @brief The configuration for advanced features of the RTMP or RTMPS streaming with transcoding.
*
+ * @details
* If you want to enable the advanced features of streaming with transcoding, contact
- * support@agora.io.
+ * `support@agora.io`.
+ *
*/
struct LiveStreamAdvancedFeature {
LiveStreamAdvancedFeature() : featureName(OPTIONAL_NULLPTR), opened(false) {}
@@ -4207,63 +4420,66 @@ struct LiveStreamAdvancedFeature {
/**
* Whether to enable the advanced features of streaming with transcoding:
- * - `true`: Enable the advanced feature.
- * - `false`: (Default) Disable the advanced feature.
+ * - `true`: Enable the advanced features.
+ * - `false`: (Default) Do not enable the advanced features.
*/
bool opened;
};
/**
- * Connection state types.
+ * @brief Connection states.
*/
enum CONNECTION_STATE_TYPE {
/**
* 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of
* the following phases:
- * - The initial state before calling the `joinChannel` method.
- * - The app calls the `leaveChannel` method.
+ * - The initial state before calling the `joinChannel(const char* token, const char* channelId,
+ * uid_t uid, const ChannelMediaOptions& options)` method.
+ * - The app calls the `leaveChannel()` method.
*/
CONNECTION_STATE_DISCONNECTED = 1,
/**
* 2: The SDK is connecting to the Agora edge server. This state indicates that the SDK is
- * establishing a connection with the specified channel after the app calls `joinChannel`.
- * - If the SDK successfully joins the channel, it triggers the `onConnectionStateChanged`
- * callback and the connection state switches to `CONNECTION_STATE_CONNECTED`.
+ * establishing a connection with the specified channel after the app calls `joinChannel(const char*
+ * token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)`.
+ * - If the SDK successfully joins the channel, it triggers the `onConnectionStateChanged` callback
+ * and the connection state switches to CONNECTION_STATE_CONNECTED.
* - After the connection is established, the SDK also initializes the media and triggers
* `onJoinChannelSuccess` when everything is ready.
*/
CONNECTION_STATE_CONNECTING = 2,
/**
- * 3: The SDK is connected to the Agora edge server. This state also indicates that the user
- * has joined a channel and can now publish or subscribe to a media stream in the channel.
- * If the connection to the Agora edge server is lost because, for example, the network is down
- * or switched, the SDK automatically tries to reconnect and triggers `onConnectionStateChanged`
- * that indicates the connection state switches to `CONNECTION_STATE_RECONNECTING`.
+ * 3: The SDK is connected to the Agora edge server. This state also indicates that the user has
+ * joined a channel and can now publish or subscribe to a media stream in the channel. If the
+ * connection to the channel is lost because, for example, the network is down or switched, the
+ * SDK automatically tries to reconnect and triggers `onConnectionStateChanged` callback, notifying
+ * that the current network state becomes CONNECTION_STATE_RECONNECTING.
*/
CONNECTION_STATE_CONNECTED = 3,
/**
- * 4: The SDK keeps reconnecting to the Agora edge server. The SDK keeps rejoining the channel
- * after being disconnected from a joined channel because of network issues.
- * - If the SDK cannot rejoin the channel within 10 seconds, it triggers `onConnectionLost`,
- * stays in the `CONNECTION_STATE_RECONNECTING` state, and keeps rejoining the channel.
- * - If the SDK fails to rejoin the channel 20 minutes after being disconnected from the Agora
- * edge server, the SDK triggers the `onConnectionStateChanged` callback, switches to the
- * `CONNECTION_STATE_FAILED` state, and stops rejoining the channel.
+ * 4: The SDK keeps reconnecting to the Agora edge server. The SDK keeps rejoining the channel after
+ * being disconnected from a joined channel because of network issues.
+ * - If the SDK cannot rejoin the channel within 10 seconds, it triggers `onConnectionLost`, stays
+ * in the CONNECTION_STATE_RECONNECTING state, and keeps rejoining the channel.
+ * - If the SDK fails to rejoin the channel 20 minutes after being disconnected from the Agora edge
+ * server, the SDK triggers the `onConnectionStateChanged` callback, switches to the
+ * CONNECTION_STATE_FAILED state, and stops rejoining the channel.
*/
CONNECTION_STATE_RECONNECTING = 4,
/**
* 5: The SDK fails to connect to the Agora edge server or join the channel. This state indicates
- * that the SDK stops trying to rejoin the channel. You must call `leaveChannel` to leave the
+ * that the SDK stops trying to rejoin the channel. You must call `leaveChannel()` to leave the
* channel.
- * - You can call `joinChannel` to rejoin the channel.
- * - If the SDK is banned from joining the channel by the Agora edge server through the RESTful
- * API, the SDK triggers the `onConnectionStateChanged` callback.
+ * - You can call `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to rejoin the channel.
+ * - If the SDK is banned from joining the channel by the Agora edge server through the RESTful API,
+ * the SDK triggers the `onConnectionStateChanged` callback.
*/
CONNECTION_STATE_FAILED = 5,
};
/**
- * Transcoding configurations of each host.
+ * @brief Transcoding configurations of each host.
*/
struct TranscodingUser {
/**
@@ -4294,12 +4510,13 @@ struct TranscodingUser {
* The layer index number of the host's video. The value range is [0, 100].
* - 0: (Default) The host's video is the bottom layer.
* - 100: The host's video is the top layer.
- *
- * If the value is beyond this range, the SDK reports the error code `ERR_INVALID_ARGUMENT`.
+ * @note
+ * - If the value is less than 0 or greater than 100, `ERR_INVALID_ARGUMENT` error is returned.
+ * - Setting zOrder to 0 is supported.
*/
int zOrder;
/**
- * The transparency of the host's video. The value range is [0.0, 1.0].
+ * The transparency of the host's video. The value range is [0.0,1.0].
* - 0.0: Completely transparent.
* - 1.0: (Default) Opaque.
*/
@@ -4307,8 +4524,8 @@ struct TranscodingUser {
/**
* The audio channel used by the host's audio in the output audio. The default value is 0, and the
* value range is [0, 5].
- * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on
- * the upstream of the host.
+ * - `0`: (Recommended) The default setting, which supports dual channels at most and depends on the
+ * upstream of the host.
* - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio
* channels, the Agora server mixes them into mono first.
* - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio
@@ -4319,9 +4536,8 @@ struct TranscodingUser {
* channels, the Agora server mixes them into mono first.
* - `5`: The host's audio uses the BR audio channel. If the host's upstream uses multiple audio
* channels, the Agora server mixes them into mono first.
- * - `0xFF` or a value greater than 5: The host's audio is muted, and the Agora server removes the
+ * - `0xFF` or a value greater than `5`: The host's audio is muted, and the Agora server removes the
* host's audio.
- *
* @note If the value is not `0`, a special player is required.
*/
int audioChannel;
@@ -4331,108 +4547,125 @@ struct TranscodingUser {
};
/**
- * Transcoding configurations for Media Push.
+ * @brief Transcoding configurations for Media Push.
*/
struct LiveTranscoding {
- /** The width of the video in pixels. The default value is 360.
- * - When pushing video streams to the CDN, the value range of `width` is [64,1920].
- * If the value is less than 64, Agora server automatically adjusts it to 64; if the
- * value is greater than 1920, Agora server automatically adjusts it to 1920.
+ /**
+ * The width of the video in pixels. The default value is 360.
+ * - When pushing video streams to the CDN, the value range of `width` is [64,1920]. If the value is
+ * less than 64, Agora server automatically adjusts it to 64; if the value is greater than 1920,
+ * Agora server automatically adjusts it to 1920.
* - When pushing audio streams to the CDN, set `width` and `height` as 0.
*/
int width;
- /** The height of the video in pixels. The default value is 640.
- * - When pushing video streams to the CDN, the value range of `height` is [64,1080].
- * If the value is less than 64, Agora server automatically adjusts it to 64; if the
- * value is greater than 1080, Agora server automatically adjusts it to 1080.
+ /**
+ * The height of the video in pixels. The default value is 640.
+ * - When pushing video streams to the CDN, the value range of `height` is [64,1080]. If the value
+ * is less than 64, Agora server automatically adjusts it to 64; if the value is greater than 1080,
+ * Agora server automatically adjusts it to 1080.
* - When pushing audio streams to the CDN, set `width` and `height` as 0.
*/
int height;
- /** Bitrate of the CDN live output video stream. The default value is 400 Kbps.
-
- Set this parameter according to the Video Bitrate Table. If you set a bitrate beyond the proper
- range, the SDK automatically adapts it to a value within the range.
- */
+ /**
+ * The encoding bitrate (Kbps) of the video. This parameter does not need to be set; keeping the
+ * default value `STANDARD_BITRATE` is sufficient. The SDK automatically matches the most suitable
+ * bitrate based on the video resolution and frame rate you have set. For the correspondence between
+ * video resolution and frame rate, see `Video profile`.
+ */
int videoBitrate;
- /** Frame rate of the output video stream set for the CDN live streaming. The default value is 15
- fps, and the value range is (0,30].
-
- @note The Agora server adjusts any value over 30 to 30.
- */
+ /**
+ * Frame rate (fps) of the output video stream set for Media Push. The default value is 15. The
+ * value range is (0,30].
+ * @note The Agora server adjusts any value over 30 to 30.
+ */
int videoFramerate;
- /** **DEPRECATED** Latency mode:
-
- - true: Low latency with unassured quality.
- - false: (Default) High latency with assured quality.
+ /**
+ * Deprecated
+ * This member is deprecated.
+ * Latency mode:
+ * - `true`: Low latency with unassured quality.
+ * - `false`: (Default) High latency with assured quality.
*/
bool lowLatency;
- /** Video GOP in frames. The default value is 30 fps.
+ /**
+ * GOP (Group of Pictures) in fps of the video frames for Media Push. The default value is 30.
*/
int videoGop;
- /** Self-defined video codec profile: #VIDEO_CODEC_PROFILE_TYPE.
-
- @note If you set this parameter to other values, Agora adjusts it to the default value of 100.
- */
+ /**
+ * Video codec profile type for Media Push. Set it as 66, 77, or 100 (default). See
+ * `VIDEO_CODEC_PROFILE_TYPE` for details.
+ * @note If you set this parameter to any other value, Agora adjusts it to the default value.
+ */
VIDEO_CODEC_PROFILE_TYPE videoCodecProfile;
- /** The background color in RGB hex value. Value only. Do not include a preceeding #. For example,
+ /**
+ * The background color in RGB hex value. Value only. Do not include a preceding #. For example,
* 0xFFB6C1 (light pink). The default value is 0x000000 (black).
*/
unsigned int backgroundColor;
- /** Video codec profile types for Media Push. See VIDEO_CODEC_TYPE_FOR_STREAM. */
+ /**
+ * Video codec profile types for Media Push. See `VIDEO_CODEC_TYPE_FOR_STREAM`.
+ */
VIDEO_CODEC_TYPE_FOR_STREAM videoCodecType;
- /** The number of users in the live interactive streaming.
- * The value range is [0, 17].
+ /**
+ * The number of users in the Media Push. The value range is [0,17].
*/
unsigned int userCount;
- /** Manages the user layout configuration in the Media Push. Agora supports a maximum of 17
+ /**
+ * Manages the user layout configuration in the Media Push. Agora supports a maximum of 17
* transcoding users in a Media Push channel. See `TranscodingUser`.
*/
TranscodingUser* transcodingUsers;
- /** Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream
- to the CDN live client. Maximum length: 4096 Bytes.
-
- For more information on SEI frame, see [SEI-related questions](https://docs.agora.io/en/faq/sei).
+ /**
+ * Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream to
+ * the CDN live client. Maximum length: 4096 bytes. For more information on SEI, see SEI-related
+ * questions.
*/
const char* transcodingExtraInfo;
- /** **DEPRECATED** The metadata sent to the CDN live client.
+ /**
+ * Deprecated
+ * Obsolete and not recommended for use.
+ * The metadata sent to the CDN client.
*/
const char* metadata;
- /** The watermark on the live video. The image format needs to be PNG. See `RtcImage`.
-
- You can add one watermark, or add multiple watermarks using an array. This parameter is used with
- `watermarkCount`.
- */
+ /**
+ * The watermark on the live video. The image format needs to be PNG. See `RtcImage`.
+ * You can add one watermark, or add multiple watermarks using an array. This parameter is used with
+ * `watermarkCount`.
+ */
RtcImage* watermark;
/**
- * The number of watermarks on the live video. The total number of watermarks and background
- * images can range from 0 to 10. This parameter is used with `watermark`.
+ * The number of watermarks on the live video. The total number of watermarks and background images
+ * can range from 0 to 10. This parameter is used with `watermark`.
*/
unsigned int watermarkCount;
- /** The number of background images on the live video. The image format needs to be PNG. See
+ /**
+ * The number of background images on the live video. The image format needs to be PNG. See
* `RtcImage`.
- *
- * You can add a background image or use an array to add multiple background images. This
- * parameter is used with `backgroundImageCount`.
+ * You can add a background image or use an array to add multiple background images. This parameter
+ * is used with `backgroundImageCount`.
*/
RtcImage* backgroundImage;
/**
- * The number of background images on the live video. The total number of watermarks and
- * background images can range from 0 to 10. This parameter is used with `backgroundImage`.
+ * The number of background images on the live video. The total number of watermarks and background
+ * images can range from 0 to 10. This parameter is used with `backgroundImage`.
*/
unsigned int backgroundImageCount;
- /** The audio sampling rate (Hz) of the output media stream. See #AUDIO_SAMPLE_RATE_TYPE.
+ /**
+ * The audio sampling rate (Hz) of the output media stream. See `AUDIO_SAMPLE_RATE_TYPE`.
*/
AUDIO_SAMPLE_RATE_TYPE audioSampleRate;
- /** Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the
+ /**
+ * Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the
* highest value is 128.
*/
int audioBitrate;
- /** The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo)
+ /**
+ * The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo)
* audio channels. Special players are required if you choose 3, 4, or 5.
* - 1: (Default) Mono.
* - 2: Stereo.
@@ -4441,15 +4674,18 @@ struct LiveTranscoding {
* - 5: Five audio channels.
*/
int audioChannels;
- /** Audio codec profile type for Media Push. See #AUDIO_CODEC_PROFILE_TYPE.
+ /**
+ * Audio codec profile type for Media Push. See `AUDIO_CODEC_PROFILE_TYPE`.
*/
AUDIO_CODEC_PROFILE_TYPE audioCodecProfile;
- /** Advanced features of the RTMP or RTMPS streaming with transcoding. See
- * LiveStreamAdvancedFeature.
+ /**
+ * Advanced features of the Media Push with transcoding. See `LiveStreamAdvancedFeature`.
*/
LiveStreamAdvancedFeature* advancedFeatures;
- /** The number of enabled advanced features. The default value is 0. */
+ /**
+ * The number of enabled advanced features. The default value is 0.
+ */
unsigned int advancedFeatureCount;
LiveTranscoding()
@@ -4479,65 +4715,66 @@ struct LiveTranscoding {
};
/**
- * The video streams for the video mixing on the local client.
+ * @brief The video streams for local video mixing.
*/
struct TranscodingVideoStream {
/**
- * The source type of video for the video mixing on the local client. See #VIDEO_SOURCE_TYPE.
+ * The video source type for local video mixing. See `VIDEO_SOURCE_TYPE`.
*/
VIDEO_SOURCE_TYPE sourceType;
/**
- * The ID of the remote user.
- * @note Use this parameter only when the source type of the video for the video mixing on the
- * local client is `VIDEO_SOURCE_REMOTE`.
+ * The user ID of the remote user.
+ * @note Use this parameter only when the source type is `VIDEO_SOURCE_REMOTE` for local video
+ * mixing.
*/
uid_t remoteUserUid;
/**
- * The URL of the image.
- * @note Use this parameter only when the source type of the video for the video mixing on the
- * local client is `RTC_IMAGE`.
+ * The file path of local images.
+ * Examples:
+ * - Windows: `C:\\Users\\{username}\\Pictures\\image.png`
+ * @note Use this parameter only when the source type is the image for local video mixing.
*/
const char* imageUrl;
/**
- * MediaPlayer id if sourceType is MEDIA_PLAYER_SOURCE.
+ * (Optional) Media player ID. Use the parameter only when you set `sourceType` to
+ * `VIDEO_SOURCE_MEDIA_PLAYER`.
*/
int mediaPlayerId;
/**
- * The horizontal displacement of the top-left corner of the video for the video mixing on the
- * client relative to the top-left corner (origin) of the canvas for this video mixing.
+ * The relative lateral displacement of the top left corner of the video for local video mixing to
+ * the origin (the top left corner of the canvas).
*/
int x;
/**
- * The vertical displacement of the top-left corner of the video for the video mixing on the
- * client relative to the top-left corner (origin) of the canvas for this video mixing.
+ * The relative longitudinal displacement of the top left corner of the captured video to the origin
+ * (the top left corner of the canvas).
*/
int y;
/**
- * The width (px) of the video for the video mixing on the local client.
+ * The width (px) of the video for local video mixing on the canvas.
*/
int width;
/**
- * The height (px) of the video for the video mixing on the local client.
+ * The height (px) of the video for local video mixing on the canvas.
*/
int height;
/**
- * The number of the layer to which the video for the video mixing on the local client belongs.
- * The value range is [0,100].
+ * The number of the layer to which the video for the local video mixing belongs. The value range is
+ * [0, 100].
* - 0: (Default) The layer is at the bottom.
* - 100: The layer is at the top.
*/
int zOrder;
/**
- * The transparency of the video for the video mixing on the local client. The value range is
- * [0.0,1.0]. 0.0 means the transparency is completely transparent. 1.0 means the transparency is
- * opaque.
+ * The transparency of the video for local video mixing. The value range is [0.0, 1.0]. 0.0
+ * indicates that the video is completely transparent, and 1.0 indicates that it is opaque.
*/
double alpha;
/**
- * Whether to mirror the video for the video mixing on the local client.
- * - true: Mirroring.
- * - false: (Default) Do not mirror.
- * @note The paramter only works for videos with the source type `CAMERA`.
+ * Whether to mirror the video for the local video mixing.
+ * - `true`: Mirror the video for the local video mixing.
+ * - `false`: (Default) Do not mirror the video for the local video mixing.
+ * @note This parameter only takes effect on video source types that are cameras.
*/
bool mirror;
@@ -4555,7 +4792,7 @@ struct TranscodingVideoStream {
};
/**
- * The configuration of the video mixing on the local client.
+ * @brief The configuration of the video mixing on the local client.
*/
struct LocalTranscoderConfiguration {
/**
@@ -4563,12 +4800,12 @@ struct LocalTranscoderConfiguration {
*/
unsigned int streamCount;
/**
- * The video streams for the video mixing on the local client. See TranscodingVideoStream.
+ * The video streams for local video mixing. See `TranscodingVideoStream`.
*/
TranscodingVideoStream* videoInputStreams;
/**
- * The encoding configuration of the mixed video stream after the video mixing on the local
- * client. See VideoEncoderConfiguration.
+ * The encoding configuration of the mixed video stream after the local video mixing. See
+ * `VideoEncoderConfiguration`.
*/
VideoEncoderConfiguration videoOutputConfiguration;
/**
@@ -4588,55 +4825,71 @@ struct LocalTranscoderConfiguration {
syncWithPrimaryCamera(true) {}
};
+/**
+ * @brief The error code of the local video mixing failure.
+ */
enum VIDEO_TRANSCODER_ERROR {
/**
- * The video track of the video source is not started.
+ * 1: The selected video source has not started video capture. You need to create a video track for
+ * it and start video capture.
*/
VT_ERR_VIDEO_SOURCE_NOT_READY = 1,
/**
- * The video source type is not supported.
+ * 2: The video source type is invalid. You need to re-specify the supported video source type.
*/
VT_ERR_INVALID_VIDEO_SOURCE_TYPE = 2,
/**
- * The image url is not correctly of image source.
+ * 3: The image path is invalid. You need to re-specify the correct image path.
*/
VT_ERR_INVALID_IMAGE_PATH = 3,
/**
- * The image format not the type png/jpeg/gif of image source.
+ * 4: The image format is invalid. Make sure the image format is one of PNG, JPEG, or GIF.
*/
VT_ERR_UNSUPPORT_IMAGE_FORMAT = 4,
/**
- * The layout is invalid such as width is zero.
+ * 5: The video encoding resolution after video mixing is invalid.
*/
VT_ERR_INVALID_LAYOUT = 5,
/**
- * Internal error.
+ * 20: Unknown internal error.
*/
VT_ERR_INTERNAL = 20
};
/**
- * The audio streams for the video mixing on the local client.
+ * @brief The source of the audio streams that are mixed locally.
*/
struct MixedAudioStream {
/**
- * The source type of audio for the audio mixing on the local client. See #AUDIO_SOURCE_TYPE.
+ * The type of the audio source. See `AUDIO_SOURCE_TYPE`.
*/
AUDIO_SOURCE_TYPE sourceType;
/**
- * The ID of the remote user.
- * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`.
+ * The user ID of the remote user.
+ * @note Set this parameter if the source type of the locally mixed audio streams is
+ * AUDIO_SOURCE_REMOTE_USER.
*/
uid_t remoteUserUid;
/**
- * The channel ID of the remote user.
- * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`.
+ * The channel name. This parameter signifies the channel in which users engage in real-time audio
+ * and video interaction. Under the premise of the same App ID, users who fill in the same channel
+ * ID enter the same channel for audio and video interaction. The string length must be less than 64
+ * bytes. Supported characters (89 characters in total):
+ * - All lowercase English letters: a to z.
+ * - All uppercase English letters: A to Z.
+ * - All numeric characters: 0 to 9.
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
+ * @note Set this parameter if the source type of the locally mixed audio streams is
+ * AUDIO_SOURCE_REMOTE_CHANNEL or AUDIO_SOURCE_REMOTE_USER.
*/
const char* channelId;
/**
- * The track ID of the local track.
- * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`.
+ * The audio track ID. Set this parameter to the custom audio track ID returned in
+ * `createCustomAudioTrack`.
+ * @note Set this parameter if the source type of the locally mixed audio streams is
+ * AUDIO_SOURCE_CUSTOM.
*/
track_id_t trackId;
@@ -4664,22 +4917,24 @@ struct MixedAudioStream {
};
/**
- * The configuration of the audio mixing on the local client.
+ * @brief The configurations for mixing the local audio.
*/
struct LocalAudioMixerConfiguration {
/**
- * The number of the audio streams for the audio mixing on the local client.
+ * The number of the audio streams that are mixed locally.
*/
unsigned int streamCount;
/**
- * The source of the streams to mixed;
- */
+ * The source of the audio streams that are mixed locally. See `MixedAudioStream`.
+ */
MixedAudioStream* audioInputStreams;
/**
- * Whether to use the timestamp follow the local mic's audio frame.
- * - true: (Default) Use the timestamp of the captured audio frame as the timestamp of the mixed audio frame.
- * - false: Do not use the timestamp of the captured audio frame as the timestamp of the mixed audio frame. Instead, use the timestamp when the mixed audio frame is constructed.
+ * Whether the mixed audio stream uses the timestamp of the audio frames captured by the local
+ * microphone.
+ * - `true`: (Default) Yes. Set to this value if you want all locally captured audio streams
+ * synchronized.
+ * - `false`: No. The SDK uses the timestamp of the audio frames at the time when they are mixed.
*/
bool syncWithLocalMic;
@@ -4687,37 +4942,36 @@ struct LocalAudioMixerConfiguration {
};
/**
- * Configurations of the last-mile network test.
+ * @brief Configurations of the last-mile network test.
*/
struct LastmileProbeConfig {
/**
- * Determines whether to test the uplink network. Some users, for example,
- * the audience in a live broadcast channel, do not need such a test:
- * - true: Test.
- * - false: Do not test.
+ * Sets whether to test the uplink network. Some users, for example, the audience members in a
+ * LIVE_BROADCASTING channel, do not need such a test.
+ * - `true`: Test the uplink network.
+ * - `false`: Do not test the uplink network.
*/
bool probeUplink;
/**
- * Determines whether to test the downlink network:
- * - true: Test.
- * - false: Do not test.
+ * Sets whether to test the downlink network:
+ * - `true`: Test the downlink network.
+ * - `false`: Do not test the downlink network.
*/
bool probeDownlink;
/**
- * The expected maximum sending bitrate (bps) of the local user. The value range is [100000,
- * 5000000]. We recommend setting this parameter according to the bitrate value set by
- * `setVideoEncoderConfiguration`.
+ * The expected maximum uplink bitrate (bps) of the local user. The value range is [100000,
+ * 5000000]. Agora recommends referring to `setVideoEncoderConfiguration` to set the value.
*/
unsigned int expectedUplinkBitrate;
/**
- * The expected maximum receiving bitrate (bps) of the local user. The value range is
+ * The expected maximum downlink bitrate (bps) of the local user. The value range is
* [100000,5000000].
*/
unsigned int expectedDownlinkBitrate;
};
/**
- * The status of the last-mile network tests.
+ * @brief The status of the last-mile probe test.
*/
enum LASTMILE_PROBE_RESULT_STATE {
/**
@@ -4726,18 +4980,18 @@ enum LASTMILE_PROBE_RESULT_STATE {
LASTMILE_PROBE_RESULT_COMPLETE = 1,
/**
* 2: The last-mile network probe test is incomplete because the bandwidth estimation is not
- * available due to limited test resources.
+ * available due to limited test resources. One possible reason is that testing resources are
+ * temporarily limited.
*/
LASTMILE_PROBE_RESULT_INCOMPLETE_NO_BWE = 2,
/**
- * 3: The last-mile network probe test is not carried out, probably due to poor network
- * conditions.
+ * 3: The last-mile network probe test is not carried out, probably due to poor network conditions.
*/
LASTMILE_PROBE_RESULT_UNAVAILABLE = 3
};
/**
- * Results of the uplink or downlink last-mile network test.
+ * @brief Results of the uplink or downlink last-mile network test.
*/
struct LastmileProbeOneWayResult {
/**
@@ -4757,19 +5011,19 @@ struct LastmileProbeOneWayResult {
};
/**
- * Results of the uplink and downlink last-mile network tests.
+ * @brief Results of the uplink and downlink last-mile network tests.
*/
struct LastmileProbeResult {
/**
- * The status of the last-mile network tests. See #LASTMILE_PROBE_RESULT_STATE.
+ * The status of the last-mile network tests. See `LASTMILE_PROBE_RESULT_STATE`.
*/
LASTMILE_PROBE_RESULT_STATE state;
/**
- * Results of the uplink last-mile network test. For details, see LastmileProbeOneWayResult.
+ * Results of the uplink last-mile network test. See `LastmileProbeOneWayResult`.
*/
LastmileProbeOneWayResult uplinkReport;
/**
- * Results of the downlink last-mile network test. For details, see LastmileProbeOneWayResult.
+ * Results of the downlink last-mile network test. See `LastmileProbeOneWayResult`.
*/
LastmileProbeOneWayResult downlinkReport;
/**
@@ -4781,11 +5035,11 @@ struct LastmileProbeResult {
};
/**
- * Reasons causing the change of the connection state.
+ * @brief Reasons causing the change of the connection state.
*/
enum CONNECTION_CHANGED_REASON_TYPE {
/**
- * 0: The SDK is connecting to the server.
+ * 0: The SDK is connecting to the Agora edge server.
*/
CONNECTION_CHANGED_CONNECTING = 0,
/**
@@ -4793,17 +5047,18 @@ enum CONNECTION_CHANGED_REASON_TYPE {
*/
CONNECTION_CHANGED_JOIN_SUCCESS = 1,
/**
- * 2: The connection between the SDK and the server is interrupted.
+ * 2: The connection between the SDK and the Agora edge server is interrupted.
*/
CONNECTION_CHANGED_INTERRUPTED = 2,
/**
- * 3: The connection between the SDK and the server is banned by the server. This error occurs
- * when the user is kicked out of the channel by the server.
+ * 3: The connection between the SDK and the Agora edge server is banned by the Agora edge server.
+ * For example, when a user is kicked out of the channel, this status will be returned.
*/
CONNECTION_CHANGED_BANNED_BY_SERVER = 3,
/**
* 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20
- * minutes, this error occurs and the SDK stops reconnecting to the channel.
+ * minutes, this code will be returned and the SDK stops reconnecting to the channel. You need to
+ * prompt the user to try to switch to another network and rejoin the channel.
*/
CONNECTION_CHANGED_JOIN_FAILED = 4,
/**
@@ -4811,37 +5066,51 @@ enum CONNECTION_CHANGED_REASON_TYPE {
*/
CONNECTION_CHANGED_LEAVE_CHANNEL = 5,
/**
- * 6: The connection fails because the App ID is not valid.
+ * 6: The App ID is invalid. You need to rejoin the channel with a valid APP ID and make sure the
+ * App ID you are using is consistent with the one generated in the Agora Console.
*/
CONNECTION_CHANGED_INVALID_APP_ID = 6,
/**
- * 7: The connection fails because the channel name is not valid. Please rejoin the channel with a
- * valid channel name.
+ * 7: Invalid channel name. Rejoin the channel with a valid channel name. A valid channel name is a
+ * string of up to 64 bytes in length. Supported characters (89 characters in total):
+ * - All lowercase English letters: a to z.
+ * - All uppercase English letters: A to Z.
+ * - All numeric characters: 0 to 9.
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
*/
CONNECTION_CHANGED_INVALID_CHANNEL_NAME = 7,
/**
- * 8: The connection fails because the token is not valid. Typical reasons include:
- * - The App Certificate for the project is enabled in Agora Console, but you do not use a token
- * when joining the channel. If you enable the App Certificate, you must use a token to join the
- * channel.
- * - The `uid` specified when calling `joinChannel` to join the channel is inconsistent with the
- * `uid` passed in when generating the token.
+ * 8: Invalid token. Possible reasons are as follows:
+ * - The App Certificate for the project is enabled in Agora Console, but you do not pass in a token
+ * when joining a channel.
+ * - The uid specified when calling `joinChannel(const char* token, const char* channelId, uid_t
+ * uid, const ChannelMediaOptions& options)` to join the channel is inconsistent with the
+ * uid passed in when generating the token.
+ * - The generated token and the token used to join the channel are not consistent.
+ * Ensure the following:
+ * - When your project enables App Certificate, you need to pass in a token to join a channel.
+ * - The user ID specified when generating the token is consistent with the user ID used when
+ * joining the channel.
+ * - The generated token is the same as the token passed in to join the channel.
*/
CONNECTION_CHANGED_INVALID_TOKEN = 8,
/**
- * 9: The connection fails because the token has expired.
+ * 9: The token currently being used has expired. You need to generate a new token on your server
+ * and rejoin the channel with the new token.
*/
CONNECTION_CHANGED_TOKEN_EXPIRED = 9,
/**
- * 10: The connection is rejected by the server. Typical reasons include:
- * - The user is already in the channel and still calls a method, for example, `joinChannel`, to
- * join the channel. Stop calling this method to clear this error.
- * - The user tries to join the channel when conducting a pre-call test. The user needs to call
- * the channel after the call test ends.
+ * 10: The connection is rejected by server. Possible reasons are as follows:
+ * - The user is already in the channel and still calls a method, for example, `joinChannel(const
+ * char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)`,
+ * to join the channel. Stop calling this method to clear this error.
+ * - The user tries to join a channel while a test call is in progress. The user needs to join the
+ * channel after the call test ends.
*/
CONNECTION_CHANGED_REJECTED_BY_SERVER = 10,
/**
- * 11: The connection changes to reconnecting because the SDK has set a proxy server.
+ * 11: The connection state changed to reconnecting because the SDK has set a proxy server.
*/
CONNECTION_CHANGED_SETTING_PROXY_SERVER = 11,
/**
@@ -4849,17 +5118,17 @@ enum CONNECTION_CHANGED_REASON_TYPE {
*/
CONNECTION_CHANGED_RENEW_TOKEN = 12,
/**
- * 13: The IP address of the client has changed, possibly because the network type, IP address, or
- * port has been changed.
+ * 13: Client IP address changed. If you receive this code multiple times, you need to prompt the
+ * user to switch networks and try joining the channel again.
*/
CONNECTION_CHANGED_CLIENT_IP_ADDRESS_CHANGED = 13,
/**
* 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The
- * connection state changes to CONNECTION_STATE_RECONNECTING.
+ * SDK tries to reconnect to the server automatically.
*/
CONNECTION_CHANGED_KEEP_ALIVE_TIMEOUT = 14,
/**
- * 15: The SDK has rejoined the channel successfully.
+ * 15: The user has rejoined the channel successfully.
*/
CONNECTION_CHANGED_REJOIN_SUCCESS = 15,
/**
@@ -4867,19 +5136,19 @@ enum CONNECTION_CHANGED_REASON_TYPE {
*/
CONNECTION_CHANGED_LOST = 16,
/**
- * 17: The change of connection state is caused by echo test.
+ * 17: The connection state changes due to the echo test.
*/
CONNECTION_CHANGED_ECHO_TEST = 17,
/**
- * 18: The local IP Address is changed by user.
+ * 18: The local IP address was changed by the user.
*/
CONNECTION_CHANGED_CLIENT_IP_ADDRESS_CHANGED_BY_USER = 18,
/**
- * 19: The connection is failed due to join the same channel on another device with the same uid.
+ * 19: The user joined the same channel from different devices with the same UID.
*/
CONNECTION_CHANGED_SAME_UID_LOGIN = 19,
/**
- * 20: The connection is failed due to too many broadcasters in the channel.
+ * 20: The number of hosts in the channel has reached the upper limit.
*/
CONNECTION_CHANGED_TOO_MANY_BROADCASTERS = 20,
@@ -4902,31 +5171,37 @@ enum CONNECTION_CHANGED_REASON_TYPE {
};
/**
- * The reason of changing role's failure.
+ * @brief The reason for a user role switch failure.
*/
enum CLIENT_ROLE_CHANGE_FAILED_REASON {
/**
- * 1: Too many broadcasters in the channel.
+ * 1: The number of hosts in the channel exceeds the limit.
+ * @note This enumerator is reported only when the support for 128 users is enabled. The maximum
+ * number of hosts is based on the actual number of hosts configured when you enable the 128-user
+ * feature.
*/
CLIENT_ROLE_CHANGE_FAILED_TOO_MANY_BROADCASTERS = 1,
/**
- * 2: The operation of changing role is not authorized.
+ * 2: The request is rejected by the Agora server. Agora recommends you prompt the user to try to
+ * switch their user role again.
*/
CLIENT_ROLE_CHANGE_FAILED_NOT_AUTHORIZED = 2,
/**
- * 3: The operation of changing role is timeout.
+ * 3: The request is timed out. Agora recommends you prompt the user to check the network connection
+ * and try to switch their user role again.
* @deprecated This reason is deprecated.
*/
CLIENT_ROLE_CHANGE_FAILED_REQUEST_TIME_OUT __deprecated = 3,
/**
- * 4: The operation of changing role is interrupted since we lost connection with agora service.
+ * 4: The SDK is disconnected from the Agora edge server. You can troubleshoot the failure through
+ * the `reason` reported by `onConnectionStateChanged`.
* @deprecated This reason is deprecated.
*/
CLIENT_ROLE_CHANGE_FAILED_CONNECTION_FAILED __deprecated = 4,
};
/**
- * The network type.
+ * @brief Network type.
*/
enum NETWORK_TYPE {
/**
@@ -4964,91 +5239,95 @@ enum NETWORK_TYPE {
};
/**
- * The mode of setting up video views.
+ * @brief Setting mode of the view.
*/
enum VIDEO_VIEW_SETUP_MODE {
/**
- * 0: replace one view
+ * 0: (Default) Clear all added views and replace with a new view.
*/
VIDEO_VIEW_SETUP_REPLACE = 0,
/**
- * 1: add one view
+ * 1: Adds a view.
*/
VIDEO_VIEW_SETUP_ADD = 1,
/**
- * 2: remove one view
+ * 2: Deletes a view.
+ * @note When you no longer need to use a certain view, it is recommended to delete the view by
+ * setting `setupMode` to VIDEO_VIEW_SETUP_REMOVE, otherwise it may lead to leak of rendering
+ * resources.
*/
VIDEO_VIEW_SETUP_REMOVE = 2,
};
/**
- * Attributes of video canvas object.
+ * @brief Attributes of the video canvas object.
*/
struct VideoCanvas {
/**
- * The user id of local video.
+ * User ID that publishes the video source.
*/
uid_t uid;
/**
- * The uid of video stream composing the video stream from transcoder which will be drawn on this
- * video canvas.
+ * The ID of the user who publishes a specific sub-video stream within the mixed video stream.
*/
uid_t subviewUid;
/**
- * Video display window.
+ * The video display window.
+ * @note In one `VideoCanvas`, you can only choose to set either `view` or `surfaceTexture`. If both
+ * are set, only the settings in `view` take effect.
*/
view_t view;
/**
- * A RGBA value indicates background color of the render view. Defaults to 0x00000000.
+ * The background color of the video canvas in RGBA format. The default value is 0x00000000, which
+ * represents black.
*/
uint32_t backgroundColor;
/**
- * The video render mode. See \ref agora::media::base::RENDER_MODE_TYPE "RENDER_MODE_TYPE".
- * The default value is RENDER_MODE_HIDDEN.
+ * The rendering mode of the video. See `RENDER_MODE_TYPE`.
*/
media::base::RENDER_MODE_TYPE renderMode;
/**
- * The video mirror mode. See \ref VIDEO_MIRROR_MODE_TYPE "VIDEO_MIRROR_MODE_TYPE".
- * The default value is VIDEO_MIRROR_MODE_AUTO.
+ * The mirror mode of the view. See `VIDEO_MIRROR_MODE_TYPE`.
* @note
- * - For the mirror mode of the local video view:
- * If you use a front camera, the SDK enables the mirror mode by default;
- * if you use a rear camera, the SDK disables the mirror mode by default.
+ * - For the mirror mode of the local video view: If you use a front camera, the SDK enables the
+ * mirror mode by default; if you use a rear camera, the SDK disables the mirror mode by default.
* - For the remote user: The mirror mode is disabled by default.
*/
VIDEO_MIRROR_MODE_TYPE mirrorMode;
/**
- * The mode of setting up video view. See \ref VIDEO_VIEW_SETUP_MODE "VIDEO_VIEW_SETUP_MODE"
- * The default value is VIDEO_VIEW_SETUP_REPLACE.
+ * Setting mode of the view. See `VIDEO_VIEW_SETUP_MODE`.
*/
VIDEO_VIEW_SETUP_MODE setupMode;
/**
- * The video source type. See \ref VIDEO_SOURCE_TYPE "VIDEO_SOURCE_TYPE".
- * The default value is VIDEO_SOURCE_CAMERA_PRIMARY.
+ * The type of the video source. See `VIDEO_SOURCE_TYPE`.
*/
VIDEO_SOURCE_TYPE sourceType;
/**
- * The media player id of AgoraMediaPlayer. It should set this parameter when the
- * sourceType is VIDEO_SOURCE_MEDIA_PLAYER to show the video that AgoraMediaPlayer is playing.
- * You can get this value by calling the method \ref getMediaPlayerId().
+ * The ID of the media player. You can get the Device ID by calling `getMediaPlayerId`.
*/
int mediaPlayerId;
/**
- * If you want to display a certain part of a video frame, you can set
- * this value to crop the video frame to show.
- * The default value is empty(that is, if it has zero width or height), which means no cropping.
+ * (Optional) Display area of the video frame, see `Rectangle`. `width` and `height` represent the
+ * video pixel width and height of the area. The default value is empty (width or height is 0), which
+ * means that the actual resolution of the video frame is displayed.
*/
Rectangle cropArea;
/**
- * Whether to apply alpha mask to the video frame if exsit:
- * true: Apply alpha mask to video frame.
- * false: (Default) Do not apply alpha mask to video frame.
+ * (Optional) Whether to enable alpha mask rendering:
+ * - `true`: Enable alpha mask rendering.
+ * - `false`: (Default) Disable alpha mask rendering.
+ * Alpha mask rendering can create images with transparent effects and extract portraits from
+ * videos. When used in combination with other methods, you can implement effects such as
+ * portrait-in-picture and watermarking.
+ * @note
+ * - The receiver can render alpha channel information only when the sender enables alpha
+ * transmission.
+ * - To enable alpha transmission, contact technical support.
*/
bool enableAlphaMask;
/**
- * The video frame position in pipeline. See \ref VIDEO_MODULE_POSITION "VIDEO_MODULE_POSITION".
- * The default value is POSITION_POST_CAPTURER.
+ * The observation position of the video frame in the video link. See `VIDEO_MODULE_POSITION`.
*/
media::base::VIDEO_MODULE_POSITION position;
@@ -5110,41 +5389,55 @@ struct VideoCanvas {
position(media::base::POSITION_POST_CAPTURER) {}
};
-/** Image enhancement options.
+/**
+ * @brief Image enhancement options.
*/
struct BeautyOptions {
- /** The contrast level.
+ /**
+ * @brief The contrast level.
*/
enum LIGHTENING_CONTRAST_LEVEL {
- /** Low contrast level. */
+ /**
+ * 0: Low contrast level.
+ */
LIGHTENING_CONTRAST_LOW = 0,
- /** (Default) Normal contrast level. */
+ /**
+ * 1: (Default) Normal contrast level.
+ */
LIGHTENING_CONTRAST_NORMAL = 1,
- /** High contrast level. */
+ /**
+ * 2: High contrast level.
+ */
LIGHTENING_CONTRAST_HIGH = 2,
};
- /** The contrast level, used with the `lighteningLevel` parameter. The larger the value, the
- * greater the contrast between light and dark. See #LIGHTENING_CONTRAST_LEVEL.
+ /**
+ * The contrast level, used with the `lighteningLevel` parameter. The larger the value, the greater
+ * the contrast between light and dark. See `LIGHTENING_CONTRAST_LEVEL`.
*/
LIGHTENING_CONTRAST_LEVEL lighteningContrastLevel;
- /** The brightness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0.
- * The greater the value, the greater the degree of whitening. */
+ /**
+ * The brightening level, in the range [0.0,1.0], where 0.0 means the original brightening. The
+ * default value is 0.0. The higher the value, the greater the degree of brightening.
+ */
float lighteningLevel;
- /** The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value,
- * the greater the degree of skin grinding.
+ /**
+ * The smoothness level, in the range [0.0,1.0], where 0.0 means the original smoothness. The
+ * default value is 0.0. The greater the value, the greater the smoothness level.
*/
float smoothnessLevel;
- /** The redness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The
- * larger the value, the greater the rosy degree.
+ /**
+ * The redness level, in the range [0.0,1.0], where 0.0 means the original redness. The default
+ * value is 0.0. The larger the value, the greater the redness level.
*/
float rednessLevel;
- /** The sharpness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0.
- * The larger the value, the greater the sharpening degree.
+ /**
+ * The sharpness level, in the range [0.0,1.0], where 0.0 means the original sharpness. The default
+ * value is 0.0. The larger the value, the greater the sharpness level.
*/
float sharpnessLevel;
@@ -5164,220 +5457,207 @@ struct BeautyOptions {
sharpnessLevel(0) {}
};
-/**
- * @brief Face shape area options. This structure defines options for facial adjustments on different facial areas.
+/**
+ * @brief Face shape area options.
*
* @since v4.4.0
*/
struct FaceShapeAreaOptions {
/**
- * @brief The specific facial area to be adjusted.
+ * @brief Chooses the specific facial areas that need to be adjusted.
*
* @since v4.4.0
*/
enum FACE_SHAPE_AREA {
- /** (Default) Invalid area. */
+ /**
+ * -1: (Default) Invalid area; facial enhancement effects do not take effect.
+ */
FACE_SHAPE_AREA_NONE = -1,
- /**
- * Head Scale, reduces the size of the head.
- * The value range is [0, 100]. The default value is 50.
- * The larger the value, the stronger the head reduction effect.
+ /**
+ * (100): Head, used to achieve a smaller head effect. The value range is 0 to 100, and the default
+ * value is 50. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_HEADSCALE = 100,
- /**
- * Forehead, adjusts the size of the forehead.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the forehead effect.
+ /**
+ * (101): Forehead, used to adjust the hairline height. The range is [0, 100], with a default value
+ * of 0. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_FOREHEAD = 101,
- /**
- * Face Contour, slims the facial contour.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the facial contour reduction effect.
+ /**
+ * (102): Face contour, used to achieve a slimmer face effect. The range is [0, 100], with a default
+ * value of 0. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_FACECONTOUR = 102,
- /**
- * Face Length, adjusts the length of the face.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the face length effect, negative values indicate the opposite direction.
+ /**
+ * (103): Face length, used to achieve a longer face effect. The range is [-100, 100], with a
+ * default value of 0. The greater the absolute value, the more noticeable the adjustment. Negative
+ * values indicate the opposite direction.
*/
FACE_SHAPE_AREA_FACELENGTH = 103,
- /**
- * Face Width, narrows the width of the face.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the face width reduction effect.
+ /**
+ * (104): Face width, used to achieve a narrower face effect. The range is [0, 100], with a default
+ * value of 0. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_FACEWIDTH = 104,
- /**
- * Cheekbone, adjusts the size of the cheekbone.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the cheekbone effect.
+ /**
+ * (105): Cheekbone, used to adjust cheekbone width. The range is [0, 100], with a default value of
+ * 0. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_CHEEKBONE = 105,
- /**
- * Cheek, adjusts the size of the cheek.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the cheek effect.
+ /**
+ * (106): Cheek, used to adjust cheek width. The range is [0, 100], with a default value of 0. The
+ * larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_CHEEK = 106,
- /**
- * Mandible, slims the mandible.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the mandible effect.
+ /**
+ * (107): Adjustment of the mandible. The range is [0, 100], with a default value of 0. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_MANDIBLE = 107,
- /**
- * Chin, adjusts the length of the chin.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the chin effect, negative values indicate the opposite direction.
- */
+ /**
+ * (108): Chin, used to adjust chin length. The range is [-100, 100], with a default value of 0. The
+ * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+ * opposite direction.
+ */
FACE_SHAPE_AREA_CHIN = 108,
- /**
- * Eye Scale, adjusts the size of the eyes.
- * The value range is [0, 100]. The default value is 50.
- * The larger the value, the stronger the eye size effect.
+ /**
+ * (200): Eyes, used to achieve a larger eye effect. The value range is 0 to 100, and the default
+ * value is 50. The larger the value, the more noticeable the adjustment.
*/
FACE_SHAPE_AREA_EYESCALE = 200,
- /**
- * Eye Distance, adjusts the distance between the two eyes.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eye distance effect, negative values indicate the opposite direction.
+ /**
+ * (201): Eye distance adjustment. The range is [-100, 100], with a default value of 0. The greater
+ * the absolute value, the more noticeable the adjustment. Negative values indicate the opposite
+ * direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEDISTANCE = 201,
- /**
- * Eye Position, adjusts the upper and lower position of the eyes.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eye position effect, negative values indicate the opposite direction.
+ /**
+ * (202): Eye position adjustment. The range is [-100, 100], with a default value of 0. The greater
+ * the absolute value, the more noticeable the adjustment. Negative values indicate the opposite
+ * direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEPOSITION = 202,
- /**
- * Lower Eyelid, adjusts the downward position of the eyelids.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the lower eyelid effect.
+ /**
+ * (203): Lower eyelid adjustment. The range is [0, 100], with a
+ * default value of 0. The larger the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_LOWEREYELID = 203,
- /**
- * Eye Pupils, adjusts the size of the pupils.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the eye pupils effect.
+ /**
+ * (204): Pupil size adjustment. The range is [0, 100], with a default value of 0. The larger the
+ * value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEPUPILS = 204,
- /**
- * Eye Inner Corner, adjusts the inner corners of the eyes.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eye inner corner effect, negative values indicate the opposite direction.
+ /**
+ * (205): Inner eye corner adjustment. The range is [-100, 100], with a default value of 0. The
+ * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+ * opposite direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEINNERCORNER = 205,
- /**
- * Eye Outer Corner, adjusts the outer corners of the eyes.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eye outer corner effect, negative values indicate the opposite direction.
+ /**
+ * (206): Outer eye corner adjustment. The range is [-100, 100], with a default value of 0. The
+ * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+ * opposite direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEOUTERCORNER = 206,
- /**
- * Nose Length, adjusts the length of the nose.
- * The value range is [-100, 100]. The default value is 0.
+ /**
+ * (300): Nose length, used to achieve a longer nose effect. The range is [-100, 100], with a
+ * default value of 0.
*/
FACE_SHAPE_AREA_NOSELENGTH = 300,
- /**
- * Nose Width, adjusts the width of the nose.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the nose width effect.
+ /**
+ * (301): Nose width, used to achieve a slimmer nose effect. The range is [0, 100], with a default
+ * value of 0. The larger the value, the more noticeable the effect of narrowing the nose.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEWIDTH = 301,
- /**
- * Nose Wing, adjusts the size of the nose wings.
- * The value range is [0, 100]. The default value is 10.
- * The larger the value, the stronger the nose wing effect.
+ /**
+ * (302): Nose wing adjustment. The value range is 0 to 100, and the default value is 10. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEWING = 302,
- /**
- * Nose Root, adjusts the size of the nose root.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the nose root effect.
+ /**
+ * (303): Nose root adjustment. The range is [0, 100], with a default value of 0. The larger the
+ * value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEROOT = 303,
- /**
- * Nose Bridge, adjusts the size of the nose bridge.
- * The value range is [0, 100]. The default value is 50.
- * The larger the value, the stronger the nose bridge effect.
+ /**
+ * (304): Nose bridge adjustment. The value range is 0 to 100, and the default value is 50. The
+ * larger the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEBRIDGE = 304,
- /**
- * Nose Tip, adjusts the size of the nose tip.
- * The value range is [0, 100]. The default value is 50.
- * The larger the value, the stronger the nose tip effect.
+ /**
+ * (305): Nose tip adjustment. The value range is 0 to 100, and the default value is 50. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSETIP = 305,
- /**
- * Nose General, adjusts the overall size of the nose.
- * The value range is [-100, 100]. The default value is 50.
- * The larger the absolute value, the stronger the nose general effect, negative values indicate the opposite direction.
+ /**
+ * (306): Overall nose adjustment. The range is [-100, 100], with a default value of 50. The greater
+ * the absolute value, the more noticeable the adjustment. Negative values indicate the opposite
+ * direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_NOSEGENERAL = 306,
- /**
- * Mouth Scale, adjusts the size of the mouth.
- * The value range is [-100, 100]. The default value is 20.
- * The larger the absolute value, the stronger the mouth size effect, negative values indicate the opposite direction.
+ /**
+ * (400): Mouth, used to achieve a larger mouth effect. The range is [-100, 100], with a default
+ * value of 20. The greater the absolute value, the more noticeable the adjustment. Negative values
+ * indicate the opposite direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_MOUTHSCALE = 400,
- /**
- * Mouth Position, adjusts the position of the mouth.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the mouth position effect.
+ /**
+ * (401): Mouth position adjustment. The range is [0, 100], with a default value of 0. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_MOUTHPOSITION = 401,
- /**
- * Mouth Smile, adjusts the degree of the mouth's smile.
- * The value range is [0, 100]. The default value is 30.
- * The larger the value, the stronger the mouth smile effect.
+ /**
+ * (402): Mouth smile adjustment. The value range is [0, 100], and the default value is 30. The larger
+ * the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_MOUTHSMILE = 402,
- /**
- * Mouth Lip, adjusts the size of the lips.
- * The value range is [0, 100]. The default value is 0.
- * The larger the value, the stronger the mouth lip effect.
+ /**
+ * (403): Lip shape adjustment. The range is [0, 100], with a default value of 0. The larger the
+ * value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_MOUTHLIP = 403,
- /**
- * Eyebrow Position, adjusts the position of the eyebrows.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the absolute value, the stronger the eyebrow position effect, negative values indicate the opposite direction.
+ /**
+ * (500): Eyebrow position adjustment. The range is [-100, 100], with a default value of 0. The
+ * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+ * opposite direction.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEBROWPOSITION = 500,
- /**
- * Eyebrow Thickness, adjusts the thickness of the eyebrows.
- * The value range is [-100, 100]. The default value is 0.
- * The larger the value, the stronger the eyebrow thickness effect.
+ /**
+ * (501): Eyebrow thickness adjustment. The range is [-100, 100], with a default value of 0. The
+ * larger the value, the more noticeable the adjustment.
* @since v4.6.0
*/
FACE_SHAPE_AREA_EYEBROWTHICKNESS = 501,
};
- /** The specific facial area to be adjusted, See #FACE_SHAPE_AREA.
- */
+ /**
+ * Facial enhancement areas: `FACE_SHAPE_AREA`
+ */
FACE_SHAPE_AREA shapeArea;
- /**
- * The intensity of the pinching effect applied to the specified facial area.
+ /**
+ * The intensity of the enhancement. The definition of enhancement intensity varies according to the
+ * different face areas, such as its orientation, range, and preset value. See `FACE_SHAPE_AREA`.
*/
int shapeIntensity;
@@ -5386,38 +5666,43 @@ struct FaceShapeAreaOptions {
FaceShapeAreaOptions() : shapeArea(FACE_SHAPE_AREA_NONE), shapeIntensity(0) {}
};
-/** @brief Face shape beauty options. This structure defines options for facial adjustments of different facial styles.
+/**
+ * @brief The facial enhancement style options.
*
* @since v4.4.0
*/
struct FaceShapeBeautyOptions {
/**
- * @brief The face shape beauty style options.
+ * @brief The facial enhancement style options.
*
* @since v4.4.0
*/
enum FACE_SHAPE_BEAUTY_STYLE {
/**
- * (Default) Female face shape style.
+ * 0: (Default) Feminine style.
*/
FACE_SHAPE_BEAUTY_STYLE_FEMALE = 0,
/**
- * Male face shape style.
+ * 1: Masculine style.
*/
FACE_SHAPE_BEAUTY_STYLE_MALE = 1,
/**
- * A natural-looking face shape style that applies minimal modification to facial features.
+ * 2: The natural style beauty effect only makes minimal adjustments to facial features.
* @since v4.6.0
*/
FACE_SHAPE_BEAUTY_STYLE_NATURAL = 2,
};
- /** The face shape style, See #FACE_SHAPE_BEAUTY_STYLE.
- */
+ /**
+ * Facial enhancement style options: `FACE_SHAPE_BEAUTY_STYLE`.
+ */
FACE_SHAPE_BEAUTY_STYLE shapeStyle;
- /** The intensity of the pinching effect applied to the specified facial style. The value ranges from 0 (original) to 100. The default value is 0. The greater the value, the stronger the intensity applied to face pinching.
- */
+ /**
+ * The intensity of the facial enhancement style, with a value range of [0, 100]. The default
+ * value is 50. The higher the value, the more obvious the facial enhancement effect.
+ */
int styleIntensity;
FaceShapeBeautyOptions(FACE_SHAPE_BEAUTY_STYLE shapeStyle, int styleIntensity) : shapeStyle(shapeStyle), styleIntensity(styleIntensity) {}
@@ -5425,29 +5710,34 @@ struct FaceShapeBeautyOptions {
FaceShapeBeautyOptions() : shapeStyle(FACE_SHAPE_BEAUTY_STYLE_FEMALE), styleIntensity(50) {}
};
-/** Filter effect options. This structure defines options for filter effect.
+/**
+ * @brief Filter effect options.
*
* @since v4.4.1
*/
struct FilterEffectOptions {
/**
- * The local absolute path of the custom 3D Cube path. Only cube format is supported.
- * The cube file must strictly comply with the Cube LUT Specification; otherwise, the filter effects will not take effect.
- *
- * The following is an example of the Cube file format. The cube file starts with `LUT_3D_SIZE`, which indicates the cube size. In filter effects, the cube size is limited to 32.
-
+ * The absolute path to the local cube map texture file, which can be used to customize the filter
+ * effect. The specified .cube file should strictly follow the Cube LUT Format Specification;
+ * otherwise, the filter options do not take effect. The following is a sample of the .cube file:
+ * ```
* LUT_3D_SIZE 32
* 0.0039215689 0 0.0039215682
* 0.0086021447 0.0037950677 0
- * 0.0728652592 0.0039215689 0
* ...
- *
- * The SDK provides a built-in cube named `built_in_whiten.cube` for whitening. To use this cube, specify the path to `built_in_whiten_filter`
+ * 0.0728652592 0.0039215689 0
+ * ```
+ * @note
+ * - The identifier `LUT_3D_SIZE` on the first line of the cube map file represents the size of the
+ * three-dimensional lookup table. The LUT size for filter effect can only be set to 32.
+ * - The SDK provides a built-in `built_in_whiten_filter.cube` file. You can pass the absolute path
+ * of this file to get the whitening filter effect.
*/
const char * path;
/**
- * The intensity of specified filter effect. The value ranges from 0.0 to 1.0. The default value is 0.5. The greater the value, the stronger the intensity of the filter.
+ * The intensity of the filter effect, with a range value of [0.0,1.0], in which 0.0 represents no
+ * filter effect. The default value is 0.5. The higher the value, the stronger the filter effect.
*/
float strength;
@@ -5456,40 +5746,49 @@ struct FilterEffectOptions {
FilterEffectOptions() : path(OPTIONAL_NULLPTR), strength(0.5) {}
};
+/**
+ * @brief The low-light enhancement options.
+ */
struct LowlightEnhanceOptions {
/**
- * The low-light enhancement mode.
+ * @brief The low-light enhancement mode.
*/
enum LOW_LIGHT_ENHANCE_MODE {
- /** 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light
- enhancement feature according to the ambient light to compensate for the lighting level or
- prevent overexposure, as necessary. */
+ /**
+ * 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light enhancement
+ * feature according to the ambient light to compensate for the lighting level or prevent
+ * overexposure, as necessary.
+ */
LOW_LIGHT_ENHANCE_AUTO = 0,
- /** Manual mode. Users need to enable or disable the low-light enhancement feature manually. */
+ /**
+ * 1: Manual mode. Users need to enable or disable the low-light enhancement feature manually.
+ */
LOW_LIGHT_ENHANCE_MANUAL = 1,
};
/**
- * The low-light enhancement level.
+ * @brief The low-light enhancement level.
*/
enum LOW_LIGHT_ENHANCE_LEVEL {
/**
- * 0: (Default) Promotes video quality during low-light enhancement. It processes the
- * brightness, details, and noise of the video image. The performance consumption is moderate,
- * the processing speed is moderate, and the overall video quality is optimal.
+ * 0: (Default) Promotes video quality during low-light enhancement. It processes the brightness,
+ * details, and noise of the video image. The performance consumption is moderate, the processing
+ * speed is moderate, and the overall video quality is optimal.
*/
LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY = 0,
/**
- * Promotes performance during low-light enhancement. It processes the brightness and details of
+ * 1: Promotes performance during low-light enhancement. It processes the brightness and details of
* the video image. The processing speed is faster.
*/
LOW_LIGHT_ENHANCE_LEVEL_FAST = 1,
};
- /** The low-light enhancement mode. See #LOW_LIGHT_ENHANCE_MODE.
+ /**
+ * The low-light enhancement mode. See `LOW_LIGHT_ENHANCE_MODE`.
*/
LOW_LIGHT_ENHANCE_MODE mode;
- /** The low-light enhancement level. See #LOW_LIGHT_ENHANCE_LEVEL.
+ /**
+ * The low-light enhancement level. See `LOW_LIGHT_ENHANCE_LEVEL`.
*/
LOW_LIGHT_ENHANCE_LEVEL level;
@@ -5500,45 +5799,51 @@ struct LowlightEnhanceOptions {
: mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {}
};
/**
- * The video noise reduction options.
+ * @brief Video noise reduction options.
*
* @since v4.0.0
*/
struct VideoDenoiserOptions {
- /** The video noise reduction mode.
+ /**
+ * @brief Video noise reduction mode.
*/
enum VIDEO_DENOISER_MODE {
- /** 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise
- reduction feature according to the ambient light. */
+ /**
+ * 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise reduction
+ * feature according to the ambient light.
+ */
VIDEO_DENOISER_AUTO = 0,
- /** Manual mode. Users need to enable or disable the video noise reduction feature manually. */
+ /**
+ * 1: Manual mode. Users need to enable or disable the video noise reduction feature manually.
+ */
VIDEO_DENOISER_MANUAL = 1,
};
/**
- * The video noise reduction level.
+ * @brief Video noise reduction level.
*/
enum VIDEO_DENOISER_LEVEL {
/**
- * 0: (Default) Promotes video quality during video noise reduction. `HIGH_QUALITY` balances
- * performance consumption and video noise reduction quality. The performance consumption is
- * moderate, the video noise reduction speed is moderate, and the overall video quality is
- * optimal.
+ * 0: (Default) Promotes video quality during video noise reduction. It balances performance
+ * consumption and video noise reduction quality. The performance consumption is moderate, the video
+ * noise reduction speed is moderate, and the overall video quality is optimal.
*/
VIDEO_DENOISER_LEVEL_HIGH_QUALITY = 0,
/**
- * Promotes reducing performance consumption during video noise reduction. `FAST` prioritizes
- * reducing performance consumption over video noise reduction quality. The performance
- * consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable
- * shadowing effect (shadows trailing behind moving objects) in the processed video, Agora
- * recommends that you use `FAST` when the camera is fixed.
+ * 1: Promotes reducing performance consumption during video noise reduction. It prioritizes
+ * reducing performance consumption over video noise reduction quality. The performance consumption
+ * is lower, and the video noise reduction speed is faster. To avoid a noticeable shadowing effect
+ * (shadows trailing behind moving objects) in the processed video, Agora recommends that you use
+ * this setting when the camera is fixed.
*/
VIDEO_DENOISER_LEVEL_FAST = 1,
};
- /** The video noise reduction mode. See #VIDEO_DENOISER_MODE.
+ /**
+ * Video noise reduction mode. See `VIDEO_DENOISER_MODE`.
*/
VIDEO_DENOISER_MODE mode;
- /** The video noise reduction level. See #VIDEO_DENOISER_LEVEL.
+ /**
+ * Video noise reduction level. See `VIDEO_DENOISER_LEVEL`.
*/
VIDEO_DENOISER_LEVEL level;
@@ -5548,22 +5853,27 @@ struct VideoDenoiserOptions {
VideoDenoiserOptions() : mode(VIDEO_DENOISER_AUTO), level(VIDEO_DENOISER_LEVEL_HIGH_QUALITY) {}
};
-/** The color enhancement options.
+/**
+ * @brief The color enhancement options.
*
* @since v4.0.0
*/
struct ColorEnhanceOptions {
- /** The level of color enhancement. The value range is [0.0,1.0]. `0.0` is the default value,
- * which means no color enhancement is applied to the video. The higher the value, the higher the
- * level of color enhancement.
+ /**
+ * The level of color enhancement. The value range is [0.0, 1.0]. `0.0` is the default value, which
+ * means no color enhancement is applied to the video. The higher the value, the higher the level of
+ * color enhancement. The default value is `0.5`.
*/
float strengthLevel;
- /** The level of skin tone protection. The value range is [0.0,1.0]. `0.0` means no skin tone
- * protection. The higher the value, the higher the level of skin tone protection. The default
- * value is `1.0`. When the level of color enhancement is higher, the portrait skin tone can be
- * significantly distorted, so you need to set the level of skin tone protection; when the level
- * of skin tone protection is higher, the color enhancement effect can be slightly reduced.
+ /**
+ * The level of skin tone protection. The value range is [0.0, 1.0]. `0.0` means no skin tone
+ * protection. The higher the value, the higher the level of skin tone protection. The default value
+ * is `1.0`.
+ * - When the level of color enhancement is higher, the portrait skin tone can be significantly
+ * distorted, so you need to set the level of skin tone protection.
+ * - When the level of skin tone protection is higher, the color enhancement effect can be slightly
+ * reduced.
* Therefore, to get the best color enhancement effect, Agora recommends that you adjust
* `strengthLevel` and `skinProtectLevel` to get the most appropriate values.
*/
@@ -5576,76 +5886,90 @@ struct ColorEnhanceOptions {
};
/**
- * The custom background image.
+ * @brief The custom background.
*/
struct VirtualBackgroundSource {
- /** The type of the custom background source.
+ /**
+ * @brief The type of the custom background.
*/
enum BACKGROUND_SOURCE_TYPE {
/**
- * 0: Enable segementation with the captured video frame without replacing the background.
+ * 0: Process the background as alpha data without replacement, only separating the portrait and the
+ * background. After setting this value, you can call `startLocalVideoTranscoder` to implement the
+ * picture-in-picture effect.
*/
BACKGROUND_NONE = 0,
/**
- * 1: (Default) The background source is a solid color.
+ * 1: (Default) The background image is a solid color.
*/
BACKGROUND_COLOR = 1,
/**
- * The background source is a file in PNG or JPG format.
+ * 2: The background is an image in PNG or JPG format.
*/
BACKGROUND_IMG = 2,
/**
- * The background source is the blurred original video frame.
- * */
+ * 3: The background is a blurred version of the original background.
+ */
BACKGROUND_BLUR = 3,
/**
- * The background source is a file in MP4, AVI, MKV, FLV format.
- * */
+ * 4: The background is a local video in MP4, AVI, MKV, FLV, or other supported formats.
+ */
BACKGROUND_VIDEO = 4,
};
- /** The degree of blurring applied to the background source.
+ /**
+ * @brief The degree of blurring applied to the custom background image.
*/
enum BACKGROUND_BLUR_DEGREE {
- /** 1: The degree of blurring applied to the custom background image is low. The user can almost
- see the background clearly. */
+ /**
+ * 1: The degree of blurring applied to the custom background image is low. The user can almost see
+ * the background clearly.
+ */
BLUR_DEGREE_LOW = 1,
- /** 2: The degree of blurring applied to the custom background image is medium. It is difficult
- for the user to recognize details in the background. */
+ /**
+ * 2: The degree of blurring applied to the custom background image is medium. It is difficult for
+ * the user to recognize details in the background.
+ */
BLUR_DEGREE_MEDIUM = 2,
- /** 3: (Default) The degree of blurring applied to the custom background image is high. The user
- can barely see any distinguishing features in the background. */
+ /**
+ * 3: (Default) The degree of blurring applied to the custom background image is high. The user can
+ * barely see any distinguishing features in the background.
+ */
BLUR_DEGREE_HIGH = 3,
};
- /** The type of the custom background image. See #BACKGROUND_SOURCE_TYPE.
+ /**
+ * The custom background. See `BACKGROUND_SOURCE_TYPE`.
*/
BACKGROUND_SOURCE_TYPE background_source_type;
/**
- * The color of the custom background image. The format is a hexadecimal integer defined by RGB,
- * without the # sign, such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which
- * signifies white. The value range is [0x000000,0xFFFFFF]. If the value is invalid, the SDK
- * replaces the original background image with a white background image.
- *
- * @note This parameter takes effect only when the type of the custom background image is
- * `BACKGROUND_COLOR`.
+ * The color of the custom background image. The format is
+ * a hexadecimal integer defined by RGB, without the # sign, such as 0xFFB6C1 for light pink. The
+ * default value is 0xFFFFFF, which signifies white. The value range is [0x000000, 0xffffff]. If the
+ * value is invalid, the SDK replaces the original background image with a white background image.
+ * @note
+ * This parameter is only applicable to custom backgrounds of the following types:
+ * - BACKGROUND_COLOR: The background image is a solid-colored image of the color passed in by the
+ * parameter.
+ * - BACKGROUND_IMG: If the image in `source` has a transparent background, the transparent
+ * background will be filled with the color passed in by the parameter.
*/
unsigned int color;
/**
- * The local absolute path of the custom background image. PNG and JPG formats are supported. If
- * the path is invalid, the SDK replaces the original background image with a white background
- * image.
- *
+ * The local absolute path of the custom background image. Supports PNG, JPG, MP4, AVI, MKV, and FLV
+ * formats. If the path is invalid, the SDK will use either the original background image or the
+ * solid color image specified by `color`.
* @note This parameter takes effect only when the type of the custom background image is
- * `BACKGROUND_IMG`.
+ * BACKGROUND_IMG or BACKGROUND_VIDEO.
*/
const char* source;
- /** The degree of blurring applied to the custom background image. See BACKGROUND_BLUR_DEGREE.
+ /**
+ * The degree of blurring applied to the custom background image. See `BACKGROUND_BLUR_DEGREE`.
* @note This parameter takes effect only when the type of the custom background image is
- * `BACKGROUND_BLUR`.
+ * BACKGROUND_BLUR.
*/
BACKGROUND_BLUR_DEGREE blur_degree;
@@ -5656,28 +5980,67 @@ struct VirtualBackgroundSource {
blur_degree(BLUR_DEGREE_HIGH) {}
};
+/**
+ * @brief Processing properties for background images.
+ */
struct SegmentationProperty {
+ /**
+ * @brief The type of algorithms to use for background processing.
+ */
enum SEG_MODEL_TYPE {
+ /**
+ * 1: (Default) Use the algorithm suitable for all scenarios.
+ */
SEG_MODEL_AI = 1,
+ /**
+ * 2: Use the algorithm designed specifically for scenarios with a green screen background.
+ */
SEG_MODEL_GREEN = 2
};
+ /**
+ * @brief Screen color type.
+ */
enum SCREEN_COLOR_TYPE {
+ /**
+ * (0): Automatically selects screen color.
+ */
SCREEN_COLOR_AUTO = 0,
+ /**
+ * (1): Green screen.
+ */
SCREEN_COLOR_GREEN = 1,
+ /**
+ * (2): Blue screen.
+ */
SCREEN_COLOR_BLUE = 2
};
+ /**
+ * The type of algorithms to use for background processing. See `SEG_MODEL_TYPE`.
+ */
SEG_MODEL_TYPE modelType;
+ /**
+ * The accuracy range for recognizing background colors in the image. The value range is [0,1], and
+ * the default value is 0.5. The larger the value, the wider the range of identifiable shades of
+ * pure color. When the value of this parameter is too large, the edge of the portrait and the pure
+ * color in the portrait range are also detected. Agora recommends that you dynamically adjust the
+ * value of this parameter according to the actual effect.
+ * @note This parameter only takes effect when `modelType` is set to `SEG_MODEL_GREEN`.
+ */
float greenCapacity;
+ /**
+ * The screen color. See `SCREEN_COLOR_TYPE`.
+ */
SCREEN_COLOR_TYPE screenColorType;
SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5), screenColorType(SCREEN_COLOR_AUTO) {}
};
-/** The type of custom audio track
+/**
+ * @brief The type of the audio track.
*/
enum AUDIO_TRACK_TYPE {
/**
@@ -5685,33 +6048,39 @@ enum AUDIO_TRACK_TYPE {
*/
AUDIO_TRACK_INVALID = -1,
/**
- * 0: Mixable audio track
- * You can push more than one mixable Audio tracks into one RTC connection(channel id + uid),
- * and SDK will mix these tracks into one audio track automatically.
- * However, compare to direct audio track, mixable track might cause extra 30ms+ delay.
+ * 0: Mixable audio tracks. This type of audio track supports mixing with other audio streams (such
+ * as audio streams captured by microphone) and playing locally or publishing to channels after
+ * mixing. The latency of mixable audio tracks is higher than that of direct audio tracks.
*/
AUDIO_TRACK_MIXABLE = 0,
/**
- * 1: Direct audio track
- * You can only push one direct (non-mixable) audio track into one RTC connection(channel id +
- * uid). Compare to mixable stream, you can have lower lantency using direct audio track.
+ * 1: Direct audio tracks. This type of audio track will replace the audio streams captured by the
+ * microphone and does not support mixing with other audio streams. The latency of direct audio
+ * tracks is lower than that of mixable audio tracks.
+ * @note If `AUDIO_TRACK_DIRECT` is specified for this parameter, you must set
+ * `publishMicrophoneTrack` to `false` in `ChannelMediaOptions` when calling `joinChannel(const
+ * char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)` to
+ * join the channel; otherwise, joining the channel fails and returns the error code -2.
*/
AUDIO_TRACK_DIRECT = 1,
};
-/** The configuration of custom audio track
+/**
+ * @brief The configuration of custom audio tracks.
*/
struct AudioTrackConfig {
/**
- * Enable local playback, enabled by default
- * true: (Default) Enable local playback
- * false: Do not enable local playback
+ * Whether to enable the local audio-playback device:
+ * - `true`: (Default) Enable the local audio-playback device.
+ * - `false`: Do not enable the local audio-playback device.
*/
bool enableLocalPlayback;
/**
- * Whether to enable APM (AEC/ANS/AGC) processing when the trackType is AUDIO_TRACK_DIRECT.
- * false: (Default) Do not enable APM processing.
- * true: Enable APM processing.
+ * Whether to enable audio processing module:
+ * - `true`: Enable the audio processing module to apply the Automatic Echo Cancellation (AEC),
+ * Automatic Noise Suppression (ANS), and Automatic Gain Control (AGC) effects.
+ * - `false`: (Default) Do not enable the audio processing module.
+ * @note This parameter only takes effect on AUDIO_TRACK_DIRECT in custom audio capturing.
*/
bool enableAudioProcessing;
@@ -5736,213 +6105,199 @@ struct AudioTrackConfig {
* | |--------------------|-----------------------------| | |
* | | 0x3: voice changer | 0x1: voice transform | | |
*/
-/** The options for SDK preset voice beautifier effects.
+/**
+ * @brief The options for SDK preset voice beautifier effects.
*/
enum VOICE_BEAUTIFIER_PRESET {
- /** Turn off voice beautifier effects and use the original voice.
+ /**
+ * Turn off voice beautifier effects and use the original voice.
*/
VOICE_BEAUTIFIER_OFF = 0x00000000,
- /** A more magnetic voice.
- *
- * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you
- * may experience vocal distortion.
+ /**
+ * A more magnetic voice.
+ * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may
+ * experience vocal distortion.
*/
CHAT_BEAUTIFIER_MAGNETIC = 0x01010100,
- /** A fresher voice.
- *
+ /**
+ * A fresher voice.
* @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you
* may experience vocal distortion.
*/
CHAT_BEAUTIFIER_FRESH = 0x01010200,
- /** A more vital voice.
- *
+ /**
+ * A more vital voice.
* @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you
* may experience vocal distortion.
*/
CHAT_BEAUTIFIER_VITALITY = 0x01010300,
/**
* Singing beautifier effect.
- * - If you call `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER), you can beautify a male-sounding
+ * - If you call `setVoiceBeautifierPreset` ( SINGING_BEAUTIFIER ), you can beautify a male-sounding
* voice and add a reverberation effect that sounds like singing in a small room. Agora recommends
- * not using `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER) to process a female-sounding voice;
- * otherwise, you may experience vocal distortion.
- * - If you call `setVoiceBeautifierParameters`(SINGING_BEAUTIFIER, param1, param2), you can
- * beautify a male- or female-sounding voice and add a reverberation effect.
+ * using this enumerator to process a male-sounding voice; otherwise, you might experience vocal
+ * distortion.
+ * - If you call `setVoiceBeautifierParameters` ( SINGING_BEAUTIFIER, param1, param2), you can
+ * beautify a male or female-sounding voice and add a reverberation effect.
*/
SINGING_BEAUTIFIER = 0x01020100,
- /** A more vigorous voice.
+ /**
+ * A more vigorous voice.
*/
TIMBRE_TRANSFORMATION_VIGOROUS = 0x01030100,
- /** A deeper voice.
+ /**
+ * A deep voice.
*/
TIMBRE_TRANSFORMATION_DEEP = 0x01030200,
- /** A mellower voice.
+ /**
+ * A mellower voice.
*/
TIMBRE_TRANSFORMATION_MELLOW = 0x01030300,
- /** A falsetto voice.
+ /**
+ * Falsetto.
*/
TIMBRE_TRANSFORMATION_FALSETTO = 0x01030400,
- /** A fuller voice.
+ /**
+ * A fuller voice.
*/
TIMBRE_TRANSFORMATION_FULL = 0x01030500,
- /** A clearer voice.
+ /**
+ * A clearer voice.
*/
TIMBRE_TRANSFORMATION_CLEAR = 0x01030600,
- /** A more resounding voice.
+ /**
+ * A more resounding voice.
*/
TIMBRE_TRANSFORMATION_RESOUNDING = 0x01030700,
- /** A more ringing voice.
+ /**
+ * A more ringing voice.
*/
TIMBRE_TRANSFORMATION_RINGING = 0x01030800,
/**
* A ultra-high quality voice, which makes the audio clearer and restores more details.
- * - To achieve better audio effect quality, Agora recommends that you call `setAudioProfile`
- * and set the `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or
- * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` and `scenario` to
- * `AUDIO_SCENARIO_HIGH_DEFINITION(6)` before calling `setVoiceBeautifierPreset`.
- * - If you have an audio capturing device that can already restore audio details to a high
- * degree, Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may
- * over-restore audio details, and you may not hear the anticipated voice effect.
+ * - To achieve better audio effect quality, Agora recommends that you set the `profile` of
+ * `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or
+ * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5) and `scenario` to `AUDIO_SCENARIO_GAME_STREAMING`
+ * (3) before calling `setVoiceBeautifierPreset`.
+ * - If you have an audio capturing device that can already restore audio details to a high degree,
+ * Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may over-restore
+ * audio details, and you may not hear the anticipated voice effect.
*/
ULTRA_HIGH_QUALITY_VOICE = 0x01040100
};
-/** Preset voice effects.
+/**
+ * @brief Preset audio effects.
*
- * For better voice effects, Agora recommends setting the `profile` parameter of `setAudioProfile`
- * to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` before using
- * the following presets:
+ * @details
+ * To get better audio effects, Agora recommends calling `setAudioProfile(AUDIO_PROFILE_TYPE profile, AUDIO_SCENARIO_TYPE scenario)` and setting the `profile` parameter as recommended below before using the preset audio effects.
+ * | Preset audio effects | `profile` |
+ * | ------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- |
+ * | - ROOM_ACOUSTICS_VIRTUAL_STEREO - ROOM_ACOUSTICS_3D_VOICE - ROOM_ACOUSTICS_VIRTUAL_SURROUND_SOUND | `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` or `AUDIO_PROFILE_MUSIC_STANDARD_STEREO` |
+ * | Other preset audio effects (except for `AUDIO_EFFECT_OFF` ) | `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` |
*
- * - `ROOM_ACOUSTICS_KTV`
- * - `ROOM_ACOUSTICS_VOCAL_CONCERT`
- * - `ROOM_ACOUSTICS_STUDIO`
- * - `ROOM_ACOUSTICS_PHONOGRAPH`
- * - `ROOM_ACOUSTICS_SPACIAL`
- * - `ROOM_ACOUSTICS_ETHEREAL`
- * - `ROOM_ACOUSTICS_CHORUS`
- * - `VOICE_CHANGER_EFFECT_UNCLE`
- * - `VOICE_CHANGER_EFFECT_OLDMAN`
- * - `VOICE_CHANGER_EFFECT_BOY`
- * - `VOICE_CHANGER_EFFECT_SISTER`
- * - `VOICE_CHANGER_EFFECT_GIRL`
- * - `VOICE_CHANGER_EFFECT_PIGKING`
- * - `VOICE_CHANGER_EFFECT_HULK`
- * - `PITCH_CORRECTION`
*/
enum AUDIO_EFFECT_PRESET {
- /** Turn off voice effects, that is, use the original voice.
+ /**
+ * Turn off voice effects, that is, use the original voice.
*/
AUDIO_EFFECT_OFF = 0x00000000,
- /** The voice effect typical of a KTV venue.
+ /**
+ * The voice effect typical of a KTV venue.
*/
ROOM_ACOUSTICS_KTV = 0x02010100,
- /** The voice effect typical of a concert hall.
+ /**
+ * The voice effect typical of a concert hall.
*/
ROOM_ACOUSTICS_VOCAL_CONCERT = 0x02010200,
- /** The voice effect typical of a recording studio.
+ /**
+ * The voice effect typical of a recording studio.
*/
ROOM_ACOUSTICS_STUDIO = 0x02010300,
- /** The voice effect typical of a vintage phonograph.
+ /**
+ * The voice effect typical of a vintage phonograph.
*/
ROOM_ACOUSTICS_PHONOGRAPH = 0x02010400,
- /** The virtual stereo effect, which renders monophonic audio as stereo audio.
- *
- * @note Before using this preset, set the `profile` parameter of `setAudioProfile`
- * to `AUDIO_PROFILE_MUSIC_STANDARD_STEREO(3)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)`;
- * otherwise, the preset setting is invalid.
+ /**
+ * The virtual stereo effect, which renders monophonic audio as stereo audio.
*/
ROOM_ACOUSTICS_VIRTUAL_STEREO = 0x02010500,
- /** A more spatial voice effect.
+ /**
+ * A more spatial voice effect.
*/
ROOM_ACOUSTICS_SPACIAL = 0x02010600,
- /** A more ethereal voice effect.
+ /**
+ * A more ethereal voice effect.
*/
ROOM_ACOUSTICS_ETHEREAL = 0x02010700,
- /** A 3D voice effect that makes the voice appear to be moving around the user. The default cycle
- * period of the 3D voice effect is 10 seconds. To change the cycle period, call
- * `setAudioEffectParameters` after this method.
- *
- * @note
- * - Before using this preset, set the `profile` parameter of `setAudioProfile` to
- * `AUDIO_PROFILE_MUSIC_STANDARD_STEREO` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO`; otherwise,
- * the preset setting is invalid.
- * - If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear
+ /**
+ * A 3D voice effect that makes the voice appear to be moving around the user. The default cycle
+ * period is 10 seconds. After setting this effect, you can call `setAudioEffectParameters` to
+ * modify the movement period.
+ * @note If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear
* the anticipated voice effect.
*/
ROOM_ACOUSTICS_3D_VOICE = 0x02010800,
- /** virtual suround sound.
- *
- * @note
- * - Agora recommends using this enumerator to process virtual suround sound; otherwise, you may
- * not hear the anticipated voice effect.
- * - To achieve better audio effect quality, Agora recommends calling \ref
- * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` parameter to
- * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before
- * setting this enumerator.
+ /**
+ * Virtual surround sound, that is, the SDK generates a simulated surround sound field on the basis
+ * of stereo channels, thereby creating a surround sound effect.
+ * @note If the virtual surround sound is enabled, users need to use stereo audio playback devices
+ * to hear the anticipated audio effect.
*/
ROOM_ACOUSTICS_VIRTUAL_SURROUND_SOUND = 0x02010900,
- /** The voice effect for chorus.
- *
- * @note: To achieve better audio effect quality, Agora recommends calling \ref
- * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` parameter to
- * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before
- * setting this enumerator.
+ /**
+ * The audio effect of chorus. Agora recommends using this effect in chorus scenarios to enhance the
+ * sense of depth and dimension in the vocals.
*/
ROOM_ACOUSTICS_CHORUS = 0x02010D00,
- /** A middle-aged man's voice.
- *
- * @note
- * Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may
- * not hear the anticipated voice effect.
+ /**
+ * A middle-aged man's voice.
+ * @note Agora recommends using this preset to process a male-sounding voice; otherwise, you may not
+ * hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_UNCLE = 0x02020100,
- /** A senior man's voice.
- *
- * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you
- * may not hear the anticipated voice effect.
+ /**
+ * An older man's voice.
+ * @note Agora recommends using this preset to process a male-sounding voice; otherwise, you may not
+ * hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_OLDMAN = 0x02020200,
- /** A boy's voice.
- *
- * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you
- * may not hear the anticipated voice effect.
+ /**
+ * A boy's voice.
+ * @note Agora recommends using this preset to process a male-sounding voice; otherwise, you may not
+ * hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_BOY = 0x02020300,
- /** A young woman's voice.
- *
- * @note
- * - Agora recommends using this enumerator to process a female-sounding voice; otherwise, you may
+ /**
+ * A young woman's voice.
+ * @note Agora recommends using this preset to process a female-sounding voice; otherwise, you may
* not hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_SISTER = 0x02020400,
- /** A girl's voice.
- *
- * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you
- * may not hear the anticipated voice effect.
+ /**
+ * A girl's voice.
+ * @note Agora recommends using this preset to process a female-sounding voice; otherwise, you may
+ * not hear the anticipated voice effect.
*/
VOICE_CHANGER_EFFECT_GIRL = 0x02020500,
- /** The voice of Pig King, a character in Journey to the West who has a voice like a growling
- * bear.
+ /**
+ * The voice of Pig King, a character in Journey to the West who has a voice like a growling bear.
*/
VOICE_CHANGER_EFFECT_PIGKING = 0x02020600,
- /** The Hulk's voice.
+ /**
+ * The Hulk's voice.
*/
VOICE_CHANGER_EFFECT_HULK = 0x02020700,
- /** An audio effect typical of R&B music.
- *
- * @note Before using this preset, set the `profile` parameter of `setAudioProfile` to
- - `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO`; otherwise,
- * the preset setting is invalid.
+ /**
+ * The voice effect typical of R&B music.
*/
STYLE_TRANSFORMATION_RNB = 0x02030100,
- /** The voice effect typical of popular music.
- *
- * @note Before using this preset, set the `profile` parameter of `setAudioProfile` to
- - `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO`; otherwise,
- * the preset setting is invalid.
+ /**
+ * The voice effect typical of popular music.
*/
STYLE_TRANSFORMATION_POPULAR = 0x02030200,
- /** A pitch correction effect that corrects the user's pitch based on the pitch of the natural C
+ /**
+ * A pitch correction effect that corrects the user's pitch based on the pitch of the natural C
* major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust
* the basic mode of tuning and the pitch of the main tone.
*/
@@ -5953,25 +6308,31 @@ enum AUDIO_EFFECT_PRESET {
*/
};
-/** The options for SDK preset voice conversion.
+/**
+ * @brief The options for SDK preset voice conversion effects.
*/
enum VOICE_CONVERSION_PRESET {
- /** Turn off voice conversion and use the original voice.
+ /**
+ * Turn off voice conversion effects and use the original voice.
*/
VOICE_CONVERSION_OFF = 0x00000000,
- /** A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to
- * process a female-sounding voice.
+ /**
+ * A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to process
+ * a female-sounding voice.
*/
VOICE_CHANGER_NEUTRAL = 0x03010100,
- /** A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a
+ /**
+ * A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a
* female-sounding voice.
*/
VOICE_CHANGER_SWEET = 0x03010200,
- /** A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a
+ /**
+ * A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a
* male-sounding voice.
*/
VOICE_CHANGER_SOLID = 0x03010300,
- /** A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a
+ /**
+ * A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a
* male-sounding voice.
*/
VOICE_CHANGER_BASS = 0x03010400,
@@ -6011,88 +6372,166 @@ enum VOICE_CONVERSION_PRESET {
};
-/** The options for SDK preset headphone equalizer.
+/**
+ * @brief Preset headphone equalizer types.
*/
enum HEADPHONE_EQUALIZER_PRESET {
- /** Turn off headphone EQ and use the original voice.
+ /**
+ * The headphone equalizer is disabled, and the original audio is heard.
*/
HEADPHONE_EQUALIZER_OFF = 0x00000000,
- /** For over-ear headphones.
+ /**
+ * An equalizer is used for over-ear headphones.
*/
HEADPHONE_EQUALIZER_OVEREAR = 0x04000001,
- /** For in-ear headphones.
+ /**
+ * An equalizer is used for in-ear headphones.
*/
HEADPHONE_EQUALIZER_INEAR = 0x04000002
};
-/** The options for SDK voice AI tuner.
+/**
+ * @brief Voice AI tuner sound types.
*/
enum VOICE_AI_TUNER_TYPE {
- /** Uncle, deep and magnetic male voice.
+ /**
+ * 0: Mature male voice. A deep and magnetic male voice.
*/
VOICE_AI_TUNER_MATURE_MALE,
- /** Fresh male, refreshing and sweet male voice.
+ /**
+ * 1: Fresh male voice. A fresh and slightly sweet male voice.
*/
VOICE_AI_TUNER_FRESH_MALE,
- /** Big sister, deep and charming female voice.
+ /**
+ * 2: Elegant female voice. A deep and charming female voice.
*/
VOICE_AI_TUNER_ELEGANT_FEMALE,
- /** Lolita, high-pitched and cute female voice.
+ /**
+ * 3: Sweet female voice. A high-pitched and cute female voice.
*/
VOICE_AI_TUNER_SWEET_FEMALE,
- /** Warm man singing, warm and melodic male voice that is suitable for male lyrical songs.
+ /**
+ * 4: Warm male singing. A warm and melodious male voice.
*/
VOICE_AI_TUNER_WARM_MALE_SINGING,
- /** Gentle female singing, soft and delicate female voice that is suitable for female lyrical songs.
+ /**
+ * 5: Gentle female singing. A soft and delicate female voice.
*/
VOICE_AI_TUNER_GENTLE_FEMALE_SINGING,
- /** Smoky uncle singing, unique husky male voice that is suitable for rock or blues songs.
+ /**
+ * 6: Husky male singing. A unique husky male voice.
*/
VOICE_AI_TUNER_HUSKY_MALE_SINGING,
- /** Warm big sister singing, warm and mature female voice that is suitable for emotionally powerful songs.
+ /**
+ * 7: Warm elegant female singing. A warm and mature female voice.
*/
VOICE_AI_TUNER_WARM_ELEGANT_FEMALE_SINGING,
- /** Forceful male singing, strong and powerful male voice that is suitable for passionate songs.
+ /**
+ * 8: Powerful male singing. A strong and powerful male voice.
*/
VOICE_AI_TUNER_POWERFUL_MALE_SINGING,
- /** Dreamy female singing, dreamlike and soft female voice that is suitable for airy and dream-like songs.
+ /**
+ * 9: Dreamy female singing. A dreamy and soft female voice.
*/
VOICE_AI_TUNER_DREAMY_FEMALE_SINGING,
};
/**
- * Screen sharing configurations.
+ * @brief The audio configuration for the shared screen stream.
+ *
+ * @details
+ * Only available where `captureAudio` is `true`.
+ *
+ */
+struct ScreenAudioParameters {
+ /**
+ * Audio sample rate (Hz).
+ */
+ int sampleRate;
+ /**
+ * The number of audio channels. The default value is 2, which means stereo.
+ */
+ int channels;
+ /**
+ * The volume of the captured system audio. The value range is [0, 100]. The default value is 100.
+ */
+ int captureSignalVolume;
+
+#if defined(__APPLE__) && !TARGET_OS_IOS
+ /**
+ * @technical preview
+ */
+ bool excludeCurrentProcessAudio = true;
+ ScreenAudioParameters(): sampleRate(48000), channels(2), captureSignalVolume(100) {}
+#else
+ ScreenAudioParameters(): sampleRate(16000), channels(2), captureSignalVolume(100) {}
+#endif
+};
+
+/**
+ * @brief Screen sharing configurations.
*/
struct ScreenCaptureParameters {
+
/**
- * On Windows and macOS, it represents the video encoding resolution of the shared screen stream.
- * See `VideoDimensions`. The default value is 1920 x 1080, that is, 2,073,600 pixels. Agora uses
- * the value of this parameter to calculate the charges.
+ * Determines whether to capture system audio during screen sharing:
+ * - `true`: Capture.
+ * - `false`: (Default) Do not capture.
*
- * If the aspect ratio is different between the encoding dimensions and screen dimensions, Agora
- * applies the following algorithms for encoding. Suppose dimensions are 1920 x 1080:
- * - If the value of the screen dimensions is lower than that of dimensions, for example,
- * 1000 x 1000 pixels, the SDK uses 1000 x 1000 pixels for encoding.
- * - If the value of the screen dimensions is higher than that of dimensions, for example,
- * 2000 x 1500, the SDK uses the maximum value under dimensions with the aspect ratio of
- * the screen dimension (4:3) for encoding, that is, 1440 x 1080.
+ * @note
+ * Due to system limitations, capturing system audio is only available for Android API level 29
+ * and later (that is, Android 10 and later).
+ */
+ bool captureAudio;
+ /**
+ * The audio configuration for the shared screen stream.
+ * @note This parameter only takes effect when `captureAudio` is `true`.
+ * See `ScreenAudioParameters`.
+ */
+ ScreenAudioParameters audioParams;
+
+ /**
+ * The video encoding resolution of the screen sharing stream. See `VideoDimensions`. The default
+ * value is 1920 × 1080, that is, 2,073,600 pixels. Agora uses the value of this parameter to
+ * calculate the charges.
+ * If the screen dimensions are different from the value of this parameter, Agora applies the
+ * following strategies for encoding. Suppose `dimensions` is set to 1920 × 1080:
+ * - If the value of the screen dimensions is lower than that of `dimensions`, for example, 1000 ×
+ * 1000 pixels, the SDK uses the screen dimensions, that is, 1000 × 1000 pixels, for encoding.
+ * - If the value of the screen dimensions is higher than that of `dimensions`, for example, 2000 ×
+ * 1500, the SDK uses the maximum value under `dimensions` with the aspect ratio of the screen
+ * dimension (4:3) for encoding, that is, 1440 × 1080.
+ * @note
+ * When setting the encoding resolution in the scenario of sharing documents (
+ * SCREEN_SCENARIO_DOCUMENT ), choose one of the following two methods:
+ * - If you require the best image quality, it is recommended to set the encoding resolution to be
+ * the same as the capture resolution.
+ * - If you wish to achieve a relative balance between image quality, bandwidth, and system
+ * performance, then:
+ * - When the capture resolution is greater than 1920 × 1080, it is recommended that the encoding
+ * resolution is not less than 1920 × 1080.
+ * - When the capture resolution is less than 1920 × 1080, it is recommended that the encoding
+ * resolution is not less than 1280 × 720.
*/
VideoDimensions dimensions;
/**
- * On Windows and macOS, it represents the video encoding frame rate (fps) of the shared screen
- * stream. The frame rate (fps) of the shared region. The default value is 5. We do not recommend
- * setting this to a value greater than 15.
+ * On Windows and macOS, this represents the video encoding frame rate (fps) of the screen sharing
+ * stream. The frame rate (fps) of the shared region. The default value is 5. Agora does not
+ * recommend setting this to a value greater than 15.
*/
int frameRate;
/**
- * On Windows and macOS, it represents the video encoding bitrate of the shared screen stream.
+ * On Windows and macOS, this represents the video encoding bitrate of the screen sharing stream.
* The bitrate (Kbps) of the shared region. The default value is 0 (the SDK works out a bitrate
* according to the dimensions of the current screen).
*/
int bitrate;
- /** Whether to capture the mouse in screen sharing:
+ /**
+ * Whether to capture the mouse in screen sharing:
* - `true`: (Default) Capture the mouse.
* - `false`: Do not capture the mouse.
+ * @note Due to macOS system restrictions, setting this parameter to `false` is ineffective during
+ * screen sharing (it has no impact when sharing a window).
*/
bool captureMouseCursor;
/**
@@ -6100,40 +6539,48 @@ struct ScreenCaptureParameters {
* to share it:
* - `true`: Bring the window to the front.
* - `false`: (Default) Do not bring the window to the front.
+ * @note Due to macOS system limitations, when setting this member to bring the window to the front,
+ * if the current app has multiple windows, only the main window will be brought to the front.
*/
bool windowFocus;
/**
- * A list of IDs of windows to be blocked. When calling `startScreenCaptureByDisplayId` to start
+ * The ID list of the windows to be blocked. When calling `startScreenCaptureByDisplayId` to start
* screen sharing, you can use this parameter to block a specified window. When calling
* `updateScreenCaptureParameters` to update screen sharing configurations, you can use this
- * parameter to dynamically block the specified windows during screen sharing.
+ * parameter to dynamically block a specified window.
*/
view_t* excludeWindowList;
/**
- * The number of windows to be blocked.
+ * The number of windows to be excluded.
+ * @note On the Windows platform, the maximum value of this parameter is 24; if this value is
+ * exceeded, excluding the window fails.
*/
int excludeWindowCount;
- /** The width (px) of the border. Defaults to 0, and the value range is [0,50].
- *
+ /**
+ * (For macOS and Windows only) The width (px) of the border. The default value is 5, and the value
+ * range is (0, 50].
+ * @note This parameter only takes effect when `highLighted` is set to `true`.
*/
int highLightWidth;
- /** The color of the border in RGBA format. The default value is 0xFF8CBF26.
- *
+ /**
+ * (For macOS and Windows only)
+ * - On Windows platforms, the color of the border in ARGB format. The default value is 0xFF8CBF26.
+ * - On macOS, `COLOR_CLASS` refers to `NSColor`.
*/
unsigned int highLightColor;
- /** Whether to place a border around the shared window or screen:
- * - true: Place a border.
- * - false: (Default) Do not place a border.
- *
+ /**
+ * (For macOS and Windows only) Whether to place a border around the shared window or screen:
+ * - `true`: Place a border.
+ * - `false`: (Default) Do not place a border.
* @note When you share a part of a window or screen, the SDK places a border around the entire
- * window or screen if you set `enableHighLight` as true.
- *
+ * window or screen if you set this parameter to `true`.
*/
bool enableHighLight;
ScreenCaptureParameters()
- : dimensions(1920, 1080),
+ : captureAudio(false),
+ dimensions(1920, 1080),
frameRate(5),
bitrate(STANDARD_BITRATE),
captureMouseCursor(true),
@@ -6144,7 +6591,7 @@ struct ScreenCaptureParameters {
highLightColor(0),
enableHighLight(false) {}
ScreenCaptureParameters(const VideoDimensions& d, int f, int b)
- : dimensions(d),
+ : captureAudio(false),dimensions(d),
frameRate(f),
bitrate(b),
captureMouseCursor(true),
@@ -6155,7 +6602,8 @@ struct ScreenCaptureParameters {
highLightColor(0),
enableHighLight(false) {}
ScreenCaptureParameters(int width, int height, int f, int b)
- : dimensions(width, height),
+ : captureAudio(false),
+ dimensions(width, height),
frameRate(f),
bitrate(b),
captureMouseCursor(true),
@@ -6166,7 +6614,8 @@ struct ScreenCaptureParameters {
highLightColor(0),
enableHighLight(false) {}
ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs)
- : dimensions(width, height),
+ : captureAudio(false),
+ dimensions(width, height),
frameRate(f),
bitrate(b),
captureMouseCursor(cur),
@@ -6177,7 +6626,8 @@ struct ScreenCaptureParameters {
highLightColor(0),
enableHighLight(false) {}
ScreenCaptureParameters(int width, int height, int f, int b, view_t* ex, int cnt)
- : dimensions(width, height),
+ : captureAudio(false),
+ dimensions(width, height),
frameRate(f),
bitrate(b),
captureMouseCursor(true),
@@ -6189,7 +6639,8 @@ struct ScreenCaptureParameters {
enableHighLight(false) {}
ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t* ex,
int cnt)
- : dimensions(width, height),
+ : captureAudio(false),
+ dimensions(width, height),
frameRate(f),
bitrate(b),
captureMouseCursor(cur),
@@ -6202,12 +6653,12 @@ struct ScreenCaptureParameters {
};
/**
- * Audio recording quality.
+ * @brief Audio recording quality.
*/
enum AUDIO_RECORDING_QUALITY_TYPE {
/**
- * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes
- * of recording.
+ * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes of
+ * recording.
*/
AUDIO_RECORDING_QUALITY_LOW = 0,
/**
@@ -6216,18 +6667,19 @@ enum AUDIO_RECORDING_QUALITY_TYPE {
*/
AUDIO_RECORDING_QUALITY_MEDIUM = 1,
/**
- * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10
- * minutes of recording.
+ * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 minutes
+ * of recording.
*/
AUDIO_RECORDING_QUALITY_HIGH = 2,
/**
- * 3: Ultra high audio recording quality.
+ * 3: Ultra high quality. The sample rate is 32 kHz, and the file size is around 7.5 MB after 10
+ * minutes of recording.
*/
AUDIO_RECORDING_QUALITY_ULTRA_HIGH = 3,
};
/**
- * Recording content. Set in `startAudioRecording`.
+ * @brief Recording content. Set in `startAudioRecording [3/3]`.
*/
enum AUDIO_FILE_RECORDING_TYPE {
/**
@@ -6245,7 +6697,7 @@ enum AUDIO_FILE_RECORDING_TYPE {
};
/**
- * Audio encoded frame observer position.
+ * @brief Audio encoded frame observer position.
*/
enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION {
/**
@@ -6263,12 +6715,12 @@ enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION {
};
/**
- * Recording configuration.
+ * @brief Recording configurations.
*/
struct AudioRecordingConfiguration {
/**
* The absolute path (including the filename extensions) of the recording file. For example:
- * `C:\music\audio.mp4`.
+ * `C:\music\audio.aac`.
* @note Ensure that the directory for the log files exists and is writable.
*/
const char* filePath;
@@ -6285,7 +6737,7 @@ struct AudioRecordingConfiguration {
* - 44100
* - 48000
* @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC
- * files with quality to be `AUDIO_RECORDING_QUALITY_MEDIUM` or `AUDIO_RECORDING_QUALITY_HIGH` for
+ * files with `quality` set as AUDIO_RECORDING_QUALITY_MEDIUM or AUDIO_RECORDING_QUALITY_HIGH for
* better recording quality.
*/
int sampleRate;
@@ -6300,9 +6752,17 @@ struct AudioRecordingConfiguration {
AUDIO_RECORDING_QUALITY_TYPE quality;
/**
- * Recording channel. The following values are supported:
- * - (Default) 1
- * - 2
+ * The audio channel of recording: The parameter supports the following values:
+ * - 1: (Default) Mono.
+ * - 2: Stereo.
+ * @note
+ * The actual recorded audio channel is related to the audio channel that you capture.
+ * - If the captured audio is mono and `recordingChannel` is `2`, the recorded audio is the
+ * dual-channel data that is copied from mono data, not stereo.
+ * - If the captured audio is dual channel and `recordingChannel` is `1`, the recorded audio is the
+ * mono data that is mixed by dual-channel data.
+ * The integration scheme also affects the final recorded audio channel. If you need to record in
+ * stereo, contact `technical support`.
*/
int recordingChannel;
@@ -6343,15 +6803,15 @@ struct AudioRecordingConfiguration {
};
/**
- * Observer settings for the encoded audio.
+ * @brief Observer settings for the encoded audio.
*/
struct AudioEncodedFrameObserverConfig {
/**
- * Audio profile. For details, see `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`.
+ * Audio profile. See `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`.
*/
AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType;
/**
- * Audio encoding type. For details, see `AUDIO_ENCODING_TYPE`.
+ * Audio encoding type. See `AUDIO_ENCODING_TYPE`.
*/
AUDIO_ENCODING_TYPE encodingType;
@@ -6365,46 +6825,49 @@ struct AudioEncodedFrameObserverConfig {
class IAudioEncodedFrameObserver {
public:
/**
- * Gets the encoded audio data of the local user.
+ * @brief Gets the encoded audio data of the local user.
*
+ * @details
* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as
- * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD`, you can get the encoded audio data of the local
+ * AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD, you can get the encoded audio data of the local
* user from this callback.
*
- * @param frameBuffer The pointer to the audio frame buffer.
- * @param length The data length (byte) of the audio frame.
- * @param audioEncodedFrameInfo Audio information after encoding. For details, see
- * `EncodedAudioFrameInfo`.
+ * @param frameBuffer The audio buffer.
+ * @param length The data length (byte).
+ * @param audioEncodedFrameInfo Audio information after encoding. See `EncodedAudioFrameInfo`.
+ *
*/
virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length,
const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0;
/**
- * Gets the encoded audio data of all remote users.
+ * @brief Gets the encoded audio data of all remote users.
*
+ * @details
* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as
- * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK`, you can get encoded audio data of all remote
+ * AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK, you can get encoded audio data of all remote
* users through this callback.
*
- * @param frameBuffer The pointer to the audio frame buffer.
- * @param length The data length (byte) of the audio frame.
- * @param audioEncodedFrameInfo Audio information after encoding. For details, see
- * `EncodedAudioFrameInfo`.
+ * @param frameBuffer The audio buffer.
+ * @param length The data length (byte).
+ * @param audioEncodedFrameInfo Audio information after encoding. See `EncodedAudioFrameInfo`.
+ *
*/
virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length,
const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0;
/**
- * Gets the mixed and encoded audio data of the local and all remote users.
+ * @brief Gets the mixed and encoded audio data of the local and all remote users.
*
+ * @details
* After calling `registerAudioEncodedFrameObserver` and setting the audio profile as
- * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED`, you can get the mixed and encoded audio data of
- * the local and all remote users through this callback.
+ * AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED, you can get the mixed and encoded audio data of the
+ * local and all remote users through this callback.
+ *
+ * @param frameBuffer The audio buffer.
+ * @param length The data length (byte).
+ * @param audioEncodedFrameInfo Audio information after encoding. See `EncodedAudioFrameInfo`.
*
- * @param frameBuffer The pointer to the audio frame buffer.
- * @param length The data length (byte) of the audio frame.
- * @param audioEncodedFrameInfo Audio information after encoding. For details, see
- * `EncodedAudioFrameInfo`.
*/
virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length,
const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0;
@@ -6412,7 +6875,9 @@ class IAudioEncodedFrameObserver {
virtual ~IAudioEncodedFrameObserver() {}
};
-/** The region for connection, which is the region where the server the SDK connects to is located.
+/**
+ * @brief The region for connection, which is the region where the server the SDK connects to is
+ * located.
*/
enum AREA_CODE {
/**
@@ -6440,7 +6905,7 @@ enum AREA_CODE {
*/
AREA_CODE_IN = 0x00000020,
/**
- * (Default) Global.
+ * Global.
*/
AREA_CODE_GLOB = (0xFFFFFFFF)
};
@@ -6485,83 +6950,102 @@ enum AREA_CODE_EX {
};
/**
- * The error code of the channel media replay.
+ * @brief The error code of the channel media relay.
*/
enum CHANNEL_MEDIA_RELAY_ERROR {
- /** 0: No error.
+ /**
+ * 0: No error.
*/
RELAY_OK = 0,
- /** 1: An error occurs in the server response.
+ /**
+ * 1: An error occurs in the server response.
*/
RELAY_ERROR_SERVER_ERROR_RESPONSE = 1,
- /** 2: No server response. You can call the `leaveChannel` method to leave the channel.
- *
- * This error can also occur if your project has not enabled co-host token authentication. You can
- * contact technical support to enable the service for cohosting across channels before starting a
- * channel media relay.
+ /**
+ * 2: No server response.
+ * This error may be caused by poor network connections. If this error occurs when initiating a
+ * channel media relay, you can try again later; if this error occurs during channel media relay,
+ * you can call `leaveChannel(const LeaveChannelOptions& options)` to leave the channel.
+ * This error can also occur if the channel media relay service is not enabled in the project. You
+ * can contact `technical support` to enable the service.
*/
RELAY_ERROR_SERVER_NO_RESPONSE = 2,
- /** 3: The SDK fails to access the service, probably due to limited resources of the server.
+ /**
+ * 3: The SDK fails to access the service, probably due to limited resources of the server.
*/
RELAY_ERROR_NO_RESOURCE_AVAILABLE = 3,
- /** 4: Fails to send the relay request.
+ /**
+ * 4: Fails to send the relay request.
*/
RELAY_ERROR_FAILED_JOIN_SRC = 4,
- /** 5: Fails to accept the relay request.
+ /**
+ * 5: Fails to accept the relay request.
*/
RELAY_ERROR_FAILED_JOIN_DEST = 5,
- /** 6: The server fails to receive the media stream.
+ /**
+ * 6: The server fails to receive the media stream.
*/
RELAY_ERROR_FAILED_PACKET_RECEIVED_FROM_SRC = 6,
- /** 7: The server fails to send the media stream.
+ /**
+ * 7: The server fails to send the media stream.
*/
RELAY_ERROR_FAILED_PACKET_SENT_TO_DEST = 7,
- /** 8: The SDK disconnects from the server due to poor network connections. You can call the
- * `leaveChannel` method to leave the channel.
+ /**
+ * 8: The SDK disconnects from the server due to poor network connections. You can call
+ * `leaveChannel(const LeaveChannelOptions& options)` to leave the channel.
*/
RELAY_ERROR_SERVER_CONNECTION_LOST = 8,
- /** 9: An internal error occurs in the server.
+ /**
+ * 9: An internal error occurs in the server.
*/
RELAY_ERROR_INTERNAL_ERROR = 9,
- /** 10: The token of the source channel has expired.
+ /**
+ * 10: The token of the source channel has expired.
*/
RELAY_ERROR_SRC_TOKEN_EXPIRED = 10,
- /** 11: The token of the destination channel has expired.
+ /**
+ * 11: The token of the destination channel has expired.
*/
RELAY_ERROR_DEST_TOKEN_EXPIRED = 11,
};
/**
- * The state code of the channel media relay.
+ * @brief The state code of the channel media relay.
*/
enum CHANNEL_MEDIA_RELAY_STATE {
- /** 0: The initial state. After you successfully stop the channel media relay by calling
+ /**
+ * 0: The initial state. After you successfully stop the channel media relay by calling
* `stopChannelMediaRelay`, the `onChannelMediaRelayStateChanged` callback returns this state.
*/
RELAY_STATE_IDLE = 0,
- /** 1: The SDK tries to relay the media stream to the destination channel.
+ /**
+ * 1: The SDK tries to relay the media stream to the destination channel.
*/
RELAY_STATE_CONNECTING = 1,
- /** 2: The SDK successfully relays the media stream to the destination channel.
+ /**
+ * 2: The SDK successfully relays the media stream to the destination channel.
*/
RELAY_STATE_RUNNING = 2,
- /** 3: An error occurs. See `code` in `onChannelMediaRelayStateChanged` for the error code.
+ /**
+ * 3: An error occurs. See `code` in `onChannelMediaRelayStateChanged` for the error code.
*/
RELAY_STATE_FAILURE = 3,
};
-/** The definition of ChannelMediaInfo.
+/**
+ * @brief Channel media information.
*/
struct ChannelMediaInfo {
- /** The user ID.
+ /**
+ * The user ID.
*/
uid_t uid;
- /** The channel name. The default value is NULL, which means that the SDK
- * applies the current channel name.
+ /**
+ * The channel name.
*/
const char* channelName;
- /** The token that enables the user to join the channel. The default value
- * is NULL, which means that the SDK applies the current token.
+ /**
+ * The token that enables the user to join the channel.
*/
const char* token;
@@ -6569,41 +7053,45 @@ struct ChannelMediaInfo {
ChannelMediaInfo(const char* c, const char* t, uid_t u) : uid(u), channelName(c), token(t) {}
};
-/** The definition of ChannelMediaRelayConfiguration.
+/**
+ * @brief Configuration of cross channel media relay.
*/
struct ChannelMediaRelayConfiguration {
- /** The information of the source channel `ChannelMediaInfo`. It contains the following members:
- * - `channelName`: The name of the source channel. The default value is `NULL`, which means the
- * SDK applies the name of the current channel.
- * - `uid`: The unique ID to identify the relay stream in the source channel. The default value is
- * 0, which means the SDK generates a random UID. You must set it as 0.
- * - `token`: The token for joining the source channel. It is generated with the `channelName` and
- * `uid` you set in `srcInfo`.
- * - If you have not enabled the App Certificate, set this parameter as the default value
- * `NULL`, which means the SDK applies the App ID.
- * - If you have enabled the App Certificate, you must use the token generated with the
+ /**
+ * The information of the source channel. See `ChannelMediaInfo`. It contains the following members:
+ * - `channelName`: The name of the source channel. The default value is `NULL`, which means the SDK
+ * applies the name of the current channel.
+ * - `token`: The `token` for joining the source channel. This token is generated with the
+ * `channelName` and `uid` you set in `srcInfo`.
+ * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`,
+ * which means the SDK applies the App ID.
+ * - If you have enabled the App Certificate, you must use the `token` generated with the
* `channelName` and `uid`, and the `uid` must be set as 0.
+ * - `uid`: The unique user ID to identify the relay stream in the source channel. Agora recommends
+ * leaving the default value of 0 unchanged.
*/
ChannelMediaInfo* srcInfo;
- /** The information of the destination channel `ChannelMediaInfo`. It contains the following
- * members:
- * - `channelName`: The name of the destination channel.
- * - `uid`: The unique ID to identify the relay stream in the destination channel. The value
- * ranges from 0 to (2^32-1). To avoid UID conflicts, this `UID` must be different from any
- * other `UID` in the destination channel. The default value is 0, which means the SDK generates
- * a random `UID`. Do not set this parameter as the `UID` of the host in the destination channel,
- * and ensure that this `UID` is different from any other `UID` in the channel.
- * - `token`: The token for joining the destination channel. It is generated with the
- * `channelName` and `uid` you set in `destInfos`.
- * - If you have not enabled the App Certificate, set this parameter as the default value NULL,
+ /**
+ * The information of the target channel `ChannelMediaInfo`. It contains the following members:
+ * - `channelName`: The name of the target channel.
+ * - `token`: The `token` for joining the target channel. It is generated with the `channelName` and
+ * `uid` you set in `destInfos`.
+ * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`,
* which means the SDK applies the App ID.
- * If you have enabled the App Certificate, you must use the token generated with the
+ * - If you have enabled the App Certificate, you must use the `token` generated with the
* `channelName` and `uid`.
+ * - `uid`: The unique user ID to identify the relay stream in the target channel. The value ranges
+ * from 0 to (2^32-1). To avoid user ID conflicts, this user ID must be different from any other
+ * user ID in the target channel. The default value is 0, which means the SDK generates a random
+ * UID.
+ * @note If the token of any target channel expires, the whole media relay stops; hence Agora
+ * recommends that you specify the same expiration time for the tokens of all the target channels.
*/
ChannelMediaInfo* destInfos;
- /** The number of destination channels. The default value is 0, and the value range is from 0 to
- * 6. Ensure that the value of this parameter corresponds to the number of `ChannelMediaInfo`
- * structs you define in `destInfo`.
+ /**
+ * The number of target channels. The default value is 0, and the value range is from 0 to 6. Ensure
+ * that the value of this parameter corresponds to the number of `ChannelMediaInfo` structs you
+ * define in `destInfo`.
*/
int destCount;
@@ -6612,7 +7100,7 @@ struct ChannelMediaRelayConfiguration {
};
/**
- * The uplink network information.
+ * @brief The uplink network information.
*/
struct UplinkNetworkInfo {
/**
@@ -6743,66 +7231,82 @@ struct DownlinkNetworkInfo {
};
/**
- * The built-in encryption mode.
+ * @brief The built-in encryption mode.
*
+ * @details
* Agora recommends using AES_128_GCM2 or AES_256_GCM2 encrypted mode. These two modes support the
* use of salt for higher security.
+ *
*/
enum ENCRYPTION_MODE {
- /** 1: 128-bit AES encryption, XTS mode.
+ /**
+ * 1: 128-bit AES encryption, XTS mode.
*/
AES_128_XTS = 1,
- /** 2: 128-bit AES encryption, ECB mode.
+ /**
+ * 2: 128-bit AES encryption, ECB mode.
*/
AES_128_ECB = 2,
- /** 3: 256-bit AES encryption, XTS mode.
+ /**
+ * 3: 256-bit AES encryption, XTS mode.
*/
AES_256_XTS = 3,
- /** 4: 128-bit SM4 encryption, ECB mode.
+ /**
+ * 4: 128-bit SM4 encryption, ECB mode.
*/
SM4_128_ECB = 4,
- /** 5: 128-bit AES encryption, GCM mode.
+ /**
+ * 5: 128-bit AES encryption, GCM mode.
*/
AES_128_GCM = 5,
- /** 6: 256-bit AES encryption, GCM mode.
+ /**
+ * 6: 256-bit AES encryption, GCM mode.
*/
AES_256_GCM = 6,
- /** 7: (Default) 128-bit AES encryption, GCM mode. This encryption mode requires the setting of
- * salt (`encryptionKdfSalt`).
+ /**
+ * 7: (Default) 128-bit AES encryption, GCM mode. This encryption mode requires the setting of salt
+ * (`encryptionKdfSalt`).
*/
AES_128_GCM2 = 7,
- /** 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt
+ /**
+ * 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt
* (`encryptionKdfSalt`).
*/
AES_256_GCM2 = 8,
- /** Enumerator boundary.
+ /**
+ * Enumerator boundary.
*/
MODE_END,
};
-/** Built-in encryption configurations. */
+/**
+ * @brief Built-in encryption configurations.
+ */
struct EncryptionConfig {
/**
- * The built-in encryption mode. See #ENCRYPTION_MODE. Agora recommends using `AES_128_GCM2`
- * or `AES_256_GCM2` encrypted mode. These two modes support the use of salt for higher security.
+ * The built-in encryption mode. See `ENCRYPTION_MODE`. Agora recommends using `AES_128_GCM2` or
+ * `AES_256_GCM2` encrypted mode. These two modes support the use of salt for higher security.
*/
ENCRYPTION_MODE encryptionMode;
/**
* Encryption key in string type with unlimited length. Agora recommends using a 32-byte key.
- *
- * @note If you do not set an encryption key or set it as NULL, you cannot use the built-in
- * encryption, and the SDK returns #ERR_INVALID_ARGUMENT (-2).
+ * @note If you do not set an encryption key or set it as `NULL`, you cannot use the built-in
+ * encryption, and the SDK returns `-2`.
*/
const char* encryptionKey;
/**
* Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server
- * side.
- *
- * @note This parameter takes effect only in `AES_128_GCM2` or `AES_256_GCM2` encrypted mode.
- * In this case, ensure that this parameter is not 0.
+ * side. See Media Stream Encryption for details.
+ * @note This parameter takes effect only in `AES_128_GCM2` or `AES_256_GCM2` encrypted mode. In
+ * this case, ensure that this parameter is not `0`.
*/
uint8_t encryptionKdfSalt[32];
+ /**
+ * Whether to enable data stream encryption:
+ * - `true`: Enable data stream encryption.
+ * - `false`: (Default) Disable data stream encryption.
+ */
bool datastreamEncryptionEnabled;
EncryptionConfig()
@@ -6839,7 +7343,8 @@ struct EncryptionConfig {
/// @endcond
};
-/** Encryption error type.
+/**
+ * @brief Encryption error type.
*/
enum ENCRYPTION_ERROR_TYPE {
/**
@@ -6847,21 +7352,21 @@ enum ENCRYPTION_ERROR_TYPE {
*/
ENCRYPTION_ERROR_INTERNAL_FAILURE = 0,
/**
- * 1: MediaStream decryption errors. Ensure that the receiver and the sender use the same
- * encryption mode and key.
+ * 1: Media stream decryption error. Ensure that the receiver and the sender use the same encryption
+ * mode and key.
*/
ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1,
/**
- * 2: MediaStream encryption errors.
+ * 2: Media stream encryption error.
*/
ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2,
/**
- * 3: DataStream decryption errors. Ensure that the receiver and the sender use the same
- * encryption mode and key.
+ * 3: Data stream decryption error. Ensure that the receiver and the sender use the same encryption
+ * mode and key.
*/
ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3,
/**
- * 4: DataStream encryption errors.
+ * 4: Data stream encryption error.
*/
ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4,
};
@@ -6873,47 +7378,53 @@ enum UPLOAD_ERROR_REASON {
};
/**
- * Error codes for renewing a token.
+ * @brief Represents the error codes after calling `renewToken`.
*
- * These error codes indicate the result of calling renewToken.
* @since 4.6.0
*/
enum RENEW_TOKEN_ERROR_CODE {
/**
- * 0: The token is renewed successfully.
+ * (0): Token updated successfully.
*/
RENEW_TOKEN_SUCCESS = 0,
/**
- * 1: It is recommended that the user generate a new token and retry renewToken.
+ * (1): Token update failed due to an unknown server error. It is recommended to check the
+ * parameters used to generate the Token, regenerate the Token, and retry `renewToken`.
*/
RENEW_TOKEN_FAILURE = 1,
/**
- * 2: The token renewal failed because the provided token has expired.
- * It is recommended that the user generate a new token with a longer expiration time and retry renewToken.
+ * (2): Token update failed because the provided Token has expired. It is recommended to generate a
+ * new Token with a longer expiration time and retry `renewToken`.
*/
RENEW_TOKEN_TOKEN_EXPIRED = 2,
/**
- * 3: The token renewal failed because the provided token is invalid.
- * It is recommended that the user check the token generation process, generate a new token, and retry renewToken.
+ * (3): Token update failed because the provided Token is invalid. Common reasons include: the
+ * project has enabled App Certificate in the Agora Console but did not use a Token when joining the
+ * channel; the uid specified in `joinChannel` is inconsistent with the uid used when generating the
+ * Token; the channel name specified in `joinChannel` is inconsistent with the one used when
+ * generating the Token. It is recommended to check the Token generation process, generate a new
+ * Token, and retry `renewToken`.
*/
RENEW_TOKEN_INVALID_TOKEN = 3,
/**
- * 4: The token renewal failed because the channel name in the token does not match the current channel.
- * It is recommended that the user check the channel name, generate a new token, and retry renewToken.
+ * (4): Token update failed because the channel name in the Token does not match the current
+ * channel. It is recommended to check the channel name, generate a new Token, and retry
+ * `renewToken`.
*/
RENEW_TOKEN_INVALID_CHANNEL_NAME = 4,
/**
- * 5: The token renewal failed because the app ID in the token does not match the current app ID.
- * It is recommended that the user check the app ID, generate a new token, and retry renewToken.
+ * (5): Token update failed because the App ID in the Token does not match the current App ID. It is
+ * recommended to check the App ID, generate a new Token, and retry `renewToken`.
*/
RENEW_TOKEN_INCONSISTENT_APPID = 5,
/**
- * 6: The token renewal was canceled because a new request was made, and the previous one was canceled.
+ * (6): The previous Token update request was canceled due to a new request being initiated.
*/
RENEW_TOKEN_CANCELED_BY_NEW_REQUEST = 6,
};
-/** The type of the device permission.
+/**
+ * @brief The type of the device permission.
*/
enum PERMISSION_TYPE {
/**
@@ -6925,29 +7436,34 @@ enum PERMISSION_TYPE {
*/
CAMERA = 1,
+ /**
+ * (For Android only) 2: Permission for screen sharing.
+ */
SCREEN_CAPTURE = 2,
};
/**
- * The subscribing state.
+ * @brief The subscribing state.
*/
enum STREAM_SUBSCRIBE_STATE {
/**
- * 0: The initial subscribing state after joining the channel.
+ * 0: The initial subscribing state after joining the channel.
*/
SUB_STATE_IDLE = 0,
/**
* 1: Fails to subscribe to the remote stream. Possible reasons:
* - The remote user:
- * - Calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending local
- * media stream.
- * - Calls `disableAudio` or `disableVideo `to disable the local audio or video module.
- * - Calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or
+ * - Calls `muteLocalAudioStream` (`true`) or `muteLocalVideoStream` (`true`) to stop sending
+ * local media stream.
+ * - Calls `disableAudio` or `disableVideo` to disable the local audio or video module.
+ * - Calls `enableLocalAudio` ( false ) or `enableLocalVideo` ( false ) to disable local audio or
* video capture.
* - The role of the remote user is audience.
* - The local user calls the following methods to stop receiving remote streams:
- * - Calls `muteRemoteAudioStream(true)`, `muteAllRemoteAudioStreams(true)` to stop receiving the remote audio streams.
- * - Calls `muteRemoteVideoStream(true)`, `muteAllRemoteVideoStreams(true)` to stop receiving the remote video streams.
+ * - Call `muteRemoteAudioStream` ( true ) or `muteAllRemoteAudioStreams` ( true ) to stop
+ * receiving the remote audio stream.
+ * - Call `muteRemoteVideoStream` ( true ) or `muteAllRemoteVideoStreams` ( true ) to stop
+ * receiving the remote video stream.
*/
SUB_STATE_NO_SUBSCRIBED = 1,
/**
@@ -6955,13 +7471,13 @@ enum STREAM_SUBSCRIBE_STATE {
*/
SUB_STATE_SUBSCRIBING = 2,
/**
- * 3: Subscribes to and receives the remote stream successfully.
+ * 3: The remote stream is received, and the subscription is successful.
*/
SUB_STATE_SUBSCRIBED = 3
};
/**
- * The publishing state.
+ * @brief The publishing state.
*/
enum STREAM_PUBLISH_STATE {
/**
@@ -6970,12 +7486,12 @@ enum STREAM_PUBLISH_STATE {
PUB_STATE_IDLE = 0,
/**
* 1: Fails to publish the local stream. Possible reasons:
- * - The local user calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop
- * sending the local media stream.
+ * - The local user calls `muteLocalAudioStream` (`true`) or `muteLocalVideoStream` (`true`) to stop
+ * sending local media streams.
* - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video
* module.
- * - The local user calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the
- * local audio or video capture.
+ * - The local user calls `enableLocalAudio` (`false`) or `enableLocalVideo` (`false`) to disable
+ * the local audio or video capture.
* - The role of the local user is audience.
*/
PUB_STATE_NO_PUBLISHED = 1,
@@ -6990,14 +7506,49 @@ enum STREAM_PUBLISH_STATE {
};
/**
- * The EchoTestConfiguration struct.
+ * @brief The configuration of the audio and video call loop test.
*/
struct EchoTestConfiguration {
+ /**
+ * The view used to render the local user's video. This parameter is only applicable to scenarios
+ * testing video devices, that is, when `enableVideo` is true.
+ */
view_t view;
+ /**
+ * Whether to enable the audio device for the loop test:
+ * - `true`: (Default) Enable the audio device. To test the audio device, set this parameter as
+ * true.
+ * - `false`: Disable the audio device.
+ */
bool enableAudio;
+ /**
+ * Whether to enable the video device for the loop test:
+ * - `true`: (Default) Enable the video device. To test the video device, set this parameter as
+ * true.
+ * - `false`: Disable the video device.
+ */
bool enableVideo;
+ /**
+ * The token used to secure the audio and video call loop test. If you do not enable App Certificate
+ * in Agora Console, you do not need to pass a value in this parameter; if you have enabled App
+ * Certificate in Agora Console, you must pass a token in this parameter; the `uid` used when you
+ * generate the token must be 0xFFFFFFFF, and the channel name used must be the channel name that
+ * identifies each audio and video call loop tested. For details on server-side token generation,
+ * see the token generation documentation.
+ */
const char* token;
+ /**
+ * The channel name that identifies each audio and video call loop. To ensure proper loop test
+ * functionality, the channel name passed in to identify each loop test cannot be the same when
+ * users of the same project (App ID) perform audio and video call loop tests on different devices.
+ */
const char* channelId;
+ /**
+ * Set the time interval or delay for returning the results of the audio and video loop test. The
+ * value range is [2,10], in seconds, with the default value being 2 seconds.
+ * - For audio loop tests, the test results will be returned according to the time interval you set.
+ * - For video loop tests, the video will be displayed in a short time, after which the delay will
+ * gradually increase until it reaches the delay you set.
+ */
int intervalInSeconds;
EchoTestConfiguration(view_t v, bool ea, bool ev, const char* t, const char* c, const int is)
@@ -7013,7 +7564,7 @@ struct EchoTestConfiguration {
};
/**
- * The information of the user.
+ * @brief The information of the user.
*/
struct UserInfo {
/**
@@ -7021,7 +7572,7 @@ struct UserInfo {
*/
uid_t uid;
/**
- * The user account. The maximum data length is `MAX_USER_ACCOUNT_LENGTH_TYPE`.
+ * User account. The maximum data length is `MAX_USER_ACCOUNT_LENGTH_TYPE`.
*/
char userAccount[MAX_USER_ACCOUNT_LENGTH];
@@ -7029,26 +7580,26 @@ struct UserInfo {
};
/**
- * The audio filter of in-ear monitoring.
+ * @brief The audio filter types of in-ear monitoring.
*/
enum EAR_MONITORING_FILTER_TYPE {
/**
- * 1: Do not add an audio filter to the in-ear monitor.
+ * 1<<0: No audio filter added to in-ear monitoring.
*/
EAR_MONITORING_FILTER_NONE = (1 << 0),
/**
- * 2: Enable audio filters to the in-ear monitor. If you implement functions such as voice
- * beautifier and audio effect, users can hear the voice after adding these effects.
+ * 1<<1: Add vocal effects audio filter to in-ear monitoring. If you implement functions such as
+ * voice beautifier and audio effect, users can hear the voice after adding these effects.
*/
EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1 << 1),
/**
- * 4: Enable noise suppression to the in-ear monitor.
+ * 1<<2: Add noise suppression audio filter to in-ear monitoring.
*/
EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1 << 2),
/**
- * 32768: Enable audio filters by reuse post-processing filter to the in-ear monitor.
- * This bit is intended to be used in exclusive mode, which means, if this bit is set, all other
- * bits will be disregarded.
+ * 1<<15: Reuse the audio filter that has been processed on the sending end for in-ear monitoring.
+ * This enumerator reduces CPU usage while increasing in-ear monitoring latency, which is suitable
+ * for latency-tolerant scenarios requiring low CPU consumption.
*/
EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1 << 15),
};
@@ -7086,49 +7637,23 @@ enum THREAD_PRIORITY_TYPE {
#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__)
/**
- * The video configuration for the shared screen stream.
+ * @brief The video configuration for the shared screen stream.
*/
struct ScreenVideoParameters {
/**
- * The dimensions of the video encoding resolution. The default value is `1280` x `720`.
- * For recommended values, see [Recommended video
- * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles).
- * If the aspect ratio is different between width and height and the screen, the SDK adjusts the
- * video encoding resolution according to the following rules (using an example where `width` ×
- * `height` is 1280 × 720):
- * - When the width and height of the screen are both lower than `width` and `height`, the SDK
- * uses the resolution of the screen for video encoding. For example, if the screen is 640 ×
- * 360, The SDK uses 640 × 360 for video encoding.
- * - When either the width or height of the screen is higher than `width` or `height`, the SDK
- * uses the maximum values that do not exceed those of `width` and `height` while maintaining
- * the aspect ratio of the screen for video encoding. For example, if the screen is 2000 × 1500,
- * the SDK uses 960 × 720 for video encoding.
- *
- * @note
- * - The billing of the screen sharing stream is based on the values of width and height.
- * When you do not pass in these values, Agora bills you at 1280 × 720;
- * when you pass in these values, Agora bills you at those values.
- * For details, see [Pricing for Real-time
- * Communication](https://docs.agora.io/en/Interactive%20Broadcast/billing_rtc).
- * - This value does not indicate the orientation mode of the output ratio.
- * For how to set the video orientation, see `ORIENTATION_MODE`.
- * - Whether the SDK can support a resolution at 720P depends on the performance of the device.
- * If you set 720P but the device cannot support it, the video frame rate can be lower.
+ * The video encoding dimension. The default value is 1280 × 720.
*/
VideoDimensions dimensions;
/**
- * The video encoding frame rate (fps). The default value is `15`.
- * For recommended values, see [Recommended video
- * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles).
+ * The video encoding frame rate (fps). The default value is 15.
*/
int frameRate = 15;
/**
- * The video encoding bitrate (Kbps). For recommended values, see [Recommended video
- * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles).
+ * The video encoding bitrate (Kbps).
*/
int bitrate;
- /*
- * The content hint of the screen sharing:
+ /**
+ * The content hint for screen sharing. See `VIDEO_CONTENT_HINT`.
*/
VIDEO_CONTENT_HINT contentHint = VIDEO_CONTENT_HINT::CONTENT_HINT_MOTION;
@@ -7136,124 +7661,140 @@ struct ScreenVideoParameters {
};
/**
- * The audio configuration for the shared screen stream.
- */
-struct ScreenAudioParameters {
- /**
- * The audio sample rate (Hz). The default value is `16000`.
- */
- int sampleRate = 16000;
- /**
- * The number of audio channels. The default value is `2`, indicating dual channels.
- */
- int channels = 2;
- /**
- * The volume of the captured system audio. The value range is [0,100]. The default value is
- * `100`.
- */
- int captureSignalVolume = 100;
-};
-
-/**
- * The configuration of the screen sharing
+ * @brief Screen sharing configurations.
*/
struct ScreenCaptureParameters2 {
/**
* Determines whether to capture system audio during screen sharing:
- * - `true`: Capture.
- * - `false`: (Default) Do not capture.
- *
- * **Note**
- * Due to system limitations, capturing system audio is only available for Android API level 29
+ * - `true`: Capture system audio.
+ * - `false`: (Default) Do not capture system audio.
+ * @note
+ * - Due to system limitations, capturing system audio is only applicable to Android API level 29
* and later (that is, Android 10 and later).
+ * - To improve the success rate of capturing system audio during screen sharing, ensure that you
+ * have called the `setAudioScenario` method and set the audio scenario to
+ * `AUDIO_SCENARIO_GAME_STREAMING`.
*/
bool captureAudio = false;
/**
- * The audio configuration for the shared screen stream.
+ * The audio configuration for the shared screen stream. See `ScreenAudioParameters`.
+ * @note This parameter only takes effect when `captureAudio` is `true`.
*/
ScreenAudioParameters audioParams;
/**
- * Determines whether to capture the screen during screen sharing:
- * - `true`: (Default) Capture.
- * - `false`: Do not capture.
- *
- * **Note**
- * Due to system limitations, screen capture is only available for Android API level 21 and later
- * (that is, Android 5 and later).
+ * Whether to capture the screen when screen sharing:
+ * - `true`: (Default) Capture the screen.
+ * - `false`: Do not capture the screen.
+ * @note Due to system limitations, the capture screen is only applicable to Android API level 21
+ * and above, that is, Android 5 and above.
*/
bool captureVideo = true;
/**
- * The video configuration for the shared screen stream.
+ * The video configuration for the shared screen stream. See `ScreenVideoParameters`.
+ * @note This parameter only takes effect when `captureVideo` is `true`.
*/
ScreenVideoParameters videoParams;
};
#endif
/**
- * The tracing event of media rendering.
+ * @brief The rendering state of the media frame.
*/
enum MEDIA_TRACE_EVENT {
/**
- * 0: The media frame has been rendered.
+ * 0: The video frame has been rendered.
*/
MEDIA_TRACE_EVENT_VIDEO_RENDERED = 0,
/**
- * 1: The media frame has been decoded.
+ * 1: The video frame has been decoded.
*/
MEDIA_TRACE_EVENT_VIDEO_DECODED,
};
/**
- * The video rendering tracing result
+ * @brief Indicators during video frame rendering progress.
*/
struct VideoRenderingTracingInfo {
/**
- * Elapsed time from the start tracing time to the time when the tracing event occurred.
+ * The time interval (ms) from `startMediaRenderingTracing` to SDK triggering the
+ * `onVideoRenderingTracingResult` callback. Agora recommends you call `startMediaRenderingTracing`
+ * before joining a channel.
*/
int elapsedTime;
/**
- * Elapsed time from the start tracing time to the time when join channel.
- *
- * **Note**
- * If the start tracing time is behind the time when join channel, this value will be negative.
+ * The time interval (ms) from `startMediaRenderingTracing` to `joinChannel(const char* token, const
+ * char* channelId, const char* info, uid_t uid)` or `joinChannel(const char* token, const char*
+ * channelId, uid_t uid, const ChannelMediaOptions& options)`
+ * . A negative number indicates that `startMediaRenderingTracing` is called after calling
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)`.
*/
int start2JoinChannel;
/**
- * Elapsed time from joining channel to finishing joining channel.
+ * The time interval (ms) from `joinChannel(const char* token, const char* channelId, const char*
+ * info, uid_t uid)` or `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to successfully joining
+ * the channel.
*/
int join2JoinSuccess;
/**
- * Elapsed time from finishing joining channel to remote user joined.
- *
- * **Note**
- * If the start tracing time is after the time finishing join channel, this value will be
- * the elapsed time from the start tracing time to remote user joined. The minimum value is 0.
+ * - If the local user calls `startMediaRenderingTracing` before successfully joining the channel,
+ * this value is the time interval (ms) from the local user successfully joining the channel to the
+ * remote user joining the channel.
+ * - If the local user calls `startMediaRenderingTracing` after successfully joining the channel,
+ * the value is the time interval (ms) from `startMediaRenderingTracing` to when the remote user
+ * joins the channel.
+ * @note
+ * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel,
+ * the value is 0 and meaningless.
+ * - In order to reduce the time of rendering the first frame for remote users, Agora recommends
+ * that the local user joins the channel when the remote user is in the channel to reduce this
+ * value.
*/
int joinSuccess2RemoteJoined;
/**
- * Elapsed time from remote user joined to set the view.
- *
- * **Note**
- * If the start tracing time is after the time when remote user joined, this value will be
- * the elapsed time from the start tracing time to set the view. The minimum value is 0.
+ * - If the local user calls `startMediaRenderingTracing` before the remote user joins the channel,
+ * this value is the time interval (ms) from when the remote user joins the channel to when the
+ * local user sets the remote view.
+ * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel,
+ * this value is the time interval (ms) from calling `startMediaRenderingTracing` to setting the
+ * remote view.
+ * @note
+ * - If the local user calls `startMediaRenderingTracing` after setting the remote view, the value
+ * is 0 and has no effect.
+ * - In order to reduce the time of rendering the first frame for remote users, Agora recommends
+ * that the local user sets the remote view before the remote user joins the channel, or sets the
+ * remote view immediately after the remote user joins the channel to reduce this value.
*/
int remoteJoined2SetView;
/**
- * Elapsed time from remote user joined to the time subscribing remote video stream.
- *
- * **Note**
- * If the start tracing time is after the time when remote user joined, this value will be
- * the elapsed time from the start tracing time to the time subscribing remote video stream.
- * The minimum value is 0.
+ * - If the local user calls `startMediaRenderingTracing` before the remote user joins the channel,
+ * this value is the time interval (ms) from the remote user joining the channel to subscribing to
+ * the remote video stream.
+ * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel,
+ * this value is the time interval (ms) from `startMediaRenderingTracing` to subscribing to the
+ * remote video stream.
+ * @note
+ * - If the local user calls `startMediaRenderingTracing` after subscribing to the remote video
+ * stream, the value is 0 and has no effect.
+ * - In order to reduce the time of rendering the first frame for remote users, Agora recommends
+ * that after the remote user joins the channel, the local user immediately subscribes to the remote
+ * video stream to reduce this value.
*/
int remoteJoined2UnmuteVideo;
/**
- * Elapsed time from remote user joined to the remote video packet received.
- *
- * **Note**
- * If the start tracing time is after the time when remote user joined, this value will be
- * the elapsed time from the start tracing time to the time subscribing remote video stream.
- * The minimum value is 0.
+ * - If the local user calls `startMediaRenderingTracing` before the remote user joins the channel,
+ * this value is the time interval (ms) from when the remote user joins the channel to when the
+ * local user receives the remote video stream.
+ * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel,
+ * this value is the time interval (ms) from `startMediaRenderingTracing` to receiving the remote
+ * video stream.
+ * @note
+ * - If the local user calls `startMediaRenderingTracing` after receiving the remote video stream,
+ * the value is 0 and has no effect.
+ * - In order to reduce the time of rendering the first frame for remote users, Agora recommends
+ * that the remote user publishes video streams immediately after joining the channel, and the local
+ * user immediately subscribes to remote video streams to reduce this value.
*/
int remoteJoined2PacketReceived;
};
@@ -7269,29 +7810,41 @@ enum CONFIG_FETCH_TYPE {
CONFIG_FETCH_TYPE_JOIN_CHANNEL = 2,
};
-/** The local proxy mode type. */
+/**
+ * @brief Connection mode with the Agora Private Media Server.
+ */
enum LOCAL_PROXY_MODE {
- /** 0: Connect local proxy with high priority, if not connected to local proxy, fallback to sdrtn.
+ /**
+ * 0: The SDK first tries to connect to the specified Agora Private Media Server; if it fails, it
+ * connects to the Agora SD-RTN™.
*/
ConnectivityFirst = 0,
- /** 1: Only connect local proxy
+ /**
+ * 1: The SDK only tries to connect to the specified Agora Private Media Server.
*/
LocalOnly = 1,
};
+/**
+ * @brief Configuration information for the log server.
+ */
struct LogUploadServerInfo {
- /** Log upload server domain
+ /**
+ * Domain name of the log server.
*/
const char* serverDomain;
- /** Log upload server path
+ /**
+ * Storage path for logs on the server.
*/
const char* serverPath;
- /** Log upload server port
+ /**
+ * Port of the log server.
*/
int serverPort;
- /** Whether to use HTTPS request:
- - true: Use HTTPS request
- - fasle: Use HTTP request
+ /**
+ * Whether the log server uses HTTPS protocol:
+ * - `true`: Uses HTTPS.
+ * - `false`: Uses HTTP.
*/
bool serverHttps;
@@ -7301,34 +7854,55 @@ struct LogUploadServerInfo {
: serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {}
};
+/**
+ * @brief Advanced options for the Local Access Point.
+ */
struct AdvancedConfigInfo {
- /** Log upload server
+ /**
+ * Custom log upload server. By default, the SDK uploads logs to the Agora log server. You can use
+ * this parameter to change the log upload server. See `LogUploadServerInfo`.
*/
LogUploadServerInfo logUploadServer;
};
+/**
+ * @brief Configuration for the Local Access Point.
+ */
struct LocalAccessPointConfiguration {
- /** Local access point IP address list.
+ /**
+ * Internal IP address list of the Local Access Point. Either ipList or domainList must be
+ * specified.
*/
const char** ipList;
- /** The number of local access point IP address.
+ /**
+ * Number of internal IP addresses for the Local Access Point. This value must match the number of
+ * IP addresses you provide.
*/
int ipListSize;
- /** Local access point domain list.
+ /**
+ * Domain name list of the Local Access Point. The SDK resolves the IP addresses of the Local Access
+ * Point from the provided domain names. The DNS resolution timeout is 10 seconds. Either ipList or
+ * domainList must be specified. If you specify both IP addresses and domain names, the SDK merges
+ * and deduplicates the resolved IP addresses and the specified IP addresses, then randomly selects
+ * one for load balancing.
*/
const char** domainList;
- /** The number of local access point domain.
+ /**
+ * Number of domain names for the Local Access Point. This value must match the number of domain
+ * names you provide.
*/
int domainListSize;
- /** Certificate domain name installed on specific local access point. pass "" means using sni
- * domain on specific local access point SNI(Server Name Indication) is an extension to the TLS
- * protocol.
+ /**
+ * Domain name for internal certificate verification. If left empty, the SDK uses the default domain
+ * name `secure-edge.local` for certificate verification.
*/
const char* verifyDomainName;
- /** Local proxy connection mode, connectivity first or local only.
+ /**
+ * Connection mode. See `LOCAL_PROXY_MODE`.
*/
LOCAL_PROXY_MODE mode;
- /** Local proxy connection, advanced Config info.
+ /**
+ * Advanced options for the Local Access Point. See `AdvancedConfigInfo`.
*/
AdvancedConfigInfo advancedConfig;
/**
@@ -7347,21 +7921,30 @@ struct LocalAccessPointConfiguration {
disableAut(true) {}
};
+/**
+ * @brief Type of video stream to be recorded.
+ */
enum RecorderStreamType {
+ /**
+ * 0: (Default) Video stream in the channel.
+ */
RTC,
+ /**
+ * 1: Local preview video stream before joining the channel.
+ */
PREVIEW,
};
/**
- * The information about recorded media streams.
+ * @brief The information about the media streams to be recorded.
*/
struct RecorderStreamInfo {
/**
- * The channel ID of the audio/video stream needs to be recorded.
+ * The name of the channel in which the media streams publish.
*/
const char* channelId;
/**
- * The user ID.
+ * The ID of the user whose media streams you want to record.
*/
uid_t uid;
/**
@@ -7467,77 +8050,128 @@ class LicenseCallback {
} // namespace base
/**
- * Spatial audio parameters
+ * @brief The spatial audio parameters.
*/
struct SpatialAudioParams {
/**
- * Speaker azimuth in a spherical coordinate system centered on the listener.
+ * The azimuth angle of the remote user or media player relative to the local user. The value range
+ * is [0,360], and the unit is degrees. The values are as follows:
+ * - 0: (Default) 0 degrees, which means directly in front on the horizontal plane.
+ * - 90: 90 degrees, which means directly to the left on the horizontal plane.
+ * - 180: 180 degrees, which means directly behind on the horizontal plane.
+ * - 270: 270 degrees, which means directly to the right on the horizontal plane.
+ * - 360: 360 degrees, which means directly in front on the horizontal plane.
*/
Optional<float> speaker_azimuth;
/**
- * Speaker elevation in a spherical coordinate system centered on the listener.
+ * The elevation angle of the remote user or media player relative to the local user. The value
+ * range is [-90,90], and the unit is degrees. The values are as follows:
+ * - 0: (Default) 0 degrees, which means that the horizontal plane is not rotated.
+ * - -90: -90 degrees, which means that the horizontal plane is rotated 90 degrees downwards.
+ * - 90: 90 degrees, which means that the horizontal plane is rotated 90 degrees upwards.
*/
Optional<float> speaker_elevation;
/**
- * Distance between speaker and listener.
+ * The distance of the remote user or media player relative to the local user. The value range is
+ * [1,50], and the unit is meters. The default value is 1 meter.
*/
Optional<float> speaker_distance;
/**
- * Speaker orientation [0-180], 0 degree is the same with listener orientation.
+ * The orientation of the remote user or media player relative to the local user. The value range is
+ * [0,180], and the unit is degrees. The values are as follows:
+ * - 0: (Default) 0 degrees, which means that the sound source and listener face the same direction.
+ * - 180: 180 degrees, which means that the sound source and listener face each other.
*/
Optional<int> speaker_orientation;
/**
- * Enable blur or not for the speaker.
+ * Whether to enable audio blurring:
+ * - `true`: Enable audio blurring.
+ * - `false`: (Default) Disable audio blurring.
*/
Optional<bool> enable_blur;
/**
- * Enable air absorb or not for the speaker.
+ * Whether to enable air absorption, that is, to simulate the sound attenuation effect of sound
+ * transmitting in the air; under a certain transmission distance, the attenuation speed of
+ * high-frequency sound is fast, and the attenuation speed of low-frequency sound is slow.
+ * - `true`: (Default) Enable air absorption. Make sure that the value of `speaker_attenuation` is
+ * not `0`; otherwise, this setting does not take effect.
+ * - `false`: Disable air absorption.
*/
Optional<bool> enable_air_absorb;
/**
- * Speaker attenuation factor.
+ * The sound attenuation coefficient of the remote user or media player. The value range is [0,1].
+ * The values are as follows:
+ * - 0: Broadcast mode, where the volume and timbre are not attenuated with distance, and the volume
+ * and timbre heard by local users do not change regardless of distance.
+ * - (0,0.5): Weak attenuation mode, where the volume and timbre only have a weak attenuation during
+ * the propagation, and the sound can travel farther than that in a real environment.
+ * `enable_air_absorb` needs to be enabled at the same time.
+ * - 0.5: (Default) Simulates the attenuation of the volume in the real environment; the effect is
+ * equivalent to not setting the `speaker_attenuation` parameter.
+ * - (0.5,1]: Strong attenuation mode, where volume and timbre attenuate rapidly during the
+ * propagation. `enable_air_absorb` needs to be enabled at the same time.
*/
Optional<float> speaker_attenuation;
/**
- * Enable doppler factor.
+ * Whether to enable the Doppler effect: When there is a relative displacement between the sound
+ * source and the receiver of the sound source, the tone heard by the receiver changes.
+ * - `true`: Enable the Doppler effect.
+ * - `false`: (Default) Disable the Doppler effect.
+ * @note
+ * - This parameter is suitable for scenarios where the sound source is moving at high speed (for
+ * example, racing games). It is not recommended for common audio and video interactive scenarios
+ * (for example, voice chat, co-streaming, or online KTV).
+ * - When this parameter is enabled, Agora recommends that you set a regular period (such as 30 ms),
+ * and then call the `updatePlayerPositionInfo`, `updateSelfPosition`, and `updateRemotePosition`
+ * methods to continuously update the relative distance between the sound source and the receiver.
+ * The following factors can cause the Doppler effect to be unpredictable or the sound to be
+ * jittery: the period of updating the distance is too long, the updating period is irregular, or
+ * the distance information is lost due to network packet loss or delay.
*/
Optional<bool> enable_doppler;
};
/**
- * Layout info of video stream which compose a transcoder video stream.
+ * @brief Layout information of a specific sub-video stream within the mixed stream.
*/
struct VideoLayout {
/**
- * Channel Id from which this video stream come from.
+ * The channel name to which the sub-video stream belongs.
*/
const char* channelId;
/**
- * User id of video stream.
+ * User ID who published this sub-video stream.
*/
rtc::uid_t uid;
/**
- * User account of video stream.
+ * Reserved for future use.
*/
user_id_t strUid;
/**
- * x coordinate of video stream on a transcoded video stream canvas.
+ * X-coordinate (px) of the sub-video stream on the mixing canvas. The relative lateral displacement
+ * of the top left corner of the video for video mixing to the origin (the top left corner of the
+ * canvas).
*/
uint32_t x;
/**
- * y coordinate of video stream on a transcoded video stream canvas.
+ * Y-coordinate (px) of the sub-video stream on the mixing canvas. The relative longitudinal
+ * displacement of the top left corner of the captured video to the origin (the top left corner of
+ * the canvas).
*/
uint32_t y;
/**
- * width of video stream on a transcoded video stream canvas.
+ * Width (px) of the sub-video stream.
*/
uint32_t width;
/**
- * height of video stream on a transcoded video stream canvas.
+ * Height (px) of the sub-video stream.
*/
uint32_t height;
/**
- * video state of video stream on a transcoded video stream canvas.
- * 0 for normal video , 1 for placeholder image showed , 2 for black image.
+ * Status of the sub-video stream on the video mixing canvas.
+ * - 0: Normal. The sub-video stream has been rendered onto the mixing canvas.
+ * - 1: Placeholder image. The sub-video stream has no video frames and is displayed as a
+ * placeholder on the mixing canvas.
+ * - 2: Black image. The sub-video stream is replaced by a black image.
*/
uint32_t videoState;
@@ -7606,7 +8240,7 @@ AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char* credential_
AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback* callback);
/**
- * @brief Get the LicenseCallback pointer if already setup,
+ * @brief Gets the LicenseCallback pointer if already setup,
* otherwise, return null.
*
* @return a pointer of agora::base::LicenseCallback
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaBase.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaBase.h
index 6da9d7931..8e25b24c5 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaBase.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaBase.h
@@ -36,26 +36,25 @@ static const unsigned int DUMMY_CONNECTION_ID = (std::numeric_limits\AppData\Local\Agora\\example.jpg`
- * - iOS: `/App Sandbox/Library/Caches/example.jpg`
+ * - iOS: `/App Sandbox/Library/Caches/example.jpg`
* - macOS: `~/Library/Logs/example.jpg`
- * - Android: `/storage/emulated/0/Android/data//files/example.jpg`
+ * - Android: `/storage/emulated/0/Android/data/<package name>/files/example.jpg`
+ * @note Ensure that the path you specify exists and is writable.
*/
const char* filePath;
- /**
- * The position of the video observation. See VIDEO_MODULE_POSITION.
- *
- * Allowed values vary depending on the `uid` parameter passed in `takeSnapshot` or `takeSnapshotEx`:
- * - uid = 0: Position 2, 4 and 8 are allowed.
- * - uid != 0: Only position 2 is allowed.
- *
+ /**
+ * The position of the snapshot video frame in the video pipeline. See `VIDEO_MODULE_POSITION`.
*/
media::base::VIDEO_MODULE_POSITION position;
SnapshotConfig() :filePath(NULL), position(media::base::POSITION_PRE_ENCODER) {}
@@ -1250,12 +1395,14 @@ struct SnapshotConfig {
class IAudioPcmFrameSink {
public:
/**
- * Occurs when each time the player receives an audio frame.
+ * @brief Occurs each time the player receives an audio frame.
+ *
+ * @details
+ * After registering the audio frame observer, the callback occurs every time the player receives an
+ * audio frame, reporting the detailed information of the audio frame.
+ *
+ * @param frame The audio frame information. See AudioPcmFrame.
*
- * After registering the audio frame observer,
- * the callback occurs when each time the player receives an audio frame,
- * reporting the detailed information of the audio frame.
- * @param frame The detailed information of the audio frame. See {@link AudioPcmFrame}.
*/
virtual void onFrame(agora::media::base::AudioPcmFrame* frame) = 0;
virtual ~IAudioPcmFrameSink() {}
@@ -1267,62 +1414,56 @@ class IAudioPcmFrameSink {
class IAudioFrameObserverBase {
public:
/**
- * Audio frame types.
+ * @brief Audio frame type.
*/
enum AUDIO_FRAME_TYPE {
/**
- * 0: 16-bit PCM.
+ * 0: PCM 16
*/
FRAME_TYPE_PCM16 = 0,
};
enum { MAX_HANDLE_TIME_CNT = 10 };
/**
- * The definition of the AudioFrame struct.
+ * @brief Raw audio data.
*/
struct AudioFrame {
/**
- * The audio frame type: #AUDIO_FRAME_TYPE.
+ * The type of the audio frame. See `AUDIO_FRAME_TYPE`.
*/
AUDIO_FRAME_TYPE type;
/**
- * The number of samples per channel in this frame.
+ * The number of samples per channel in the audio frame.
*/
int samplesPerChannel;
/**
- * The number of bytes per sample: #BYTES_PER_SAMPLE
+ * The number of bytes per sample. For PCM, this parameter is generally set to 16 bits (2 bytes).
*/
agora::rtc::BYTES_PER_SAMPLE bytesPerSample;
/**
- * The number of audio channels (data is interleaved, if stereo).
+ * The number of audio channels (the data are interleaved if it is stereo).
* - 1: Mono.
* - 2: Stereo.
*/
int channels;
/**
- * The sample rate
+ * The sample rate (Hz) of the audio frame.
*/
int samplesPerSec;
/**
- * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data
- * buffer is interleaved.
- *
- * Buffer data size: buffer = samplesPerChannel × channels × bytesPerSample.
+ * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data buffer
+ * is interleaved.
+ * The size of the data buffer is as follows: `buffer` = `samples` × `channels` × `bytesPerSample`.
*/
void* buffer;
/**
- * The timestamp to render the audio data.
- *
+ * The timestamp (ms) of the external audio frame.
* You can use this timestamp to restore the order of the captured audio frame, and synchronize
- * audio and video frames in video scenarios, including scenarios where external video sources
- * are used.
+ * audio and video frames in video scenarios, including scenarios where external video sources are
+ * used.
*/
int64_t renderTimeMs;
/**
- * A reserved parameter.
- *
- * You can use this presentationMs parameter to indicate the presenation milisecond timestamp,
- * this will then filled into audio4 extension part, the remote side could use this pts in av
- * sync process with video frame.
+ * Reserved for future use.
*/
int avsync_type;
/**
@@ -1374,30 +1515,52 @@ class IAudioFrameObserverBase {
AUDIO_FRAME_POSITION_EAR_MONITORING = 0x0010,
};
+ /**
+ * @brief Audio data format.
+ *
+ * @details
+ * You can pass the `AudioParams` object in the following APIs to set the audio data format for the
+ * corresponding callback:
+ * - `getRecordAudioParams`: Sets the audio data format for the `onRecordAudioFrame` callback.
+ * - `getPlaybackAudioParams`: Sets the audio data format for the `onPlaybackAudioFrame` callback.
+ * - `getMixedAudioParams`: Sets the audio data format for the `onMixedAudioFrame` callback.
+ * - `getEarMonitoringAudioParams`: Sets the audio data format for the `onEarMonitoringAudioFrame`
+ * callback.
+ *
+ * @note
+ * - The SDK calculates the sampling interval through the `samplesPerCall`, `sampleRate`, and
+ * `channel` parameters in `AudioParams`, and triggers the `onRecordAudioFrame`,
+ * `onPlaybackAudioFrame`, `onMixedAudioFrame`, and `onEarMonitoringAudioFrame` callbacks according
+ * to the sampling interval.
+ * - Sample interval (sec) = `samplesPerCall` / (`sampleRate` × `channel`).
+ * - Ensure that the sample interval ≥ 0.01 (s).
+ *
+ */
struct AudioParams {
- /** The audio sample rate (Hz), which can be set as one of the following values:
-
- - `8000`
- - `16000` (Default)
- - `32000`
- - `44100 `
- - `48000`
+ /**
+ * The audio sample rate (Hz), which can be set as one of the following values:
+ * - 8000.
+ * - (Default) 16000.
+ * - 32000.
+ * - 44100
+ * - 48000
*/
int sample_rate;
- /* The number of audio channels, which can be set as either of the following values:
-
- - `1`: Mono (Default)
- - `2`: Stereo
+ /**
+ * The number of audio channels, which can be set as either of the following values:
+ * - 1: (Default) Mono.
+ * - 2: Stereo.
*/
int channels;
- /* The use mode of the audio data. See AgoraAudioRawFrameOperationMode.
+ /**
+ * The use mode of the audio data. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`.
*/
rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE mode;
- /** The number of samples. For example, set it as 1024 for RTMP or RTMPS
- streaming.
+ /**
+ * The number of samples, such as 1024 for the media push.
*/
int samples_per_call;
@@ -1418,38 +1581,107 @@ class IAudioFrameObserverBase {
virtual ~IAudioFrameObserverBase() {}
/**
- * Occurs when the recorded audio frame is received.
- * @param channelId The channel name
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Gets the captured audio frame.
+ *
+ * @details
+ * To ensure that the format of the captured audio frame is as expected, you can choose one of the
+ * following two methods to set the audio data format:
+ * - Method 1: After calling `setRecordingAudioFrameParameters` to set the audio data format and
+ * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the
+ * sampling interval according to the parameters set in the methods, and triggers the
+ * `onRecordAudioFrame` callback according to the sampling interval.
+ * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer
+ * object, set the audio data format in the return value of the `getObservedAudioFramePosition`
+ * callback. The SDK then calculates the sampling interval according to the return value of the
+ * `getRecordAudioParams` callback, and triggers the `onRecordAudioFrame` callback according to the
+ * sampling interval.
+ *
+ * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the
+ * audio data format, the setting of method 2 is invalid.
+ *
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ * @param channelId The channel ID.
+ *
* @return
- * - true: The recorded audio frame is valid and is encoded and sent.
- * - false: The recorded audio frame is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onRecordAudioFrame(const char* channelId, AudioFrame& audioFrame) = 0;
/**
- * Occurs when the playback audio frame is received.
- * @param channelId The channel name
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Gets the raw audio frame for playback.
+ *
+ * @details
+ * To ensure that the data format of audio frame for playback is as expected, Agora recommends that
+ * you choose one of the following two methods to set the audio data format:
+ * - Method 1: After calling `setPlaybackAudioFrameParameters` to set the audio data format and
+ * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the
+ * sampling interval according to the parameters set in the methods, and triggers the
+ * `onPlaybackAudioFrame` callback according to the sampling interval.
+ * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer
+ * object, set the audio data format in the return value of the `getObservedAudioFramePosition`
+ * callback. The SDK then calculates the sampling interval according to the return value of the
+ * `getPlaybackAudioParams` callback, and triggers the `onPlaybackAudioFrame` callback according to
+ * the sampling interval.
+ *
+ * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the
+ * audio data format, the setting of method 2 is invalid.
+ *
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ * @param channelId The channel ID.
+ *
* @return
- * - true: The playback audio frame is valid and is encoded and sent.
- * - false: The playback audio frame is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onPlaybackAudioFrame(const char* channelId, AudioFrame& audioFrame) = 0;
/**
- * Occurs when the mixed audio data is received.
- * @param channelId The channel name
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Retrieves the mixed captured and playback audio frame.
+ *
+ * @details
+ * To ensure that the data format of mixed captured and playback audio frame meets the expectations,
+ * Agora recommends that you choose one of the following two ways to set the data format:
+ * - Method 1: After calling `setMixedAudioFrameParameters` to set the audio data format and
+ * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the
+ * sampling interval according to the parameters set in the methods, and triggers the
+ * `onMixedAudioFrame` callback according to the sampling interval.
+ * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer
+ * object, set the audio data format in the return value of the `getObservedAudioFramePosition`
+ * callback. The SDK then calculates the sampling interval according to the return value of the
+ * `getMixedAudioParams` callback, and triggers the `onMixedAudioFrame` callback according to the
+ * sampling interval.
+ *
+ * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the
+ * audio data format, the setting of method 2 is invalid.
+ *
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ * @param channelId The channel ID.
+ *
* @return
- * - true: The mixed audio data is valid and is encoded and sent.
- * - false: The mixed audio data is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onMixedAudioFrame(const char* channelId, AudioFrame& audioFrame) = 0;
/**
- * Occurs when the ear monitoring audio frame is received.
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Gets the in-ear monitoring audio frame.
+ *
+ * @details
+ * In order to ensure that the obtained in-ear audio data meets the expectations, Agora recommends
+ * that you choose one of the following two methods to set the in-ear monitoring audio data
+ * format:
+ * - Method 1: After calling `setEarMonitoringAudioFrameParameters` to set the audio data format and
+ * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the
+ * sampling interval according to the parameters set in the methods, and triggers the
+ * `onEarMonitoringAudioFrame` callback according to the sampling interval.
+ * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer
+ * object, set the audio data format in the return value of the `getObservedAudioFramePosition`
+ * callback. The SDK then calculates the sampling interval according to the return value of the
+ * `getEarMonitoringAudioParams` callback, and triggers the `onEarMonitoringAudioFrame` callback
+ * according to the sampling interval.
+ *
+ * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the
+ * audio data format, the setting of method 2 is invalid.
+ *
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ *
* @return
- * - true: The ear monitoring audio data is valid and is encoded and sent.
- * - false: The ear monitoring audio data is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onEarMonitoringAudioFrame(AudioFrame& audioFrame) = 0;
/**
@@ -1470,75 +1702,109 @@ class IAudioFrameObserverBase {
}
/**
- * Sets the frame position for the audio observer.
- * @return A bit mask that controls the frame position of the audio observer.
- * @note - Use '|' (the OR operator) to observe multiple frame positions.
- *
- * After you successfully register the audio observer, the SDK triggers this callback each time it
- * receives a audio frame. You can determine which position to observe by setting the return
- * value. The SDK provides 4 positions for observer. Each position corresponds to a callback
- * function:
- * - `AUDIO_FRAME_POSITION_PLAYBACK (1 << 0)`: The position for playback audio frame is received,
- * which corresponds to the \ref onPlaybackFrame "onPlaybackFrame" callback.
- * - `AUDIO_FRAME_POSITION_RECORD (1 << 1)`: The position for record audio frame is received,
- * which corresponds to the \ref onRecordFrame "onRecordFrame" callback.
- * - `AUDIO_FRAME_POSITION_MIXED (1 << 2)`: The position for mixed audio frame is received, which
- * corresponds to the \ref onMixedFrame "onMixedFrame" callback.
- * - `AUDIO_FRAME_POSITION_BEFORE_MIXING (1 << 3)`: The position for playback audio frame before
- * mixing is received, which corresponds to the \ref onPlaybackFrameBeforeMixing
- * "onPlaybackFrameBeforeMixing" callback.
- * @return The bit mask that controls the audio observation positions.
- * See AUDIO_FRAME_POSITION.
+ * @brief Sets the frame position for the audio observer.
+ *
+ * @details
+ * After successfully registering the audio data observer, the SDK uses this callback for each
+ * specific audio frame processing node to determine whether to trigger the following callbacks:
+ * - `onRecordAudioFrame`
+ * - `onPlaybackAudioFrame`
+ * - `onPlaybackAudioFrameBeforeMixing`
+ * - `onMixedAudioFrame`
+ * - `onEarMonitoringAudioFrame`
+ * You can set one or more positions you need to observe by modifying the return value of
+ * `getObservedAudioFramePosition` based on your scenario requirements:
+ * When observing multiple positions, use | (the OR operator) to combine them. To conserve
+ * system resources, you can reduce the number of frame positions that you want to observe.
+ *
+ * @return
+ * a bitmask that sets the observation position, with the following values:
+ * - AUDIO_FRAME_POSITION_PLAYBACK (0x0001): This position can observe the playback audio mixed by
+ * all remote users, corresponding to the `onPlaybackAudioFrame` callback.
+ * - AUDIO_FRAME_POSITION_RECORD (0x0002): This position can observe the collected local user's
+ * audio, corresponding to the `onRecordAudioFrame` callback.
+ * - AUDIO_FRAME_POSITION_MIXED (0x0004): This position can observe the playback audio mixed by the
+ * local user and all remote users, corresponding to the `onMixedAudioFrame` callback.
+ * - AUDIO_FRAME_POSITION_BEFORE_MIXING (0x0008): This position can observe the audio of a single
+ * remote user before mixing, corresponding to the `onPlaybackAudioFrameBeforeMixing` callback.
+ * - AUDIO_FRAME_POSITION_EAR_MONITORING (0x0010): This position can observe the in-ear monitoring
+ * audio of the local user, corresponding to the `onEarMonitoringAudioFrame` callback.
*/
-
virtual int getObservedAudioFramePosition() = 0;
- /** Sets the audio playback format
- **Note**:
-
- - The SDK calculates the sample interval according to the `AudioParams`
- you set in the return value of this callback and triggers the
- `onPlaybackAudioFrame` callback at the calculated sample interval.
- Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
- Ensure that the value of sample interval is equal to or greater than 0.01.
-
- @return Sets the audio format. See AgoraAudioParams.
+ /**
+ * @brief Sets the audio format for the `onPlaybackAudioFrame` callback.
+ *
+ * @details
+ * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+ * successfully register the audio observer, the SDK triggers this callback, and you can set the
+ * audio format in the return value of this callback.
+ *
+ * @note
+ * The SDK triggers the `onPlaybackAudioFrame` callback with the `AudioParams` calculated sampling
+ * interval you set in the return value. The calculation formula is Sample interval (sec) =
+ * `samplesPerCall` /( `sampleRate` × `channel` ).
+ * Ensure that the sample interval ≥ 0.01 (s).
+ *
+ * @return
+ * The audio data for playback, see `AudioParams`.
*/
virtual AudioParams getPlaybackAudioParams() = 0;
- /** Sets the audio recording format
- **Note**:
- - The SDK calculates the sample interval according to the `AudioParams`
- you set in the return value of this callback and triggers the
- `onRecordAudioFrame` callback at the calculated sample interval.
- Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
- Ensure that the value of sample interval is equal to or greater than 0.01.
-
- @return Sets the audio format. See AgoraAudioParams.
+ /**
+ * @brief Sets the audio format for the `onRecordAudioFrame` callback.
+ *
+ * @details
+ * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+ * successfully register the audio observer, the SDK triggers this callback, and you can set the
+ * audio format in the return value of this callback.
+ *
+ * @note
+ * The SDK triggers the `onRecordAudioFrame` callback with the `AudioParams` calculated sampling
+ * interval you set in the return value. The calculation formula is Sample interval (sec) =
+ * `samplesPerCall` /( `sampleRate` × `channel` ).
+ * Ensure that the sample interval ≥ 0.01 (s).
+ *
+ * @return
+ * The captured audio data, see `AudioParams`.
*/
virtual AudioParams getRecordAudioParams() = 0;
- /** Sets the audio mixing format
- **Note**:
- - The SDK calculates the sample interval according to the `AudioParams`
- you set in the return value of this callback and triggers the
- `onMixedAudioFrame` callback at the calculated sample interval.
- Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
- Ensure that the value of sample interval is equal to or greater than 0.01.
-
- @return Sets the audio format. See AgoraAudioParams.
+ /**
+ * @brief Sets the audio format for the `onMixedAudioFrame` callback.
+ *
+ * @details
+ * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+ * successfully register the audio observer, the SDK triggers this callback, and you can set the
+ * audio format in the return value of this callback.
+ *
+ * @note
+ * The SDK triggers the `onMixedAudioFrame` callback with the `AudioParams` calculated sampling
+ * interval you set in the return value. The calculation formula is Sample interval (sec) =
+ * `samplesPerCall` /( `sampleRate` × `channel` ).
+ * Ensure that the sample interval ≥ 0.01 (s).
+ *
+ * @return
+ * The mixed captured and playback audio data. See `AudioParams`.
*/
virtual AudioParams getMixedAudioParams() = 0;
- /** Sets the ear monitoring audio format
- **Note**:
- - The SDK calculates the sample interval according to the `AudioParams`
- you set in the return value of this callback and triggers the
- `onEarMonitoringAudioFrame` callback at the calculated sample interval.
- Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
- Ensure that the value of sample interval is equal to or greater than 0.01.
-
- @return Sets the audio format. See AgoraAudioParams.
+ /**
+ * @brief Sets the audio format for the `onEarMonitoringAudioFrame` callback.
+ *
+ * @details
+ * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+ * successfully register the audio observer, the SDK triggers this callback, and you can set the
+ * audio format in the return value of this callback.
+ *
+ * @note
+ * The SDK triggers the `onEarMonitoringAudioFrame` callback with the `AudioParams` calculated
+ * sampling interval you set in the return value. The calculation formula is Sample interval (sec)
+ * = `samplesPerCall` /( `sampleRate` × `channel` ).
+ * Ensure that the sample interval ≥ 0.01 (s).
+ *
+ * @return
+ * The audio data of in-ear monitoring, see `AudioParams`.
*/
virtual AudioParams getEarMonitoringAudioParams() = 0;
};
@@ -1550,25 +1816,31 @@ class IAudioFrameObserver : public IAudioFrameObserverBase {
public:
using IAudioFrameObserverBase::onPlaybackAudioFrameBeforeMixing;
/**
- * Occurs when the before-mixing playback audio frame is received.
- * @param channelId The channel name
- * @param uid ID of the remote user.
- * @param audioFrame The reference to the audio frame: AudioFrame.
+ * @brief Retrieves the audio frame before mixing of subscribed remote users.
+ *
+ * @param channelId The channel ID.
+ * @param uid The ID of subscribed remote users.
+ * @param audioFrame The raw audio data. See `AudioFrame`.
+ *
* @return
- * - true: The before-mixing playback audio frame is valid and is encoded and sent.
- * - false: The before-mixing playback audio frame is invalid and is not encoded or sent.
+ * Without practical meaning.
*/
virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid,
AudioFrame& audioFrame) = 0;
};
+/**
+ * @brief The audio spectrum data.
+ */
struct AudioSpectrumData {
/**
- * The audio spectrum data of audio.
+ * The audio spectrum data. Agora divides the audio frequency into 256 frequency domains, and
+ * reports the energy value of each frequency domain through this parameter. The value range of each
+ * energy type is [-300, 1] and the unit is dBFS.
*/
const float* audioSpectrumData;
/**
- * The data length of audio spectrum data.
+ * The audio spectrum data length is 256.
*/
int dataLength;
@@ -1576,13 +1848,16 @@ struct AudioSpectrumData {
AudioSpectrumData(const float* data, int length) : audioSpectrumData(data), dataLength(length) {}
};
+/**
+ * @brief Audio spectrum information of the remote user.
+ */
struct UserAudioSpectrumInfo {
/**
- * User ID of the speaker.
+ * The user ID of the remote user.
*/
agora::rtc::uid_t uid;
/**
- * The audio spectrum data of audio.
+ * Audio spectrum information of the remote user. See `AudioSpectrumData`.
*/
struct AudioSpectrumData spectrumData;
@@ -1600,37 +1875,40 @@ class IAudioSpectrumObserver {
virtual ~IAudioSpectrumObserver() {}
/**
- * Reports the audio spectrum of local audio.
+ * @brief Gets the statistics of a local audio spectrum.
*
- * This callback reports the audio spectrum data of the local audio at the moment
- * in the channel.
+ * @details
+ * After successfully calling `registerAudioSpectrumObserver` to implement the
+ * `onLocalAudioSpectrum` callback in `IAudioSpectrumObserver` and calling
+ * `enableAudioSpectrumMonitor` to enable audio spectrum monitoring, the SDK triggers this callback
+ * at the time interval you set to report the local audio data spectrum before encoding.
*
- * You can set the time interval of this callback using \ref
- * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor".
+ * @param data The audio spectrum data of the local user. See `AudioSpectrumData`.
*
- * @param data The audio spectrum data of local audio.
- * - true: Processed.
- * - false: Not processed.
+ * @return
+ * Whether the spectrum data is received:
+ * - `true`: Spectrum data is received.
+ * - `false`: No spectrum data is received.
*/
virtual bool onLocalAudioSpectrum(const AudioSpectrumData& data) = 0;
/**
- * Reports the audio spectrum of remote user.
+ * @brief Gets the remote audio spectrum.
*
- * This callback reports the IDs and audio spectrum data of the loudest speakers at the moment
- * in the channel.
+ * @details
+ * After successfully calling `registerAudioSpectrumObserver` to implement the
+ * `onRemoteAudioSpectrum` callback in the `IAudioSpectrumObserver` and calling
+ * `enableAudioSpectrumMonitor` to enable audio spectrum monitoring, the SDK will trigger the
+ * callback at the time interval you set to report the received remote audio data spectrum.
*
- * You can set the time interval of this callback using \ref
- * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor".
+ * @param spectrums The audio spectrum information of the remote user. See `UserAudioSpectrumInfo`.
+ * The number of arrays is the number of remote users monitored by the SDK. If the array is null, it
+ * means that no audio spectrum of remote users is detected.
+ * @param spectrumNumber The number of remote users.
*
- * @param spectrums The pointer to \ref agora::media::UserAudioSpectrumInfo
- * "UserAudioSpectrumInfo", which is an array containing the user ID and audio spectrum data for
- * each speaker.
- * - This array contains the following members:
- * - `uid`, which is the UID of each remote speaker
- * - `spectrumData`, which reports the audio spectrum of each remote speaker.
- * @param spectrumNumber The array length of the spectrums.
- * - true: Processed.
- * - false: Not processed.
+ * @return
+ * Whether the spectrum data is received:
+ * - `true`: Spectrum data is received.
+ * - `false`: No spectrum data is received.
*/
virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums,
unsigned int spectrumNumber) = 0;
@@ -1642,17 +1920,26 @@ class IAudioSpectrumObserver {
class IVideoEncodedFrameObserver {
public:
/**
- * Occurs each time the SDK receives an encoded video image.
- * @param uid The user id of remote user.
- * @param imageBuffer The pointer to the video image buffer.
+ * @brief Reports that the receiver has received the to-be-decoded video frame sent by the remote
+ * end.
+ *
+ * @details
+ * If you call the `setRemoteVideoSubscriptionOptions` method and set `encodedFrameOnly` to `true`,
+ * the SDK triggers this callback locally to report the received encoded video frame information.
+ *
+ * @since 4.6.0
+ * @param channelId The channel name.
+ * @param uid The user ID of the remote user.
+ * @param imageBuffer The encoded video image buffer.
* @param length The data length of the video image.
- * @param videoEncodedFrameInfo The information of the encoded video frame: EncodedVideoFrameInfo.
- * @return Determines whether to accept encoded video image.
- * - true: Accept.
- * - false: Do not accept.
+ * @param videoEncodedFrameInfo For the information of the encoded video frame, see
+ * `EncodedVideoFrameInfo`.
+ *
+ * @return
+ * Without practical meaning.
*/
virtual bool onEncodedVideoFrameReceived(
- rtc::uid_t uid, const uint8_t* imageBuffer, size_t length,
+ const char* channelId, rtc::uid_t uid, const uint8_t* imageBuffer, size_t length,
const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0;
virtual ~IVideoEncodedFrameObserver() {}
@@ -1665,19 +1952,17 @@ class IVideoFrameObserver {
public:
typedef media::base::VideoFrame VideoFrame;
/**
- * The process mode of the video frame:
+ * @brief The process mode of the video frame:
*/
enum VIDEO_FRAME_PROCESS_MODE {
/**
* Read-only mode.
- *
* In this mode, you do not modify the video frame. The video frame observer is a renderer.
*/
PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original
// frame.
/**
* Read and write mode.
- *
* In this mode, you modify the video frame. The video frame observer is a video filter.
*/
PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and
@@ -1688,52 +1973,87 @@ class IVideoFrameObserver {
virtual ~IVideoFrameObserver() {}
/**
- * Occurs each time the SDK receives a video frame captured by the local camera.
- *
- * After you successfully register the video frame observer, the SDK triggers this callback each
- * time a video frame is received. In this callback, you can get the video data captured by the
- * local camera. You can then pre-process the data according to your scenarios.
+ * @brief Occurs each time the SDK receives a video frame captured by local devices.
*
- * After pre-processing, you can send the processed video data back to the SDK by setting the
- * `videoFrame` parameter in this callback.
+ * @details
+ * You can get raw video data collected by the local device through this callback and preprocess it
+ * as needed. Once the preprocessing is complete, you can directly modify `videoFrame` in this
+ * callback, and set the return value to `true` to send the modified video data to the SDK.
+ * If you need to send the preprocessed data to the SDK, you need to call `getVideoFrameProcessMode`
+ * first to set the video processing mode to read and write mode ( PROCESS_MODE_READ_WRITE ).
+ * Applicable scenarios: - Preprocess the locally collected video data before it is processed by the
+ * SDK. For example, get video data through this callback and process it with filters, watermarks,
+ * cropping, rotation, etc.
+ * - Get information about the locally collected video data before it is processed by the SDK. For
+ * example, the original width, height, frame rate of the video frame, etc.
+ * Call timing: After the successful registration of the video data observer, each time the SDK
+ * captures a video frame.
*
* @note
- * - If you get the video data in RGBA color encoding format, Agora does not support using this
- * callback to send the processed data in RGBA color encoding format back to the SDK.
- * - The video data that this callback gets has not been pre-processed, such as watermarking,
- * cropping content, rotating, or image enhancement.
+ * - If the video data type you get is RGBA, the SDK does not support processing the data of the
+ * alpha channel.
+ * - It is recommended that you ensure the modified parameters in `videoFrame` are consistent with
+ * the actual situation of the video frames in the video frame buffer. Otherwise, it may cause
+ * unexpected rotation, distortion, and other issues in the local preview and remote video display.
+ * The default video format that you get from this callback is YUV420. If you need other formats,
+ * you can set the expected data format in the `getVideoFormatPreference` callback.
*
- * @param videoFrame A pointer to the video frame: VideoFrame
- * @param sourceType source type of video frame. See #VIDEO_SOURCE_TYPE.
- * @return Determines whether to ignore the current video frame if the pre-processing fails:
- * - true: Do not ignore.
- * - false: Ignore, in which case this method does not sent the current video frame to the SDK.
+ * @param sourceType Video source types, including cameras, screens, or media player. See
+ * `VIDEO_SOURCE_TYPE`.
+ * @param videoFrame The video frame. See `VideoFrame`. Note: The default value of the video frame
+ * data format obtained through this callback is as follows:
+ * - Android: I420 or RGB (GLES20.GL_TEXTURE_2D)
+ * - iOS: I420 or CVPixelBufferRef
+ * - macOS: I420 or CVPixelBufferRef
+ * - Windows: YUV420
+ *
+ * @return
+ * - When the video processing mode is `PROCESS_MODE_READ_ONLY`:
+ * - `true`: Reserved for future use.
+ * - `false`: Reserved for future use.
+ * - When the video processing mode is `PROCESS_MODE_READ_WRITE`:
+ * - `true`: Sets the SDK to receive the video frame.
+ * - `false`: Sets the SDK to discard the video frame.
*/
virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType,
VideoFrame& videoFrame) = 0;
/**
- * Occurs each time the SDK receives a video frame before encoding.
+ * @brief Occurs each time the SDK receives a video frame before encoding.
*
+ * @details
* After you successfully register the video frame observer, the SDK triggers this callback each
- * time when it receives a video frame. In this callback, you can get the video data before
- * encoding. You can then process the data according to your particular scenarios.
- *
- * After processing, you can send the processed video data back to the SDK by setting the
- * `videoFrame` parameter in this callback.
+ * time it receives a video frame. In this callback, you can get the video data before encoding and
+ * then process the data according to your particular scenarios.
+ * After processing, you can send the processed video data back to the SDK in this callback.
*
* @note
- * - To get the video data captured from the second screen before encoding, you need to set (1 <<
- * 2) as a frame position through `getObservedFramePosition`.
- * - The video data that this callback gets has been pre-processed, such as watermarking, cropping
- * content, rotating, or image enhancement.
- * - This callback does not support sending processed RGBA video data back to the SDK.
+ * - If you need to send the preprocessed data to the SDK, you need to call
+ * `getVideoFrameProcessMode` first to set the video processing mode to read and write mode (
+ * PROCESS_MODE_READ_WRITE ).
+ * - To get the video data captured from the second screen before encoding, you need to set
+ * `POSITION_PRE_ENCODER` (1 << 2) as a frame position through `getObservedFramePosition`.
+ * - The video data that this callback gets has been preprocessed, with its content cropped and
+ * rotated, and the image enhanced.
+ * - It is recommended that you ensure the modified parameters in `videoFrame` are consistent with
+ * the actual situation of the video frames in the video frame buffer. Otherwise, it may cause
+ * unexpected rotation, distortion, and other issues in the local preview and remote video display.
*
- * @param videoFrame A pointer to the video frame: VideoFrame
- * @param sourceType source type of video frame. See #VIDEO_SOURCE_TYPE.
- * @return Determines whether to ignore the current video frame if the pre-processing fails:
- * - true: Do not ignore.
- * - false: Ignore, in which case this method does not sent the current video frame to the SDK.
+ * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ * @param videoFrame The video frame. See `VideoFrame`. Note: The default value of the video frame
+ * data format obtained through this callback is as follows:
+ * - Android: I420 or RGB (GLES20.GL_TEXTURE_2D)
+ * - iOS: I420 or CVPixelBufferRef
+ * - macOS: I420 or CVPixelBufferRef
+ * - Windows: YUV420
+ *
+ * @return
+ * - When the video processing mode is `PROCESS_MODE_READ_ONLY`:
+ * - `true`: Reserved for future use.
+ * - `false`: Reserved for future use.
+ * - When the video processing mode is `PROCESS_MODE_READ_WRITE`:
+ * - `true`: Sets the SDK to receive the video frame.
+ * - `false`: Sets the SDK to discard the video frame.
*/
virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType,
VideoFrame& videoFrame) = 0;
@@ -1764,23 +2084,41 @@ class IVideoFrameObserver {
virtual bool onMediaPlayerVideoFrame(VideoFrame& videoFrame, int mediaPlayerId) = 0;
/**
- * Occurs each time the SDK receives a video frame sent by the remote user.
+ * @brief Occurs each time the SDK receives a video frame sent by the remote user.
*
+ * @details
* After you successfully register the video frame observer, the SDK triggers this callback each
- * time a video frame is received. In this callback, you can get the video data sent by the remote
- * user. You can then post-process the data according to your scenarios.
+ * time it receives a video frame. In this callback, you can get the video data sent from the remote
+ * end before rendering, and then process it according to the particular scenarios.
+ * The default video format that you get from this callback is YUV420. If you need other formats,
+ * you can set the expected data format in the `getVideoFormatPreference` callback.
*
- * After post-processing, you can send the processed data back to the SDK by setting the
- * `videoFrame` parameter in this callback.
+ * @note
+ * - If you need to send the preprocessed data to the SDK, you need to call
+ * `getVideoFrameProcessMode` first to set the video processing mode to read and write mode (
+ * PROCESS_MODE_READ_WRITE ).
+ * - If the video data type you get is RGBA, the SDK does not support processing the data of the
+ * alpha channel.
+ * - It is recommended that you ensure the modified parameters in `videoFrame` are consistent with
+ * the actual situation of the video frames in the video frame buffer. Otherwise, it may cause
+ * unexpected rotation, distortion, and other issues in the local preview and remote video display.
*
- * @note This callback does not support sending processed RGBA video data back to the SDK.
+ * @param remoteUid The user ID of the remote user who sends the current video frame.
+ * @param videoFrame The video frame. See `VideoFrame`. Note: The default value of the video frame
+ * data format obtained through this callback is as follows:
+ * - Android: I420 or RGB (GLES20.GL_TEXTURE_2D)
+ * - iOS: I420 or CVPixelBufferRef
+ * - macOS: I420 or CVPixelBufferRef
+ * - Windows: YUV420
+ * @param channelId The channel ID.
*
- * @param channelId The channel name
- * @param remoteUid ID of the remote user who sends the current video frame.
- * @param videoFrame A pointer to the video frame: VideoFrame
- * @return Determines whether to ignore the current video frame if the post-processing fails:
- * - true: Do not ignore.
- * - false: Ignore, in which case this method does not sent the current video frame to the SDK.
+ * @return
+ * - When the video processing mode is `PROCESS_MODE_READ_ONLY`:
+ * - `true`: Reserved for future use.
+ * - `false`: Reserved for future use.
+ * - When the video processing mode is `PROCESS_MODE_READ_WRITE`:
+ * - `true`: Sets the SDK to receive the video frame.
+ * - `false`: Sets the SDK to discard the video frame.
*/
virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid,
VideoFrame& videoFrame) = 0;
@@ -1788,13 +2126,16 @@ class IVideoFrameObserver {
virtual bool onTranscodedVideoFrame(VideoFrame& videoFrame) = 0;
/**
- * Occurs each time the SDK receives a video frame and prompts you to set the process mode of the
- * video frame.
+ * @brief Occurs each time the SDK receives a video frame and prompts you to set the process mode of
+ * the video frame.
*
+ * @details
* After you successfully register the video frame observer, the SDK triggers this callback each
* time it receives a video frame. You need to set your preferred process mode in the return value
* of this callback.
- * @return VIDEO_FRAME_PROCESS_MODE.
+ *
+ * @return
+ * See `VIDEO_FRAME_PROCESS_MODE`.
*/
virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { return PROCESS_MODE_READ_ONLY; }
@@ -1816,61 +2157,90 @@ class IVideoFrameObserver {
virtual base::VIDEO_PIXEL_FORMAT getVideoFormatPreference() { return base::VIDEO_PIXEL_DEFAULT; }
/**
- * Occurs each time the SDK receives a video frame, and prompts you whether to rotate the captured
- * video.
+ * @brief Occurs each time the SDK receives a video frame, and prompts you whether to rotate the
+ * captured video.
*
- * If you want to rotate the captured video according to the rotation member in the `VideoFrame`
- * class, register this callback by calling `registerVideoFrameObserver`. After you successfully
- * register the video frame observer, the SDK triggers this callback each time it receives a video
- * frame. You need to set whether to rotate the video frame in the return value of this callback.
+ * @details
+ * If you want to rotate the captured video according to the `rotation` member in the `VideoFrame`
+ * class, ensure that you register this callback when calling `registerVideoFrameObserver`. After
+ * you successfully register the video frame observer, the SDK triggers this callback each time it
+ * receives a video frame. You need to set whether to rotate the video frame in the return value of
+ * this callback.
*
- * @note This function only supports video data in RGBA or YUV420.
+ * @note
+ * - On the Android platform, the supported video data formats for this callback are: I420, RGBA,
+ * and Texture.
+ * - On the Windows platform, the supported video data formats for this callback are: I420, RGBA,
+ * and TextureBuffer.
+ * - On the iOS platform, the supported video data formats for this callback are: I420, RGBA, and
+ * CVPixelBuffer.
+ * - On the macOS platform, the supported video data formats for this callback are: I420 and RGBA.
*
- * @return Determines whether to rotate.
+ * @return
+ * Sets whether to rotate the captured video:
* - `true`: Rotate the captured video.
* - `false`: (Default) Do not rotate the captured video.
*/
virtual bool getRotationApplied() { return false; }
/**
- * Occurs each time the SDK receives a video frame and prompts you whether or not to mirror the
- * captured video.
+ * @brief Occurs each time the SDK receives a video frame and prompts you whether or not to mirror
+ * the captured video.
*
+ * @details
* If the video data you want to obtain is a mirror image of the original video, you need to
- * register this callback when calling `registerVideoFrameObserver`. After you successfully
- * register the video frame observer, the SDK triggers this callback each time it receives a video
- * frame. You need to set whether or not to mirror the video frame in the return value of this
- * callback.
+ * register this callback when calling `registerVideoFrameObserver`. After you successfully register
+ * the video frame observer, the SDK triggers this callback each time it receives a video frame. You
+ * need to set whether or not to mirror the video frame in the return value of this callback.
*
- * @note This function only supports video data in RGBA and YUV420 formats.
+ * @note
+ * - On the Android platform, the supported video data formats for this callback are: I420, RGBA,
+ * and Texture.
+ * - On the Windows platform, the supported video data formats for this callback are: I420, RGBA,
+ * and TextureBuffer.
+ * - On the iOS platform, the supported video data formats for this callback are: I420, RGBA, and
+ * CVPixelBuffer.
+ * - On the macOS platform, the supported video data formats for this callback are: I420 and RGBA.
+ * - Both this method and the `setVideoEncoderConfiguration` method support setting the mirroring
+ * effect. Agora recommends that you only choose one method to set it up. Using both methods at the
+ * same time causes the mirroring effect to overlap, and the mirroring settings fail.
*
- * @return Determines whether to mirror.
+ * @return
+ * Sets whether or not to mirror the captured video:
* - `true`: Mirror the captured video.
* - `false`: (Default) Do not mirror the captured video.
*/
virtual bool getMirrorApplied() { return false; }
/**
- * Sets the frame position for the video observer.
- *
- * After you successfully register the video observer, the SDK triggers this callback each time it
- * receives a video frame. You can determine which position to observe by setting the return
- * value. The SDK provides 3 positions for observer. Each position corresponds to a callback
- * function:
+ * @brief Sets the frame position for the video observer.
*
- * POSITION_POST_CAPTURER(1 << 0): The position after capturing the video data, which corresponds
- * to the onCaptureVideoFrame callback. POSITION_PRE_RENDERER(1 << 1): The position before
- * receiving the remote video data, which corresponds to the onRenderVideoFrame callback.
- * POSITION_PRE_ENCODER(1 << 2): The position before encoding the video data, which corresponds to
- * the onPreEncodeVideoFrame callback.
+ * @details
+ * After successfully registering the video data observer, the SDK uses this callback to determine
+ * whether to trigger `onCaptureVideoFrame`, `onRenderVideoFrame` and `onPreEncodeVideoFrame`
+ * callback at each specific video frame processing position, so that you can observe the locally
+ * collected video data, the video data sent by the remote end, and the video data before encoding.
+ * You can set one or more positions you need to observe by modifying the return value according to
+ * your scenario:
+ * - `POSITION_POST_CAPTURER` (1 << 0): The position after capturing the video data, which
+ * corresponds to the `onCaptureVideoFrame` callback.
+ * - `POSITION_PRE_RENDERER` (1 << 1): The position of the received remote video data before
+ * rendering, which corresponds to the `onRenderVideoFrame` callback.
+ * - `POSITION_PRE_ENCODER` (1 << 2): The position before encoding the video data, which corresponds
+ * to the `onPreEncodeVideoFrame` callback.
*
- * To observe multiple frame positions, use '|' (the OR operator).
- * This callback observes POSITION_POST_CAPTURER(1 << 0) and POSITION_PRE_RENDERER(1 << 1) by
- * default. To conserve the system consumption, you can reduce the number of frame positions that
- * you want to observe.
+ * @note
+ * - Use '|' (the OR operator) to observe multiple frame positions.
+ * - This callback observes `POSITION_POST_CAPTURER` (1 << 0) and `POSITION_PRE_RENDERER` (1 << 1)
+ * by default.
+ * - To conserve system resources, you can reduce the number of frame positions that you want to
+ * observe.
+ * - When the video processing mode is `PROCESS_MODE_READ_WRITE` and the observation position is set
+ * to `POSITION_PRE_ENCODER` | `POSITION_POST_CAPTURER`, the `getMirrorApplied` callback does not take
+ * effect; you need to modify the video processing mode or the position of the observer.
*
- * @return A bit mask that controls the frame position of the video observer:
- * VIDEO_OBSERVER_POSITION.
+ * @return
+ * A bit mask that controls the frame position of the video observer. See `VIDEO_MODULE_POSITION`.
*/
virtual uint32_t getObservedFramePosition() {
return base::POSITION_POST_CAPTURER | base::POSITION_PRE_RENDERER;
@@ -1887,21 +2257,21 @@ class IVideoFrameObserver {
};
/**
- * The external video source type.
+ * @brief The external video frame encoding type.
*/
enum EXTERNAL_VIDEO_SOURCE_TYPE {
/**
- * 0: non-encoded video frame.
+ * 0: The video frame is not encoded.
*/
VIDEO_FRAME = 0,
/**
- * 1: encoded video frame.
+ * 1: The video frame is encoded.
*/
ENCODED_VIDEO_FRAME,
};
/**
- * The format of the recording file.
+ * @brief Format of the recording file.
*
* @since v3.5.2
*/
@@ -1912,7 +2282,7 @@ enum MediaRecorderContainerFormat {
FORMAT_MP4 = 1,
};
/**
- * The recording content.
+ * @brief The recording content.
*
* @since v3.5.2
*/
@@ -1931,32 +2301,32 @@ enum MediaRecorderStreamType {
STREAM_TYPE_BOTH = STREAM_TYPE_AUDIO | STREAM_TYPE_VIDEO,
};
/**
- * The current recording state.
+ * @brief The current recording state.
*
* @since v3.5.2
*/
enum RecorderState {
/**
- * -1: An error occurs during the recording. See RecorderReasonCode for the reason.
+ * -1: An error occurs during the recording. See `RecorderReasonCode` for the reason.
*/
RECORDER_STATE_ERROR = -1,
/**
- * 2: The audio and video recording is started.
+ * 2: The audio and video recording starts.
*/
RECORDER_STATE_START = 2,
/**
- * 3: The audio and video recording is stopped.
+ * 3: The audio and video recording stops.
*/
RECORDER_STATE_STOP = 3,
};
/**
- * The reason for the state change
+ * @brief The reason for the state change.
*
* @since v3.5.2
*/
enum RecorderReasonCode {
/**
- * 0: No error occurs.
+ * 0: No error.
*/
RECORDER_REASON_NONE = 0,
/**
@@ -1964,8 +2334,8 @@ enum RecorderReasonCode {
*/
RECORDER_REASON_WRITE_FAILED = 1,
/**
- * 2: The SDK does not detect audio and video streams to be recorded, or audio and video streams
- * are interrupted for more than five seconds during recording.
+ * 2: The SDK does not detect any audio and video streams, or audio and video streams are
+ * interrupted for more than five seconds during recording.
*/
RECORDER_REASON_NO_STREAM = 2,
/**
@@ -1978,62 +2348,77 @@ enum RecorderReasonCode {
RECORDER_REASON_CONFIG_CHANGED = 4,
};
/**
- * Configurations for the local audio and video recording.
+ * @brief Configuration for audio and video stream recording.
*
* @since v3.5.2
*/
struct MediaRecorderConfiguration {
/**
- * The absolute path (including the filename extensions) of the recording file.
- * For example, `C:\Users\\AppData\Local\Agora\\example.mp4` on Windows,
- * `/App Sandbox/Library/Caches/example.mp4` on iOS, `/Library/Logs/example.mp4` on macOS, and
- * `/storage/emulated/0/Android/data//files/example.mp4` on Android.
- *
- * @note Ensure that the specified path exists and is writable.
+ * The absolute path where the recording file will be saved locally, including the file name and
+ * format. For example:
+ * - Windows: `C:\Users\\AppData\Local\Agora\\example.mp4`
+ * - iOS: `/App Sandbox/Library/Caches/example.mp4`
+ * - macOS: `/Library/Logs/example.mp4`
+ * - Android: `/storage/emulated/0/Android/data//files/example.mp4`
+ * @note Make sure the specified path exists and is writable.
*/
const char* storagePath;
/**
- * The format of the recording file. See \ref agora::rtc::MediaRecorderContainerFormat
- * "MediaRecorderContainerFormat".
+ * The format of the recording file. See `MediaRecorderContainerFormat`.
*/
MediaRecorderContainerFormat containerFormat;
/**
- * The recording content. See \ref agora::rtc::MediaRecorderStreamType "MediaRecorderStreamType".
+ * The content to record. See `MediaRecorderStreamType`.
*/
MediaRecorderStreamType streamType;
/**
- * The maximum recording duration, in milliseconds. The default value is 120000.
+ * Maximum recording duration in milliseconds. Default is 120000.
*/
int maxDurationMs;
/**
- * The interval (ms) of updating the recording information. The value range is
- * [1000,10000]. Based on the set value of `recorderInfoUpdateInterval`, the
- * SDK triggers the \ref IMediaRecorderObserver::onRecorderInfoUpdated "onRecorderInfoUpdated"
- * callback to report the updated recording information.
+ * Interval for recording information updates, in milliseconds. The valid range is [1000,10000]. The
+ * SDK triggers the `onRecorderInfoUpdated` callback based on this value to report updated recording
+ * information.
*/
int recorderInfoUpdateInterval;
/**
- * The video width
+ * Width (px) of the recorded video. The maximum value for width × height must not exceed 3840 ×
+ * 2160.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int width;
/**
- * The video height
+ * Height (px) of the recorded video. The maximum value for width × height must not exceed 3840 ×
+ * 2160.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int height;
/**
- * The video fps
+ * Frame rate of the recorded video. The maximum is 30. For example: 5, 10, 15, 24, 30.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int fps;
/**
- * The audio sample rate
+ * Sample rate (Hz) of the recorded audio. Supported values: 16000, 32000, 44100, or 48000.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int sample_rate;
/**
- * The audio channel nums
+ * Number of audio channels to record:
+ * - 1: Mono
+ * - 2: Stereo
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
int channel_num;
/**
- * The video source just for out channel recoder
+ * Type of video source to record. See `VIDEO_SOURCE_TYPE`.
+ * This parameter is required only when calling `createMediaRecorder` and setting `type` in
+ * `RecorderStreamInfo` to PREVIEW.
*/
agora::rtc::VIDEO_SOURCE_TYPE videoSourceType;
@@ -2067,11 +2452,46 @@ struct MediaRecorderConfiguration {
class IFaceInfoObserver {
public:
/**
- * Occurs when the face info is received.
- * @param outFaceInfo The output face info.
+ * @brief Occurs when the facial information processed by speech driven extension is received.
+ *
+ * @param outFaceInfo Output parameter, the JSON string of the facial information processed by the
+ * voice driver plugin, including the following fields:
+ * - faces: Object sequence. The collection of facial information, with each face corresponding to
+ * an object.
+ * - blendshapes: Object. The collection of face capture coefficients, named according to ARKit
+ * standards, with each key-value pair representing a blendshape coefficient. The blendshape
+ * coefficient is a floating point number with a range of [0.0, 1.0].
+ * - rotation: Object sequence. The rotation of the head, which includes the following three
+ * key-value pairs, with values as floating point numbers ranging from -180.0 to 180.0:
+ * - pitch: Head pitch angle. A positive value means looking down, while a negative value means
+ * looking up.
+ * - yaw: Head yaw angle. A positive value means turning left, while a negative value means turning
+ * right.
+ * - roll: Head roll angle. A positive value means tilting to the right, while a negative value
+ * means tilting to the left.
+ * - timestamp: String. The timestamp of the output result, in milliseconds.
+ * Here is an example of JSON:
+ * ```json
+ * { "faces":[{ "blendshapes":{ "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0,
+ * "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0, "eyeSquintLeft":0.0, "eyeWideLeft":0.0,
+ * "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0, "eyeLookOutRight":0.0,
+ * "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0, "jawLeft":0.0,
+ * "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0,
+ * "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0,
+ * "mouthFrownLeft":0.0, "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0,
+ * "mouthStretchLeft":0.0, "mouthStretchRight":0.0, "mouthRollLower":0.0, "mouthRollUpper":0.0,
+ * "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0, "mouthPressRight":0.0,
+ * "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0,
+ * "mouthUpperUpRight":0.0, "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0,
+ * "browOuterUpLeft":0.0, "browOuterUpRight":0.0, "cheekPuff":0.0, "cheekSquintLeft":0.0,
+ * "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0, "tongueOut":0.0 },
+ * "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5},
+ * }], "timestamp":"654879876546" }
+ * ```
+ *
* @return
- * - true: The face info is valid.
- * - false: The face info is invalid.
+ * - `true`: Facial information JSON parsing successful.
+ * - `false`: Facial information JSON parsing failed.
*/
virtual bool onFaceInfo(const char* outFaceInfo) = 0;
@@ -2079,21 +2499,21 @@ class IFaceInfoObserver {
};
/**
- * Information for the recording file.
+ * @brief Information about the recording file.
*
* @since v3.5.2
*/
struct RecorderInfo {
/**
- * The absolute path of the recording file.
+ * Absolute storage path of the recording file.
*/
const char* fileName;
/**
- * The recording duration, in milliseconds.
+ * Duration of the recording file in milliseconds.
*/
unsigned int durationMs;
/**
- * The size in bytes of the recording file.
+ * Size of the recording file in bytes.
*/
unsigned int fileSize;
@@ -2105,35 +2525,36 @@ struct RecorderInfo {
class IMediaRecorderObserver {
public:
/**
- * Occurs when the recording state changes.
+ * @brief Callback when the recording state changes.
*
* @since v4.0.0
*
- * When the local audio and video recording state changes, the SDK triggers this callback to
+ * @details
+ * When the recording state of the audio and video stream changes, the SDK triggers this callback to
* report the current recording state and the reason for the change.
*
- * @param channelId The channel name.
- * @param uid ID of the user.
- * @param state The current recording state. See \ref agora::media::RecorderState "RecorderState".
- * @param reason The reason for the state change. See \ref agora::media::RecorderReasonCode
- * "RecorderReasonCode".
+ * @param channelId Channel name.
+ * @param uid User ID.
+ * @param state Current recording state. See `RecorderState`.
+ * @param reason Reason for the recording state change. See `RecorderReasonCode`.
+ *
*/
virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state,
RecorderReasonCode reason) = 0;
/**
- * Occurs when the recording information is updated.
+ * @brief Callback for recording information updates.
*
* @since v4.0.0
*
- * After you successfully register this callback and enable the local audio and video recording,
- * the SDK periodically triggers the `onRecorderInfoUpdated` callback based on the set value of
- * `recorderInfoUpdateInterval`. This callback reports the filename, duration, and size of the
- * current recording file.
+ * @details
+ * After successfully registering this callback and starting audio and video stream recording, the
+ * SDK periodically triggers this callback based on the value of `recorderInfoUpdateInterval` set in
+ * `MediaRecorderConfiguration`,
+ * reporting the current recording file's name, duration, and size.
*
- * @param channelId The channel name.
- * @param uid ID of the user.
- * @param info Information about the recording file. See \ref agora::media::RecorderInfo
- * "RecorderInfo".
+ * @param channelId Channel name.
+ * @param uid User ID.
+ * @param info Recording file information. See `RecorderInfo`.
*
*/
virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid,
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaPlayerTypes.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaPlayerTypes.h
index d55d1d9e0..ceb0642ea 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaPlayerTypes.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaPlayerTypes.h
@@ -49,31 +49,39 @@ namespace base {
static const uint8_t kMaxCharBufferLength = 50;
/**
* @brief The playback state.
- *
*/
enum MEDIA_PLAYER_STATE {
- /** Default state.
+ /**
+ * 0: The default state. The media player returns this state code before you open the media resource
+ * or after you stop the playback.
*/
PLAYER_STATE_IDLE = 0,
- /** Opening the media file.
+ /**
+ * 1: Opening the media resource.
*/
PLAYER_STATE_OPENING,
- /** The media file is opened successfully.
+ /**
+ * 2: Opens the media resource successfully.
*/
PLAYER_STATE_OPEN_COMPLETED,
- /** Playing the media file.
+ /**
+ * 3: The media resource is playing.
*/
PLAYER_STATE_PLAYING,
- /** The playback is paused.
+ /**
+ * 4: Pauses the playback.
*/
PLAYER_STATE_PAUSED,
- /** The playback is completed.
+ /**
+ * 5: The playback is complete.
*/
PLAYER_STATE_PLAYBACK_COMPLETED,
- /** All loops are completed.
+ /**
+ * 6: The loop is complete.
*/
PLAYER_STATE_PLAYBACK_ALL_LOOPS_COMPLETED,
- /** The playback is stopped.
+ /**
+ * 7: The playback stops.
*/
PLAYER_STATE_STOPPED,
/** Player pausing (internal)
@@ -97,58 +105,73 @@ enum MEDIA_PLAYER_STATE {
/** Player set track state (internal)
*/
PLAYER_STATE_SET_TRACK_INTERNAL,
- /** The playback fails.
+ /**
+ * 100: The media player fails to play the media resource.
*/
PLAYER_STATE_FAILED = 100,
};
/**
- * @brief Player error code
- *
+ * @brief Reasons for the changes in the media player status.
*/
enum MEDIA_PLAYER_REASON {
- /** No error.
+ /**
+ * 0: No error.
*/
PLAYER_REASON_NONE = 0,
- /** The parameter is invalid.
+ /**
+ * -1: Invalid arguments.
*/
PLAYER_REASON_INVALID_ARGUMENTS = -1,
- /** Internel error.
+ /**
+ * -2: Internal error.
*/
PLAYER_REASON_INTERNAL = -2,
- /** No resource.
+ /**
+ * -3: No resource.
*/
PLAYER_REASON_NO_RESOURCE = -3,
- /** Invalid media source.
+ /**
+ * -4: Invalid media resource.
*/
PLAYER_REASON_INVALID_MEDIA_SOURCE = -4,
- /** The type of the media stream is unknown.
+ /**
+ * -5: The media stream type is unknown.
*/
PLAYER_REASON_UNKNOWN_STREAM_TYPE = -5,
- /** The object is not initialized.
+ /**
+ * -6: The object is not initialized.
*/
PLAYER_REASON_OBJ_NOT_INITIALIZED = -6,
- /** The codec is not supported.
+ /**
+ * -7: The codec is not supported.
*/
PLAYER_REASON_CODEC_NOT_SUPPORTED = -7,
- /** Invalid renderer.
+ /**
+ * -8: Invalid renderer.
*/
PLAYER_REASON_VIDEO_RENDER_FAILED = -8,
- /** An error occurs in the internal state of the player.
+ /**
+ * -9: An error with the internal state of the player occurs.
*/
PLAYER_REASON_INVALID_STATE = -9,
- /** The URL of the media file cannot be found.
+ /**
+ * -10: The URL of the media resource cannot be found.
*/
PLAYER_REASON_URL_NOT_FOUND = -10,
- /** Invalid connection between the player and the Agora server.
+ /**
+ * -11: Invalid connection between the player and the Agora Server.
*/
PLAYER_REASON_INVALID_CONNECTION_STATE = -11,
- /** The playback buffer is insufficient.
+ /**
+ * -12: The playback buffer is insufficient.
*/
PLAYER_REASON_SRC_BUFFER_UNDERFLOW = -12,
- /** The audio mixing file playback is interrupted.
+ /**
+ * -13: The playback is interrupted.
*/
PLAYER_REASON_INTERRUPTED = -13,
- /** The SDK does not support this function.
+ /**
+ * -14: The SDK does not support the method being called.
*/
PLAYER_REASON_NOT_SUPPORTED = -14,
/** The token has expired.
@@ -157,75 +180,92 @@ enum MEDIA_PLAYER_REASON {
/** The ip has expired.
*/
PLAYER_REASON_IP_EXPIRED = -16,
- /** An unknown error occurs.
+ /**
+ * -17: An unknown error.
*/
PLAYER_REASON_UNKNOWN = -17,
};
/**
* @brief The type of the media stream.
- *
*/
enum MEDIA_STREAM_TYPE {
- /** The type is unknown.
+ /**
+ * 0: The type is unknown.
*/
STREAM_TYPE_UNKNOWN = 0,
- /** The video stream.
+ /**
+ * 1: The video stream.
*/
STREAM_TYPE_VIDEO = 1,
- /** The audio stream.
+ /**
+ * 2: The audio stream.
*/
STREAM_TYPE_AUDIO = 2,
- /** The subtitle stream.
+ /**
+ * 3: The subtitle stream.
*/
STREAM_TYPE_SUBTITLE = 3,
};
/**
- * @brief The playback event.
- *
+ * @brief Media player events.
*/
enum MEDIA_PLAYER_EVENT {
- /** The player begins to seek to the new playback position.
+ /**
+ * 0: The player begins to seek to a new playback position.
*/
PLAYER_EVENT_SEEK_BEGIN = 0,
- /** The seek operation completes.
+ /**
+ * 1: The player finishes seeking to a new playback position.
*/
PLAYER_EVENT_SEEK_COMPLETE = 1,
- /** An error occurs during the seek operation.
+ /**
+ * 2: An error occurs when seeking to a new playback position.
*/
PLAYER_EVENT_SEEK_ERROR = 2,
- /** The player changes the audio track for playback.
+ /**
+ * 5: The audio track used by the player has been changed.
*/
PLAYER_EVENT_AUDIO_TRACK_CHANGED = 5,
- /** player buffer low
+ /**
+ * 6: The currently buffered data is not enough to support playback.
*/
PLAYER_EVENT_BUFFER_LOW = 6,
- /** player buffer recover
+ /**
+ * 7: The currently buffered data is just enough to support playback.
*/
PLAYER_EVENT_BUFFER_RECOVER = 7,
- /** The video or audio is interrupted
+ /**
+ * 8: The audio or video playback freezes.
*/
PLAYER_EVENT_FREEZE_START = 8,
- /** Interrupt at the end of the video or audio
+ /**
+ * 9: The audio or video playback resumes without freezing.
*/
PLAYER_EVENT_FREEZE_STOP = 9,
- /** switch source begin
- */
+ /**
+ * 10: The player starts switching the media resource.
+ */
PLAYER_EVENT_SWITCH_BEGIN = 10,
- /** switch source complete
- */
+ /**
+ * 11: Media resource switching is complete.
+ */
PLAYER_EVENT_SWITCH_COMPLETE = 11,
- /** switch source error
- */
+ /**
+ * 12: Media resource switching error.
+ */
PLAYER_EVENT_SWITCH_ERROR = 12,
- /** An application can render the video to less than a second
+ /**
+ * 13: The first video frame is rendered.
*/
PLAYER_EVENT_FIRST_DISPLAYED = 13,
- /** cache resources exceed the maximum file count
+ /**
+ * 14: The cached media files reach the limit in number.
*/
PLAYER_EVENT_REACH_CACHE_FILE_MAX_COUNT = 14,
- /** cache resources exceed the maximum file size
+ /**
+ * 15: The cached media files reach the limit in aggregate storage space.
*/
PLAYER_EVENT_REACH_CACHE_FILE_MAX_SIZE = 15,
/** Triggered when a retry is required to open the media
@@ -244,63 +284,91 @@ enum MEDIA_PLAYER_EVENT {
};
/**
- * @brief The play preload another source event.
- *
+ * @brief Events that occur when media resources are preloaded.
*/
enum PLAYER_PRELOAD_EVENT {
- /** preload source begin
- */
+ /**
+ * 0: Starts preloading media resources.
+ */
PLAYER_PRELOAD_EVENT_BEGIN = 0,
- /** preload source complete
- */
+ /**
+ * 1: Preloading media resources is complete.
+ */
PLAYER_PRELOAD_EVENT_COMPLETE = 1,
- /** preload source error
- */
+ /**
+ * 2: An error occurs when preloading media resources.
+ */
PLAYER_PRELOAD_EVENT_ERROR = 2,
};
/**
- * @brief The information of the media stream object.
- *
+ * @brief The detailed information of the media stream.
*/
struct PlayerStreamInfo {
- /** The index of the media stream. */
+ /**
+ * The index of the media stream.
+ */
int streamIndex;
- /** The type of the media stream. See {@link MEDIA_STREAM_TYPE}. */
+ /**
+ * The type of the media stream. See `MEDIA_STREAM_TYPE`.
+ */
MEDIA_STREAM_TYPE streamType;
- /** The codec of the media stream. */
+ /**
+ * The codec of the media stream.
+ */
char codecName[kMaxCharBufferLength];
- /** The language of the media stream. */
+ /**
+ * The language of the media stream.
+ */
char language[kMaxCharBufferLength];
- /** The frame rate (fps) if the stream is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video frame rate (fps).
+ */
int videoFrameRate;
- /** The video bitrate (bps) if the stream is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video bitrate (bps).
+ */
int videoBitRate;
- /** The video width (pixel) if the stream is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video width (pixel).
+ */
int videoWidth;
- /** The video height (pixel) if the stream is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video height (pixel).
+ */
int videoHeight;
- /** The rotation angle if the steam is video. */
+ /**
+ * This parameter only takes effect for video streams, and indicates the video rotation angle.
+ */
int videoRotation;
- /** The sample rate if the stream is audio. */
+ /**
+ * This parameter only takes effect for audio streams, and indicates the audio sample rate (Hz).
+ */
int audioSampleRate;
- /** The number of audio channels if the stream is audio. */
+ /**
+ * This parameter only takes effect for audio streams, and indicates the audio channel number.
+ */
int audioChannels;
- /** The number of bits per sample if the stream is audio. */
+ /**
+ * This parameter only takes effect for audio streams, and indicates the bit number of each audio
+ * sample.
+ */
int audioBitsPerSample;
- /** The total duration (millisecond) of the media stream. */
+ /**
+ * The total duration (ms) of the media stream.
+ */
int64_t duration;
PlayerStreamInfo() : streamIndex(0),
@@ -320,90 +388,104 @@ struct PlayerStreamInfo {
};
/**
- * @brief The information of the media stream object.
- *
+ * @brief Information about the video bitrate of the media resource being played.
*/
struct SrcInfo {
- /** The bitrate of the media stream. The unit of the number is kbps.
- *
+ /**
+ * The video bitrate (Kbps) of the media resource being played.
*/
int bitrateInKbps;
- /** The name of the media stream.
- *
- */
+ /**
+ * The name of the media resource.
+ */
const char* name;
};
/**
- * @brief The type of the media metadata.
- *
+ * @brief The type of media metadata.
*/
enum MEDIA_PLAYER_METADATA_TYPE {
- /** The type is unknown.
+ /**
+ * 0: The type is unknown.
*/
PLAYER_METADATA_TYPE_UNKNOWN = 0,
- /** The type is SEI.
+ /**
+ * 1: The type is SEI.
*/
PLAYER_METADATA_TYPE_SEI = 1,
};
+/**
+ * @brief Statistics about the media files being cached.
+ */
struct CacheStatistics {
- /** total data size of uri
+ /**
+ * The size (bytes) of the media file being played.
*/
int64_t fileSize;
- /** data of uri has cached
+ /**
+ * The size (bytes) of the media file that you want to cache.
*/
int64_t cacheSize;
- /** data of uri has downloaded
+ /**
+ * The size (bytes) of the media file that has been downloaded.
*/
int64_t downloadSize;
};
/**
- * @brief The real time statistics of the media stream being played.
- *
+ * @brief The information of the media file being played.
*/
struct PlayerPlaybackStats {
- /** Video fps.
+ /**
+ * The frame rate (fps) of the video.
*/
int videoFps;
- /** Video bitrate (Kbps).
+ /**
+ * The bitrate (kbps) of the video.
*/
int videoBitrateInKbps;
- /** Audio bitrate (Kbps).
+ /**
+ * The bitrate (kbps) of the audio.
*/
int audioBitrateInKbps;
- /** Total bitrate (Kbps).
+ /**
+ * The total bitrate (kbps) of the media stream.
*/
int totalBitrateInKbps;
};
/**
- * @brief The updated information of media player.
- *
+ * @brief Information related to the media player.
*/
struct PlayerUpdatedInfo {
/** @technical preview
*/
const char* internalPlayerUuid;
- /** The device ID of the playback device.
+ /**
+ * The ID of the device.
*/
const char* deviceId;
- /** Video height.
+ /**
+ * Height (pixel) of the video.
*/
int videoHeight;
- /** Video width.
+ /**
+ * Width (pixel) of the video.
*/
int videoWidth;
- /** Audio sample rate.
+ /**
+ * Audio sample rate (Hz).
*/
int audioSampleRate;
- /** The audio channel number.
+ /**
+ * The number of audio channels.
*/
int audioChannels;
- /** The bit number of each audio sample.
+ /**
+ * The number of bits per audio sample point.
*/
int audioBitsPerSample;
@@ -424,89 +506,132 @@ class IMediaPlayerCustomDataProvider {
public:
/**
- * @brief The player requests to read the data callback, you need to fill the specified length of data into the buffer
- * @param buffer the buffer pointer that you need to fill data.
- * @param bufferSize the bufferSize need to fill of the buffer pointer.
- * @return you need return offset value if succeed. return 0 if failed.
+ * @brief Occurs when the SDK reads the media resource data.
+ *
+ * @details
+ * When you call the `openWithMediaSource` method to open a media resource, the SDK triggers this
+ * callback and requests you to pass in the buffer of the media resource data.
+ *
+ * @param buffer An input parameter. Data buffer (bytes). Write the `bufferSize` data reported by
+ * the SDK into this parameter.
+ * @param bufferSize The length of the data buffer (bytes).
+ *
+ * @return
+ * - If the data is read successfully, pass in the length of the data (bytes) you actually read in
+ * the return value.
+ * - If reading the data fails, pass in 0 in the return value.
*/
virtual int onReadData(unsigned char *buffer, int bufferSize) = 0;
/**
- * @brief The Player seek event callback, you need to operate the corresponding stream seek operation, You can refer to the definition of lseek() at https://man7.org/linux/man-pages/man2/lseek.2.html
- * @param offset the value of seek offset.
- * @param whence the postion of start seeking, the directive whence as follows:
- * 0 - SEEK_SET : The file offset is set to offset bytes.
- * 1 - SEEK_CUR : The file offset is set to its current location plus offset bytes.
- * 2 - SEEK_END : The file offset is set to the size of the file plus offset bytes.
- * 65536 - AVSEEK_SIZE : Optional. Passing this as the "whence" parameter to a seek function causes it to return the filesize without seeking anywhere.
+ * @brief Occurs when the SDK seeks the media resource data.
+ *
+ * @details
+ * When you call the `openWithMediaSource` or `open` method to open a custom media resource, the SDK
+ * triggers this callback to request the specified location in the media resource.
+ *
+ * @param offset An input parameter. The offset of the target position relative to the starting
+ * point, in bytes. The value can be positive or negative.
+ * @param whence An input parameter. The starting point. You can set it as one of the following
+ * values:
+ * - 0: The starting point is the head of the data, and the actual data offset after seeking is
+ * `offset`.
+ * - 1: The starting point is the current position, and the actual data offset after seeking is the
+ * current position plus `offset`.
+ * - 2: The starting point is the end of the data, and the actual data offset after seeking is the
+ * whole data length plus `offset`.
+ * - 65536: Do not perform position seeking, return the file size. Agora recommends that you use
+ * this parameter value when playing pure audio files such as MP3 and WAV.
+ *
* @return
- * whence == 65536, return filesize if you need.
- * whence >= 0 && whence < 3 , return offset value if succeed. return -1 if failed.
+ * - When `whence` is `65536`, the media file size is returned.
+ * - When `whence` is `0`, `1`, or `2`, the actual data offset after the seeking is returned.
+ * - -1: Seeking failed.
*/
virtual int64_t onSeek(int64_t offset, int whence) = 0;
virtual ~IMediaPlayerCustomDataProvider() {}
};
+/**
+ * @brief Information related to the media file to be played and the playback scenario
+ * configurations.
+ */
struct MediaSource {
/**
- * The URL of the media file that you want to play.
+ * The URL of the media file to be played.
+ * @note If you open a common media resource, pass in the value to `url`. If you open a custom media
+ * resource, pass in the value to `provider`. Agora recommends that you do not pass in values to
+ * both parameters in one call; otherwise, this call may fail.
*/
const char* url;
/**
- * The URI of the media file
- *
- * When caching is enabled, if the url cannot distinguish the cache file name,
- * the uri must be able to ensure that the cache file name corresponding to the url is unique.
+ * The URI (Uniform Resource Identifier) of the media file.
*/
const char* uri;
/**
- * Set the starting position for playback, in ms.
+ * The starting position (ms) for playback. The default value is 0.
*/
int64_t startPos;
/**
- * Determines whether to autoplay after opening a media resource.
- * - true: (Default) Autoplay after opening a media resource.
- * - false: Do not autoplay after opening a media resource.
+ * Whether to enable autoplay once the media file is opened:
+ * - `true`: (Default) Yes.
+ * - `false`: No.
+ * @note If autoplay is disabled, you need to call the `play` method to play a media file after it
+ * is opened.
*/
bool autoPlay;
/**
- * Determines whether to enable cache streaming to local files. If enable cached, the media player will
- * use the url or uri as the cache index.
- *
+ * Whether to cache the media file when it is being played:
+ * - `true`: Enables caching.
+ * - `false`: (Default) Disables caching.
* @note
- * The local cache function only supports on-demand video/audio streams and does not support live streams.
- * Caching video and audio files based on the HLS protocol (m3u8) to your local device is not supported.
- *
- * - true: Enable cache.
- * - false: (Default) Disable cache.
+ * - Agora only supports caching on-demand audio and video streams that are not transmitted in HLS
+ * protocol.
+ * - If you need to enable caching, pass in a value to `uri`; otherwise, caching is based on the
+ * `url` of the media file.
+ * - If you enable this function, the Media Player caches part of the media file being played on
+ * your local device, and you can play the cached media file without internet connection. The
+ * statistics about the media file being cached are updated every second after the media file is
+ * played. See `CacheStatistics`.
*/
bool enableCache;
/**
- * Determines whether to enable multi-track audio stream decoding.
- * Then you can select multi audio track of the media file for playback or publish to channel
- *
- * @note
- * If you use the selectMultiAudioTrack API, you must set enableMultiAudioTrack to true.
- *
- * - true: Enable MultiAudioTrack;.
- * - false: (Default) Disable MultiAudioTrack;.
+ * Whether to allow the selection of different audio tracks when playing this media file:
+ * - `true`: Allow to select different audio tracks.
+ * - `false`: (Default) Do not allow to select different audio tracks.
+ * If you need to set different audio tracks for local playback and publishing to the channel, you
+ * need to set this parameter to `true`, and then call the `selectMultiAudioTrack` method to select
+ * the audio track.
*/
bool enableMultiAudioTrack;
/**
- * Determines whether the opened media resource is a stream through the Agora Broadcast Streaming Network(CDN).
- * - true: It is a stream through the Agora Broadcast Streaming Network.
- * - false: (Default) It is not a stream through the Agora Broadcast Streaming Network.
+ * Whether the media resource to be opened is a live stream or on-demand video distributed through
+ * Media Broadcast service:
+ * - `true`: The media resource to be played is a live or on-demand video distributed through Media
+ * Broadcast service.
+ * - `false`: (Default) The media resource is not a live stream or on-demand video distributed
+ * through Media Broadcast service.
+ * @note If you need to open a live stream or on-demand video distributed through Broadcast
+ * Streaming service, pass in the URL of the media resource to `url`, and set `isAgoraSource` as
+ * `true`; otherwise, you don't need to set the `isAgoraSource` parameter.
*/
Optional isAgoraSource;
/**
- * Determines whether the opened media resource is a live stream. If is a live stream, it can speed up the opening of media resources.
- * - true: It is a live stream.
- * - false: (Default) It is not is a live stream.
+ * Whether the media resource to be opened is a live stream:
+ * - `true`: The media resource is a live stream.
+ * - `false`: (Default) The media resource is not a live stream.
+ * If the media resource you want to open is a live stream, Agora recommends that you set this
+ * parameter as `true` so that the live stream can be loaded more quickly.
+ * @note If the media resource you open is not a live stream, but you set `isLiveSource` as `true`,
+ * the media resource will not be loaded more quickly.
*/
Optional isLiveSource;
/**
- * External custom data source object
+ * The callback for custom media resource files. See `IMediaPlayerCustomDataProvider`.
+ * @note If you open a custom media resource, pass in the value to `provider`. If you open a common
+ * media resource, pass in the value to `url`. Agora recommends that you do not pass in values to
+ * both `url` and `provider` in one call; otherwise, this call may fail.
*/
IMediaPlayerCustomDataProvider* provider;
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraLog.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraLog.h
index 20b6416ef..4bc586442 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraLog.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraLog.h
@@ -28,13 +28,29 @@ namespace agora {
namespace commons {
/**
- * Supported logging severities of SDK
+ * @brief The output log level of the SDK.
*/
OPTIONAL_ENUM_CLASS LOG_LEVEL {
+ /**
+ * 0: Do not output any log information.
+ */
LOG_LEVEL_NONE = 0x0000,
+ /**
+ * 0x0001: (Default) Output `FATAL`, `ERROR`, `WARN`, and `INFO` level log information. We recommend
+ * setting your log filter to this level.
+ */
LOG_LEVEL_INFO = 0x0001,
+ /**
+ * 0x0002: Output `FATAL`, `ERROR`, and `WARN` level log information.
+ */
LOG_LEVEL_WARN = 0x0002,
+ /**
+ * 0x0004: Output `FATAL` and `ERROR` level log information.
+ */
LOG_LEVEL_ERROR = 0x0004,
+ /**
+ * 0x0008: Output `FATAL` level log information.
+ */
LOG_LEVEL_FATAL = 0x0008,
LOG_LEVEL_API_CALL = 0x0010,
LOG_LEVEL_DEBUG = 0x0020,
@@ -62,12 +78,36 @@ class ILogWriter {
virtual ~ILogWriter() {}
};
+/**
+ * @brief The output log level of the SDK.
+ */
enum LOG_FILTER_TYPE {
+ /**
+ * 0: Do not output any log information.
+ */
LOG_FILTER_OFF = 0,
+ /**
+ * 0x080f: Output all log information. Set your log filter to this level if you want to get the most
+ * complete log file.
+ */
LOG_FILTER_DEBUG = 0x080f,
+ /**
+ * 0x000f: Output `LOG_FILTER_CRITICAL`, `LOG_FILTER_ERROR`, `LOG_FILTER_WARN`, and
+ * `LOG_FILTER_INFO` level log information. We recommend setting your log filter to this level.
+ */
LOG_FILTER_INFO = 0x000f,
+ /**
+ * 0x000e: Output `LOG_FILTER_CRITICAL`, `LOG_FILTER_ERROR`, and `LOG_FILTER_WARN` level log
+ * information.
+ */
LOG_FILTER_WARN = 0x000e,
+ /**
+ * 0x000c: Output `LOG_FILTER_CRITICAL` and `LOG_FILTER_ERROR` level log information.
+ */
LOG_FILTER_ERROR = 0x000c,
+ /**
+ * 0x0008: Output `LOG_FILTER_CRITICAL` level log information.
+ */
LOG_FILTER_CRITICAL = 0x0008,
LOG_FILTER_MASK = 0x80f,
};
@@ -78,16 +118,34 @@ const uint32_t MIN_LOG_SIZE = 128 * 1024; // 128KB
*/
const uint32_t DEFAULT_LOG_SIZE_IN_KB = 2048;
-/** Definition of LogConfiguration
+/**
+ * @brief Configuration of Agora SDK log files.
*/
struct LogConfig {
- /**The log file path, default is NULL for default log path
+ /**
+ * The complete path of the log files. Agora recommends using the default log directory. If you need
+ * to modify the default directory, ensure that the directory you specify exists and is writable.
+ * The default log directory is:
+ * - Android: /storage/emulated/0/Android/data//files/agorasdk.log.
+ * - iOS: App Sandbox/Library/caches/agorasdk.log.
+ * - macOS:
+ * - If Sandbox is enabled: App Sandbox/Library/Logs/agorasdk.log. For example,
+ * /Users//Library/Containers//Data/Library/Logs/agorasdk.log.
+ * - If Sandbox is disabled: ~/Library/Logs/agorasdk.log
+ * - Windows: C:\Users\\AppData\Local\Agora\\agorasdk.log.
*/
const char* filePath;
- /** The log file size, KB , set 2048KB to use default log size
+ /**
+ * The size (KB) of an `agorasdk.log` file. The value range is [128,20480]. The default value is
+ * 2,048 KB. If you set `fileSizeInKByte` smaller than 128 KB, the SDK automatically adjusts it to
+ * 128 KB; if you set `fileSizeInKByte` greater than 20,480 KB, the SDK automatically adjusts it to
+ * 20,480 KB.
*/
uint32_t fileSizeInKB;
- /** The log level, set LOG_LEVEL_INFO to use default log level
+ /**
+ * The output level of the SDK log file. See `LOG_LEVEL`.
+ * For example, if you set the log level to WARN, the SDK outputs the logs within levels FATAL,
+ * ERROR, and WARN.
*/
LOG_LEVEL level;
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaEngine.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaEngine.h
index 44975bfe9..42ab70d4c 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaEngine.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaEngine.h
@@ -12,16 +12,28 @@
namespace agora {
namespace media {
-/** dual-mono music output mode
+/**
+ * @brief The channel mode.
*/
enum AUDIO_MIXING_DUAL_MONO_MODE {
- /* 0: Original mode */
+ /**
+ * 0: Original mode.
+ */
AUDIO_MIXING_DUAL_MONO_AUTO = 0,
- /* 1: Left channel mode */
+ /**
+ * 1: Left channel mode. This mode replaces the audio of the right channel with the audio of the
+ * left channel, which means the user can only hear the audio of the left channel.
+ */
AUDIO_MIXING_DUAL_MONO_L = 1,
- /* 2: Right channel mode */
+ /**
+ * 2: Right channel mode. This mode replaces the audio of the left channel with the audio of the
+ * right channel, which means the user can only hear the audio of the right channel.
+ */
AUDIO_MIXING_DUAL_MONO_R = 2,
- /* 3: Mixed channel mode */
+ /**
+ * 3: Mixed channel mode. This mode mixes the audio of the left channel and the right channel, which
+ * means the user can hear the audio of the left channel and the right channel at the same time.
+ */
AUDIO_MIXING_DUAL_MONO_MIX = 3
};
@@ -32,40 +44,64 @@ enum AUDIO_MIXING_DUAL_MONO_MODE {
class IMediaEngine {
public:
/**
- * Registers an audio frame observer object.
+ * @brief Registers an audio frame observer object.
*
- * @note
- * Ensure that you call this method before \ref IRtcEngine::joinChannel "joinChannel".
+ * @details
+ * Call this method to register an audio frame observer object (register a callback). When you need
+ * the SDK to trigger the `onMixedAudioFrame`, `onRecordAudioFrame`, `onPlaybackAudioFrame`,
+ * `onPlaybackAudioFrameBeforeMixing` or `onEarMonitoringAudioFrame` callback, you need to use this
+ * method to register the callbacks.
+ * Call timing: Call this method before joining a channel.
+ *
+ * @param observer The observer instance. See `IAudioFrameObserver`. Set the value as NULL to
+ * release the instance. Agora recommends calling this method after receiving `onLeaveChannel` to
+ * release the audio observer object.
*
- * @param observer A pointer to the audio frame observer object: IAudioFrameObserver,
- * nullptr means unregistering observer instead.
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int registerAudioFrameObserver(IAudioFrameObserver* observer) = 0;
/**
- * Registers a video frame observer object.
+ * @brief Registers a raw video frame observer object.
+ *
+ * @details
+ * If you want to observe raw video frames (such as YUV or RGBA format), Agora recommends that you
+ * implement one `IVideoFrameObserver` class with this method.
+ * When calling this method to register a video observer, you can register callbacks in the
+ * `IVideoFrameObserver` class as needed. After you successfully register the video frame observer,
+ * the SDK triggers the registered callbacks each time a video frame is received.
+ * Applicable scenarios: After registering the raw video observer, you can use the obtained raw
+ * video data in various video pre-processing scenarios, such as virtual backgrounds and image
+ * enhancement by yourself.
+ * Call timing: Call this method before joining a channel.
*
* @note
- * - Ensure that you call this method before joining the channel.
- * - If you register an observer for video raw video data, you cannot register an IVideoEncodedFrameObserver
- * object.
+ * When handling the video data returned in the callbacks, pay attention to the changes in the
+ * `width` and `height` parameters, which may be adapted under the following circumstances:
+ * - When network conditions deteriorate, the video resolution decreases incrementally.
+ * - If the user adjusts the video profile, the resolution of the video returned in the callbacks
+ * also changes.
+ *
+ * @param observer The observer instance. See `IVideoFrameObserver`. To release the instance, set
+ * the value as NULL.
*
- * @param observer A pointer to the video frame observer: IVideoFrameObserver.
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int registerVideoFrameObserver(IVideoFrameObserver* observer) = 0;
/**
- * Registers a receiver object for the encoded video image.
+ * @brief Registers a receiver object for the encoded video image.
*
- * @note
- * - Ensure that you call this method before joining the channel.
+ * @details
+ * If you only want to observe encoded video frames (such as H.264 format) without decoding and
+ * rendering the video, Agora recommends that you implement one `IVideoEncodedFrameObserver` class
+ * through this method.
*
- * @param observer A pointer to the observer of the encoded video image: \ref IVideoEncodedFrameObserver
- * "IVideoEncodedFrameObserver".
+ * @note Call this method before joining a channel.
+ *
+ * @param observer The video frame observer object. See `IVideoEncodedFrameObserver`.
*
* @return
* - 0: Success.
@@ -74,12 +110,26 @@ class IMediaEngine {
virtual int registerVideoEncodedFrameObserver(IVideoEncodedFrameObserver* observer) = 0;
/**
- * Registers a face info observer object.
+ * @brief Registers or unregisters a facial information observer.
+ *
+ * @details
+ * You can call this method to register the `onFaceInfo` callback to receive the facial information
+ * processed by Agora speech driven extension. When calling this method to register a facial
+ * information observer, you can register callbacks in the `IFaceInfoObserver` class as needed.
+ * After successfully registering the facial information observer, the SDK triggers the callback you
+ * have registered when it captures the facial information converted by the speech driven extension.
+ * Applicable scenarios: Facial information processed by the Agora speech driven extension is BS
+ * (Blend Shape) data that complies with ARkit standards. You can further process the BS data using
+ * third-party 3D rendering engines, such as driving avatar to make mouth movements corresponding to
+ * speech.
*
* @note
- * Ensure that you call this method before \ref IRtcEngine::joinChannel "joinChannel".
+ * - Call this method before joining a channel.
+ * - Before calling this method, you need to make sure that the speech driven extension has been
+ * enabled by calling `enableExtension`.
*
- * @param observer A pointer to the face info observer object: IFaceInfoObserver.
+ * @param observer Facial information observer, see `IFaceInfoObserver`. If you need to unregister a
+ * facial information observer, pass in NULL.
*
* @return
* - 0: Success.
@@ -88,29 +138,54 @@ class IMediaEngine {
virtual int registerFaceInfoObserver(IFaceInfoObserver* observer) = 0;
/**
- * Pushes the external audio data to the app.
+ * @brief Pushes the external audio frame.
+ *
+ * @details
+ * Call this method to push external audio frames through the audio track.
+ * Call timing: Before calling this method to push external audio data, perform the following
+ * steps: 1. Call `createCustomAudioTrack` to create a custom audio track and get the audio track ID.
+ * 2. Call `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to join the channel. In `ChannelMediaOptions`, set
+ * `publishCustomAudioTrackId` to the audio track ID that you want to publish, and set
+ * `publishCustomAudioTrack` to `true`.
+ *
+ * @param frame The external audio frame. See `AudioFrame`.
+ * @param trackId The audio track ID. If you want to publish a custom external audio source, set
+ * this parameter to the ID of the corresponding custom audio track you want to publish.
*
- * @param frame The audio buffer data.
- * @param trackId The audio track ID.
* @return
* - 0: Success.
* - < 0: Failure.
*/
-
virtual int pushAudioFrame(IAudioFrameObserverBase::AudioFrame* frame, rtc::track_id_t trackId = 0) = 0;
/**
- * Pulls the remote audio data.
+ * @brief Pulls the remote audio data.
*
- * After a successful method call, the app pulls the decoded and mixed audio data for playback.
+ * @details
+ * After a successful call of this method, the app pulls the decoded and mixed audio data for
+ * playback.
+ * Call timing: Call this method after joining a channel.
+ * Before calling this method, call `setExternalAudioSink` `(enabled: true)` to notify the app to
+ * enable and set the external audio rendering.
*
- * The difference between this method and the \ref onPlaybackAudioFrame "onPlaybackAudioFrame" is as follows:
- * - `onPlaybackAudioFrame`: The SDK sends the audio data to the app once every 10 ms. Any delay in processing
- * the audio frames may result in audio jitter.
- * - `pullAudioFrame`: The app pulls the remote audio data. After setting the audio data parameters, the
- * SDK adjusts the frame buffer and avoids problems caused by jitter in the external audio playback.
+ * @note
+ * Both this method and the `onPlaybackAudioFrame` callback can be used to get audio data after
+ * remote mixing. After calling `setExternalAudioSink` to enable external audio rendering, the app
+ * will no longer be able to obtain data from the `onPlaybackAudioFrame` callback. Therefore, you
+ * should choose between this method and the `onPlaybackAudioFrame` callback based on your actual
+ * business requirements. The specific distinctions between them are as follows:
+ * - After calling this method, the app automatically pulls the audio data from the SDK. By setting
+ * the audio data parameters, the SDK adjusts the frame buffer to help the app handle latency,
+ * effectively avoiding audio playback jitter.
+ * - After registering the `onPlaybackAudioFrame` callback, the SDK sends the audio data to the app
+ * through the callback. Any delay in processing the audio frames may result in audio jitter.
+ * This method is only used for retrieving audio data after remote mixing. If you need to get audio
+ * data from different audio processing stages such as capture and playback, you can register the
+ * corresponding callbacks by calling `registerAudioFrameObserver`.
+ *
+ * @param frame Pointers to `AudioFrame`.
*
- * @param frame The pointer to the audio frame: AudioFrame.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -118,21 +193,27 @@ class IMediaEngine {
virtual int pullAudioFrame(IAudioFrameObserverBase::AudioFrame* frame) = 0;
/**
- * Sets the external video source.
- *
- * Once the external video source is enabled, the SDK prepares to accept the external video frame.
- *
- * @param enabled Determines whether to enable the external video source.
- * - true: Enable the external video source. Once set, the SDK creates the external source and prepares
- * video data from `pushVideoFrame` or `pushEncodedVideoImage`.
- * - false: Disable the external video source.
- * @param useTexture Determines whether to use textured video data.
- * - true: Use texture, which is not supported now.
- * - False: Do not use texture.
- * @param sourceType Determines the type of external video source frame.
- * - ENCODED_VIDEO_FRAME: The external video source is encoded.
- * - VIDEO_FRAME: The external video source is not encoded.
- * @param encodedVideoOption Video encoded track option, which is only used for ENCODED_VIDEO_FRAME.
+ * @brief Configures the external video source.
+ *
+ * @details
+ * After calling this method to enable an external video source, you can call `pushVideoFrame` to
+ * push external video data to the SDK.
+ * Call timing: Call this method before joining a channel.
+ *
+ * @note Dynamic switching of video sources is not supported within the channel. To switch from an
+ * external video source to an internal video source, you must first leave the channel, call this
+ * method to disable the external video source, and then rejoin the channel.
+ *
+ * @param enabled Whether to use the external video source:
+ * - `true`: Use the external video source. The SDK prepares to accept the external video frame.
+ * - `false`: (Default) Do not use the external video source.
+ * @param useTexture Whether to use the external video frame in the Texture format.
+ * - `true`: Use the external video frame in the Texture format.
+ * - `false`: (Default) Do not use the external video frame in the Texture format.
+ * @param sourceType Whether the external video frame is encoded. See `EXTERNAL_VIDEO_SOURCE_TYPE`.
+ * @param encodedVideoOption Video encoding options. This parameter needs to be set if `sourceType`
+ * is `ENCODED_VIDEO_FRAME`. To set this parameter, contact `technical support`.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -143,14 +224,19 @@ class IMediaEngine {
#if defined(__ANDROID__)
/**
- * Sets the remote eglContext.
+ * @brief Sets the EGL context for rendering remote video streams.
*
- * When the engine is destroyed, the SDK will automatically release the eglContext.
+ * @details
+ * This method can replace the default remote EGL context within the SDK, making it easier to manage
+ * the EGL context.
+ * When the engine is destroyed, the SDK will automatically release the EGL context.
+ * Applicable scenarios: This method is suitable for using a custom video rendering method instead
+ * of the default SDK rendering method to render remote video frames in Texture format.
+ * Call timing: Call this method before joining a channel.
*
- * @param eglContext.
+ * @note This method is for Android only.
*
- * @note
- * setExternalRemoteEglContext needs to be called before joining the channel.
+ * @param eglContext The EGL context for rendering remote video streams.
*
* @return
* - 0: Success.
@@ -160,27 +246,28 @@ class IMediaEngine {
#endif
/**
- * Sets the external audio source.
+ * @brief Sets the external audio source parameters.
+ *
+ * @deprecated This method is deprecated. Use createCustomAudioTrack(rtc::AUDIO_TRACK_TYPE
+ * trackType, const rtc::AudioTrackConfig& config) instead.
+ *
+ * @details
+ * Call timing: Call this method before joining a channel.
+ *
+ * @param enabled Whether to enable the external audio source:
+ * - `true`: Enable the external audio source.
+ * - `false`: (Default) Disable the external audio source.
+ * @param sampleRate The sample rate (Hz) of the external audio source which can be set as `8000`,
+ * `16000`, `32000`, `44100`, or `48000`.
+ * @param channels The number of channels of the external audio source, which can be set as `1`
+ * (Mono) or `2` (Stereo).
+ * @param localPlayback Whether to play the external audio source:
+ * - `true`: Play the external audio source.
+ * - `false`: (Default) Do not play the external audio source.
+ * @param publish Whether to publish audio to the remote users:
+ * - `true`: (Default) Publish audio to the remote users.
+ * - `false`: Do not publish audio to the remote users.
*
- * @note
- * Ensure that you call this method before joining the channel.
- *
- * @deprecated This method is deprecated. Use createCustomAudioTrack(rtc::AUDIO_TRACK_TYPE trackType, const rtc::AudioTrackConfig& config) instead.
- *
- * @param enabled Determines whether to enable the external audio source:
- * - true: Enable the external audio source.
- * - false: (default) Disable the external audio source.
- * @param sampleRate The Sample rate (Hz) of the external audio source, which can set be as
- * 8000, 16000, 32000, 44100, or 48000.
- * @param channels The number of channels of the external audio source, which can be set as 1 or 2:
- * - 1: Mono.
- * - 2: Stereo.
- * @param localPlayback Enable/Disables the local playback of external audio track:
- * - true: Enable local playback
- * - false: (Default) Do not enable local playback
- * @param publish Determines whether to publish the external audio track:
- * - true: (Default) Publish the external audio track.
- * - false: Don`t publish the external audio track.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -188,26 +275,38 @@ class IMediaEngine {
virtual int setExternalAudioSource(bool enabled, int sampleRate, int channels, bool localPlayback = false, bool publish = true) __deprecated = 0;
/**
- * Create a custom audio track and get the audio track id.
- *
- * @note Ensure that you call this method before calling `joinChannel`.
- *
- * @param trackType The type of custom audio track
- * See AUDIO_TRACK_TYPE.
- *
- * @param config The config of custom audio track
- * See AudioTrackConfig.
+ * @brief Creates a custom audio track.
+ *
+ * @details
+ * To publish a custom audio source, see the following steps: 1. Call this method to create a custom
+ * audio track and get the audio track ID.
+ * 2. Call `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to join the channel. In `ChannelMediaOptions`, set
+ * `publishCustomAudioTrackId` to the audio track ID that you want to publish, and set
+ * `publishCustomAudioTrack` to `true`.
+ * 3. Call `pushAudioFrame` and specify `trackId` as the audio track ID set in step 2. You can then
+ * publish the corresponding custom audio source in the channel.
+ *
+ * @note Call this method before joining a channel.
+ *
+ * @param trackType The type of the custom audio track. See `AUDIO_TRACK_TYPE`.Attention: If
+ * `AUDIO_TRACK_DIRECT` is specified for this parameter, you must set `publishMicrophoneTrack` to
+ * `false` in `ChannelMediaOptions` when calling `joinChannel(const char* token, const char*
+ * channelId, uid_t uid, const ChannelMediaOptions& options)` to join the channel; otherwise,
+ * joining the channel fails and returns the error code -2.
+ * @param config The configuration of the custom audio track. See `AudioTrackConfig`.
*
* @return
- * - If the call is successful, SDK returns audio track id.
- * - If the call fails, SDK returns 0xffffffff.
+ * - If the method call is successful, the audio track ID is returned as the unique identifier of
+ * the audio track.
+ * - If the method call fails, 0xffffffff is returned.
*/
virtual rtc::track_id_t createCustomAudioTrack(rtc::AUDIO_TRACK_TYPE trackType, const rtc::AudioTrackConfig& config) = 0;
/**
- * Destroy custom audio track by trackId
+ * @brief Destroys the specified audio track.
*
- * @param trackId The custom audio track id.
+ * @param trackId The custom audio track ID returned in `createCustomAudioTrack`.
*
* @return
* - 0: Success.
@@ -216,25 +315,24 @@ class IMediaEngine {
virtual int destroyCustomAudioTrack(rtc::track_id_t trackId) = 0;
/**
- * Sets the external audio sink.
- *
- * This method applies to scenarios where you want to use external audio
- * data for playback. After calling the \ref IRtcEngine::initialize "initialize"
- * method and pass value of false in the `enableAudioDevice` member in the RtcEngineContext struct, you can call
- * the \ref agora::media::IMediaEngine::pullAudioFrame "pullAudioFrame" method to pull the remote audio data, process
- * it, and play it with the audio effects that you want.
- *
- * @note
- * Once you call the \ref IRtcEngine::initialize "initialize" method and pass value of false in the `enableAudioDevice`
- * member in the RtcEngineContext struct, the app will not retrieve any audio data from the
- * \ref agora::media::IAudioFrameObserver::onPlaybackAudioFrame "onPlaybackAudioFrame" callback.
- *
- * @param enabled Sets whether or not to the external audio sink
- * - true: Enables the external audio sink.
- * - false: Disables the external audio sink.
- * @param sampleRate Sets the sample rate (Hz) of the external audio sink, which can be set as 16000, 32000, 44100 or 48000.
- * @param channels Sets the number of audio channels of the external
- * audio sink:
+ * @brief Sets the external audio sink.
+ *
+ * @details
+ * After enabling the external audio sink, you can call `pullAudioFrame` to pull remote audio
+ * frames. The app can process the remote audio and play it with the audio effects that you want.
+ * Applicable scenarios: This method applies to scenarios where you want to use external audio data
+ * for playback.
+ * Call timing: Call this method before joining a channel.
+ *
+ * @note Once you enable the external audio sink, the app will not retrieve any audio data from the
+ * `onPlaybackAudioFrame` callback.
+ *
+ * @param enabled Whether to enable or disable the external audio sink:
+ * - `true`: Enables the external audio sink.
+ * - `false`: (Default) Disables the external audio sink.
+ * @param sampleRate The sample rate (Hz) of the external audio sink, which can be set as 16000,
+ * 32000, 44100, or 48000.
+ * @param channels The number of audio channels of the external audio sink:
* - 1: Mono.
* - 2: Stereo.
*
@@ -261,10 +359,42 @@ class IMediaEngine {
virtual int enableCustomAudioLocalPlayback(rtc::track_id_t trackId, bool enabled) = 0;
/**
- * Pushes the external video frame to the app.
+ * @brief Pushes the external raw video frame to the SDK through video tracks.
+ *
+ * @details
+ * To publish a custom video source, see the following steps: 1. Call `createCustomVideoTrack` to
+ * create a video track and get the video track ID.
+ * 2. Call `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to join the channel. In `ChannelMediaOptions`, set
+ * `customVideoTrackId` to the video track ID that you want to publish, and set
+ * `publishCustomVideoTrack` to `true`.
+ * 3. Call this method and specify `videoTrackId` as the video track ID set in step 2. You can then
+ * publish the corresponding custom video source in the channel.
+ * Applicable scenarios: The SDK supports the ID3D11Texture2D video format since v4.2.3, which is
+ * widely used in game scenarios. When you need to push this type of video frame to the SDK, call
+ * this method and set the `format` in the `frame` to `VIDEO_TEXTURE_ID3D11TEXTURE2D`, set the
+ * `d3d11_texture_2d` and `texture_slice_index` members, and set the format of the video frame to
+ * ID3D11Texture2D.
*
- * @param frame The external video frame: ExternalVideoFrame.
- * @param videoTrackId The id of the video track.
+ * @note
+ * If you only need to push one custom video source to the channel, you can directly call the
+ * `setExternalVideoSource` method and the SDK will automatically create a video track with the
+ * `videoTrackId` set to 0.
+ * DANGER: After calling this method, even if you stop pushing external video frames to the SDK, the
+ * custom video stream will still be counted as the video duration usage and incur charges. Agora
+ * recommends that you take appropriate measures based on the actual situation to avoid such video
+ * billing.
+ * - If you no longer need to capture external video data, you can call `destroyCustomVideoTrack` to
+ * destroy the custom video track.
+ * - If you only want to use the external video data for local preview and not publish it in the
+ * channel, you can call `muteLocalVideoStream` to cancel sending video stream or call
+ * `updateChannelMediaOptions` to set `publishCustomVideoTrack` to `false`.
+ *
+ * @param frame The external raw video frame to be pushed. See `ExternalVideoFrame`.
+ * @param videoTrackId The video track ID returned by calling the `createCustomVideoTrack`
+ * method. Note: If you only need to push one custom video source, set `videoTrackId` to 0.
+ *
+ * @return
* - 0: Success.
* - < 0: Failure.
*/
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaPlayer.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaPlayer.h
index 25f48a4a2..46f63a552 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaPlayer.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaPlayer.h
@@ -33,16 +33,28 @@ class IMediaPlayer : public RefCountInterface {
virtual int initialize(base::IAgoraService* agora_service) = 0;
/**
- * Get unique media player id of the media player entity.
+ * @brief Gets the ID of the media player.
+ *
* @return
- * - >= 0: The source id of this media player entity.
+ * - >= 0: Success. The ID of the media player.
* - < 0: Failure.
*/
virtual int getMediaPlayerId() const = 0;
/**
- * Opens a media file with a specified URL.
- * @param url The URL of the media file that you want to play.
+ * @brief Opens the media resource.
+ *
+ * @details
+ * Call timing: This method can be called either before or after joining the channel.
+ * Related callbacks: After calling this method, the SDK triggers the `onPlayerSourceStateChanged`
+ * callback. After receiving the report of the playback status as `PLAYER_STATE_OPEN_COMPLETED`, you
+ * can call the `play` method to play the media file.
+ *
+ * @note This method is called asynchronously.
+ *
+ * @param url The path of the media file. Both local path and online path are supported.
+ * @param startPos The starting position (ms) for playback. Default value is 0.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -50,8 +62,19 @@ class IMediaPlayer : public RefCountInterface {
virtual int open(const char* url, int64_t startPos) = 0;
/**
- * @brief Open a media file with a media file source.
- * @param source Media file source that you want to play, see `MediaSource`
+ * @brief Opens a media file and configures the playback scenarios.
+ *
+ * @details
+ * This method supports opening media files of different sources, including a custom media source,
+ * and allows you to configure the playback scenarios.
+ * Call timing: You can call this method either before or after joining a channel.
+ *
+ * @note This method is called asynchronously. If you need to play a media file, make sure you
+ * receive the `onPlayerSourceStateChanged` callback reporting `PLAYER_STATE_OPEN_COMPLETED` before
+ * calling the `play` method to play the file.
+ *
+ * @param source Media resources. See `MediaSource`.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -59,7 +82,14 @@ class IMediaPlayer : public RefCountInterface {
virtual int openWithMediaSource(const media::base::MediaSource &source) = 0;
/**
- * Plays the media file.
+ * @brief Plays the media file.
+ *
+ * @details
+ * Call timing: - Call this method after calling `open` or `openWithMediaSource` to open a media
+ * file and receiving an `onPlayerSourceStateChanged` callback reporting the status as
+ * PLAYER_STATE_OPEN_COMPLETED.
+ * - Call the method after calling `seek`.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -67,34 +97,72 @@ class IMediaPlayer : public RefCountInterface {
virtual int play() = 0;
/**
- * Pauses playing the media file.
+ * @brief Pauses the playback.
+ *
+ * @details
+ * Call timing: You can call this method either before or after joining a channel.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int pause() = 0;
/**
- * Stops playing the current media file.
+ * @brief Stops playing the media track.
+ *
+ * @details
+ * After calling this method to stop playback, if you want to play again, you need to call `open` or
+ * `openWithMediaSource` to open the media resource.
+ * Call timing: Call this method after calling `play`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int stop() = 0;
/**
- * Resumes playing the media file.
+ * @brief Resumes playing the media file.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int resume() = 0;
/**
- * Sets the current playback position of the media file.
+ * @brief Seeks to a new playback position.
+ *
+ * @details
+ * - If you call `seek` after the playback has completed (upon receiving callback
+ * `onPlayerSourceStateChanged` reporting playback status as PLAYER_STATE_PLAYBACK_COMPLETED or
+ * PLAYER_STATE_PLAYBACK_ALL_LOOPS_COMPLETED ), the SDK will play the media file from the specified
+ * position. At this point, you will receive callback `onPlayerSourceStateChanged` reporting
+ * playback status as PLAYER_STATE_PLAYING.
+ * - If you call `seek` while the playback is paused, upon successful call of this method, the SDK
+ * will seek to the specified position. To resume playback, call `resume` or `play` .
+ * Call timing: You can call this method either before or after joining a channel.
+ * Related callbacks: After successfully calling this method, you will receive the `onPlayerEvent`
+ * callback, reporting the result of the seek operation to the new playback position.
+ *
* @param newPos The new playback position (ms).
+ *
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int seek(int64_t newPos) = 0;
- /** Sets the pitch of the current media file.
- * @param pitch Sets the pitch of the local music file by chromatic scale. The default value is 0,
- * which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value between
- * consecutive values is a chromatic value. The greater the absolute value of this parameter, the
- * higher or lower the pitch of the local music file.
+ /**
+ * @brief Sets the pitch of the current media resource.
+ *
+ * @note Call this method after calling `open`.
+ *
+ * @param pitch Sets the pitch of the local music file by the chromatic scale. The default value is
+ * 0, which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value
+ * between consecutive values is a chromatic value. The greater the absolute value of this
+ * parameter, the higher or lower the pitch of the local music file.
*
* @return
* - 0: Success.
@@ -103,8 +171,10 @@ class IMediaPlayer : public RefCountInterface {
virtual int setAudioPitch(int pitch) = 0;
/**
- * Gets the duration of the media file.
- * @param duration A reference to the duration of the media file.
+ * @brief Gets the duration of the media resource.
+ *
+ * @param duration An output parameter. The total duration (ms) of the media file.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -112,24 +182,60 @@ class IMediaPlayer : public RefCountInterface {
virtual int getDuration(int64_t& duration) = 0;
/**
- * Gets the current playback position of the media file.
- * @param currentPosition A reference to the current playback position (ms).
+ * @brief Gets current local playback progress.
+ *
+ * @param pos An output parameter. The current playback position (ms) of the media file.
+ *
* @return
- * - 0: Success.
- * - < 0: Failure.
+ * - Returns the current playback progress (ms) if the call succeeds.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int getPlayPosition(int64_t& pos) = 0;
+ /**
+ * @brief Gets the number of the media streams in the media resource.
+ *
+ * @note Call this method after you call `open` and receive the `onPlayerSourceStateChanged`
+ * callback reporting the state `PLAYER_STATE_OPEN_COMPLETED`.
+ *
+ * @param count An output parameter. The number of the media streams in the media resource.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
+ */
virtual int getStreamCount(int64_t& count) = 0;
+ /**
+ * @brief Gets the detailed information of the media stream.
+ *
+ * @details
+ * Call timing: Call this method after calling `getStreamCount`.
+ *
+ * @param index The index of the media stream. This parameter needs to be less than the `count`
+ * parameter of `getStreamCount`.
+ * @param info An output parameter. The detailed information of the media stream. See
+ * `PlayerStreamInfo`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int getStreamInfo(int64_t index, media::base::PlayerStreamInfo* info) = 0;
/**
- * Sets whether to loop the media file for playback.
- * @param loopCount the number of times looping the media file.
- * - 0: Play the audio effect once.
- * - 1: Play the audio effect twice.
- * - -1: Play the audio effect in a loop indefinitely, until stopEffect() or stop() is called.
+ * @brief Sets the loop playback.
+ *
+ * @details
+ * If you want to loop, call this method and set the number of the loops.
+ * When the loop finishes, the SDK triggers `onPlayerSourceStateChanged` and reports the playback
+ * state as PLAYER_STATE_PLAYBACK_ALL_LOOPS_COMPLETED.
+ *
+ * @param loopCount The number of times the audio effect loops:
+ * - ≥0: Number of times for playing. For example, setting it to 0 means no loop playback, playing
+ * only once; setting it to 1 means loop playback once, playing a total of twice.
+ * - -1: Play the audio file in an infinite loop.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -137,8 +243,17 @@ class IMediaPlayer : public RefCountInterface {
virtual int setLoopCount(int loopCount) = 0;
/**
- * Change playback speed
- * @param speed the value of playback speed ref [50-400]
+ * @brief Sets the playback speed of the current media file.
+ *
+ * @details
+ * Call this method after calling `open`.
+ *
+ * @param speed The playback speed. Agora recommends that you set this to a value between 30 and
+ * 400, defined as follows:
+ * - 30: 0.3 times the original speed.
+ * - 100: The original speed.
+ * - 400: 4 times the original speed.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -146,8 +261,18 @@ class IMediaPlayer : public RefCountInterface {
virtual int setPlaybackSpeed(int speed) = 0;
/**
- * Slect playback audio track of the media file
- * @param index the index of the audio track in media file
+ * @brief Selects the audio track used during playback.
+ *
+ * @details
+ * After getting the track index of the audio file, you can call this method to specify any track to
+ * play. For example, if different tracks of a multi-track file store songs in different languages,
+ * you can call this method to set the playback language.
+ *
+ * @note You need to call this method after calling `getStreamInfo` to get the audio stream index
+ * value.
+ *
+ * @param index The index of the audio track.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -155,26 +280,46 @@ class IMediaPlayer : public RefCountInterface {
virtual int selectAudioTrack(int index) = 0;
/**
- * Selects multi audio track of the media file for playback or publish to channel.
- * @param playoutTrackIndex The index of the audio track in media file for local playback.
- * @param publishTrackIndex The index of the audio track in the media file published to the remote.
- *
- * @note
- * You can obtain the streamIndex of the audio track by calling getStreamInfo..
- * If you want to use selectMultiAudioTrack, you need to open the media file with openWithMediaSource and set enableMultiAudioTrack to true.
+ * @brief Selects the audio tracks that you want to play on your local device and publish to the
+ * channel respectively.
+ *
+ * @details
+ * You can call this method to determine the audio track to be played on your local device and
+ * published to the channel.
+ * Before calling this method, you need to open the media file with the `openWithMediaSource` method
+ * and set `enableMultiAudioTrack` in `MediaSource` as `true`.
+ * Applicable scenarios: For example, in KTV scenarios, the host can choose to play the original
+ * sound locally and publish the accompaniment track to the channel.
+ *
+ * @param playoutTrackIndex The index of audio tracks for local playback. You can obtain the index
+ * through `getStreamInfo`.
+ * @param publishTrackIndex The index of audio tracks to be published in the channel. You can obtain
+ * the index through `getStreamInfo`.
*
* @return
* - 0: Success.
- * - < 0: Failure. See {@link media::base::MEDIA_PLAYER_REASON MEDIA_PLAYER_REASON}.
- * - -2: Invalid argument. Argument must be greater than or equal to zero.
- * - -8: Invalid State.You must open the media file with openWithMediaSource and set enableMultiAudioTrack to true
+ * - < 0: Failure.
*/
virtual int selectMultiAudioTrack(int playoutTrackIndex, int publishTrackIndex) = 0;
/**
- * change player option before play a file
- * @param key the key of the option param
- * @param value the value of option param
+ * @brief Sets media player options.
+ *
+ * @details
+ * The media player supports setting options through `key` and `value`.
+ * The difference between this method and `setPlayerOption(const char* key, const char* value)` is
+ * that the `value` parameter of
+ * this method is of type Int, while the `value` of `setPlayerOption(const char* key, const char*
+ * value)` is of type String. These
+ * two methods cannot be used together.
+ * Applicable scenarios: Scenarios that require technical previews or special customization
+ * features. In general, you do not need to call this method; you can simply use the default options
+ * provided by the media player.
+ * Call timing: Call this method before the `open` or `openWithMediaSource` method.
+ *
+ * @param key The key of the option.
+ * @param value The value of the key.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -182,9 +327,23 @@ class IMediaPlayer : public RefCountInterface {
virtual int setPlayerOption(const char* key, int value) = 0;
/**
- * change player option before play a file
- * @param key the key of the option param
- * @param value the value of option param
+ * @brief Sets media player options.
+ *
+ * @details
+ * The media player supports setting options through `key` and `value`.
+ * The difference between this method and `setPlayerOption(const char* key, int value)` is that the
+ * `value` parameter of
+ * this method is of type String, while the `value` of `setPlayerOption(const char* key, int value)`
+ * is of type Int.
+ * These two methods cannot be used together.
+ * Applicable scenarios: Scenarios that require technical previews or special customization
+ * features. In general, you do not need to call this method; you can simply use the default options
+ * provided by the media player.
+ * Call timing: Call this method before the `open` or `openWithMediaSource` method.
+ *
+ * @param key The key of the option.
+ * @param value The value of the key.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -217,79 +376,136 @@ class IMediaPlayer : public RefCountInterface {
*/
virtual int setExternalSubtitle(const char* url) = 0;
+ /**
+ * @brief Gets current playback state.
+ *
+ * @return
+ * The current playback state. See `MEDIA_PLAYER_STATE`.
+ */
virtual media::base::MEDIA_PLAYER_STATE getState() = 0;
/**
- * @brief Turn mute on or off
+ * @brief Sets whether to mute the media file.
+ *
+ * @details
+ * Call timing: You can call this method either before or after joining a channel.
*
- * @param muted Whether to mute on
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @param muted Whether to mute the media file:
+ * - `true`: Mute the media file.
+ * - `false`: (Default) Unmute the media file.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int mute(bool muted) = 0;
/**
- * @brief Get mute state
+ * @brief Reports whether the media resource is muted.
*
- * @param[out] muted Whether is mute on
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @param muted An output parameter. Whether the media file is muted:
+ * - `true`: The media file is muted.
+ * - `false`: The media file is not muted.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int getMute(bool& muted) = 0;
/**
- * @brief Adjust playback volume
+ * @brief Adjusts the local playback volume.
+ *
+ * @details
+ * Call timing: This method can be called either before or after joining the channel.
*
- * @param volume The volume value to be adjusted
- * The volume can be adjusted from 0 to 400:
- * 0: mute;
- * 100: original volume;
- * 400: Up to 4 times the original volume (with built-in overflow protection).
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @param volume The local playback volume, which ranges from 0 to 100:
+ * - 0: Mute.
+ * - 100: (Default) The original volume.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int adjustPlayoutVolume(int volume) = 0;
/**
- * @brief Get the current playback volume
+ * @brief Gets the local playback volume.
+ *
+ * @param volume An output parameter. The local playback volume, which ranges from 0 to 100:
+ * - 0: Mute.
+ * - 100: (Default) The original volume.
*
- * @param[out] volume
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int getPlayoutVolume(int& volume) = 0;
/**
- * @brief adjust publish signal volume
+ * @brief Adjusts the volume of the media file for publishing.
+ *
+ * @details
+ * After connected to the Agora server, you can call this method to adjust the volume of the media
+ * file heard by the remote user.
+ * Call timing: This method can be called either before or after joining the channel.
*
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @param volume The volume, which ranges from 0 to 400:
+ * - 0: Mute.
+ * - 100: (Default) The original volume.
+ * - 400: Four times the original volume (amplifying the audio signals by four times).
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int adjustPublishSignalVolume(int volume) = 0;
/**
- * @brief get publish signal volume
+ * @brief Gets the volume of the media file for publishing.
+ *
+ * @param volume An output parameter. The volume of the media file for publishing.
*
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int getPublishSignalVolume(int& volume) = 0;
/**
- * @brief Set video rendering view
+ * @brief Sets the view.
+ *
+ * @details
+ * Call timing: You can call this method either before or after joining a channel.
+ *
+ * @param view The render view. On Windows, this parameter sets the window handle (HWND).
*
- * @param view view object, windows platform is HWND
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setView(media::base::view_t view) = 0;
/**
- * @brief Set video display mode
+ * @brief Sets the render mode of the media player.
+ *
+ * @param renderMode Sets the render mode of the view. See `RENDER_MODE_TYPE`.
*
- * @param renderMode Video display mode
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setRenderMode(media::base::RENDER_MODE_TYPE renderMode) = 0;
/**
- * Registers a media player source observer.
+ * @brief Registers a media player observer.
+ *
+ * @details
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @param observer The player observer, listening for events during the playback. See
+ * `IMediaPlayerSourceObserver`.
*
- * Once the media player source observer is registered, you can use the observer to monitor the state change of the media player.
- * @param observer The pointer to the IMediaPlayerSourceObserver object.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -297,8 +513,11 @@ class IMediaPlayer : public RefCountInterface {
virtual int registerPlayerSourceObserver(IMediaPlayerSourceObserver* observer) = 0;
/**
- * Releases the media player source observer.
- * @param observer The pointer to the IMediaPlayerSourceObserver object.
+ * @brief Releases a media player observer.
+ *
+ * @param observer The player observer, listening for events during the playback. See
+ * `IMediaPlayerSourceObserver`.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -306,9 +525,16 @@ class IMediaPlayer : public RefCountInterface {
virtual int unregisterPlayerSourceObserver(IMediaPlayerSourceObserver* observer) = 0;
/**
- * Register the audio frame observer.
+ * @brief Registers a PCM audio frame observer object.
+ *
+ * @details
+ * You need to implement the `IAudioPcmFrameSink` class in this method and register callbacks
+ * according to your scenarios. After you successfully register the audio frame observer, the SDK
+ * triggers the registered callbacks each time an audio frame is received.
+ *
+ * @param observer The audio frame observer, reporting the reception of each audio frame. See
+ * `IAudioPcmFrameSink`.
*
- * @param observer The pointer to the IAudioFrameObserver object.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -316,13 +542,12 @@ class IMediaPlayer : public RefCountInterface {
virtual int registerAudioFrameObserver(media::IAudioPcmFrameSink* observer) = 0;
/**
- * Registers an audio observer.
+ * @brief Registers an audio frame observer object.
+ *
+ * @param observer The audio frame observer, reporting the reception of each audio frame. See
+ * `IAudioPcmFrameSink`.
+ * @param mode The use mode of the audio frame. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`.
*
- * @param observer The audio observer, reporting the reception of each audio
- * frame. See
- * \ref media::IAudioPcmFrameSink "IAudioFrameObserver" for
- * details.
- * @param mode Use mode of the audio frame. See #RAW_AUDIO_FRAME_OP_MODE_TYPE.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -331,8 +556,10 @@ class IMediaPlayer : public RefCountInterface {
RAW_AUDIO_FRAME_OP_MODE_TYPE mode) = 0;
/**
- * Releases the audio frame observer.
- * @param observer The pointer to the IAudioFrameObserver object.
+ * @brief Unregisters an audio frame observer.
+ *
+ * @param observer The audio observer. See `IAudioPcmFrameSink`.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -340,18 +567,31 @@ class IMediaPlayer : public RefCountInterface {
virtual int unregisterAudioFrameObserver(media::IAudioPcmFrameSink* observer) = 0;
/**
- * @brief Register the player video observer
+ * @brief Registers a video frame observer object.
+ *
+ * @details
+ * You need to implement the `IVideoFrameObserver` class in this method and register callbacks
+ * according to your scenarios. After you successfully register the video frame observer, the SDK
+ * triggers the registered callbacks each time a video frame is received.
*
- * @param observer observer object
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @param observer The video observer, reporting the reception of each video frame. See
+ * `IVideoFrameObserver`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int registerVideoFrameObserver(media::base::IVideoFrameObserver* observer) = 0;
/**
- * @brief UnRegister the player video observer
+ * @brief Unregisters the video frame observer.
+ *
+ * @param observer The video observer, reporting the reception of each video frame. See
+ * `IVideoFrameObserver`.
*
- * @param observer observer object
- * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int unregisterVideoFrameObserver(agora::media::base::IVideoFrameObserver* observer) = 0;
@@ -378,9 +618,26 @@ class IMediaPlayer : public RefCountInterface {
virtual int unregisterMediaPlayerAudioSpectrumObserver(media::IAudioSpectrumObserver* observer) = 0;
/**
- * @brief Set dual-mono output mode of the music file.
- *
- * @param mode dual mono mode. See #agora::media::AUDIO_DUAL_MONO_MODE
+ * @brief Sets the channel mode of the current audio file.
+ *
+ * @details
+ * In a stereo music file, the left and right channels can store different audio data. According to
+ * your needs, you can set the channel mode to original mode, left channel mode, right channel mode,
+ * or mixed channel mode. For example, in the KTV scenario, the left channel of the music file
+ * stores the musical accompaniment, and the right channel stores the singing voice. If you only
+ * need to listen to the accompaniment, call this method to set the channel mode of the music file
+ * to left channel mode; if you need to listen to the accompaniment and the singing voice at the
+ * same time, call this method to set the channel mode to mixed channel mode.
+ *
+ * @note
+ * - Call this method after calling `open`.
+ * - This method only applies to stereo audio files.
+ *
+ * @param mode The channel mode. See `AUDIO_DUAL_MONO_MODE`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setAudioDualMonoMode(agora::media::base::AUDIO_DUAL_MONO_MODE mode) = 0;
@@ -393,15 +650,19 @@ class IMediaPlayer : public RefCountInterface {
virtual const char* getPlayerSdkVersion() = 0;
/**
- * Get the current play src.
+ * @brief Gets the path of the media resource being played.
+ *
* @return
- * - current play src of raw bytes.
+ * The path of the media resource being played.
*/
virtual const char* getPlaySrc() = 0;
/**
* Open the Agora CDN media source.
+ *
+ * @deprecated 4.6.0
+ *
* @param src The src of the media file that you want to play.
* @param startPos The playback position (ms).
* @return
@@ -412,6 +673,9 @@ class IMediaPlayer : public RefCountInterface {
/**
* Gets the number of Agora CDN lines.
+ *
+ * @deprecated 4.6.0
+ *
* @return
* - > 0: number of CDN.
* - <= 0: Failure.
@@ -420,6 +684,9 @@ class IMediaPlayer : public RefCountInterface {
/**
* Switch Agora CDN lines.
+ *
+ * @deprecated 4.6.0
+ *
* @param index Specific CDN line index.
* @return
* - 0: Success.
@@ -429,6 +696,9 @@ class IMediaPlayer : public RefCountInterface {
/**
* Gets the line of the current CDN.
+ *
+ * @deprecated 4.6.0
+ *
* @return
* - >= 0: Specific line.
* - < 0: Failure.
@@ -437,6 +707,9 @@ class IMediaPlayer : public RefCountInterface {
/**
* Enable automatic CDN line switching.
+ *
+ * @deprecated 4.6.0
+ *
* @param enable Whether enable.
* @return
* - 0: Success.
@@ -446,6 +719,9 @@ class IMediaPlayer : public RefCountInterface {
/**
* Update the CDN source token and timestamp.
+ *
+ * @deprecated 4.6.0
+ *
* @param token token.
* @param ts ts.
* @return
@@ -456,6 +732,9 @@ class IMediaPlayer : public RefCountInterface {
/**
* Switch the CDN source when open a media through "openWithAgoraCDNSrc" API
+ *
+ * @deprecated 4.6.0
+ *
* @param src Specific src.
* @param syncPts Live streaming must be set to false.
* @return
@@ -465,9 +744,34 @@ class IMediaPlayer : public RefCountInterface {
virtual int switchAgoraCDNSrc(const char* src, bool syncPts = false) = 0;
/**
- * Switch the media source when open a media through "open" API
- * @param src Specific src.
- * @param syncPts Live streaming must be set to false.
+ * @brief Switches the media resource being played.
+ *
+ * @details
+ * You can call this method to switch the media resource to be played according to the current
+ * network status. For example:
+ * - When the network is poor, the media resource to be played is switched to a media resource
+ * address with a lower bitrate.
+ * - When the network is good, the media resource to be played is switched to a media resource
+ * address with a higher bitrate.
+ * After calling this method, if you receive the `onPlayerEvent` callback reporting the
+ * `PLAYER_EVENT_SWITCH_COMPLETE` event, the switching is successful. If the switching fails, the
+ * SDK will automatically retry 3 times. If it still fails, you will receive the `onPlayerEvent`
+ * callback reporting the `PLAYER_EVENT_SWITCH_ERROR` event indicating an error occurred during
+ * media resource switching.
+ *
+ * @note
+ * - Ensure that you call this method after `open`.
+ * - To ensure normal playback, pay attention to the following when calling this method:
+ * - Do not call this method when playback is paused.
+ * - Do not call the `seek` method during switching.
+ * - Before switching the media resource, make sure that the playback position does not exceed the
+ * total duration of the media resource to be switched.
+ *
+ * @param src The URL of the media resource.
+ * @param syncPts Whether to synchronize the playback position (ms) before and after the switch:
+ * - `true`: Synchronize the playback position before and after the switch.
+ * - `false`: (Default) Do not synchronize the playback position before and after the switch.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -475,9 +779,27 @@ class IMediaPlayer : public RefCountInterface {
virtual int switchSrc(const char* src, bool syncPts = true) = 0;
/**
- * Preload a media source
- * @param src Specific src.
- * @param startPos The starting position (ms) for playback. Default value is 0.
+ * @brief Preloads a media resource.
+ *
+ * @details
+ * You can call this method to preload a media resource into the playlist. If you need to preload
+ * multiple media resources, you can call this method multiple times.
+ * After calling this method, if you receive the `PLAYER_PRELOAD_EVENT_COMPLETE` event in the
+ * `onPreloadEvent` callback, the preload is successful; If you receive the
+ * `PLAYER_PRELOAD_EVENT_ERROR` event in the `onPreloadEvent` callback, the preload fails.
+ * If the preload is successful and you want to play the media resource, call `playPreloadedSrc`; if
+ * you want to clear the playlist, call `stop`.
+ *
+ * @note
+ * - Before calling this method, ensure that you have called `open` or `openWithMediaSource` to open
+ * the media resource successfully.
+ * - Agora does not support preloading duplicate media resources to the playlist. However, you can
+ * preload the media resources that are being played to the playlist again.
+ *
+ * @param src The URL of the media resource.
+ * @param startPos The starting position (ms) for playing after the media resource is preloaded to
+ * the playlist. When preloading a live stream, set this parameter to 0.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -485,8 +807,24 @@ class IMediaPlayer : public RefCountInterface {
virtual int preloadSrc(const char* src, int64_t startPos) = 0;
/**
- * Play a pre-loaded media source
- * @param src Specific src.
+ * @brief Plays preloaded media resources.
+ *
+ * @details
+ * After calling the `preloadSrc` method to preload the media resource into the playlist, you can
+ * call this method to play the preloaded media resource. After calling this method, if you receive
+ * the `onPlayerSourceStateChanged` callback which reports the `PLAYER_STATE_PLAYING` state, the
+ * playback is successful.
+ * If you want to change the preloaded media resource to be played, you can call this method again
+ * and specify the URL of the new media resource that you want to preload. If you want to replay the
+ * media resource, you need to call `preloadSrc` to preload the media resource to the playlist again
+ * before playing. If you want to clear the playlist, call the `stop` method.
+ *
+ * @note If you call this method when playback is paused, this method does not take effect until
+ * playback is resumed.
+ *
+ * @param src The URL of the media resource in the playlist, which must be consistent with the `src`
+ * set by the `preloadSrc` method; otherwise, the media resource cannot be played.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -494,8 +832,12 @@ class IMediaPlayer : public RefCountInterface {
virtual int playPreloadedSrc(const char* src) = 0;
/**
- * Unload a preloaded media source
- * @param src Specific src.
+ * @brief Unloads media resources that are preloaded.
+ *
+ * @note This method cannot release the media resource being played.
+ *
+ * @param src The URL of the media resource.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -503,11 +845,17 @@ class IMediaPlayer : public RefCountInterface {
virtual int unloadSrc(const char* src) = 0;
/**
- * Set spatial audio params for the music file. It can be called after the media player
- * was created.
+ * @brief Enables or disables the spatial audio effect for the media player.
+ *
+ * @details
+ * After successfully setting the spatial audio effect parameters of the media player, the SDK
+ * enables the spatial audio effect for the media player, and the local user can hear the media
+ * resources with a sense of space.
+ * If you need to disable the spatial audio effect for the media player, set the `params` parameter
+ * to null.
+ *
+ * @param params The spatial audio effect parameters of the media player. See `SpatialAudioParams`.
*
- * @param params See #agora::SpatialAudioParams. If it's
- * not set, then the spatial audio will be disabled; or it will be enabled.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -529,6 +877,15 @@ class IMediaPlayer : public RefCountInterface {
*/
virtual int setSoundPositionParams(float pan, float gain) = 0;
+ /**
+ * @brief Gets the audio buffer delay when playing the media file.
+ * @param[out] delayMs The audio buffer delay, in milliseconds.
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
+ virtual int getAudioBufferDelay(int32_t& delayMs) = 0;
+
};
/**
@@ -538,89 +895,139 @@ class IMediaPlayer : public RefCountInterface {
class IMediaPlayerCacheManager {
public:
/**
- * Delete the longest used cache file in order to release some of the cache file disk usage.
- * (usually used when the cache quota notification is received)
- *
+ * @brief Deletes all cached media files in the media player.
+ *
+ * @note The cached media file currently being played will not be deleted.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int removeAllCaches() = 0;
/**
- * Remove the latest media resource cache file.
+ * @brief Deletes a cached media file that is the least recently used.
+ *
+ * @details
+ * You can call this method to delete a cached media file when the storage space for the cached
+ * files is about to reach its limit. After you call this method, the SDK deletes the cached media
+ * file that is least used.
+ *
+ * @note The cached media file currently being played will not be deleted.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int removeOldCache() = 0;
/**
- * Remove the cache file by uri, setting by MediaSource.
- * @param uri URI,identify the uniqueness of the property, Set from `MeidaSource`
+ * @brief Deletes a cached media file.
+ *
+ * @note The cached media file currently being played will not be deleted.
+ *
+ * @param uri The URI (Uniform Resource Identifier) of the media file to be deleted.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int removeCacheByUri(const char *uri) = 0;
/**
- * Set cache file path that files will be saved to.
- * @param path file path.
+ * @brief Sets the storage path for the media files that you want to cache.
+ *
+ * @note Make sure `IRtcEngine` is initialized before you call this method.
+ *
+ * @param path The absolute path of the media files to be cached. Ensure that the directory for the
+ * media files exists and is writable.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int setCacheDir(const char *path) = 0;
/**
- * Set the maximum number of cached files.
- * @param count maximum number of cached files.
+ * @brief Sets the maximum number of media files that can be cached.
+ *
+ * @param count The maximum number of media files that can be cached. The default value is 1,000.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int setMaxCacheFileCount(int count) = 0;
/**
- * Set the maximum size of cache file disk usage.
- * @param cacheSize total size of the largest cache file.
+ * @brief Sets the maximum size of the aggregate storage space for cached media files.
+ *
+ * @param cacheSize The maximum size (bytes) of the aggregate storage space for cached media files.
+ * The default value is 1 GB.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int setMaxCacheFileSize(int64_t cacheSize) = 0;
/**
- * Whether to automatically delete old cache files when the cache file usage reaches the limit.
- * @param enable enable the player to automatically clear the cache.
+ * @brief Sets whether to delete cached media files automatically.
+ *
+ * @details
+ * If you enable this function to remove cached media files automatically, when the cached media
+ * files exceed either the number or size limit you set, the SDK automatically deletes the least
+ * recently used cache file.
+ *
+ * @param enable Whether to enable the SDK to delete cached media files automatically:
+ * - `true`: Delete cached media files automatically.
+ * - `false`: (Default) Do not delete cached media files automatically.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int enableAutoRemoveCache(bool enable) = 0;
/**
- * Get the cache directory.
- * @param path cache path, recieve a pointer to be copied to.
- * @param length the length to be copied.
+ * @brief Gets the storage path of the cached media files.
+ *
+ * @details
+ * If you have not called the `setCacheDir` method to set the storage path for the media files to be
+ * cached before calling this method, you get the default storage path used by the SDK.
+ *
+ * @param path An output parameter; the storage path for the media file to be cached.
+ * @param length An input parameter; the maximum length of the cache file storage path string. Set
+ * this parameter according to the length of the storage path string obtained in `path`.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int getCacheDir(char* path, int length) = 0;
/**
- * Get the maximum number of cached files.
+ * @brief Gets the maximum number of media files that can be cached.
+ *
+ * @details
+ * By default, the maximum number of media files that can be cached is 1,000.
+ *
* @return
- * > 0: file count.
- * - < 0: Failure.
+ * - > 0: The call succeeds and returns the maximum number of media files that can be cached.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int getMaxCacheFileCount() = 0;
/**
- * Get the total size of the largest cache file
+ * @brief Gets the maximum size of the aggregate storage space for cached media files.
+ *
+ * @details
+ * By default, the maximum size of the aggregate storage space for cached media files is 1 GB. You
+ * can call the `setMaxCacheFileSize` method to set the limit according to your scenarios.
+ *
* @return
- * > 0: file size.
- * - < 0: Failure.
+ * - > 0: The call succeeds and returns the maximum size (in bytes) of the aggregate storage space
+ * for cached media files.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int64_t getMaxCacheFileSize() = 0;
/**
- * Get the number of all cache files.
+ * @brief Gets the number of media files that are cached.
+ *
* @return
- * > 0: file count.
- * - < 0: Failure.
+ * - ≥ 0: The call succeeds and returns the number of media files that are cached.
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/
virtual int getCacheFileCount() = 0;
@@ -630,4 +1037,18 @@ class IMediaPlayerCacheManager {
} //namespace rtc
} // namespace agora
+/**
+ * @brief Gets one `IMediaPlayerCacheManager` instance.
+ *
+ * @details
+ * Before calling any APIs in the `IMediaPlayerCacheManager` class, you need to call this method to
+ * get a cache manager instance of a media player.
+ * Call timing: Make sure the `IRtcEngine` is initialized before you call this method.
+ *
+ * @note The cache manager is a singleton pattern. Therefore, multiple calls to this method return
+ * the same instance.
+ *
+ * @return
+ * The `IMediaPlayerCacheManager` instance.
+ */
AGORA_API agora::rtc::IMediaPlayerCacheManager* AGORA_CALL getMediaPlayerCacheManager();
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaPlayerSource.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaPlayerSource.h
index 4cd8206ca..563497881 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaPlayerSource.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaPlayerSource.h
@@ -403,58 +403,83 @@ class IMediaPlayerSourceObserver {
virtual ~IMediaPlayerSourceObserver() {}
/**
- * @brief Reports the playback state change.
+ * @brief Reports the changes of playback state.
+ *
+ * @details
+ * When the state of the media player changes, the SDK triggers this callback to report the current
+ * playback state.
+ *
+ * @param state The playback state. See `MEDIA_PLAYER_STATE`.
+ * @param reason The reason for the changes in the media player status. See `MEDIA_PLAYER_REASON`.
*
- * When the state of the playback changes, the SDK triggers this callback to report the new playback state and the reason or error for the change.
- * @param state The new playback state after change. See {@link media::base::MEDIA_PLAYER_STATE MEDIA_PLAYER_STATE}.
- * @param reason The player's error code. See {@link media::base::MEDIA_PLAYER_REASON MEDIA_PLAYER_REASON}.
*/
virtual void onPlayerSourceStateChanged(media::base::MEDIA_PLAYER_STATE state,
media::base::MEDIA_PLAYER_REASON reason) = 0;
/**
- * @brief Reports current playback progress.
+ * @brief Reports the playback progress of the media file.
+ *
+ * @details
+ * When playing media files, the SDK triggers this callback every two seconds to report current
+ * playback progress.
+ *
+ * @param positionMs The playback position (ms) of media files.
+ * @param timestampMs The NTP timestamp (ms) of the current playback progress.
*
- * The callback occurs once every one second during the playback and reports the current playback progress.
- * @param positionMs Current playback progress (milisecond).
- * @param timestampMs Current NTP(Network Time Protocol) time (milisecond).
*/
virtual void onPositionChanged(int64_t positionMs, int64_t timestampMs) = 0;
/**
- * @brief Reports the playback event.
+ * @brief Reports the player events.
*
- * - After calling the `seek` method, the SDK triggers the callback to report the results of the seek operation.
- * - After calling the `selectAudioTrack` method, the SDK triggers the callback to report that the audio track changes.
+ * @details
+ * - After calling the `seek` method, the SDK triggers the callback to report the results of the
+ * seek operation.
+ *
+ * @param eventCode The player event. See `MEDIA_PLAYER_EVENT`.
+ * @param elapsedTime The time (ms) when the event occurs.
+ * @param message Information about the event.
*
- * @param eventCode The playback event. See {@link media::base::MEDIA_PLAYER_EVENT MEDIA_PLAYER_EVENT}.
- * @param elapsedTime The playback elapsed time.
- * @param message The playback message.
*/
virtual void onPlayerEvent(media::base::MEDIA_PLAYER_EVENT eventCode, int64_t elapsedTime, const char* message) = 0;
/**
- * @brief Occurs when the metadata is received.
+ * @brief Occurs when the media metadata is received.
+ *
+ * @details
+ * The callback occurs when the player receives the media metadata and reports the detailed
+ * information of the media metadata.
*
- * The callback occurs when the player receives the media metadata and reports the detailed information of the media metadata.
* @param data The detailed data of the media metadata.
* @param length The data length (bytes).
+ *
*/
virtual void onMetaData(const void* data, int length) = 0;
/**
- * @brief Triggered when play buffer updated, once every 1 second
+ * @brief Reports the playback duration that the buffered data can support.
+ *
+ * @details
+ * When playing online media resources, the SDK triggers this callback every two seconds to report
+ * the playback duration that the currently buffered data can support.
+ * - When the playback duration supported by the buffered data is less than the threshold (0 by
+ * default), the SDK returns `PLAYER_EVENT_BUFFER_LOW` (6).
+ * - When the playback duration supported by the buffered data is greater than the threshold (0 by
+ * default), the SDK returns `PLAYER_EVENT_BUFFER_RECOVER` (7).
+ *
+ * @param playCachedBuffer The playback duration (ms) that the buffered data can support.
*
- * @param int cached buffer during playing, in milliseconds
*/
virtual void onPlayBufferUpdated(int64_t playCachedBuffer) = 0;
/**
- * @brief Triggered when the player preloadSrc
+ * @brief Reports the events of preloaded media resources.
+ *
+ * @param src The URL of the media resource.
+ * @param event Events that occur when media resources are preloaded. See `PLAYER_PRELOAD_EVENT`.
*
- * @param event
*/
virtual void onPreloadEvent(const char* src, media::base::PLAYER_PRELOAD_EVENT event) = 0;
@@ -472,43 +497,65 @@ class IMediaPlayerSourceObserver {
virtual void onAgoraCDNTokenWillExpire() = 0;
/**
- * @brief Reports current playback source bitrate changed.
- * @brief Reports current playback source info changed.
+ * @brief Occurs when the video bitrate of the media resource changes.
+ *
+ * @param from Information about the video bitrate of the media resource being played. See
+ * `SrcInfo`.
+ * @param to Information about the changed video bitrate of the media resource being played. See
+ * `SrcInfo`.
*
- * @param from Streaming media information before the change.
- * @param to Streaming media information after the change.
*/
virtual void onPlayerSrcInfoChanged(const media::base::SrcInfo& from, const media::base::SrcInfo& to) = 0;
- /**
- * @brief Triggered when media player information updated.
+ /**
+ * @brief Occurs when information related to the media player changes.
+ *
+ * @details
+ * When the information about the media player changes, the SDK triggers this callback. You can use
+ * this callback for troubleshooting.
+ *
+ * @param info Information related to the media player. See `PlayerUpdatedInfo`.
*
- * @param info Include information of media player.
*/
virtual void onPlayerInfoUpdated(const media::base::PlayerUpdatedInfo& info) = 0;
- /**
- * @brief Triggered every 1 second, reports the statistics of the files being cached.
- *
- * @param stats Cached file statistics.
+ /**
+ * @brief Reports the statistics of the media file being cached.
+ *
+ * @details
+ * After you call the `openWithMediaSource` method and set `enableCache` as `true`, the SDK triggers
+ * this callback once per second to report the statistics of the media file being cached.
+ *
+ * @param stats The statistics of the media file being cached. See `CacheStatistics`.
+ *
*/
virtual void onPlayerCacheStats(const media::base::CacheStatistics& stats) {
(void)stats;
}
- /**
- * @brief Triggered every 1 second, reports the statistics of the media stream being played.
- *
- * @param stats The statistics of the media stream.
+ /**
+ * @brief The statistics of the media file being played.
+ *
+ * @details
+ * The SDK triggers this callback once per second to report the statistics of the media file being
+ * played.
+ *
+ * @param stats The statistics of the media file. See `PlayerPlaybackStats`.
+ *
*/
virtual void onPlayerPlaybackStats(const media::base::PlayerPlaybackStats& stats) {
(void)stats;
}
/**
- * @brief Triggered every 200 millisecond ,update player current volume range [0,255]
+ * @brief Reports the volume of the media player.
+ *
+ * @details
+ * The SDK triggers this callback every 200 milliseconds to report the current volume of the media
+ * player.
+ *
+ * @param volume The volume of the media player. The value ranges from 0 to 255.
*
- * @param volume volume of current player.
*/
virtual void onAudioVolumeIndication(int volume) = 0;
};
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaRecorder.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaRecorder.h
index 79a8db35e..b10ee2b37 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaRecorder.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaRecorder.h
@@ -17,70 +17,82 @@ class IMediaRecorder : public RefCountInterface {
public:
/**
- * Registers the IMediaRecorderObserver object.
+ * @brief Registers the `IMediaRecorderObserver` observer.
*
* @since v4.0.0
*
- * @note Call this method before the startRecording method.
+ * @details
+ * This method sets the callback for audio and video recording, so the app can be notified of
+ * recording status and information during the recording process.
+ * Before calling this method, make sure that:
+ * - The `IRtcEngine` object has been created and initialized.
+ * - The media recorder object has been created using `createMediaRecorder`.
*
- * @param callback The callbacks for recording audio and video streams. See \ref IMediaRecorderObserver.
+ * @param callback Callback for audio and video stream recording. See `IMediaRecorderObserver`.
*
* @return
- * - 0(ERR_OK): Success.
- * - < 0: Failure:
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int setMediaRecorderObserver(media::IMediaRecorderObserver* callback) = 0;
/**
- * Starts recording the local or remote audio and video.
+ * @brief Starts audio and video stream recording.
*
* @since v4.0.0
*
- * After successfully calling \ref IRtcEngine::createMediaRecorder "createMediaRecorder" to get the media recorder object
- * , you can call this method to enable the recording of the local audio and video.
- *
- * This method can record the following content:
- * - The audio captured by the local microphone and encoded in AAC format.
- * - The video captured by the local camera and encoded by the SDK.
- * - The audio received from remote users and encoded in AAC format.
- * - The video received from remote users.
- *
- * The SDK can generate a recording file only when it detects the recordable audio and video streams; when there are
- * no audio and video streams to be recorded or the audio and video streams are interrupted for more than five
- * seconds, the SDK stops recording and triggers the
- * \ref IMediaRecorderObserver::onRecorderStateChanged "onRecorderStateChanged" (RECORDER_STATE_ERROR, RECORDER_ERROR_NO_STREAM)
- * callback.
+ * @details
+ * This method starts recording audio and video streams. The Agora SDK supports recording both local
+ * and remote users' audio and video streams simultaneously.
+ * Before starting the recording, make sure that:
+ * - You have created the media recorder object using `createMediaRecorder`.
+ * - You have registered a recorder observer using `setMediaRecorderObserver` to listen for
+ * recording callbacks.
+ * - You have joined a channel.
+ * This method supports recording the following data:
+ * - Audio captured from the microphone in AAC encoding format.
+ * - Video captured from the camera in H.264 or H.265 encoding format.
+ * After recording starts, if the video resolution changes during recording, the SDK stops the
+ * recording. If the audio sample rate or number of channels changes, the SDK continues recording
+ * and generates a single MP4 file.
+ * A recording file is only successfully generated when a recordable audio or video stream is
+ * detected. If there is no recordable stream, or if the stream is interrupted for more than 5
+ * seconds during recording, the SDK stops the recording and triggers the
+ * `onRecorderStateChanged` (`RECORDER_STATE_ERROR`, `RECORDER_REASON_NO_STREAM`) callback.
*
- * @note Call this method after joining the channel.
+ * @note
+ * - If you want to record local audio and video streams, make sure the local user role is set to
+ * broadcaster before starting recording.
+ * - If you want to record remote audio and video streams, make sure you have subscribed to the
+ * remote user's streams before starting recording.
*
- * @param config The recording configurations. See MediaRecorderConfiguration.
+ * @param config Audio and video stream recording configuration. See `MediaRecorderConfiguration`.
*
* @return
- * - 0(ERR_OK): Success.
- * - < 0: Failure:
- * - `-1(ERR_FAILED)`: IRtcEngine does not support the request because the remote user did not subscribe to the target channel or the media streams published by the local user during remote recording.
- * - `-2(ERR_INVALID_ARGUMENT)`: The parameter is invalid. Ensure the following:
- * - The specified path of the recording file exists and is writable.
- * - The specified format of the recording file is supported.
- * - The maximum recording duration is correctly set.
- * - During remote recording, ensure the user whose media streams you want record did join the channel.
- * - `-4(ERR_NOT_SUPPORTED)`: IRtcEngine does not support the request due to one of the following reasons:
- * - The recording is ongoing.
- * - The recording stops because an error occurs.
- * - No \ref IMediaRecorderObserver object is registered.
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
+ * - -2: Invalid parameter. Please ensure that:
+ * - The specified recording file path is correct and writable.
+ * - The specified recording file format is correct.
+ * - The maximum recording duration is set correctly.
+ * - -4: `IRtcEngine` is in a state that does not support this operation. This may be because a
+ * recording is already in progress or has stopped due to an error.
+ * - -7: `IRtcEngine` is not initialized when this method is called. Please make sure the
+ * `IMediaRecorder` object has been created before calling this method.
*/
virtual int startRecording(const media::MediaRecorderConfiguration& config) = 0;
/**
- * Stops recording the audio and video.
+ * @brief Stops audio and video stream recording.
*
* @since v4.0.0
*
- * @note After calling \ref IMediaRecorder::startRecording "startRecording", if you want to stop the recording,
- * you must call `stopRecording`; otherwise, the generated recording files might not be playable.
- *
+ * @note After calling `startRecording`, you must call this method to stop the recording; otherwise,
+ * the generated recording file may not play properly.
*
* @return
- * - 0(ERR_OK): Success.
+ * - 0: Success.
* - < 0: Failure:
+ * - -7: `IRtcEngine` is not initialized when this method is called. Please make sure the
+ * `IMediaRecorder` object has been created before calling this method.
*/
virtual int stopRecording() = 0;
};
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMusicContentCenter.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMusicContentCenter.h
index ebb4d5237..3dc0726df 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMusicContentCenter.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMusicContentCenter.h
@@ -14,128 +14,159 @@ namespace agora {
namespace rtc {
/**
- * Modes for playing songs.
+ * @brief Playback mode of a music resource.
*/
typedef enum
{
/**
- * 0: The music player is in the origin mode, which means playing the original song.
+ * 0: Original vocals.
*/
kMusicPlayModeOriginal = 0,
/**
- * 1: The music player is in the accompany mode, which means playing the accompaniment only.
+ * 1: Accompaniment.
*/
kMusicPlayModeAccompany = 1,
/**
- * 2: The music player is in the lead sing mode, which means playing the lead vocals.
+ * 2: Vocal guide.
*/
kMusicPlayModeLeadSing = 2,
} MusicPlayMode;
+/**
+ * @brief Loading state of a music resource.
+ */
typedef enum
{
/**
- * 0: No error occurs and preload succeeds.
+ * 0: Music resource loading completed.
*/
kPreloadStateCompleted = 0,
/**
- * 1: A general error occurs.
+ * 1: Music resource loading failed.
*/
kPreloadStateFailed = 1,
/**
- * 2: The media file is preloading.
+ * 2: Music resource is currently loading.
*/
kPreloadStatePreloading = 2,
- /**
- * 3: The media file is removed.
+ /**
+ * 3: Cached music resource has been removed.
*/
kPreloadStateRemoved = 3,
} PreloadState;
+/**
+ * @brief Request status codes for the Music Content Center.
+ */
typedef enum
{
/**
- * 0: No error occurs and request succeeds.
+ * 0: Request succeeded.
*/
kMusicContentCenterReasonOk = 0,
/**
- * 1: A general error occurs.
+ * 1: General error with no specific cause.
*/
kMusicContentCenterReasonError = 1,
/**
- * 2: The gateway error. There are several possible reasons:
- * - Token is expired. Check if your token is expired.
- * - Token is invalid. Check the type of token you passed in.
- * - Network error. Check your network.
+ * 2: Gateway error. Possible reasons include:
+ * - The current token has expired. Please regenerate the token.
+ * - The token provided is invalid. Make sure you are using an RTM token.
+ * - Network error. Please check your connection.
*/
kMusicContentCenterReasonGateway = 2,
/**
- * 3: Permission and resource error. There are several possible reasons:
- * - Your appid may not have the mcc permission. Please contact technical support
- * - The resource may not exist. Please contact technical support
+ * 3: Permission error or music resource does not exist. Make sure your project has Music Content
+ * Center enabled. Please `contact technical support`.
*/
kMusicContentCenterReasonPermissionAndResource = 3,
/**
- * 4: Internal data parse error. Please contact technical support
+ * 4: Internal data parsing error. Please `contact technical support`.
*/
kMusicContentCenterReasonInternalDataParse = 4,
/**
- * 5: Music loading error. Please contact technical support
+ * 5: Error occurred while loading the music resource. Please `contact technical support`.
*/
kMusicContentCenterReasonMusicLoading = 5,
/**
- * 6: Music decryption error. Please contact technical support
+ * 6: Error occurred while decrypting the music resource. Please `contact technical support`.
*/
kMusicContentCenterReasonMusicDecryption = 6,
/**
- * 7: Http internal error. Please retry later.
+ * 7: Internal HTTP error. Please try again later.
*/
kMusicContentCenterReasonHttpInternalError = 7,
} MusicContentCenterStateReason;
+/**
+ * @brief Detailed information about a music chart.
+ */
typedef struct
{
/**
- * Name of the music chart
+ * Name of the chart.
*/
const char* chartName;
/**
- * Id of the music chart, which is used to get music list
+ * ID of the music chart.
*/
int32_t id;
} MusicChartInfo;
+/**
+ * @brief Cache status of a music resource.
+ */
enum MUSIC_CACHE_STATUS_TYPE {
/**
- * 0: Music is already cached.
+ * 0: The music resource is cached.
*/
MUSIC_CACHE_STATUS_TYPE_CACHED = 0,
/**
- * 1: Music is being cached.
+ * 1: The music resource is being cached.
*/
MUSIC_CACHE_STATUS_TYPE_CACHING = 1
};
+/**
+ * @brief Information about a cached music resource.
+ */
struct MusicCacheInfo {
/**
- * The songCode of music.
+ * The ID of the music resource, used to identify the resource.
*/
int64_t songCode;
/**
- * The cache status of the music.
+ * Cache status of the music resource. See `MUSIC_CACHE_STATUS_TYPE`.
*/
MUSIC_CACHE_STATUS_TYPE status;
MusicCacheInfo():songCode(0), status(MUSIC_CACHE_STATUS_TYPE_CACHED) {}
};
+/**
+ * @brief Detailed information about music charts.
+ */
class MusicChartCollection : public RefCountInterface {
public:
+ /**
+ * @brief Gets the number of music charts in this request.
+ *
+ * @return
+ * The number of music charts in this request.
+ */
virtual int getCount() = 0;
+ /**
+ * @brief Gets the detailed information of a music chart.
+ *
+ * @param index Index of the `MusicChartInfo` array.
+ *
+ * @return
+ * `MusicChartInfo`, containing the detailed information of the music chart.
+ */
virtual MusicChartInfo* get(int index) = 0;
protected:
virtual ~MusicChartCollection() = default;
@@ -153,77 +184,81 @@ struct MvProperty
const char* bandwidth;
};
+/**
+ * @brief The climax parts of the music.
+ */
struct ClimaxSegment
{
/**
- * The start time of climax segment
+ * The time (ms) when the climax part begins.
*/
int32_t startTimeMs;
/**
- * The end time of climax segment
+ * The time (ms) when the climax part ends.
*/
int32_t endTimeMs;
};
+/**
+ * @brief Detailed information of a music resource.
+ */
struct Music
{
/**
- * The songCode of music
+ * The ID of the music resource, used to identify a music item.
*/
int64_t songCode;
/**
- * The name of music
+ * Name of the music resource.
*/
const char* name;
/**
- * The singer of music
+ * Name of the singer.
*/
const char* singer;
/**
- * The poster url of music
+ * Download URL of the music poster.
*/
const char* poster;
/**
- * The release time of music
+ * Release time of the music resource.
*/
const char* releaseTime;
/**
- * The duration (in seconds) of music
+ * Total duration of the music resource (in seconds).
*/
int32_t durationS;
/**
- * The type of music
- * 1, mp3 with instrumental accompaniment and original
- * 2, mp3 only with instrumental accompaniment
- * 3, mp3 only with original
- * 4, mp4 with instrumental accompaniment and original
- * 5, mv only
- * 6, new type mp4 with instrumental accompaniment and original
- * detail at document of music media center
+ * Type of the music resource:
+ * - 1: Single-track with accompaniment on the left channel and original vocals on the right
+ * channel.
+ * - 2: Single-track with accompaniment only.
+ * - 3: Single-track with original vocals only.
+ * - 4: Multi-track audio.
*/
int32_t type;
/**
- * The pitch type of music.
- * 1, xml lyric has pitch
- * 2, lyric has no pitch
+ * Whether the song supports pitch scoring:
+ * - 1: The song supports pitch scoring.
+ * - 2: The song does not support pitch scoring.
*/
int32_t pitchType;
/**
- * The number of lyrics available for the music
+ * Number of lyrics available for the song.
*/
int32_t lyricCount;
/**
- * The lyric list of music
- * 0, xml
- * 1, lrc
+ * Supported lyric formats:
+ * - 0: XML format.
+ * - 1: LRC format.
*/
int32_t* lyricList;
/**
- * The number of climax segments of the music
+ * Number of climax segments.
*/
int32_t climaxSegmentCount;
/**
- * The climax segment list of music
+ * List of climax segments. See `ClimaxSegment`.
*/
ClimaxSegment* climaxSegmentList;
/**
@@ -237,12 +272,47 @@ struct Music
MvProperty* mvPropertyList;
};
+/**
+ * @brief Detailed information about the music resource list.
+ */
class MusicCollection : public RefCountInterface {
public:
+ /**
+ * @brief Gets the number of music items in this request.
+ *
+ * @return
+ * The number of music items in this request.
+ */
virtual int getCount() = 0;
+ /**
+ * @brief Gets the total number of music resources in the list.
+ *
+ * @return
+ * The total number of music resources in the list.
+ */
virtual int getTotal() = 0;
+ /**
+ * @brief Gets the current page number of the music resource list.
+ *
+ * @return
+ * The current page number.
+ */
virtual int getPage() = 0;
+ /**
+ * @brief Gets the actual number of music resources returned by the SDK.
+ *
+ * @return
+ * The actual number of music resources returned by the SDK.
+ */
virtual int getPageSize() = 0;
+ /**
+ * @brief Gets the detailed information of a music resource in the current page list.
+ *
+ * @param index Index of the `Music` array.
+ *
+ * @return
+ * A `Music` instance.
+ */
virtual Music* getMusic(int32_t index) = 0;
protected:
virtual ~MusicCollection() = default;
@@ -252,73 +322,118 @@ class MusicCollection : public RefCountInterface {
class IMusicContentCenterEventHandler {
public:
/**
- * The music chart result callback; occurs when getMusicCharts method is called.
- *
- * @param requestId The request id is same as that returned by getMusicCharts.
- * @param result The result of music chart collection
- * @param reason The status of the request. See MusicContentCenterStateReason
+ * @brief Callback for retrieving music charts.
+ *
+ * @details
+ * After you call the `getMusicCharts` method to retrieve all music charts, the SDK triggers this
+ * callback.
+ *
+ * @param requestId Request ID. A unique identifier for this request.
+ * @param reason The request status code from the Music Content Center. See
+ * `MusicContentCenterStateReason`.
+ * @param result The list of currently playable music charts. See `MusicChartCollection`.
+ *
*/
virtual void onMusicChartsResult(const char* requestId, agora_refptr result, MusicContentCenterStateReason reason) = 0;
/**
- * Music collection, occurs when getMusicCollectionByMusicChartId or searchMusic method is called.
- *
- * @param requestId The request id is same as that returned by getMusicCollectionByMusicChartId or searchMusic
- * @param result The result of music collection
- * @param reason The status of the request. See MusicContentCenterStateReason
+ * @brief Callback for retrieving the music resource list.
+ *
+ * @details
+ * When you call the `getMusicCollectionByMusicChartId` method to get the music resource list of a
+ * specific chart or call `searchMusic` to search for music resources, the SDK
+ * triggers this callback to report the detailed information of the music resource list.
+ *
+ * @param requestId Request ID. A unique identifier for this request.
+ * @param reason The request status code from the Music Content Center. See
+ * `MusicContentCenterStateReason`.
+ * @param result Detailed information of the music resource list. See `MusicCollection`.
+ *
*/
virtual void onMusicCollectionResult(const char* requestId, agora_refptr result, MusicContentCenterStateReason reason) = 0;
/**
- * Lyric url callback of getLyric, occurs when getLyric is called
- *
- * @param requestId The request id is same as that returned by getLyric
- * @param songCode Song code
- * @param lyricUrl The lyric url of this music
- * @param reason The status of the request. See MusicContentCenterStateReason
+ * @brief Callback for the lyrics download URL.
+ *
+ * @details
+ * After you call `getLyric` to get the lyrics download URL for a specific song, the SDK triggers
+ * this callback.
+ *
+ * @param requestId Request ID. A unique identifier for this request.
+ * @param songCode The ID of the music resource, used to identify the music.
+ * @param lyricUrl The download URL of the lyrics.
+ * @param reason The request status code from the Music Content Center. See
+ * `MusicContentCenterStateReason`.
+ *
*/
virtual void onLyricResult(const char* requestId, int64_t songCode, const char* lyricUrl, MusicContentCenterStateReason reason) = 0;
/**
- * Simple info callback of getSongSimpleInfo, occurs when getSongSimpleInfo is called
- *
- * @param requestId The request id is same as that returned by getSongSimpleInfo.
- * @param songCode Song code
- * @param simpleInfo The metadata of the music.
- * @param reason The status of the request. See MusicContentCenterStateReason
+ * @brief Callback for detailed information of a music resource.
+ *
+ * @details
+ * After you call `getSongSimpleInfo` to get detailed information of a music resource, the SDK
+ * triggers this callback.
+ *
+ * @param requestId Request ID. A unique identifier for this request.
+ * @param songCode The ID of the music resource, used to identify the music.
+ * @param simpleInfo Information about the music resource, including the following:
+ * - Start and end time of the chorus segment (ms)
+ * - Download URL of the chorus lyrics
+ * - Duration of the chorus segment (ms)
+ * - Song name
+ * - Artist name
+ * @param reason Request status code from the Music Content Center. See
+ * `MusicContentCenterStateReason`.
+ *
*/
virtual void onSongSimpleInfoResult(const char* requestId, int64_t songCode, const char* simpleInfo, MusicContentCenterStateReason reason) = 0;
/**
- * Preload process callback, occurs when preload is called
- *
- * @param requestId The request id is same as that returned by preload.
- * @param songCode Song code
- * @param percent Preload progress (0 ~ 100)
- * @param lyricUrl The lyric url of this music
- * @param state Preload state; see PreloadState.
- * @param reason The status of the request. See MusicContentCenterStateReason
+ * @brief Reports events related to preloading music resources.
+ *
+ * @details
+ * After you call `preload(int64_t songCode, const char* jsonOption = nullptr)` or
+ * `preload(agora::util::AString& requestId, int64_t songCode)` to preload a music resource, the SDK
+ * triggers this callback.
+ *
+ * @param requestId Request ID. A unique identifier for this request.
+ * @param songCode The ID of the music resource, used to identify a music item.
+ * @param percent Current loading progress of the music resource, ranging from [0, 100].
+ * @param lyricUrl Download URL of the lyrics.
+ * @param state Current loading state of the music resource. See `PreloadState`.
+ * @param reason Request status code from the Music Content Center. See
+ * `MusicContentCenterStateReason`.
+ *
*/
virtual void onPreLoadEvent(const char* requestId, int64_t songCode, int percent, const char* lyricUrl, PreloadState state, MusicContentCenterStateReason reason) = 0;
virtual ~IMusicContentCenterEventHandler() {};
};
+/**
+ * @brief Configuration for the Music Content Center.
+ */
struct MusicContentCenterConfiguration {
/**
- * The app ID of the project that has enabled the music content center
+ * App ID of the project with Music Content Center enabled.
*/
const char *appId;
/**
- * Music content center need token to connect with server
+ * RTM Token used for authentication when using the Music Content Center.
+ * @note
+ * - Agora recommends using AccessToken2 for authentication. See `Deploy Token Server`. When
+ * generating the token, pass a `String` type `mccUid` to `uid`.
+ * - When your token is about to expire, you can call `renewToken` to pass in a new token.
*/
const char *token;
/**
- * The user ID when using music content center. It can be different from that of the rtc product.
+ * User ID for using the Music Content Center. This ID can be the same as the `uid` used when
+ * joining an RTC channel, but it cannot be 0.
*/
int64_t mccUid;
/**
- * The max number which the music content center caches cannot exceed 50.
+ * Number of music resources that can be cached. The maximum is 50.
*/
int32_t maxCacheSize;
/**
@@ -326,7 +441,7 @@ struct MusicContentCenterConfiguration {
*/
const char* mccDomain;
/**
- * Event handler to get callback result.
+ * Event handler to receive callbacks. See `IMusicContentCenterEventHandler`.
*/
IMusicContentCenterEventHandler* eventHandler;
MusicContentCenterConfiguration():appId(nullptr),token(nullptr),eventHandler(nullptr),mccUid(0),maxCacheSize(10), mccDomain(nullptr){}
@@ -342,26 +457,53 @@ class IMusicPlayer : public IMediaPlayer {
IMusicPlayer() {};
using IMediaPlayer::open;
/**
- * Open a media file with specified parameters.
- *
- * @param songCode The identifier of the media file that you want to play.
- * @param startPos The playback position (ms) of the music file.
- * @return
- * - 0: Success.
- * - < 0: Failure.
- */
+ * @brief Opens a music resource by its song code.
+ *
+ * @details
+ * Before calling this method, make sure the music resource to be played has been loaded. You can
+ * call `isPreloaded` to check whether the resource has been preloaded, or listen for the
+ * `onPreLoadEvent` callback.
+ * After calling this method, the `onPlayerSourceStateChanged` callback is triggered. Once you
+ * receive a playback state of `PLAYER_STATE_OPEN_COMPLETED`, you can call the `play` method to play
+ * the media file.
+ *
+ * @note If the music resource you want to open is protected by digital rights management
+ * (DRM), you must use this method to open it. For non-DRM-protected resources, you can choose to
+ * open them using this method or the `open` method under the `IMediaPlayer` class.
+ *
+ * @param songCode The song code of the music resource, used to identify the music.
+ * @param startPos The start playback position in milliseconds. Default is 0.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
+ */
virtual int open(int64_t songCode, int64_t startPos = 0) = 0;
/**
- * Set the mode for playing songs.
- * You can call this method to switch from original to accompaniment or lead vocals.
- * If you do not call this method to set the mode, the SDK plays the accompaniment by default.
- *
- * @param model The playing mode.
- * @return
- * - 0: Success.
- * - < 0: Failure.
- */
+ * @brief Sets the playback mode of a music resource.
+ *
+ * @details
+ * You can call this method to enable original vocals, accompaniment, or vocal guide. If you do not
+ * call this method, accompaniment is played by default; if the music resource has no accompaniment,
+ * the original vocals are played.
+ * Applicable scenarios: In entertainment scenarios such as online karaoke or talent shows, if you
+ * need to play copyrighted music provided by Agora's content center, you can call this method to
+ * set the playback mode.
+ * Call timing: This method must be called after `createMusicPlayer`.
+ *
+ * @note
+ * You can get detailed information about the music resource from the `onMusicCollectionResult`
+ * callback, and determine the supported playback types of the copyrighted music from the `result`
+ * parameter.
+ *
+ * @param mode Playback mode. See `MusicPlayMode`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
+ * - -2: Invalid parameter. Please reset the parameter.
+ */
virtual int setPlayMode(MusicPlayMode mode) = 0;
};
@@ -373,206 +515,340 @@ class IMusicContentCenter
IMusicContentCenter() {};
/**
- * Initializes the IMusicContentCenter
- * Set token of music content center and other params
+ * @brief Initializes the `IMusicContentCenter`.
+ *
+ * @details
+ * You must call this method to initialize `IMusicContentCenter` before using any other methods
+ * under the `IMusicContentCenter` class.
+ *
+ * @param configuration Configuration for `IMusicContentCenter`. See
+ * `MusicContentCenterConfiguration`.
*
- * @param configuration
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int initialize(const MusicContentCenterConfiguration & configuration) = 0;
/**
- * Renew token of music content center
- *
+ * @brief Renews the token.
+ *
+ * @details
+ * When the token used for authentication is about to expire or has already expired, you can call
+ * this method to pass in a newly generated token.
+ *
* @param token The new token.
- * @return
+ *
+ * @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int renewToken(const char* token) = 0;
/**
- * release music content center resource.
- *
+ * @brief Releases all resources used by the Music Content Center.
+ *
+ * @details
+ * This method must be called before the `release` method of `IRtcEngine`.
+ *
*/
virtual void release() = 0;
/**
- * register event handler.
- */
+ * @brief Registers the Music Content Center event handler.
+ *
+ * @param eventHandler The event handler to register. See `IMusicContentCenterEventHandler`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
+ */
virtual int registerEventHandler(IMusicContentCenterEventHandler* eventHandler) = 0;
/**
- * unregister event handler.
- */
+ * @brief Unregisters the Music Content Center event callback.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
+ */
virtual int unregisterEventHandler() = 0;
/**
- * Creates a music player source object and return its pointer.
+ * @brief Creates a music player.
+ *
+ * @details
+ * If you need to play music resources from the Music Content Center, you must first call this
+ * method to create a music player.
+ *
* @return
- * - The pointer to \ref rtc::IMusicPlayer "IMusicPlayer",
- * if the method call succeeds.
- * - The empty pointer NULL, if the method call fails.
+ * - If the method call succeeds: Returns an `IMusicPlayer` object.
+ * - If the method call fails: Returns a null pointer.
*/
virtual agora_refptr createMusicPlayer() = 0;
/**
- * Destroy a music player source object and return result.
- * @param music_player The pointer to \ref rtc::IMusicPlayer "IMusicPlayer".
+ * @brief Destroys the music player object.
+ *
+ * @details
+ * When you no longer need to use the music player, you can call this method to destroy the music
+ * player object. If you need to use the music player again after destruction, call
+ * `createMusicPlayer` to recreate a music player object.
+ * Call timing: This method can be called before or after joining a channel, but make sure to call
+ * it before the `release` method of `IRtcEngine`.
+ *
+ * @param music_player Pointer to the `IMusicPlayer` object.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int destroyMusicPlayer(agora_refptr music_player) = 0;
/**
- * Get music chart collection of music.
- * If the method call succeeds, get result from the
- * \ref agora::rtc::IMusicContentCenterEventHandler::onMusicChartsResult
- * "onMusicChartsResult" callback
- * @param requestId The request id you will get of this query, format is uuid.
+ * @brief Gets all music charts.
+ *
+ * @details
+ * After you call this method, the SDK triggers the `onMusicChartsResult` callback to report
+ * detailed information about the music charts.
+ *
+ * @param requestId Request ID. A unique identifier for this request.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int getMusicCharts(agora::util::AString& requestId) = 0;
/**
- * Get music collection of the music chart by musicChartId and page info.
- * If the method call success, get result from the
- * \ref agora::rtc::IMusicContentCenterEventHandler::onMusicCollectionResult
- * "onMusicCollectionResult" callback
- * @param requestId The request id you will get of this query, format is uuid.
- * @param musicChartId The music chart id obtained from getMusicCharts.
- * @param page The page of the music chart, starting from 1
- * @param pageSize The page size, max is 50.
- * @param jsonOption The ext param, default is null.
+ * @brief Gets the list of music resources from a specified chart by its music chart ID.
+ *
+ * @details
+ * After successfully calling this method, the SDK triggers the `onMusicCollectionResult` callback
+ * to report detailed information about the music resources in the chart.
+ *
+ * @param requestId Request ID. A unique identifier for this request.
+ * @param musicChartId The ID of the music chart. You can obtain it from the `onMusicChartsResult`
+ * callback. You can also use RESTful APIs to `get the full music library list` or
+ * `get incremental music list`.
+ * @param page Current page number, starting from 1 by default.
+ * @param pageSize Total number of items per page in the music resource list. The maximum value is
+ * 50.
+ * @param jsonOption Extended JSON field, default is NULL. You can use this field to filter the
+ * music resources you need. Currently supports filtering by scoreable music and chorus segments:
+ * | Key | Value | Example |
+ * | ------------- | ---------------------------------------------------------------------- | ------------------------ |
+ * | pitchType | Whether scoring is supported: - 1: Scoreable music. - 2: Non-scoreable music. | {"pitchType":1} |
+ * | needHighPart | Whether chorus segment is needed: - `true`: Chorus segment needed. - `false`: Not needed. | {"needHighPart":true} |
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int getMusicCollectionByMusicChartId(agora::util::AString& requestId, int32_t musicChartId, int32_t page, int32_t pageSize, const char* jsonOption = nullptr) = 0;
/**
- * Search music by keyword and page info.
- * If the method call success, get result from the
- * \ref agora::rtc::IMusicContentCenterEventHandler::onMusicCollectionResult
- * "onMusicCollectionResult" callback
- * @param requestId The request id you will get of this query, format is uuid.
- * @param keyWord The key word to search.
- * @param page The page of music search result , start from 1.
- * @param pageSize The page size, max is 50.
- * @param jsonOption The ext param, default is null.
+ * @brief Searches for music resources.
+ *
+ * @details
+ * After successfully calling this method, the SDK triggers the `onMusicCollectionResult` callback
+ * to report the list of retrieved music resources.
+ *
+ * @param keyWord Search keyword. Supports searching by song name or artist.
+ * @param page The target page number of the music resource list to retrieve.
+ * @param pageSize Maximum number of music resources displayed per page. The maximum value is 50.
+ * @param jsonOption Extended JSON field. Default is NULL. You can use this field to filter the
+ * music resources you need. Currently supports filtering by scoreable music and chorus segments:
+ * | Key | Value | Example |
+ * | ------------- | ---------------------------------------------------------------------- | ------------------------ |
+ * | pitchType | Whether scoring is supported: - 1: Scoreable music. - 2: Non-scoreable music. | {"pitchType":1} |
+ * | needHighPart | Whether chorus segment is needed: - `true`: Chorus segment needed. - `false`: Not needed. | {"needHighPart":true} |
+ * @param requestId Request ID. A unique identifier for this request.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int searchMusic(agora::util::AString& requestId, const char* keyWord, int32_t page, int32_t pageSize, const char* jsonOption = nullptr) = 0;
/**
- * Preload a media file with specified parameters.
+ * @brief Preloads a music resource.
*
* @deprecated This method is deprecated. Use preload(int64_t songCode) instead.
- *
- * @param songCode The identifier of the media file that you want to play.
- * @param jsonOption The ext param, default is null.
+ *
+ * @details
+ * You can call this method to preload the music resource you want to play. After successfully
+ * calling this method, the SDK triggers the `onPreLoadEvent` callback to report the preload event.
+ * Before calling this method to preload a music resource, you need to call
+ * `getMusicCollectionByMusicChartId` or `searchMusic`
+ * to get the music resource you want to play, and obtain the song code (`songCode`) from the
+ * `onMusicCollectionResult` callback.
+ *
+ * @note To destroy the `IRtcEngine` object, make sure to call the `release` method only after
+ * receiving the `onPreLoadEvent` callback.
+ *
+ * @param songCode The song code of the music resource, used to identify the music.
+ * @param jsonOption Extended JSON field.
+ * Agora charges based on the application scenario you pass in the `sceneType` field. Different
+ * scenarios have different rates. Refer to the `Billing Description` for details.
+ * - 1: Live scene: Karaoke and background music playback.
+ * - 2: Live scene: Background music playback.
+ * - 3: (Default) Voice chat scene: Karaoke.
+ * - 4: Voice chat scene: Background music playback.
+ * - 5: VR scene: Karaoke and background music playback.
+ * If you need to switch to a different scenario, call this method again and pass
+ * the new `sceneType` value in this field.
+ * Example: `{"sceneType":1}`
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int preload(int64_t songCode, const char* jsonOption) __deprecated = 0;
/**
- * Preload a media file with specified parameters.
- *
- * @param requestId The request id you will get of this query, format is uuid.
- * @param songCode The identifier of the media file that you want to play.
+ * @brief Preloads a music resource.
+ *
+ * @details
+ * You can call this method to preload the music resource you want to play. After successfully
+ * calling this method, the SDK triggers the `onPreLoadEvent` callback to report the preload event.
+ * Before calling this method to preload a music resource, you need to call
+ * `getMusicCollectionByMusicChartId` or `searchMusic`
+ * to get the music resource you want to play, and obtain the song code (`songCode`) from the
+ * `onMusicCollectionResult` callback.
+ *
+ * @note To destroy the `IRtcEngine` object, make sure to call the `release` method only after
+ * receiving the `onPreLoadEvent` callback.
+ *
+ * @param songCode The song code of the music resource, used to identify the music.
+ * @param requestId Output parameter. Request ID. A unique identifier for this request.
+ *
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int preload(agora::util::AString& requestId, int64_t songCode) = 0;
/**
- * Remove a media file cache
+ * @brief Deletes a cached music resource.
+ *
+ * @details
+ * You can call this method to delete a specific cached music resource. To delete multiple
+ * resources, call this method multiple times.
+ *
+ * @note This method does not delete cached music resources that are currently being played.
+ *
+ * @param songCode The ID of the music resource to be deleted.
*
- * @param songCode The identifier of the media file that you want to play.
* @return
- * - 0: Success; the cached media file is removed.
- * - < 0: Failure.
+ * - 0: Success. The music resource has been deleted.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int removeCache(int64_t songCode) = 0;
/**
- * Get cached media files.
- * Before calling this API, you should allocate a memory buffer that stores the cached media file information, and pass the pointer of the buffer as the input parameter cacheInfo, and set the size of the memory buffer to cacheInfoSize.
- * The sample code below illustrates how to request the cached media file information:
+ * @brief Gets information about cached music resources.
*
- * cacheInfoSize = 10 // Allocate a memory buffer of 10 MusicCacheInfo size
- * agora::rtc::MusicCacheInfo *infos = new agora::rtc::MusicCacheInfo[cacheInfoSize];
- * int ret = self.imcc->getCaches(infos, cacheInfoSize);
- * if (ret < 0) { // error occurred!
- * return;
- * }
- * std::cout << "the cache size:" << cacheInfoSize << std::endl; // The cache size: 5
+ * @details
+ * Before calling this method, you need to pre-allocate a certain amount of memory to store
+ * information about cached music resources. If you want to set the number of music resources that
+ * can be cached, you can configure it through the `configuration` parameter in `initialize`.
+ * When you no longer need the cached music resources, you should release the memory in time to
+ * prevent memory leaks.
*
+ * @param cacheInfo Output parameter. A pointer to the memory buffer used to store cached music
+ * resource information.
+ * @param cacheInfoSize Input and output parameter.
+ * - Input: The length of the `cacheInfo` array, i.e., the number of `MusicCacheInfo` structures you
+ * allocated.
+ * - Output: The number of `MusicCacheInfo` structures returned after the method execution.
*
- * @param cacheInfo An output parameter; A pointer to the memory buffer that stores the cached media file information. The memory buffer pointed to by cacheInfo should be allocated by yourself before calling this API.
- * @param cacheInfoSize
- * - Input: The number of MusicCacheInfo's size that you get from the memory.
- * - Output: The actual number of MusicCacheInfo struct that is returned.
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int getCaches(MusicCacheInfo *cacheInfo, int32_t* cacheInfoSize) = 0;
/**
- * Check if the media file is preloaded
+ * @brief Checks whether a music resource has been preloaded.
+ *
+ * @details
+ * This method is synchronous. To preload a new music resource, call `preload(agora::util::AString&
+ * requestId, int64_t songCode)`.
+ *
+ * @param songCode The ID of the music resource, used to identify a music item.
*
- * @param songCode The identifier of the media file that you want to play.
* @return
- * - 0: Success, file is preloaded.
- * - < 0: Failure.
+ * - 0: Success. The music resource has been preloaded.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int isPreloaded(int64_t songCode) = 0;
/**
- * Get lyric of the music.
+ * @brief Gets the download URL of the lyrics for a music resource.
+ *
+ * @details
+ * After successfully calling this method, the SDK triggers the `onLyricResult` callback to report
+ * the lyrics download URL.
+ *
+ * @param songCode The ID of the music resource, used to identify the music.
+ * @param lyricType Type of lyrics:
+ * - 0: XML format.
+ * - 1: LRC format.
+ * @param requestId Request ID. A unique identifier for this request.
*
- * @param requestId The request id you will get of this query, format is uuid.
- * @param songCode The identifier of the media file that you want to play.
- * @param lyricType The type of the lyric file. 0:xml or 1:lrc.
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int getLyric(agora::util::AString& requestId, int64_t songCode, int32_t lyricType = 0) = 0;
/**
- * Gets the metadata of a specific music. Once this method is called, the SDK triggers the onSongSimpleInfoResult callback to report the metadata of the music.
+ * @brief Gets detailed information of a specific music resource.
+ *
+ * @details
+ * Before calling this method, you need to obtain the song code of the corresponding music resource.
+ * You can get it by calling `getMusicCollectionByMusicChartId` or `searchMusic`,
+ * and retrieve the song code from the `onMusicCollectionResult` callback triggered by those
+ * methods.
+ * After you call this method, the SDK triggers the `onSongSimpleInfoResult` callback to report the
+ * detailed information of the music resource.
+ *
+ * @param songCode The song code of the music resource, used to identify the music.
+ * @param requestId Request ID. A unique identifier for this request.
*
- * @param requestId The request id you will get of this query, format is uuid.
- * @param songCode The identifier of the media file.
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int getSongSimpleInfo(agora::util::AString& requestId, int64_t songCode) = 0;
/**
- * Get internal songCodeKey from songCode and jsonOption
+ * @brief Creates an internal song code for the chorus segment of a music resource.
+ *
+ * @details
+ * Applicable scenarios: Before playing the chorus segment of a music resource, you need to call
+ * this method to create an internal song code for the chorus segment using the `jsonOption`
+ * parameter and the music resource's `songCode`.
+ * This internal song code serves as the unique identifier for the resource. Once you obtain this
+ * code, use it as the `songCode` parameter when calling methods to open, preload, or remove the
+ * resource.
+ *
+ * @param songCode The song code of the music resource, used to identify the resource. You can
+ * obtain it by calling `getMusicCollectionByMusicChartId` or `searchMusic`,
+ * and retrieve the song code from the `onMusicCollectionResult` callback triggered by those
+ * methods.
+ * @param jsonOption Extended JSON field, default is NULL. Currently supports the following values:
+ * | Key | Value | Example |
+ * | ---------- | -------------------------------- | ----------------------------- |
+ * | sceneType | Scene type: - 1: Live scene: Karaoke and background music playback. - 2: Live scene: Background music playback. - 3: (Default) Voice chat scene: Karaoke. - 4: Voice chat scene: Background music playback. - 5: VR scene: Karaoke and background music playback. Note: Agora charges based on the scene type you pass in `sceneType`. Different scenes have different rates. See `Billing Description` for details. To switch scenes, you need to call this method again with a new `sceneType`. | {"sceneType":1} |
+ * | highPart | Index of the chorus segment. You can get the index from the `onMusicCollectionResult` callback and pass it here. The index starts from 0. | {"format": {"highpart": 0}} |
+ * @param internalSongCode Output parameter, the internal song code of the music resource.
*
- * @param songCode The identifier of the media file.
- * @param jsonOption An extention parameter. The default value is null. it’s a json-format string and the `key` and `value` can be customized according to your scenarios.
- * @param internalSongCode The identifier of internal
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
-
virtual int getInternalSongCode(int64_t songCode, const char* jsonOption, int64_t& internalSongCode) = 0;
};
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraParameter.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraParameter.h
index f50afe9b5..a463de955 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraParameter.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraParameter.h
@@ -144,6 +144,10 @@ typedef CopyableAutoPtr AString;
namespace base {
+/**
+ * @brief The interface class of Agora RTC SDK, which provides JSON configuration information of the
+ * SDK.
+ */
class IAgoraParameter : public RefCountInterface {
public:
/**
@@ -291,10 +295,17 @@ class IAgoraParameter : public RefCountInterface {
virtual int getArray(const char* key, const char* args, agora::util::AString& value) = 0;
/**
- * set parameters of the sdk or engine
- * @param [in] parameters
- * the parameters
- * @return return 0 if success or an error code
+ * @brief Provides the technical preview functionalities or special customizations by configuring
+ * the SDK with JSON options.
+ *
+ * @details
+ * Contact `technical support` to get the JSON configuration method.
+ *
+ * @param parameters Pointer to the set parameters in a JSON string.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setParameters(const char* parameters) = 0;
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRhythmPlayer.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRhythmPlayer.h
index e2e00ac70..17f673388 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRhythmPlayer.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRhythmPlayer.h
@@ -19,52 +19,70 @@ class ILocalAudioTrack;
class IRtcEngineEventHandler;
/**
- The states of the rhythm player.
+ * @brief Virtual metronome state.
*/
enum RHYTHM_PLAYER_STATE_TYPE {
- /** 810: The rhythm player is idle. */
+ /**
+ * 810: The virtual metronome is not enabled or disabled already.
+ */
RHYTHM_PLAYER_STATE_IDLE = 810,
- /** 811: The rhythm player is opening files. */
+ /**
+ * 811: Opening the beat files.
+ */
RHYTHM_PLAYER_STATE_OPENING,
- /** 812: Files opened successfully, the rhythm player starts decoding files. */
+ /**
+ * 812: Decoding the beat files.
+ */
RHYTHM_PLAYER_STATE_DECODING,
- /** 813: Files decoded successfully, the rhythm player starts mixing the two files and playing back them locally. */
+ /**
+ * 813: The beat files are playing.
+ */
RHYTHM_PLAYER_STATE_PLAYING,
- /** 814: The rhythm player is starting to fail, and you need to check the error code for detailed failure reasons. */
+ /**
+ * 814: Failed to start virtual metronome. You can use the reported `errorCode` to troubleshoot the
+ * cause of the error, or you can try to start the virtual metronome again.
+ */
RHYTHM_PLAYER_STATE_FAILED,
};
/**
- The reason codes of the rhythm player.
+ * @brief Virtual Metronome error message.
*/
enum RHYTHM_PLAYER_REASON {
- /** 0: The rhythm player works well. */
+ /**
+ * 0: The beat files are played normally without errors.
+ */
RHYTHM_PLAYER_REASON_OK = 0,
- /** 1: The rhythm player occurs a internal error. */
+ /**
+ * 1: A general error; no specific reason.
+ */
RHYTHM_PLAYER_REASON_FAILED = 1,
- /** 801: The rhythm player can not open the file. */
+ /**
+ * 801: There is an error when opening the beat files.
+ */
RHYTHM_PLAYER_REASON_CAN_NOT_OPEN = 801,
- /** 802: The rhythm player can not play the file. */
+ /**
+ * 802: There is an error when playing the beat files.
+ */
RHYTHM_PLAYER_REASON_CAN_NOT_PLAY,
- /** 803: The file duration over the limit. The file duration limit is 1.2 seconds */
+ /**
+ * 803: The duration of the beat file exceeds the limit. The maximum duration is 1.2 seconds.
+ */
RHYTHM_PLAYER_REASON_FILE_OVER_DURATION_LIMIT,
};
/**
- * The configuration of rhythm player,
- * which is set in startRhythmPlayer or configRhythmPlayer.
+ * @brief The metronome configuration.
*/
struct AgoraRhythmPlayerConfig {
/**
- * The number of beats per measure. The range is 1 to 9.
- * The default value is 4,
- * which means that each measure contains one downbeat and three upbeats.
+ * The number of beats per measure, which ranges from 1 to 9. The default value is 4, which means
+ * that each measure contains one downbeat and three upbeats.
*/
int beatsPerMeasure;
- /*
- * The range is 60 to 360.
- * The default value is 60,
- * which means that the rhythm player plays 60 beats in one minute.
+ /**
+ * The beat speed (beats/minute), which ranges from 60 to 360. The default value is 60, which means
+ * that the metronome plays 60 beats in one minute.
*/
int beatsPerMinute;
diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngine.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngine.h
index a591e39e4..09b1ed064 100644
--- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngine.h
+++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngine.h
@@ -35,7 +35,7 @@ static void ReplaceBy(Optional* s, const Optional& o) {
//class IAudioDeviceManager;
/**
- * The media device types.
+ * @brief Media device types.
*/
enum MEDIA_DEVICE_TYPE {
/**
@@ -43,69 +43,100 @@ enum MEDIA_DEVICE_TYPE {
*/
UNKNOWN_AUDIO_DEVICE = -1,
/**
- * 0: The audio playback device.
+ * 0: Audio playback device.
*/
AUDIO_PLAYOUT_DEVICE = 0,
/**
- * 1: The audio recording device.
+ * 1: Audio capturing device.
*/
AUDIO_RECORDING_DEVICE = 1,
/**
- * 2: The video renderer.
+ * 2: Video rendering device (graphics card).
*/
VIDEO_RENDER_DEVICE = 2,
/**
- * 3: The video capturer.
+ * 3: Video capturing device.
*/
VIDEO_CAPTURE_DEVICE = 3,
/**
- * 4: The audio playback device of the app.
+ * 4: Audio playback device for an app.
*/
AUDIO_APPLICATION_PLAYOUT_DEVICE = 4,
/**
- * 5: The virtual audio playback device.
+ * (For macOS only) 5: Virtual audio playback device (virtual sound card).
*/
AUDIO_VIRTUAL_PLAYOUT_DEVICE = 5,
/**
- * 6: The virtual audio recording device.
+ * (For macOS only) 6: Virtual audio capturing device (virtual sound card).
*/
AUDIO_VIRTUAL_RECORDING_DEVICE = 6,
};
/**
- The playback state of the music file.
+ * @brief The playback state of the music file.
*/
enum AUDIO_MIXING_STATE_TYPE {
- /** 710: The music file is playing. */
+ /**
+ * 710: The music file is playing.
+ */
AUDIO_MIXING_STATE_PLAYING = 710,
- /** 711: The music file pauses playing. */
+ /**
+ * 711: The music file pauses playing.
+ */
AUDIO_MIXING_STATE_PAUSED = 711,
- /** 713: The music file stops playing. */
+ /**
+ * 713: The music file stops playing.
+ * The possible reasons include:
+ * - AUDIO_MIXING_REASON_ALL_LOOPS_COMPLETED (723)
+ * - AUDIO_MIXING_REASON_STOPPED_BY_USER (724)
+ */
AUDIO_MIXING_STATE_STOPPED = 713,
- /** 714: An error occurs during the playback of the audio mixing file.
+ /**
+ * 714: An error occurs during the playback of the audio mixing file.
+ * The possible reasons include:
+ * - AUDIO_MIXING_REASON_CAN_NOT_OPEN (701)
+ * - AUDIO_MIXING_REASON_TOO_FREQUENT_CALL (702)
+ * - AUDIO_MIXING_REASON_INTERRUPTED_EOF (703)
*/
AUDIO_MIXING_STATE_FAILED = 714,
};
/**
- The reson codes of the local user's audio mixing file.
+ * @brief The reason why the playback state of the music file changes. Reported in the
+ * `onAudioMixingStateChanged` callback.
*/
enum AUDIO_MIXING_REASON_TYPE {
- /** 701: The SDK cannot open the audio mixing file. */
+ /**
+ * 701: The SDK cannot open the music file. For example, the local music file does not exist, the
+ * SDK does not support the file format, or the SDK cannot access the music file URL.
+ */
AUDIO_MIXING_REASON_CAN_NOT_OPEN = 701,
- /** 702: The SDK opens the audio mixing file too frequently. */
+ /**
+ * 702: The SDK opens the music file too frequently. If you need to call `startAudioMixing` multiple
+ * times, ensure that the call interval is more than 500 ms.
+ */
AUDIO_MIXING_REASON_TOO_FREQUENT_CALL = 702,
- /** 703: The audio mixing file playback is interrupted. */
+ /**
+ * 703: The music file playback is interrupted.
+ */
AUDIO_MIXING_REASON_INTERRUPTED_EOF = 703,
- /** 721: The audio mixing file is played once. */
+ /**
+ * 721: The music file completes a loop playback.
+ */
AUDIO_MIXING_REASON_ONE_LOOP_COMPLETED = 721,
- /** 723: The audio mixing file is all played out. */
+ /**
+ * 723: The music file completes all loop playback.
+ */
AUDIO_MIXING_REASON_ALL_LOOPS_COMPLETED = 723,
- /** 724: The audio mixing file stopped by user */
+ /**
+ * 724: Successfully call `stopAudioMixing` to stop playing the music file.
+ */
AUDIO_MIXING_REASON_STOPPED_BY_USER = 724,
/** 726: The audio mixing playback has resumed by user */
AUDIO_MIXING_REASON_RESUMED_BY_USER = 726,
- /** 0: The SDK can open the audio mixing file. */
+ /**
+ * 0: The SDK opens music file successfully.
+ */
AUDIO_MIXING_REASON_OK = 0,
};
@@ -160,90 +191,95 @@ enum INJECT_STREAM_STATUS {
};
/**
- * The audio equalization band frequency.
+ * @brief The midrange frequency for audio equalization.
*/
enum AUDIO_EQUALIZATION_BAND_FREQUENCY {
/**
- * 0: 31 Hz.
+ * 0: 31 Hz
*/
AUDIO_EQUALIZATION_BAND_31 = 0,
/**
- * 1: 62 Hz.
+ * 1: 62 Hz
*/
AUDIO_EQUALIZATION_BAND_62 = 1,
/**
- * 2: 125 Hz.
+ * 2: 125 Hz
*/
AUDIO_EQUALIZATION_BAND_125 = 2,
/**
- * 3: 250 Hz.
+ * 3: 250 Hz
*/
AUDIO_EQUALIZATION_BAND_250 = 3,
/**
- * 4: 500 Hz.
+ * 4: 500 Hz
*/
AUDIO_EQUALIZATION_BAND_500 = 4,
/**
- * 5: 1 KHz.
+ * 5: 1 kHz
*/
AUDIO_EQUALIZATION_BAND_1K = 5,
/**
- * 6: 2 KHz.
+ * 6: 2 kHz
*/
AUDIO_EQUALIZATION_BAND_2K = 6,
/**
- * 7: 4 KHz.
+ * 7: 4 kHz
*/
AUDIO_EQUALIZATION_BAND_4K = 7,
/**
- * 8: 8 KHz.
+ * 8: 8 kHz
*/
AUDIO_EQUALIZATION_BAND_8K = 8,
/**
- * 9: 16 KHz.
+ * 9: 16 kHz
*/
AUDIO_EQUALIZATION_BAND_16K = 9,
};
/**
- * The audio reverberation type.
+ * @brief Audio reverberation types.
*/
enum AUDIO_REVERB_TYPE {
/**
- * 0: (-20 to 10 dB), the level of the dry signal.
+ * 0: The level of the dry signal (dB). The value is between -20 and 10.
*/
AUDIO_REVERB_DRY_LEVEL = 0,
/**
- * 1: (-20 to 10 dB), the level of the early reflection signal (wet signal).
+ * 1: The level of the early reflection signal (wet signal) (dB). The value is between -20 and 10.
*/
AUDIO_REVERB_WET_LEVEL = 1,
/**
- * 2: (0 to 100 dB), the room size of the reflection.
+ * 2: The room size of the reflection. The value is between 0 and 100.
*/
AUDIO_REVERB_ROOM_SIZE = 2,
/**
- * 3: (0 to 200 ms), the length of the initial delay of the wet signal in ms.
+ * 3: The length of the initial delay of the wet signal (ms). The value is between 0 and 200.
*/
AUDIO_REVERB_WET_DELAY = 3,
/**
- * 4: (0 to 100), the strength of the late reverberation.
+ * 4: The reverberation strength. The value is between 0 and 100.
*/
AUDIO_REVERB_STRENGTH = 4,
};
+/**
+ * @brief Options for handling audio and video stream fallback when network conditions are weak.
+ */
enum STREAM_FALLBACK_OPTIONS {
- /** 0: No fallback operation to a lower resolution stream when the network
- condition is poor. Fallback to Scalable Video Coding (e.g. SVC)
- is still possible, but the resolution remains in high stream.
- The stream quality cannot be guaranteed. */
+ /**
+ * 0: No fallback processing is performed on audio and video streams, the quality of the audio and
+ * video streams cannot be guaranteed.
+ */
STREAM_FALLBACK_OPTION_DISABLED = 0,
- /** 1: (Default) Under poor network conditions, the receiver SDK will receive
- agora::rtc::VIDEO_STREAM_LOW. You can only set this option in
- RtcEngineParameters::setRemoteSubscribeFallbackOption. */
+ /**
+ * 1: Only receive low-quality (low resolution, low bitrate) video stream.
+ */
STREAM_FALLBACK_OPTION_VIDEO_STREAM_LOW = 1,
- /** 2: Under poor network conditions, the SDK may receive agora::rtc::VIDEO_STREAM_LOW first,
- then agora::rtc::VIDEO_STREAM_LAYER_1 to agora::rtc::VIDEO_STREAM_LAYER_6 if the related layer exists.
- If the network still does not allow displaying the video, the SDK will receive audio only. */
+ /**
+ * 2: When the network conditions are weak, try to receive the low-quality video stream first. If
+ * the video cannot be displayed due to extremely weak network environment, then fall back to
+ * receiving audio-only stream.
+ */
STREAM_FALLBACK_OPTION_AUDIO_ONLY = 2,
/** 3~8: If the receiver SDK uses RtcEngineParameters::setRemoteSubscribeFallbackOption,it will receive
one of the streams from agora::rtc::VIDEO_STREAM_LAYER_1 to agora::rtc::VIDEO_STREAM_LAYER_6
@@ -268,107 +304,141 @@ enum PRIORITY_TYPE {
struct RtcConnection;
-/** Statistics of the local video stream.
+/**
+ * @brief The statistics of the local video stream.
*/
struct LocalVideoStats
{
/**
- * ID of the local user.
- */
+ * The ID of the local user.
+ */
uid_t uid;
- /** The actual bitrate (Kbps) while sending the local video stream.
- * @note This value does not include the bitrate for resending the video after packet loss.
- */
+ /**
+ * The actual bitrate (Kbps) while sending the local video stream.
+ * @note This value does not include the bitrate for resending the video after packet loss.
+ */
int sentBitrate;
- /** The actual frame rate (fps) while sending the local video stream.
- * @note This value does not include the frame rate for resending the video after packet loss.
- */
+ /**
+ * The actual frame rate (fps) while sending the local video stream.
+ * @note This value does not include the frame rate for resending the video after packet loss.
+ */
int sentFrameRate;
- /** The capture frame rate (fps) of the local video.
- */
+ /**
+ * The frame rate (fps) for capturing the local video stream.
+ */
int captureFrameRate;
- /** The width of the capture frame (px).
- */
+ /**
+ * The width (px) for capturing the local video stream.
+ */
int captureFrameWidth;
- /** The height of the capture frame (px).
- */
+ /**
+ * The height (px) for capturing the local video stream.
+ */
int captureFrameHeight;
/**
- * The regulated frame rate of capture frame rate according to video encoder configuration.
- */
+ * The frame rate (fps) adjusted by the built-in video capture adapter (regulator) of the SDK for
+ * capturing the local video stream. The regulator adjusts the frame rate of the video captured by
+ * the camera according to the video encoding configuration.
+ */
int regulatedCaptureFrameRate;
/**
- * The regulated frame width (pixel) of capture frame width according to video encoder configuration.
- */
+ * The width (px) adjusted by the built-in video capture adapter (regulator) of the SDK for
+ * capturing the local video stream. The regulator adjusts the height and width of the video
+ * captured by the camera according to the video encoding configuration.
+ */
int regulatedCaptureFrameWidth;
/**
- * The regulated frame height (pixel) of capture frame height according to video encoder configuration.
- */
+ * The height (px) adjusted by the built-in video capture adapter (regulator) of the SDK for
+ * capturing the local video stream. The regulator adjusts the height and width of the video
+ * captured by the camera according to the video encoding configuration.
+ */
int regulatedCaptureFrameHeight;
- /** The output frame rate (fps) of the local video encoder.
- */
+ /**
+ * The output frame rate (fps) of the local video encoder.
+ */
int encoderOutputFrameRate;
- /** The width of the encoding frame (px).
- */
+ /**
+ * The width of the encoded video (px).
+ */
int encodedFrameWidth;
- /** The height of the encoding frame (px).
- */
+ /**
+ * The height of the encoded video (px).
+ */
int encodedFrameHeight;
- /** The output frame rate (fps) of the local video renderer.
- */
+ /**
+ * The output frame rate (fps) of the local video renderer.
+ */
int rendererOutputFrameRate;
- /** The target bitrate (Kbps) of the current encoder. This is an estimate made by the SDK based on the current network conditions.
- */
+ /**
+ * The target bitrate (Kbps) of the current encoder. This is an estimate made by the SDK based on
+ * the current network conditions.
+ */
int targetBitrate;
- /** The target frame rate (fps) of the current encoder.
- */
+ /**
+ * The target frame rate (fps) of the current encoder.
+ */
int targetFrameRate;
- /** Quality adaption of the local video stream in the reported interval (based on the target frame
- * rate and target bitrate). See #QUALITY_ADAPT_INDICATION.
- */
+ /**
+ * The quality adaptation of the local video stream in the reported interval (based on the target
+ * frame rate and target bitrate). See `QUALITY_ADAPT_INDICATION`.
+ */
QUALITY_ADAPT_INDICATION qualityAdaptIndication;
- /** The bitrate (Kbps) while encoding the local video stream.
- * @note This value does not include the bitrate for resending the video after packet loss.
- */
+ /**
+ * The bitrate (Kbps) while encoding the local video stream.
+ * @note This value does not include the bitrate for resending the video after packet loss.
+ */
int encodedBitrate;
- /** The number of the sent video frames, represented by an aggregate value.
- */
+ /**
+ * The number of the sent video frames, represented by an aggregate value.
+ */
int encodedFrameCount;
- /** The codec type of the local video. See #VIDEO_CODEC_TYPE.
- */
+ /**
+ * The codec type of the local video. See `VIDEO_CODEC_TYPE`.
+ */
VIDEO_CODEC_TYPE codecType;
/**
- * The video packet loss rate (%) from the local client to the Agora server before applying the anti-packet loss strategies.
- */
+ * The video packet loss rate (%) from the local client to the Agora server before applying the
+ * anti-packet loss strategies.
+ */
unsigned short txPacketLossRate;
- /** The brightness level of the video image captured by the local camera. See #CAPTURE_BRIGHTNESS_LEVEL_TYPE.
- */
+ /**
+ * The brightness level of the video image captured by the local camera. See
+ * `CAPTURE_BRIGHTNESS_LEVEL_TYPE`.
+ */
CAPTURE_BRIGHTNESS_LEVEL_TYPE captureBrightnessLevel;
/**
* Whether we send dual stream now.
*/
bool dualStreamEnabled;
- /** The hwEncoderAccelerating of the local video:
- * - software = 0.
- * - hardware = 1.
- */
+ /**
+ * The local video encoding acceleration type.
+ * - 0: Software encoding is applied without acceleration.
+ * - 1: Hardware encoding is applied for acceleration.
+ */
int hwEncoderAccelerating;
/** The dimensions of the simulcast streams's encoding frame.
*/
VideoDimensions simulcastDimensions[SimulcastConfig::STREAM_LAYER_COUNT_MAX];
+ /**
+ * @technical preview
+ * The encodedFrameDepth of the local video:
+ * - SDR = 8.
+ * - HDR = 10.
+ */
+ int encodedFrameDepth;
};
/**
- * Audio statistics of the remote user.
+ * @brief Audio statistics of the remote user.
*/
struct RemoteAudioStats
{
/**
- * User ID of the remote user sending the audio stream.
+ * The user ID of the remote user.
*/
uid_t uid;
/**
- * The quality of the remote audio: #QUALITY_TYPE.
+ * The quality of the audio stream sent by the user. See `QUALITY_TYPE`.
*/
int quality;
/**
@@ -376,55 +446,52 @@ struct RemoteAudioStats
*/
int networkTransportDelay;
/**
- * The network delay (ms) from the receiver to the jitter buffer.
- * @note When the receiving end is an audience member and `audienceLatencyLevel` of `ClientRoleOptions`
- * is 1, this parameter does not take effect.
+ * The network delay (ms) from the audio receiver to the jitter buffer.
+ * @note When the receiving end is an audience member and `audienceLatencyLevel` of
+ * `ClientRoleOptions` is 1, this parameter does not take effect.
*/
int jitterBufferDelay;
/**
- * The audio frame loss rate in the reported interval.
+ * The frame loss rate (%) of the remote audio stream in the reported interval.
*/
int audioLossRate;
/**
- * The number of channels.
+ * The number of audio channels.
*/
int numChannels;
/**
- * The sample rate (Hz) of the remote audio stream in the reported interval.
+ * The sampling rate of the received audio stream in the reported interval.
*/
int receivedSampleRate;
/**
- * The average bitrate (Kbps) of the remote audio stream in the reported
- * interval.
+ * The average bitrate (Kbps) of the received audio stream in the reported interval.
*/
int receivedBitrate;
/**
- * The total freeze time (ms) of the remote audio stream after the remote
- * user joins the channel.
- *
- * In a session, audio freeze occurs when the audio frame loss rate reaches 4%.
+ * The total freeze time (ms) of the remote audio stream after the remote user joins the channel. In
+ * a session, audio freeze occurs when the audio frame loss rate reaches 4%.
*/
int totalFrozenTime;
/**
- * The total audio freeze time as a percentage (%) of the total time when the
- * audio is available.
+ * The total audio freeze time as a percentage (%) of the total time when the audio is available.
+ * The audio is considered available when the remote user neither stops sending the audio stream nor
+ * disables the audio module after joining the channel.
*/
int frozenRate;
/**
- * The quality of the remote audio stream as determined by the Agora
- * real-time audio MOS (Mean Opinion Score) measurement method in the
- * reported interval. The return value ranges from 0 to 500. Dividing the
- * return value by 100 gets the MOS score, which ranges from 0 to 5. The
- * higher the score, the better the audio quality.
- *
- * | MOS score | Perception of audio quality |
- * |-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|
- * | Greater than 4 | Excellent. The audio sounds clear and smooth. |
- * | From 3.5 to 4 | Good. The audio has some perceptible impairment, but still sounds clear. |
- * | From 3 to 3.5 | Fair. The audio freezes occasionally and requires attentive listening. |
- * | From 2.5 to 3 | Poor. The audio sounds choppy and requires considerable effort to understand. |
- * | From 2 to 2.5 | Bad. The audio has occasional noise. Consecutive audio dropouts occur, resulting in some information loss. The users can communicate only with difficulty. |
- * | Less than 2 | Very bad. The audio has persistent noise. Consecutive audio dropouts are frequent, resulting in severe information loss. Communication is nearly impossible. |
+ * The quality of the remote audio stream in the reported interval. The quality is determined by the
+ * Agora real-time audio MOS (Mean Opinion Score) measurement method. The return value range is [0,
+ * 500]. Dividing the return value by 100 gets the MOS score, which ranges from 0 to 5. The higher
+ * the score, the better the audio quality.
+ * The subjective perception of audio quality corresponding to the Agora real-time audio MOS scores is as follows:
+ * | MOS score | Perception of audio quality |
+ * | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+ * | Greater than 4 | Excellent. The audio sounds clear and smooth. |
+ * | From 3.5 to 4 | Good. The audio has some perceptible impairment but still sounds clear. |
+ * | From 3 to 3.5 | Fair. The audio freezes occasionally and requires attentive listening. |
+ * | From 2.5 to 3 | Poor. The audio sounds choppy and requires considerable effort to understand. |
+ * | From 2 to 2.5 | Bad. The audio has occasional noise. Consecutive audio dropouts occur, resulting in some information loss. The users can communicate only with difficulty. |
+ * | Less than 2 | Very bad. The audio has persistent noise. Consecutive audio dropouts are frequent, resulting in severe information loss. Communication is nearly impossible. |
*/
int mosValue;
/**
@@ -450,20 +517,23 @@ struct RemoteAudioStats
uint32_t frozenTimeByCustom;
/**
- * The total time (ms) when the remote user neither stops sending the audio
- * stream nor disables the audio module after joining the channel.
+ * The total active time (ms) between the start of the audio call and the callback of the remote
+ * user.
+ * The active time refers to the total duration during which the remote user is not in a mute state.
*/
int totalActiveTime;
/**
- * The total publish duration (ms) of the remote audio stream.
+ * The total duration (ms) of the remote audio stream.
*/
int publishDuration;
/**
- * Quality of experience (QoE) of the local user when receiving a remote audio stream. See #EXPERIENCE_QUALITY_TYPE.
+ * The Quality of Experience (QoE) of the local user when receiving a remote audio stream. See
+ * `EXPERIENCE_QUALITY_TYPE`.
*/
int qoeQuality;
/**
- * The reason for poor QoE of the local user when receiving a remote audio stream. See #EXPERIENCE_POOR_REASON.
+ * Reasons why the QoE of the local user when receiving a remote audio stream is poor. See
+ * `EXPERIENCE_POOR_REASON`.
*/
int qualityChangedReason;
/**
@@ -471,7 +541,8 @@ struct RemoteAudioStats
*/
unsigned int rxAudioBytes;
/**
- * The end-to-end delay (ms) from the sender to the receiver.
+ * End-to-end audio delay (in milliseconds), which refers to the time from when the audio is
+ * captured by the remote user to when it is played by the local user.
*/
int e2eDelay;
@@ -500,78 +571,86 @@ struct RemoteAudioStats
};
/**
- * The statistics of the remote video stream.
+ * @brief Statistics of the remote video stream.
*/
struct RemoteVideoStats {
/**
- * ID of the remote user sending the video stream.
+ * The user ID of the remote user sending the video stream.
*/
uid_t uid;
/**
+ * Deprecated:
+ * In scenarios where audio and video are synchronized, you can get the video delay data from
+ * `networkTransportDelay` and `jitterBufferDelay` in `RemoteAudioStats`.
+ * The video delay (ms).
* @deprecated Time delay (ms).
- *
- * In scenarios where audio and video is synchronized, you can use the
- * value of `networkTransportDelay` and `jitterBufferDelay` in `RemoteAudioStats`
- * to know the delay statistics of the remote video.
*/
int delay __deprecated;
/**
- * End-to-end delay from video capturer to video renderer. Hardware capture or render delay is excluded.
+ * End-to-end video latency (ms). That is, the time elapsed from the video capturing on the remote
+ * user's end to the receiving and rendering of the video on the local user's end.
*/
int e2eDelay;
/**
- * The width (pixels) of the video stream.
+ * The width (pixels) of the video.
*/
int width;
/**
- * The height (pixels) of the video stream.
+ * The height (pixels) of the video.
*/
int height;
/**
- * Bitrate (Kbps) received since the last count.
+ * The bitrate (Kbps) of the remote video received since the last count.
*/
int receivedBitrate;
/** The decoder input frame rate (fps) of the remote video.
*/
int decoderInputFrameRate;
- /** The decoder output frame rate (fps) of the remote video.
+ /**
+ * The frame rate (fps) of decoding the remote video.
*/
int decoderOutputFrameRate;
- /** The render output frame rate (fps) of the remote video.
+ /**
+ * The frame rate (fps) of rendering the remote video.
*/
int rendererOutputFrameRate;
- /** The video frame loss rate (%) of the remote video stream in the reported interval.
+ /**
+ * The packet loss rate (%) of the remote video.
*/
int frameLossRate;
- /** Packet loss rate (%) of the remote video stream after using the anti-packet-loss method.
+ /**
+ * The packet loss rate (%) of the remote video after using the anti-packet-loss technology.
*/
int packetLossRate;
/**
- * The type of the remote video stream: #VIDEO_STREAM_TYPE.
+ * The type of the video stream. See `VIDEO_STREAM_TYPE`.
*/
VIDEO_STREAM_TYPE rxStreamType;
/**
- The total freeze time (ms) of the remote video stream after the remote user joins the channel.
- In a video session where the frame rate is set to no less than 5 fps, video freeze occurs when
- the time interval between two adjacent renderable video frames is more than 500 ms.
- */
+ * The total freeze time (ms) of the remote video stream after the remote user joins the channel. In
+ * a video session where the frame rate is set to no less than 5 fps, video freeze occurs when the
+ * time interval between two adjacent renderable video frames is more than 500 ms.
+ */
int totalFrozenTime;
/**
- The total video freeze time as a percentage (%) of the total time when the video is available.
+ * The total video freeze time as a percentage (%) of the total time the video is available. The
+ * video is considered available as long as the remote user neither stops sending the video
+ * stream nor disables the video module after joining the channel.
*/
int frozenRate;
/**
- The offset (ms) between audio and video stream. A positive value indicates the audio leads the
- video, and a negative value indicates the audio lags the video.
+ * The amount of time (ms) that the audio is ahead of the video.
+ * @note If this value is negative, the audio is lagging behind the video.
*/
int avSyncTimeMs;
/**
- * The total time (ms) when the remote user neither stops sending the audio
- * stream nor disables the audio module after joining the channel.
+ * The total active time (ms) of the video.
+ * As long as the remote user or host neither stops sending the video stream nor disables the video
+ * module after joining the channel, the video is available.
*/
int totalActiveTime;
/**
- * The total publish duration (ms) of the remote audio stream.
+ * The total duration (ms) of the remote video stream.
*/
int publishDuration;
/**
@@ -713,14 +792,19 @@ struct InjectStreamConfig {
audioChannels(1) {}
};
-/** The video stream lifecycle of CDN Live.
+/**
+ * @brief Lifecycle of the CDN live video stream.
*/
enum RTMP_STREAM_LIFE_CYCLE_TYPE {
- /** Bound to the channel lifecycle.
- */
+ /**
+ * Bind to the channel lifecycle. If all hosts leave the channel, the CDN live streaming stops after
+ * 30 seconds.
+ */
RTMP_STREAM_LIFE_CYCLE_BIND2CHANNEL = 1,
- /** Bound to the owner identity of the RTMP stream.
- */
+ /**
+ * Bind to the owner of the RTMP stream. If the owner leaves the channel, the CDN live streaming
+ * stops immediately.
+ */
RTMP_STREAM_LIFE_CYCLE_BIND2OWNER = 2,
};
@@ -795,69 +879,116 @@ struct PublisherConfiguration {
};
/**
- * The camera direction.
+ * @brief The camera direction.
*/
enum CAMERA_DIRECTION {
- /** The rear camera. */
+ /**
+ * 0: The rear camera.
+ */
CAMERA_REAR = 0,
- /** The front camera. */
+ /**
+ * 1: (Default) The front camera.
+ */
CAMERA_FRONT = 1,
};
-/** The cloud proxy type.
+/**
+ * @brief The cloud proxy type.
*
* @since v3.3.0
*/
enum CLOUD_PROXY_TYPE {
- /** 0: Do not use the cloud proxy.
+ /**
+ * 0: The automatic mode. The SDK has this mode enabled by default. In this mode, the SDK attempts a
+ * direct connection to SD-RTN™ and automatically switches to TCP/TLS 443 if the attempt fails.
*/
NONE_PROXY = 0,
- /** 1: The cloud proxy for the UDP protocol.
+ /**
+ * 1: The cloud proxy for the UDP protocol, that is, the Force UDP cloud proxy mode. In this mode,
+ * the SDK always transmits data over UDP.
*/
UDP_PROXY = 1,
/// @cond
- /** 2: The cloud proxy for the TCP (encrypted) protocol.
+ /**
+ * 2: The cloud proxy for the TCP (encryption) protocol, that is, the Force TCP cloud proxy mode. In
+ * this mode, the SDK always transmits data over TCP/TLS 443.
*/
TCP_PROXY = 2,
/// @endcond
};
-/** Camera capturer configuration.*/
+/**
+ * @brief The camera capturer preference.
+ */
struct CameraCapturerConfiguration {
/** Camera direction settings (for Android/iOS only). See: #CAMERA_DIRECTION. */
#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__)
/**
- * The camera direction.
+ * (Optional) The camera direction. See `CAMERA_DIRECTION`.
+ * @note This parameter is for Android and iOS only.
*/
Optional cameraDirection;
- /*- CAMERA_FOCAL_LENGTH_TYPE.CAMERA_FOCAL_LENGTH_DEFAULT:
- For iOS, if iPhone/iPad has 3 or 2 back camera, it means combination of triple (wide + ultra wide + telephoto) camera
- or dual wide(wide + ultra wide) camera.In this situation, you can apply for ultra wide len by set smaller zoom fator
- and bigger zoom fator for telephoto len.Otherwise, it always means wide back/front camera.
-
- - CAMERA_FOCAL_LENGTH_TYPE.CAMERA_FOCAL_LENGTH_WIDE_ANGLE:wide camera
- - CAMERA_FOCAL_LENGTH_TYPE.CAMERA_FOCAL_LENGTH_ULTRA_WIDE:ultra wide camera
- - CAMERA_FOCAL_LENGTH_TYPE.CAMERA_FOCAL_LENGTH_TELEPHOTO:telephoto camera*/
+ /**
+ * (Optional) The camera focal length type. See `CAMERA_FOCAL_LENGTH_TYPE`.
+ * @note
+ * - This parameter is for Android and iOS only.
+ * - To set the focal length type of the camera, it is only supported to specify the camera through
+ * `cameraDirection`, and not supported to specify it through `cameraId`.
+ * - For iOS devices equipped with multi-lens rear cameras, such as those featuring dual-camera
+ * (wide-angle and ultra-wide-angle) or triple-camera (wide-angle, ultra-wide-angle, and telephoto),
+ * you can use one of the following methods to capture video with an ultra-wide-angle perspective:
+ * - Method one: Set this parameter to `CAMERA_FOCAL_LENGTH_ULTRA_WIDE` (2) (ultra-wide lens).
+ * - Method two: Set this parameter to `CAMERA_FOCAL_LENGTH_DEFAULT` (0) (standard lens), then
+ * call `setCameraZoomFactor` to set the camera's zoom factor to a value less than 1.0, with the
+ * minimum setting being 0.5.
+ * The difference is that the size of the ultra-wide angle in method one is not adjustable, whereas
+ * method two supports adjusting the camera's zoom factor freely.
+ */
Optional cameraFocalLengthType;
#else
- /** For windows. The device ID of the playback device. */
+ /**
+ * The camera ID. The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`.
+ * @note This parameter is for Windows and macOS only.
+ */
Optional deviceId;
#endif
#if defined(__ANDROID__)
/**
- * The camera id.
+ * (Optional) The camera ID. The default value is the camera ID of the front camera. You can get the
+ * camera ID through the Android native system API, see `Camera.open()` and
+ * `CameraManager.getCameraIdList()` for details.
+ * @note
+ * - This parameter is for Android only.
+ * - This parameter and `cameraDirection` are mutually exclusive in specifying the camera; you can
+ * choose one based on your needs. The differences are as follows:
+ * - Specifying the camera via `cameraDirection` is more straightforward. You only need to
+ * indicate the camera direction (front or rear), without specifying a specific camera ID; the SDK
+ * will retrieve and confirm the actual camera ID through Android native system APIs.
+ * - Specifying via `cameraId` allows for more precise identification of a particular camera. For
+ * devices with multiple cameras, where `cameraDirection` cannot recognize or access all available
+ * cameras, it is recommended to use `cameraId` to specify the desired camera ID directly.
*/
Optional cameraId;
#endif
+ /**
+ * (Optional) Whether to follow the video aspect ratio set in `setVideoEncoderConfiguration`:
+ * - `true`: (Default) Follow the set video aspect ratio. The SDK crops the captured video according
+ * to the set video aspect ratio and synchronously changes the local preview screen and the video
+ * frame in `onCaptureVideoFrame` and `onPreEncodeVideoFrame`.
+ * - `false`: Do not follow the set video aspect ratio. The SDK does not change the
+ * aspect ratio of the captured video frame.
+ */
Optional followEncodeDimensionRatio;
- /** The video format. See VideoFormat. */
+ /**
+ * (Optional) The format of the video frame. See `VideoFormat`.
+ */
VideoFormat format;
CameraCapturerConfiguration() : format(VideoFormat(0, 0, 0)) {}
};
/**
- * The configuration of the captured screen.
+ * @brief The configuration of the captured screen.
*/
struct ScreenCaptureConfiguration {
/**
@@ -868,6 +999,7 @@ struct ScreenCaptureConfiguration {
bool isCaptureWindow; // true - capture window, false - capture display
/**
* (macOS only) The display ID of the screen.
+ * @note This parameter takes effect only when you want to capture the screen on macOS.
*/
int64_t displayId;
/**
@@ -876,20 +1008,19 @@ struct ScreenCaptureConfiguration {
*/
Rectangle screenRect; //Windows only
/**
- * (For Windows and macOS only) The window ID.
+ * (For Windows and macOS only) Window ID.
* @note This parameter takes effect only when you want to capture the window.
*/
int64_t windowId;
/**
- * (For Windows and macOS only) The screen capture configuration. For details, see ScreenCaptureParameters.
+ * (For Windows and macOS only) The screen capture configuration. See `ScreenCaptureParameters`.
*/
ScreenCaptureParameters params;
/**
- * (For Windows and macOS only) The relative position of the shared region to the whole screen. For details, see Rectangle.
- *
- * If you do not set this parameter, the SDK shares the whole screen. If the region you set exceeds the boundary of the
- * screen, only the region within in the screen is shared. If you set width or height in Rectangle as 0, the whole
- * screen is shared.
+ * (For Windows and macOS only) The relative position of the shared region to the whole screen. See
+ * `Rectangle`. If you do not set this parameter, the SDK shares the whole screen. If the region you
+ * set exceeds the boundary of the screen, only the region within the screen is shared. If you
+ * set width or height in `Rectangle` as 0, the whole screen is shared.
*/
Rectangle regionRect;
@@ -914,13 +1045,15 @@ struct SIZE {
#if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) || (defined(__linux__) && !defined(__ANDROID__) && !defined(__OHOS__))
/**
- * The image content of the thumbnail or icon.
- * @note The default image is in the RGBA format. If you need to use another format, you need to convert the image on
- * your own.
+ * @brief The image content of the thumbnail or icon. Set in `ScreenCaptureSourceInfo`.
+ *
+ * @note The default image is in the ARGB format. If you need to use another format, you need to
+ * convert the image on your own.
+ *
*/
struct ThumbImageBuffer {
/**
- * The buffer of the thumbnail ot icon.
+ * The buffer of the thumbnail or icon.
*/
const char* buffer;
/**
@@ -938,22 +1071,33 @@ struct ThumbImageBuffer {
ThumbImageBuffer() : buffer(nullptr), length(0), width(0), height(0) {}
};
/**
- * The type of the shared target. Set in ScreenCaptureSourceInfo.
+ * @brief The type of the shared target. Set in `ScreenCaptureSourceInfo`.
*/
enum ScreenCaptureSourceType {
- /** -1: Unknown type. */
+ /**
+ * -1: Unknown type.
+ */
ScreenCaptureSourceType_Unknown = -1,
- /** 0: The shared target is a window.*/
+ /**
+ * 0: The shared target is a window.
+ */
ScreenCaptureSourceType_Window = 0,
- /** 1: The shared target is a screen of a particular monitor.*/
+ /**
+ * 1: The shared target is a screen of a particular monitor.
+ */
ScreenCaptureSourceType_Screen = 1,
- /** 2: Reserved parameter.*/
+ /**
+ * 2: Reserved parameter.
+ */
ScreenCaptureSourceType_Custom = 2,
};
-/** The information about the specified shareable window or screen. It is returned in IScreenCaptureSourceList. */
+/**
+ * @brief The information about the specified shareable window or screen. The information is
+ * returned in `IScreenCaptureSourceList`.
+ */
struct ScreenCaptureSourceInfo {
/**
- * The type of the shared target. See \ref agora::rtc::ScreenCaptureSourceType "ScreenCaptureSourceType".
+ * The type of the shared target. See `ScreenCaptureSourceType`.
*/
ScreenCaptureSourceType type;
/**
@@ -965,11 +1109,11 @@ struct ScreenCaptureSourceInfo {
*/
const char* sourceName;
/**
- * The image content of the thumbnail. See ThumbImageBuffer.
+ * The image content of the thumbnail. See `ThumbImageBuffer`.
*/
ThumbImageBuffer thumbImage;
/**
- * The image content of the icon. See ThumbImageBuffer.
+ * The image content of the icon. See `ThumbImageBuffer`.
*/
ThumbImageBuffer iconImage;
/**
@@ -982,25 +1126,28 @@ struct ScreenCaptureSourceInfo {
const char* sourceTitle;
/**
* Determines whether the screen is the primary display:
- * - true: The screen is the primary display.
- * - false: The screen is not the primary display.
+ * - `true`: The screen is the primary display.
+ * - `false`: The screen is not the primary display.
*/
bool primaryMonitor;
bool isOccluded;
/**
- * The relative position of the shared region to the screen space (A virtual space include all the screens). See Rectangle.
+ * The position of a window relative to the entire screen space (including all shareable screens).
+ * See `Rectangle`.
*/
Rectangle position;
#if defined(_WIN32)
/**
- * Determines whether the window is minimized.
+ * (For Windows only) Whether the window is minimized:
+ * - `true`: The window is minimized.
+ * - `false`: The window is not minimized.
*/
bool minimizeWindow;
/**
- * The display ID to the window of interest.
- * If the window intersects one or more display monitor rectangles, the return value is an valid
- * ID to the display monitor that has the largest area of intersection with the window, Otherwise
- * the return value is -2.
+ * (For Windows only) Screen ID where the window is located. If the window is displayed across
+ * multiple screens, this parameter indicates the ID of the screen with which the window has the
+ * largest intersection area. If the window is located outside of the visible screens, the value of
+ * this member is -2.
*/
int64_t sourceDisplayId;
ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(0), sourceName(nullptr),
@@ -1018,226 +1165,275 @@ class IScreenCaptureSourceList {
public:
/**
- * Gets the number of shareable cpp and screens.
+ * @brief Gets the number of shareable windows and screens.
*
- * @return The number of shareable cpp and screens.
+ * @note This method applies to macOS and Windows only.
+ *
+ * @return
+ * The number of shareable windows and screens.
*/
virtual unsigned int getCount() = 0;
/**
- * Gets information about the specified shareable window or screen.
+ * @brief Gets information about the specified shareable window or screen.
*
- * After you get IScreenCaptureSourceList, you can pass in the index value of the specified shareable window or
- * screen to get information about that window or screen from ScreenCaptureSourceInfo.
+ * @details
+ * After you get `IScreenCaptureSourceList`, you can pass in the index value of the specified
+ * shareable window or screen to get information about that window or screen from
+ * `ScreenCaptureSourceInfo`.
*
- * @param index The index of the specified shareable window or screen. The value range is [0, getCount()).
- * @return ScreenCaptureSourceInfo The information of the specified window or screen.
+ * @note This method applies to macOS and Windows only.
+ *
+ * @param index The index of the specified shareable window or screen. The value range is [0,
+ * `getCount()`).
+ *
+ * @return
+ * `ScreenCaptureSourceInfo`
*/
virtual ScreenCaptureSourceInfo getSourceInfo(unsigned int index) = 0;
/**
- * Releases IScreenCaptureSourceList.
+ * @brief Releases `IScreenCaptureSourceList`.
+ *
+ * @details
+ * After you get the list of shareable windows and screens, to avoid memory leaks, call this method
+ * to release `IScreenCaptureSourceList` instead of deleting `IScreenCaptureSourceList` directly.
+ *
+ * @note This method applies to macOS and Windows only.
*
- * After you get the list of shareable cpp and screens, to avoid memory leaks, call this method to release
- * IScreenCaptureSourceList instead of deleting IScreenCaptureSourceList directly.
*/
virtual void release() = 0;
};
#endif // _WIN32 || (__APPLE__ && !TARGET_OS_IPHONE && TARGET_OS_MAC)
/**
- * The advanced options for audio.
+ * @brief The advanced options for audio.
*/
struct AdvancedAudioOptions {
- /**
- * Audio processing channels, only support 1 or 2.
- */
+ /**
+ * The number of channels for audio preprocessing:
+ * - 1: Mono.
+ * - 2: Stereo.
+ */
Optional audioProcessingChannels;
AdvancedAudioOptions() {}
~AdvancedAudioOptions() {}
};
+/**
+ * @brief Image configurations.
+ */
struct ImageTrackOptions {
+ /**
+ * The image URL. Supported formats of images include JPEG, JPG, PNG and GIF. This method supports
+ * adding an image from the local absolute or relative file path.
+ * @note On the Android platform, adding images from `/assets/` is not supported.
+ */
const char* imageUrl;
+ /**
+ * The frame rate of the video streams being published. The value range is [1,30]. The default value
+ * is 1.
+ */
int fps;
VIDEO_MIRROR_MODE_TYPE mirrorMode;
ImageTrackOptions() : imageUrl(NULL), fps(1), mirrorMode(VIDEO_MIRROR_MODE_DISABLED) {}
};
/**
- * The channel media options.
+ * @brief The channel media options.
+ *
+ * @details
+ * Agora supports publishing multiple audio streams and one video stream at the same time and in the
+ * same `RtcConnection`. For example, `publishMicrophoneTrack`, `publishCustomAudioTrack`, and
+ * `publishMediaPlayerAudioTrack` can be set as `true` at the same time, but only one of
+ * `publishCameraTrack`, `publishScreenCaptureVideo`, `publishScreenTrack`,
+ * `publishCustomVideoTrack`, or `publishEncodedVideoTrack` can be set as `true`.
+ *
+ * @note Agora recommends that you set member parameter values yourself according to your business
+ * scenario, otherwise the SDK will automatically assign values to member parameters.
*
- * Agora supports publishing multiple audio streams and one video stream at the same time and in the same RtcConnection.
- * For example, `publishAudioTrack`, `publishCustomAudioTrack` and `publishMediaPlayerAudioTrack` can be true at the same time;
- * but only one of `publishCameraTrack`, `publishScreenTrack`, `publishCustomVideoTrack`, and `publishEncodedVideoTrack` can be
- * true at the same time.
*/
struct ChannelMediaOptions {
/**
- * Whether to publish the video of the camera track.
- * - `true`: (Default) Publish the video track of the camera capturer.
- * - `false`: Do not publish the video track of the camera capturer.
+ * Whether to publish the video captured by the camera:
+ * - `true`: Publish the video captured by the camera.
+ * - `false`: Do not publish the video captured by the camera.
*/
Optional publishCameraTrack;
/**
- * Whether to publish the video of the secondary camera track.
- * - `true`: Publish the video track of the secondary camera capturer.
- * - `false`: (Default) Do not publish the video track of the secondary camera capturer.
+ * Whether to publish the video captured by the second camera:
+ * - `true`: Publish the video captured by the second camera.
+ * - `false`: Do not publish the video captured by the second camera.
*/
Optional publishSecondaryCameraTrack;
- /**
- * Whether to publish the video of the third camera track.
- * - `true`: Publish the video track of the third camera capturer.
- * - `false`: (Default) Do not publish the video track of the third camera capturer.
+ /**
+ * Whether to publish the video captured by the third camera:
+ * - `true`: Publish the video captured by the third camera.
+ * - `false`: Do not publish the video captured by the third camera.
+ * @note This parameter is for Android, Windows and macOS only.
*/
Optional publishThirdCameraTrack;
/**
- * Whether to publish the video of the fourth camera track.
- * - `true`: Publish the video track of the fourth camera capturer.
- * - `false`: (Default) Do not publish the video track of the fourth camera capturer.
+ * Whether to publish the video captured by the fourth camera:
+ * - `true`: Publish the video captured by the fourth camera.
+ * - `false`: Do not publish the video captured by the fourth camera.
+ * @note This parameter is for Android, Windows and macOS only.
*/
Optional publishFourthCameraTrack;
/**
- * Whether to publish the recorded audio.
- * - `true`: (Default) Publish the recorded audio.
- * - `false`: Do not publish the recorded audio.
+ * Whether to publish the audio captured by the microphone:
+ * - `true`: Publish the audio captured by the microphone.
+ * - `false`: Do not publish the audio captured by the microphone.
*/
Optional publishMicrophoneTrack;
-
- #if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(__OHOS__)
- /**
- * Whether to publish the video track of the screen capturer:
- * - `true`: Publish the video track of the screen capture.
- * - `false`: (Default) Do not publish the video track of the screen capture.
- */
- Optional publishScreenCaptureVideo;
+ #if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(TARGET_OS_MAC) || defined(__OHOS__)
/**
* Whether to publish the audio track of the screen capturer:
* - `true`: Publish the video audio of the screen capturer.
* - `false`: (Default) Do not publish the audio track of the screen capturer.
*/
- Optional publishScreenCaptureAudio;
+ Optional publishScreenCaptureAudio;
+ #endif
+ #if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(__OHOS__)
+ /**
+ * Whether to publish the video captured from the screen:
+ * - `true`: Publish the video captured from the screen.
+ * - `false`: Do not publish the video captured from the screen.
+ * @note This parameter is for Android and iOS only.
+ */
+ Optional publishScreenCaptureVideo;
+
#else
/**
- * Whether to publish the captured video from the screen:
- * - `true`: PPublish the captured video from the screen.
- * - `false`: (Default) Do not publish the captured video from the screen.
+ * Whether to publish the video captured from the screen:
+ * - `true`: Publish the video captured from the screen.
+ * - `false`: Do not publish the video captured from the screen.
+ * @note This is for Windows and macOS only.
*/
Optional publishScreenTrack;
/**
- * Whether to publish the captured video from the secondary screen:
- * - true: Publish the captured video from the secondary screen.
- * - false: (Default) Do not publish the captured video from the secondary screen.
+ * Whether to publish the video captured from the second screen:
+ * - `true`: Publish the video captured from the second screen.
+ * - `false`: Do not publish the video captured from the second screen.
*/
Optional publishSecondaryScreenTrack;
- /**
- * Whether to publish the captured video from the third screen:
- * - true: Publish the captured video from the third screen.
- * - false: (Default) Do not publish the captured video from the third screen.
+ /**
+ * Whether to publish the video captured from the third screen:
+ * - `true`: Publish the video captured from the third screen.
+ * - `false`: Do not publish the video captured from the third screen.
+ * @note This is for Windows and macOS only.
*/
Optional publishThirdScreenTrack;
/**
- * Whether to publish the captured video from the fourth screen:
- * - true: Publish the captured video from the fourth screen.
- * - false: (Default) Do not publish the captured video from the fourth screen.
+ * Whether to publish the video captured from the fourth screen:
+ * - `true`: Publish the video captured from the fourth screen.
+ * - `false`: Do not publish the video captured from the fourth screen.
+ * @note This is for Windows and macOS only.
*/
Optional publishFourthScreenTrack;
#endif
/**
- * Whether to publish the captured audio from a custom source:
- * - true: Publish the captured audio from a custom source.
- * - false: (Default) Do not publish the captured audio from the custom source.
+ * Whether to publish the audio captured from a custom source:
+ * - `true`: Publish the audio captured from the custom source.
+ * - `false`: Do not publish the captured audio from a custom source.
*/
Optional publishCustomAudioTrack;
/**
- * The custom audio track id. The default value is 0.
+ * The ID of the custom audio track to be published. The default value is 0. You can obtain the
+ * custom audio track ID through the `createCustomAudioTrack` method.
*/
Optional publishCustomAudioTrackId;
/**
- * Whether to publish the captured video from a custom source:
- * - `true`: Publish the captured video from a custom source.
- * - `false`: (Default) Do not publish the captured video from the custom source.
+ * Whether to publish the video captured from a custom source:
+ * - `true`: Publish the video captured from the custom source.
+ * - `false`: Do not publish the captured video from a custom source.
*/
Optional publishCustomVideoTrack;
/**
* Whether to publish the encoded video:
* - `true`: Publish the encoded video.
- * - `false`: (Default) Do not publish the encoded video.
+ * - `false`: Do not publish the encoded video.
*/
Optional publishEncodedVideoTrack;
/**
- * Whether to publish the audio from the media player:
- * - `true`: Publish the audio from the media player.
- * - `false`: (Default) Do not publish the audio from the media player.
- */
+ * Whether to publish the audio from the media player:
+ * - `true`: Publish the audio from the media player.
+ * - `false`: Do not publish the audio from the media player.
+ */
Optional publishMediaPlayerAudioTrack;
/**
- * Whether to publish the video from the media player:
- * - `true`: Publish the video from the media player.
- * - `false`: (Default) Do not publish the video from the media player.
- */
+ * Whether to publish the video from the media player:
+ * - `true`: Publish the video from the media player.
+ * - `false`: Do not publish the video from the media player.
+ */
Optional publishMediaPlayerVideoTrack;
/**
- * Whether to publish the local transcoded video track.
- * - `true`: Publish the video track of local transcoded video track.
- * - `false`: (Default) Do not publish the local transcoded video track.
- */
+ * Whether to publish the local transcoded video:
+ * - `true`: Publish the local transcoded video.
+ * - `false`: Do not publish the local transcoded video.
+ * @note As of v4.2.0, the parameter name is corrected from `publishTrancodedVideoTrack` to
+ * `publishTranscodedVideoTrack`.
+ */
Optional publishTranscodedVideoTrack;
- /**
- * Whether to publish the local mixed track.
- * - `true`: Publish the audio track of local mixed track.
- * - `false`: (Default) Do not publish the local mixed track.
- */
+ /**
+ * Whether to publish the mixed audio track:
+ * - `true`: Publish the mixed audio track.
+ * - `false`: Do not publish the mixed audio track.
+ */
Optional publishMixedAudioTrack;
/**
- * Whether to publish the local lip sync video track.
- * - `true`: Publish the video track of local lip sync video track.
- * - `false`: (Default) Do not publish the local lip sync video track.
+ * Whether to publish the video track processed by the speech driven extension:
+ * - `true`: Publish the video track processed by the speech driven extension.
+ * - `false`: (Default) Do not publish the video track processed by the speech driven extension.
*/
Optional publishLipSyncTrack;
/**
* Whether to automatically subscribe to all remote audio streams when the user joins a channel:
- * - `true`: (Default) Subscribe to all remote audio streams.
- * - `false`: Do not subscribe to any remote audio stream.
+ * - `true`: Subscribe to all remote audio streams.
+ * - `false`: Do not automatically subscribe to any remote audio streams.
*/
Optional autoSubscribeAudio;
/**
- * Whether to subscribe to all remote video streams when the user joins the channel:
- * - `true`: (Default) Subscribe to all remote video streams.
- * - `false`: Do not subscribe to any remote video stream.
+ * Whether to automatically subscribe to all remote video streams when the user joins the channel:
+ * - `true`: Subscribe to all remote video streams.
+ * - `false`: Do not automatically subscribe to any remote video streams.
*/
Optional autoSubscribeVideo;
/**
- * Whether to enable audio capturing or playback.
- * - `true`: (Default) Enable audio capturing and playback.
+ * Whether to enable audio capturing or playback:
+ * - `true`: Enable audio capturing or playback.
* - `false`: Do not enable audio capturing or playback.
+ * @note If you need to publish the audio streams captured by your microphone, ensure this parameter
+ * is set as `true`.
*/
Optional enableAudioRecordingOrPlayout;
/**
- * The ID of the media player to be published. The default value is 0.
- */
+ * The ID of the media player to be published. The default value is 0.
+ */
Optional publishMediaPlayerId;
/**
- * The client role type. See \ref CLIENT_ROLE_TYPE.
- * Default is CLIENT_ROLE_AUDIENCE.
+ * The user role. See `CLIENT_ROLE_TYPE`.
+ * @note If you set the user role as an audience member, you cannot publish audio and video streams
+ * in the channel. If you want to publish media streams in a channel during live streaming, ensure
+ * you set the user role as broadcaster.
*/
Optional clientRoleType;
/**
- * The audience latency level type. See #AUDIENCE_LATENCY_LEVEL_TYPE.
+ * The latency level of an audience member in interactive live streaming. See
+ * `AUDIENCE_LATENCY_LEVEL_TYPE`.
*/
Optional audienceLatencyLevel;
/**
- * The default video stream type. See \ref VIDEO_STREAM_TYPE.
- * Default is VIDEO_STREAM_HIGH.
+ * The default video-stream type. See `VIDEO_STREAM_TYPE`.
*/
Optional defaultVideoStreamType;
/**
- * The channel profile. See \ref CHANNEL_PROFILE_TYPE.
- * Default is CHANNEL_PROFILE_LIVE_BROADCASTING.
+ * The channel profile. See `CHANNEL_PROFILE_TYPE`.
*/
Optional channelProfile;
/**
- * The delay in ms for sending audio frames. This is used for explicit control of A/V sync.
- * To switch off the delay, set the value to zero.
+ * Delay (in milliseconds) for sending audio frames. You can use this parameter to set the delay of
+ * the audio frames that need to be sent, to ensure audio and video synchronization.
+ * To switch off the delay, set the value to 0.
*/
Optional audioDelayMs;
/**
@@ -1246,12 +1442,14 @@ struct ChannelMediaOptions {
*/
Optional mediaPlayerAudioDelayMs;
/**
- * (Optional) The token generated on your server for authentication.
+ * (Optional) The token generated on your server for authentication. See .
* @note
- * - This parameter takes effect only when calling `updateChannelMediaOptions` or `updateChannelMediaOptionsEx`.
- * - Ensure that the App ID, channel name, and user name used for creating the token are the same ones as those
- * used by the initialize method for initializing the RTC engine, and those used by the `joinChannel [2/2]`
- * and `joinChannelEx` methods for joining the channel.
+ * - This parameter takes effect only when calling `updateChannelMediaOptions` or
+ * `updateChannelMediaOptionsEx`.
+ * - Ensure that the App ID, channel name, and user name used for creating the token are the same as
+ * those used by the `initialize` method for initializing the RTC engine, and those used by the
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)` and `joinChannelEx` methods for joining the channel.
*/
Optional token;
/**
@@ -1263,28 +1461,36 @@ struct ChannelMediaOptions {
*/
Optional enableBuiltInMediaEncryption;
/**
- * Whether to publish the sound of the rhythm player to remote users:
- * - `true`: (Default) Publish the sound of the rhythm player.
- * - `false`: Do not publish the sound of the rhythm player.
+ * Whether to publish the sound of a metronome to remote users:
+ * - `true`: Publish processed audio frames. Both the local user and remote users can hear the
+ * metronome.
+ * - `false`: Do not publish the sound of the metronome. Only the local user can hear the metronome.
*/
Optional publishRhythmPlayerTrack;
/**
- * Whether the user is an interactive audience member in the channel.
- * - `true`: Enable low lentancy and smooth video when joining as an audience.
- * - `false`: (Default) Use default settings for audience role.
- * @note This mode is only used for audience. In PK mode, client might join one channel as broadcaster, and join
- * another channel as interactive audience to achieve low lentancy and smooth video from remote user.
+ * Whether to enable interactive mode:
+ * - `true`: Enable interactive mode. Once this mode is enabled and the user role is set as
+ * audience, the user can receive remote video streams with low latency.
+ * - `false`: Do not enable interactive mode. If this mode is disabled, the user receives the remote
+ * video streams in default settings.
+ * @note
+ * - This parameter only applies to co-streaming scenarios. The cohosts need to call the
+ * `joinChannelEx` method to join the other host's channel as an audience member, and set
+ * `isInteractiveAudience` to `true`.
+ * - This parameter takes effect only when the user role is `CLIENT_ROLE_AUDIENCE`.
*/
Optional isInteractiveAudience;
/**
- * The custom video track id which will used to publish or preview.
- * You can get the VideoTrackId after calling createCustomVideoTrack() of IRtcEngine.
+ * The video track ID returned by calling the `createCustomVideoTrack` method. The default value is
+ * 0.
*/
Optional customVideoTrackId;
/**
- * Whether local audio stream can be filtered.
- * - `true`: (Default) Can be filtered when audio level is low.
- * - `false`: Do not Filter this audio stream.
+ * Whether the audio stream being published is filtered according to the volume algorithm:
+ * - `true`: The audio stream is filtered. If the audio stream filter is not enabled, this setting
+ * does not take effect.
+ * - `false`: The audio stream is not filtered.
+ * @note If you need to enable this function, contact `support@agora.io`.
*/
Optional isAudioFilterable;
@@ -1295,41 +1501,33 @@ struct ChannelMediaOptions {
Optional parameters;
/**
- * Whether to enable multipath transmission.
- * - `true`: Enable multipath transmission.
- * - `false`: Disable multipath transmission.
- *
+ * Whether to enable multiple transmission paths:
+ * - `true`: Enable multiple transmission paths.
+ * - `false`: Disable multiple transmission paths.
+ * @note Permissions and system requirements: Android: Android 7.0 or higher (API level 24 or
+ * higher), and the ACCESS_NETWORK_STATE and CHANGE_NETWORK_STATE permissions are required. iOS: iOS
+ * 12.0 or later. macOS: 10.14 or later. Windows: Windows Vista or higher.
* @since 4.6.0
*/
Optional enableMultipath;
/**
- * The mode for uplink multipath transmission.
- * This defines how the uplink multipath is managed.
- *
- * @note Ensure you set `enableMultipath` to `true` when using this parameter.
- *
+ * Uplink transmission mode. See `MultipathMode`.
+ * @note When using this parameter, make sure that `enableMultipath` is set to `true`.
* @since 4.6.0
*/
Optional uplinkMultipathMode;
/**
- * The mode for downlink multipath transmission.
- * This defines how the downlink multipath is managed.
- *
- * @note Ensure you set `enableMultipath` to `true` when using this parameter.
- *
+ * Downlink transmission mode. See `MultipathMode`.
+ * @note When using this parameter, make sure that `enableMultipath` is set to `true`.
* @since 4.6.0
*/
Optional downlinkMultipathMode;
/**
- * The preferred type of multipath transmission.
- * This allows the user to specify a preferred multipath type.
- *
- * @note Ensure you set `enableMultipath` to `true` when using this parameter.
- * This parameter is only effective when you set `MultipathMode` to `Dynamic`.
- *
+ * Preferred type of transmission path. See `MultipathType`.
+ * @note When using this parameter, make sure that `enableMultipath` is set to `true`.
* @since 4.6.0
*/
Optional preferMultipathType;
@@ -1345,9 +1543,11 @@ struct ChannelMediaOptions {
SET_FROM(publishThirdCameraTrack);
SET_FROM(publishFourthCameraTrack);
SET_FROM(publishMicrophoneTrack);
+#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(TARGET_OS_MAC) || defined(__OHOS__)
+ SET_FROM(publishScreenCaptureAudio);
+#endif
#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(__OHOS__)
SET_FROM(publishScreenCaptureVideo);
- SET_FROM(publishScreenCaptureAudio);
#else
SET_FROM(publishScreenTrack);
SET_FROM(publishSecondaryScreenTrack);
@@ -1398,9 +1598,11 @@ struct ChannelMediaOptions {
ADD_COMPARE(publishThirdCameraTrack);
ADD_COMPARE(publishFourthCameraTrack);
ADD_COMPARE(publishMicrophoneTrack);
+#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(TARGET_OS_MAC) || defined(__OHOS__)
+ ADD_COMPARE(publishScreenCaptureAudio);
+#endif
#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(__OHOS__)
ADD_COMPARE(publishScreenCaptureVideo);
- ADD_COMPARE(publishScreenCaptureAudio);
#else
ADD_COMPARE(publishScreenTrack);
ADD_COMPARE(publishSecondaryScreenTrack);
@@ -1454,9 +1656,11 @@ struct ChannelMediaOptions {
REPLACE_BY(publishThirdCameraTrack);
REPLACE_BY(publishFourthCameraTrack);
REPLACE_BY(publishMicrophoneTrack);
+#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(TARGET_OS_MAC) || defined(__OHOS__)
+ REPLACE_BY(publishScreenCaptureAudio);
+#endif
#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(__OHOS__)
REPLACE_BY(publishScreenCaptureVideo);
- REPLACE_BY(publishScreenCaptureAudio);
#else
REPLACE_BY(publishScreenTrack);
REPLACE_BY(publishSecondaryScreenTrack);
@@ -1499,20 +1703,31 @@ struct ChannelMediaOptions {
}
};
+/**
+ * @brief The cloud proxy type.
+ */
enum PROXY_TYPE {
- /** 0: Do not use the cloud proxy.
+ /**
+ * 0: Reserved for future use.
*/
NONE_PROXY_TYPE = 0,
- /** 1: The cloud proxy for the UDP protocol.
+ /**
+ * 1: The cloud proxy for the UDP protocol, that is, the Force UDP cloud proxy mode. In this mode,
+ * the SDK always transmits data over UDP.
*/
UDP_PROXY_TYPE = 1,
- /** 2: The cloud proxy for the TCP (encrypted) protocol.
+ /**
+ * 2: The cloud proxy for the TCP (encryption) protocol, that is, the Force TCP cloud proxy mode. In
+ * this mode, the SDK always transmits data over TCP/TLS 443.
*/
TCP_PROXY_TYPE = 2,
- /** 3: The local proxy.
+ /**
+ * 3: Reserved for future use.
*/
LOCAL_PROXY_TYPE = 3,
- /** 4: auto fallback to tcp cloud proxy
+ /**
+ * 4: Automatic mode. In this mode, the SDK attempts a direct connection to SD-RTN™ and
+ * automatically switches to TCP/TLS 443 if the attempt fails.
*/
TCP_PROXY_AUTO_FALLBACK_TYPE = 4,
/** 5: The http proxy.
@@ -1523,13 +1738,22 @@ enum PROXY_TYPE {
HTTPS_PROXY_TYPE = 6,
};
+/**
+ * @brief The type of the advanced feature.
+ */
enum FeatureType {
+ /**
+ * 1: Virtual background.
+ */
VIDEO_VIRTUAL_BACKGROUND = 1,
+ /**
+ * 2: Image enhancement.
+ */
VIDEO_BEAUTY_EFFECT = 2,
};
/**
- * The options for leaving a channel.
+ * @brief The options for leaving a channel.
*/
struct LeaveChannelOptions {
/**
@@ -1571,13 +1795,24 @@ class IRtcEngineEventHandler {
virtual const char* eventHandlerType() const { return "event_handler"; }
/**
- * Occurs when a user joins a channel.
+ * @brief Occurs when a user joins a channel.
*
+ * @details
* This callback notifies the application that a user joins a specified channel.
+ * Call timing: The SDK triggers this callback when you call `joinChannel(const char* token, const
+ * char* channelId, const char* info, uid_t uid)`, `joinChannel(const char* token, const char*
+ * channelId, uid_t uid, const ChannelMediaOptions& options)`
+ * , `joinChannelWithUserAccount(const char* token, const char* channelId, const char*
+ * userAccount)`, `joinChannelWithUserAccount(const char* token, const char* channelId, const char*
+ * userAccount, const ChannelMediaOptions& options)` , `joinChannelEx`
+ * or `joinChannelWithUserAccountEx` to join a channel.
*
* @param channel The channel name.
* @param uid The ID of the user who joins the channel.
- * @param elapsed The time elapsed (ms) from the local user calling joinChannel until the SDK triggers this callback.
+ * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the
+ * SDK triggers this callback.
+ *
*/
virtual void onJoinChannelSuccess(const char* channel, uid_t uid, int elapsed) {
(void)channel;
@@ -1586,14 +1821,18 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when a user rejoins the channel.
+ * @brief Occurs when a user rejoins the channel.
*
- * When a user loses connection with the server because of network problems, the SDK automatically tries to reconnect
- * and triggers this callback upon reconnection.
+ * @details
+ * Call timing: When a user loses connection with the server because of network problems, the SDK
+ * automatically tries to reconnect and triggers this callback upon reconnection.
*
* @param channel The channel name.
* @param uid The ID of the user who rejoins the channel.
- * @param elapsed Time elapsed (ms) from the local user calling the joinChannel method until this callback is triggered.
+ * @param elapsed Time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the SDK
+ * triggers this callback.
+ *
*/
virtual void onRejoinChannelSuccess(const char* channel, uid_t uid, int elapsed) {
(void)channel;
@@ -1601,12 +1840,24 @@ class IRtcEngineEventHandler {
(void)elapsed;
}
- /** Occurs when join success after calling \ref IRtcEngine::setLocalAccessPoint "setLocalAccessPoint" or \ref IRtcEngine::setCloudProxy "setCloudProxy"
- @param channel Channel name.
- @param uid User ID of the user joining the channel.
- @param proxyType type of proxy agora sdk connected, proxyType will be NONE_PROXY_TYPE if not connected to proxy(fallback).
- @param localProxyIp local proxy ip. if not join local proxy, it will be "".
- @param elapsed Time elapsed (ms) from the user calling the \ref IRtcEngine::joinChannel "joinChannel" method until the SDK triggers this callback.
+ /**
+ * @brief Reports the proxy connection state.
+ *
+ * @details
+ * You can use this callback to listen for the state of the SDK connecting to a proxy. For example,
+ * when a user calls `setCloudProxy` and joins a channel successfully, the SDK triggers this
+ * callback to report the user ID, the proxy type connected, and the time elapsed from the user
+ * calling `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` until this callback is triggered.
+ *
+ * @param channel The channel name.
+ * @param uid The user ID.
+ * @param proxyType The proxy type connected. See `PROXY_TYPE`.
+ * @param localProxyIp Reserved for future use.
+ * @param elapsed The time elapsed (ms) from the user calling `joinChannel(const char* token, const
+ * char* channelId, uid_t uid, const ChannelMediaOptions& options)` until this
+ * callback is triggered.
+ *
*/
virtual void onProxyConnected(const char* channel, uid_t uid, PROXY_TYPE proxyType, const char* localProxyIp, int elapsed) {
(void)channel;
@@ -1616,31 +1867,41 @@ class IRtcEngineEventHandler {
(void)elapsed;
}
- /** An error occurs during the SDK runtime.
-
- @param err The error code: #ERROR_CODE_TYPE.
- @param msg The detailed error message.
- */
+ /**
+ * @brief Reports an error during SDK runtime.
+ *
+ * @details
+ * This callback indicates that an error (concerning network or media) occurs during SDK runtime. In
+ * most cases, the SDK cannot fix the issue and resume running. The SDK requires the app to take
+ * action or informs the user about the issue.
+ *
+ * @param err Error code. See `ERROR_CODE_TYPE`.
+ * @param msg The error message.
+ *
+ */
virtual void onError(int err, const char* msg) {
(void)err;
(void)msg;
}
- /** Reports the statistics of the audio stream from each remote
- user/broadcaster.
-
- @deprecated This callback is deprecated. Use onRemoteAudioStats instead.
-
- The SDK triggers this callback once every two seconds to report the audio
- quality of each remote user/host sending an audio stream. If a channel has
- multiple remote users/hosts sending audio streams, the SDK triggers this
- callback as many times.
-
- @param uid The user ID of the remote user sending the audio stream.
- @param quality The audio quality of the user: #QUALITY_TYPE
- @param delay The network delay (ms) from the sender to the receiver, including the delay caused by audio sampling pre-processing, network transmission, and network jitter buffering.
- @param lost The audio packet loss rate (%) from the sender to the receiver.
- */
+ /**
+ * @brief Reports the statistics of the audio stream sent by each remote user.
+ *
+ * @deprecated This callback is deprecated. Use onRemoteAudioStats instead.
+ *
+ * @details
+ * The SDK triggers this callback once every two seconds to report the audio quality of each remote
+ * user who is sending an audio stream. If a channel has multiple users sending audio streams, the
+ * SDK triggers this callback as many times.
+ *
+ * @param uid The user ID of the remote user sending the audio stream.
+ * @param quality Audio quality of the user. See `QUALITY_TYPE`.
+ * @param delay The network delay (ms) from the sender to the receiver, including the delay caused
+ * by audio sampling pre-processing, network transmission, and network jitter buffering.
+ * @param lost The packet loss rate (%) of the audio packet sent from the remote user to the
+ * receiver.
+ *
+ */
virtual void onAudioQuality(uid_t uid, int quality, unsigned short delay, unsigned short lost) __deprecated {
(void)uid;
(void)quality;
@@ -1648,40 +1909,55 @@ class IRtcEngineEventHandler {
(void)lost;
}
- /** Reports the result of the last-mile network probe result.
+ /**
+ * @brief Reports the last mile network probe result.
+ *
+ * @details
+ * The SDK triggers this callback within 30 seconds after the app calls `startLastmileProbeTest`.
+ *
+ * @param result The uplink and downlink last-mile network probe test result. See
+ * `LastmileProbeResult`.
*
- * The SDK triggers this callback within 30 seconds after the app calls the `startLastmileProbeTest` method.
- * @param result The uplink and downlink last-mile network probe test result: LastmileProbeResult.
*/
virtual void onLastmileProbeResult(const LastmileProbeResult& result) {
(void)result;
}
/**
- * Reports the volume information of users.
+ * @brief Reports the volume information of users.
*
- * By default, this callback is disabled. You can enable it by calling `enableAudioVolumeIndication`. Once this
- * callback is enabled and users send streams in the channel, the SDK triggers the `onAudioVolumeIndication`
- * callback at the time interval set in `enableAudioVolumeIndication`. The SDK triggers two independent
- * `onAudioVolumeIndication` callbacks simultaneously, which separately report the volume information of the
- * local user who sends a stream and the remote users (up to three) whose instantaneous volume is the highest.
+ * @details
+ * By default, this callback is disabled. You can enable it by calling
+ * `enableAudioVolumeIndication`. Once this callback is enabled and users send streams in the
+ * channel, the SDK triggers the `onAudioVolumeIndication` callback according to the time interval
+ * set in `enableAudioVolumeIndication`. The SDK triggers two independent `onAudioVolumeIndication`
+ * callbacks simultaneously, which separately report the volume information of the local user who
+ * sends a stream and the remote users (up to three) whose instantaneous volume is the highest.
*
- * @note After you enable this callback, calling muteLocalAudioStream affects the SDK's behavior as follows:
- * - If the local user stops publishing the audio stream, the SDK stops triggering the local user's callback.
- * - 20 seconds after a remote user whose volume is one of the three highest stops publishing the audio stream,
- * the callback excludes this user's information; 20 seconds after all remote users stop publishing audio streams,
- * the SDK stops triggering the callback for remote users.
+ * @note
+ * Once this callback is enabled, if the local user calls the `muteLocalAudioStream` method to mute,
+ * the SDK continues to report the volume indication of the local user.
+ * If a remote user whose volume is one of the three highest in the channel stops publishing the
+ * audio stream for 20 seconds, the callback excludes this user's information; if all remote users
+ * stop publishing audio streams for 20 seconds, the SDK stops triggering the callback for remote
+ * users.
+ *
+ * @param speakers The volume information of the users. See `AudioVolumeInfo`. An empty `speakers`
+ * array in the callback indicates that no remote user is in the channel or is sending a stream.
+ * @param speakerNumber The total number of users.
+ * - In the callback for the local user, if the local user is sending streams, the value of
+ * `speakerNumber` is 1.
+ * - In the callback for remote users, the value range of `speakerNumber` is [0,3]. If the number of
+ * remote users who send streams is greater than or equal to three, the value of `speakerNumber` is
+ * 3.
+ * @param totalVolume The volume of the speaker. The value range is [0,255].
+ * - In the callback for the local user, `totalVolume` is the volume of the local user who sends a
+ * stream.
+ * - In the callback for remote users, `totalVolume` is the sum of the volume of all remote users
+ * (up to three) whose instantaneous volume is the highest. If the user calls
+ * `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)`
+ * , then `totalVolume` is the volume after audio mixing.
*
- * @param speakers The volume information of the users, see AudioVolumeInfo. An empty `speakers` array in the
- * callback indicates that no remote user is in the channel or sending a stream at the moment.
- * @param speakerNumber The total number of speakers.
- * - In the local user's callback, when the local user sends a stream, `speakerNumber` is 1.
- * - In the callback for remote users, the value range of speakerNumber is [0,3]. If the number of remote users who
- * send streams is greater than or equal to three, the value of `speakerNumber` is 3.
- * @param totalVolume The volume of the speaker. The value ranges between 0 (lowest volume) and 255 (highest volume).
- * - In the local user's callback, `totalVolume` is the volume of the local user who sends a stream.
- * - In the remote users' callback, `totalVolume` is the sum of all remote users (up to three) whose instantaneous
- * volume is the highest. If the user calls `startAudioMixing`, `totalVolume` is the volume after audio mixing.
*/
virtual void onAudioVolumeIndication(const AudioVolumeInfo* speakers, unsigned int speakerNumber,
int totalVolume) {
@@ -1691,36 +1967,46 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when a user leaves a channel.
+ * @brief Occurs when a user leaves a channel.
*
- * This callback notifies the app that the user leaves the channel by calling `leaveChannel`. From this callback,
- * the app can get information such as the call duration and quality statistics.
+ * @details
+ * You can obtain information such as the total duration of a call, and the data traffic that the
+ * SDK transmits and receives.
+ * Call timing: The SDK triggers this callback after you call `leaveChannel()`, `leaveChannel(const
+ * LeaveChannelOptions& options)`
+ * , `leaveChannelEx(const RtcConnection& connection)`, or `leaveChannelEx(const RtcConnection&
+ * connection, const LeaveChannelOptions& options)` to leave a channel.
+ *
+ * @param stats Call statistics. See `RtcStats`.
*
- * @param stats The statistics on the call: RtcStats.
*/
virtual void onLeaveChannel(const RtcStats& stats) { (void)stats; }
/**
- * Reports the statistics of the current call.
+ * @brief Reports the statistics about the current call.
+ *
+ * @details
+ * Call timing: The SDK triggers this callback once every two seconds after the user joins the
+ * channel.
*
- * The SDK triggers this callback once every two seconds after the user joins the channel.
+ * @param stats Statistics of the RTC engine. See `RtcStats`.
*
- * @param stats The statistics of the current call: RtcStats.
*/
virtual void onRtcStats(const RtcStats& stats) { (void)stats; }
- /** Occurs when the audio device state changes.
-
- This callback notifies the application that the system's audio device state
- is changed. For example, a headset is unplugged from the device.
-
- @param deviceId The device ID.
- @param deviceType The device type: #MEDIA_DEVICE_TYPE.
- @param deviceState The device state:
- - On macOS:
- - 0: The device is ready for use.
- - 8: The device is not connected.
- - On Windows: #MEDIA_DEVICE_STATE_TYPE.
+ /**
+ * @brief Occurs when the audio device state changes.
+ *
+ * @details
+ * This callback notifies the application that the system's audio device state is changed. For
+ * example, a headset is unplugged from the device.
+ *
+ * @note This method is for Windows and macOS only.
+ *
+ * @param deviceId The device ID.
+ * @param deviceType The device type. See `MEDIA_DEVICE_TYPE`.
+ * @param deviceState The device state. See `MEDIA_DEVICE_STATE_TYPE`.
+ *
*/
virtual void onAudioDeviceStateChanged(const char* deviceId, int deviceType, int deviceState) {
(void)deviceId;
@@ -1729,38 +2015,61 @@ class IRtcEngineEventHandler {
}
/**
- * @brief Reports current AudioMixing progress.
+ * @brief Reports the playback progress of a music file.
+ *
+ * @details
+ * After you call the `startAudioMixing(const char* filePath, bool loopback, int cycle, int
+ * startPos)` method to play a music file, the SDK triggers this
+ * callback every two seconds to report the playback progress.
+ *
+ * @param position The playback progress (ms).
*
- * The callback occurs once every one second during the playback and reports the current playback progress.
- * @param position Current AudioMixing progress (millisecond).
+ * @note This is a notification-only callback; it has no return value.
+ * The reported `position` is expressed in milliseconds.
+ *
*/
virtual void onAudioMixingPositionChanged(int64_t position) {}
- /** Occurs when the audio mixing file playback finishes.
- @deprecated This method is deprecated, use onAudioMixingStateChanged instead.
-
- After you call startAudioMixing to play a local music file, this callback occurs when the playback finishes.
- If the startAudioMixing method call fails, the SDK returns the error code 701.
+ /**
+ * @brief Occurs when the playback of the local music file finishes.
+ *
+ * @deprecated This method is deprecated, use onAudioMixingStateChanged instead.
+ *
+ * @details
+ * After you call `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)`
+ * to play a local music file, this callback occurs when the
+ * playback finishes. If the call of `startAudioMixing(const char* filePath, bool loopback, int
+ * cycle, int startPos)` fails, the error code
+ * `WARN_AUDIO_MIXING_OPEN_ERROR` is returned.
+ *
*/
virtual void onAudioMixingFinished() __deprecated {}
/**
- * Occurs when the playback of the local audio effect file finishes.
+ * @brief Occurs when the playback of the local audio effect file finishes.
*
+ * @details
* This callback occurs when the local audio effect file finishes playing.
*
- * @param soundId The audio effect ID. The ID of each audio effect file is unique.
+ * @param soundId The ID of the audio effect. The unique ID of each audio effect file.
+ *
*/
virtual void onAudioEffectFinished(int soundId) {}
- /** Occurs when the video device state changes.
-
- This callback notifies the application that the system's video device state
- is changed.
-
- @param deviceId Pointer to the device ID.
- @param deviceType Device type: #MEDIA_DEVICE_TYPE.
- @param deviceState Device state: #MEDIA_DEVICE_STATE_TYPE.
+ /**
+ * @brief Occurs when the video device state changes.
+ *
+ * @details
+ * This callback reports the change of system video devices, such as being unplugged or removed. On
+ * a Windows device with an external camera for video capturing, the video disables once the
+ * external camera is unplugged.
+ *
+ * @note This callback is for Windows and macOS only.
+ *
+ * @param deviceId The device ID.
+ * @param deviceType Media device types. See `MEDIA_DEVICE_TYPE`.
+ * @param deviceState Media device states. See `MEDIA_DEVICE_STATE_TYPE`.
+ *
*/
virtual void onVideoDeviceStateChanged(const char* deviceId, int deviceType, int deviceState) {
(void)deviceId;
@@ -1769,26 +2078,34 @@ class IRtcEngineEventHandler {
}
/**
- * Reports the last mile network quality of each user in the channel.
+ * @brief Reports the last mile network quality of each user in the channel.
*
- * This callback reports the last mile network conditions of each user in the channel. Last mile refers to the
- * connection between the local device and Agora's edge server.
+ * @details
+ * This callback reports the last mile network conditions of each user in the channel. Last mile
+ * refers to the connection between the local device and Agora's edge server.
+ * The SDK triggers this callback once every two seconds. If a channel includes multiple users, the
+ * SDK triggers this callback as many times.
+ * This callback provides feedback on network quality through sending and receiving broadcast
+ * packets within the channel. Excessive broadcast packets can lead to broadcast storms. To prevent
+ * broadcast storms from causing a large amount of data transmission within the channel, this
+ * callback supports feedback on the network quality of up to 4 remote hosts simultaneously by
+ * default.
*
- * The SDK triggers this callback once every two seconds. If a channel includes multiple users, the SDK triggers
- * this callback as many times.
+ * @note `txQuality` is `UNKNOWN` when the user is not sending a stream; `rxQuality` is `UNKNOWN`
+ * when the user is not receiving a stream.
*
- * @note `txQuality` is UNKNOWN when the user is not sending a stream; `rxQuality` is UNKNOWN when the user is not
- * receiving a stream.
+ * @param uid The user ID. The network quality of the user with this user ID is reported. If the uid
+ * is 0, the local network quality is reported.
+ * @param txQuality Uplink network quality rating of the user in terms of the transmission bit rate,
+ * packet loss rate, average RTT (Round-Trip Time) and jitter of the uplink network. This parameter
+ * is a quality rating helping you understand how well the current uplink network conditions can
+ * support the selected video encoder configuration. For example, a 1000 Kbps uplink network may be
+ * adequate for video frames with a resolution of 640 × 480 and a frame rate of 15 fps in the
+ * LIVE_BROADCASTING profile, but might be inadequate for resolutions higher than 1280 × 720. See
+ * `QUALITY_TYPE`.
+ * @param rxQuality Downlink network quality rating of the user in terms of packet loss rate,
+ * average RTT, and jitter of the downlink network. See `QUALITY_TYPE`.
*
- * @param uid The user ID. The network quality of the user with this user ID is reported.
- * @param txQuality Uplink network quality rating of the user in terms of the transmission bit rate, packet loss rate,
- * average RTT (Round-Trip Time) and jitter of the uplink network. This parameter is a quality rating helping you
- * understand how well the current uplink network conditions can support the selected video encoder configuration.
- * For example, a 1000 Kbps uplink network may be adequate for video frames with a resolution of 640 × 480 and a frame
- * rate of 15 fps in the LIVE_BROADCASTING profile, but may be inadequate for resolutions higher than 1280 × 720.
- * See #QUALITY_TYPE.
- * @param rxQuality Downlink network quality rating of the user in terms of packet loss rate, average RTT, and jitter
- * of the downlink network. See #QUALITY_TYPE.
*/
virtual void onNetworkQuality(uid_t uid, int txQuality, int rxQuality) {
(void)uid;
@@ -1805,42 +2122,56 @@ class IRtcEngineEventHandler {
virtual void onIntraRequestReceived() {}
/**
- * Occurs when uplink network info is updated.
+ * @brief Occurs when the uplink network information changes.
*
+ * @details
* The SDK triggers this callback when the uplink network information changes.
*
- * @note This callback only applies to scenarios where you push externally encoded
- * video data in H.264 format to the SDK.
+ * @note This callback only applies to scenarios where you push externally encoded video data in
+ * H.264 format to the SDK.
+ *
+ * @param info The uplink network information. See `UplinkNetworkInfo`.
*
- * @param info The uplink network information. See UplinkNetworkInfo.
*/
virtual void onUplinkNetworkInfoUpdated(const UplinkNetworkInfo& info) {
(void)info;
}
/**
- * Reports the last-mile network quality of the local user.
+ * @brief Reports the last-mile network quality of the local user.
*
+ * @details
* This callback reports the last-mile network conditions of the local user before the user joins
* the channel. Last mile refers to the connection between the local device and Agora's edge server.
+ * Before the user joins the channel, this callback is triggered by the SDK once
+ * `startLastmileProbeTest` is called and reports the last-mile network conditions of the local
+ * user.
*
- * When the user is not in a channel and the last-mile network test is enabled
- * (by calling `startLastmileProbeTest`), this callback function is triggered
- * to update the app on the network connection quality of the local user.
+ * @param quality The last-mile network quality. See `QUALITY_TYPE`.
*
- * @param quality The last mile network quality. See #QUALITY_TYPE.
*/
virtual void onLastmileQuality(int quality) { (void)quality; }
- /** Occurs when the first local video frame is rendered on the local video view.
+ /**
+ * @brief Occurs when the first local video frame is displayed on the local video view.
*
- * @param source The video source: #VIDEO_SOURCE_TYPE.
+ * @details
+ * The SDK triggers this callback when the first local video frame is displayed on the local video
+ * view.
+ *
+ * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`.
* @param width The width (px) of the first local video frame.
* @param height The height (px) of the first local video frame.
- * @param elapsed Time elapsed (ms) from the local user calling the `joinChannel`
- * method until the SDK triggers this callback. If you call the `startPreview` method before calling
- * the `joinChannel` method, then `elapsed` is the time elapsed from calling the
- * `startPreview` method until the SDK triggers this callback.
+ * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, const char* info, uid_t uid)` or
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)` to join the channel to when the SDK triggers this callback. If `startPreview()`
+ * / `startPreview(VIDEO_SOURCE_TYPE sourceType)` is called before joining the channel, this
+ * parameter indicates the
+ * time elapsed from calling `startPreview()` or `startPreview(VIDEO_SOURCE_TYPE sourceType)` to
+ * when this event
+ * occurred.
+ *
*/
virtual void onFirstLocalVideoFrame(VIDEO_SOURCE_TYPE source, int width, int height, int elapsed) {
(void)source;
@@ -1849,37 +2180,53 @@ class IRtcEngineEventHandler {
(void)elapsed;
}
- /** Occurs when the first local video frame is published.
+ /**
+ * @brief Occurs when the first video frame is published.
+ *
+ * @details
* The SDK triggers this callback under one of the following circumstances:
- * - The local client enables the video module and calls `joinChannel` successfully.
- * - The local client calls `muteLocalVideoStream(true)` and muteLocalVideoStream(false) in sequence.
+ * - The local client enables the video module and calls `joinChannel(const char* token, const char*
+ * channelId, const char* info, uid_t uid)` or `joinChannel(const char* token, const char*
+ * channelId, uid_t uid, const ChannelMediaOptions& options)`
+ * to join the channel successfully.
+ * - The local client calls `muteLocalVideoStream` (`true`) and `muteLocalVideoStream` (`false`) in
+ * sequence.
* - The local client calls `disableVideo` and `enableVideo` in sequence.
* - The local client calls `pushVideoFrame` to successfully push the video frame to the SDK.
- * @param source The video source type.
- * @param elapsed The time elapsed (ms) from the local user calling joinChannel` to the SDK triggers
- * this callback.
- */
+ *
+ * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ * @param elapsed Time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, const char* info, uid_t uid)` or `joinChannel(const char* token, const
+ * char* channelId, uid_t uid, const ChannelMediaOptions& options)`
+ * until this callback is triggered.
+ *
+ */
virtual void onFirstLocalVideoFramePublished(VIDEO_SOURCE_TYPE source, int elapsed) {
(void)source;
(void)elapsed;
}
- /** Occurs when the first remote video frame is received and decoded.
-
- The SDK triggers this callback under one of the following circumstances:
- - The remote user joins the channel and sends the video stream.
- - The remote user stops sending the video stream and re-sends it after 15 seconds. Reasons for such an interruption include:
- - The remote user leaves the channel.
- - The remote user drops offline.
- - The remote user calls `muteLocalVideoStream` to stop sending the video stream.
- - The remote user calls `disableVideo` to disable video.
-
- @param uid The user ID of the remote user sending the video stream.
- @param width The width (pixels) of the video stream.
- @param height The height (pixels) of the video stream.
- @param elapsed The time elapsed (ms) from the local user calling `joinChannel`
- until the SDK triggers this callback.
- */
+ /**
+ * @brief Occurs when the first remote video frame is received and decoded.
+ *
+ * @details
+ * The SDK triggers this callback under one of the following circumstances:
+ * - The remote user joins the channel and sends the video stream.
+ * - The remote user stops sending the video stream and re-sends it after 15 seconds. Reasons for
+ * such an interruption include:
+ * - The remote user leaves the channel.
+ * - The remote user drops offline.
+ * - The remote user calls `disableVideo` to disable video.
+ *
+ * @param uid The user ID of the remote user sending the video stream.
+ * @param width The width (px) of the video stream.
+ * @param height The height (px) of the video stream.
+ * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, const char* info, uid_t uid)` or
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)` until the SDK triggers this callback.
+ *
+ */
virtual void onFirstRemoteVideoDecoded(uid_t uid, int width, int height, int elapsed) __deprecated {
(void)uid;
(void)width;
@@ -1888,12 +2235,16 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the local or remote video size or rotation has changed.
- * @param sourceType The video source type: #VIDEO_SOURCE_TYPE.
- * @param uid The user ID. 0 indicates the local user.
- * @param width The new width (pixels) of the video.
- * @param height The new height (pixels) of the video.
- * @param rotation The rotation information of the video.
+ * @brief Occurs when the video size or rotation of a specified user changes.
+ *
+ * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ * @param uid The ID of the user whose video size or rotation changes. (The `uid` for the local user
+ * is 0. The video is the local user's video preview).
+ * @param width The width (pixels) of the video stream.
+ * @param height The height (pixels) of the video stream.
+ * @param rotation The rotation information. The value range is [0,360). Note: On the iOS platform,
+ 
+ * the parameter value is always 0.
+ *
*/
virtual void onVideoSizeChanged(VIDEO_SOURCE_TYPE sourceType, uid_t uid, int width, int height, int rotation) {
(void)uid;
@@ -1902,31 +2253,73 @@ class IRtcEngineEventHandler {
(void)rotation;
}
- /** Occurs when the local video stream state changes.
+ /**
+ * @brief Occurs when the local video event occurs.
*
- * When the state of the local video stream changes (including the state of the video capture and
- * encoding), the SDK triggers this callback to report the current state. This callback indicates
- * the state of the local video stream, including camera capturing and video encoding, and allows
- * you to troubleshoot issues when exceptions occur.
+ * @since v4.6.1
*
- * The SDK triggers the onLocalVideoStateChanged callback with the state code of `LOCAL_VIDEO_STREAM_STATE_FAILED`
- * and error code of `LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE` in the following situations:
- * - The app switches to the background, and the system gets the camera resource.
- * - The camera starts normally, but does not output video for four consecutive seconds.
+ * @details
+ * This callback is triggered when a video event occurs. You can use this callback to get the reason for such an event.
+ *
+ * @param source The video source type: #VIDEO_SOURCE_TYPE.
+ * @param event The local video event type: #LOCAL_VIDEO_EVENT_TYPE.
*
- * When the camera outputs the captured video frames, if the video frames are the same for 15
- * consecutive frames, the SDK triggers the `onLocalVideoStateChanged` callback with the state code
- * of `LOCAL_VIDEO_STREAM_STATE_CAPTURING` and error code of `LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE`.
- * Note that the video frame duplication detection is only available for video frames with a resolution
- * greater than 200 × 200, a frame rate greater than or equal to 10 fps, and a bitrate less than 20 Kbps.
+ */
+ virtual void onLocalVideoEvent(VIDEO_SOURCE_TYPE source, LOCAL_VIDEO_EVENT_TYPE event) {
+ (void)source;
+ (void)event;
+ }
+
+ /**
+ * @brief Occurs when the local video stream state changes.
+ *
+ * @details
+ * When the status of the local video changes, the SDK triggers this callback to report the current
+ * local video state and the reason for the state change.
+ * Applicable scenarios: You can use this callback to stay updated on the state changes of the local
+ * video stream, and take corresponding measures based on the reasons for the state changes, to
+ * better manage and debug issues related to the video stream.
+ * Call timing: - The SDK triggers this callback under the following circumstances, with the
+ * `state` as LOCAL_VIDEO_STREAM_STATE_FAILED, and the `reason` as
+ * `LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE`:
+ * - The app switches to the background, and the system revokes the camera resource.
+ * - For Android 9 and later versions, after an app is in the background for a period, the system
+ * automatically revokes camera permissions.
+ * - For Android 6 and later versions, if the camera is held by a third-party app for a certain
+ * duration and then released, the SDK triggers this callback and reports the
+ * `onLocalVideoStateChanged` (`LOCAL_VIDEO_STREAM_STATE_CAPTURING`, `LOCAL_VIDEO_STREAM_REASON_OK`)
+ * callback.
+ * - The camera starts normally, but does not output video frames for four consecutive seconds.
+ * - When the camera outputs captured video frames, if the SDK detects 15 consecutive duplicate
+ * video frames, it triggers this callback, with the `state` as `LOCAL_VIDEO_STREAM_STATE_CAPTURING`
+ * and the `reason` as `LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE`. Note:
+ * - Note that the video frame duplication detection is only available for video frames with a
+ * resolution greater than 200 × 200, a frame rate greater than or equal to 10 fps, and a bitrate
+ * less than 20 Kbps.
+ * - Normally, if there is an error in video capturing, the issue can be troubleshooted through the
+ * `reason` parameter in this callback. However, on some devices, when there is an issue with
+ * capturing (such as freezing), the Android system will not throw any error callbacks, so the SDK
+ * cannot report the reason for the change in local video status. In this case, you can determine if
+ * there is no video frame being captured by checking the following: this callback reports the
+ * `state` as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` or `LOCAL_VIDEO_STREAM_STATE_ENCODING`, and the
+ * `captureFrameRate` in the `onLocalVideoStats` callback is 0.
*
- * @note For some device models, the SDK does not trigger this callback when the state of the local
- * video changes while the local video capturing device is in use, so you have to make your own
- * timeout judgment.
+ * @note
+ * - Note that the video frame duplication detection is only available for video frames with a
+ * resolution greater than 200 × 200, a frame rate greater than or equal to 10 fps, and a bitrate
+ * less than 20 Kbps.
+ * - Normally, if there is an error in video capturing, the issue can be troubleshooted through the
+ * `reason` parameter in this callback. However, on some devices, when there is an issue with
+ * capturing (such as freezing), the Android system will not throw any error callbacks, so the SDK
+ * cannot report the reason for the change in local video status. In this case, you can determine if
+ * there is no video frame being captured by checking the following: this callback reports the
+ * `state` as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` or `LOCAL_VIDEO_STREAM_STATE_ENCODING`, and the
+ * `captureFrameRate` in the `onLocalVideoStats` callback is 0.
+ *
+ * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ * @param state The state of the local video, see `LOCAL_VIDEO_STREAM_STATE`.
+ * @param reason The reasons for changes in local video state. See `LOCAL_VIDEO_STREAM_REASON`.
*
- * @param source The video source type: #VIDEO_SOURCE_TYPE.
- * @param state The state of the local video. See #LOCAL_VIDEO_STREAM_STATE.
- * @param reason The detailed error information. See #LOCAL_VIDEO_STREAM_REASON.
*/
virtual void onLocalVideoStateChanged(VIDEO_SOURCE_TYPE source, LOCAL_VIDEO_STREAM_STATE state, LOCAL_VIDEO_STREAM_REASON reason) {
(void)source;
@@ -1935,15 +2328,18 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the remote video state changes.
+ * @brief Occurs when the remote video stream state changes.
+ *
+ * @note This callback does not work properly when the number of users (in the communication
+ * profile) or hosts (in the live streaming channel) in a channel exceeds 32.
*
- * @note This callback does not work properly when the number of users (in the voice/video call
- * channel) or hosts (in the live streaming channel) in the channel exceeds 17.
+ * @param uid The ID of the remote user whose video state changes.
+ * @param state The state of the remote video. See `REMOTE_VIDEO_STATE`.
+ * @param reason The reason for the remote video state change. See `REMOTE_VIDEO_STATE_REASON`.
+ * @param elapsed Time elapsed (ms) from the local user calling the `joinChannel(const char* token,
+ * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` method until
+ * the SDK triggers this callback.
*
- * @param uid The ID of the remote user or broadcaster who leaves the channel or drops offline.
- * @param state The remote video state: #REMOTE_VIDEO_STATE.
- * @param reason The reason of the remote video state change: #REMOTE_VIDEO_STATE_REASON.
- * @param elapsed The time elapsed (ms) from the local client calling `joinChannel` until this callback is triggered.
*/
virtual void onRemoteVideoStateChanged(uid_t uid, REMOTE_VIDEO_STATE state, REMOTE_VIDEO_STATE_REASON reason, int elapsed) {
(void)uid;
@@ -1952,12 +2348,21 @@ class IRtcEngineEventHandler {
(void)elapsed;
}
- /** Occurs when the renderer receives the first frame of the remote video.
+ /**
+ * @brief Occurs when the renderer receives the first frame of the remote video.
+ *
+ * @note This callback is only triggered when the video frame is rendered by the SDK; it will not be
+ * triggered if the user employs custom video rendering. You need to implement this independently
+ * using methods outside the SDK.
*
* @param uid The user ID of the remote user sending the video stream.
- * @param width The width (px) of the video frame.
+ * @param width The width (px) of the video stream.
* @param height The height (px) of the video stream.
- * @param elapsed The time elapsed (ms) from the local user calling `joinChannel` until the SDK triggers this callback.
+ * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, const char* info, uid_t uid)` or
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)` until the SDK triggers this callback.
+ *
*/
virtual void onFirstRemoteVideoFrame(uid_t uid, int width, int height, int elapsed) {
(void)uid;
@@ -1967,96 +2372,117 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when a remote user or broadcaster joins the channel.
+ * @brief Occurs when a remote user (in the communication profile)/ host (in the live streaming
+ * profile) joins the channel.
*
- * - In the COMMUNICATION channel profile, this callback indicates that a remote user joins the channel.
- * The SDK also triggers this callback to report the existing users in the channel when a user joins the
+ * @details
+ * - In a communication channel, this callback indicates that a remote user joins the channel. The
+ * SDK also triggers this callback to report the existing users in the channel when a user joins the
* channel.
- * In the LIVE_BROADCASTING channel profile, this callback indicates that a host joins the channel. The
- * SDK also triggers this callback to report the existing hosts in the channel when a host joins the
- * channel. Agora recommends limiting the number of hosts to 17.
- *
- * The SDK triggers this callback under one of the following circumstances:
- * - A remote user/host joins the channel by calling the `joinChannel` method.
+ * - In a live-broadcast channel, this callback indicates that a host joins the channel. The SDK
+ * also triggers this callback to report the existing hosts in the channel when a host joins the
+ * channel. Agora recommends limiting the number of co-hosts to 32, with a maximum of 17 video
+ * hosts.
+ * Call timing: The SDK triggers this callback under one of the following circumstances:
+ * - A remote user/host joins the channel.
* - A remote user switches the user role to the host after joining the channel.
* - A remote user/host rejoins the channel after a network interruption.
*
- * @param uid The ID of the remote user or broadcaster joining the channel.
- * @param elapsed The time elapsed (ms) from the local user calling `joinChannel` or `setClientRole`
+ * @param uid The ID of the user or host who joins the channel.
+ * @param elapsed Time delay (ms) from the local user calling `joinChannel(const char* token, const
+ * char* channelId, const char* info, uid_t uid)` or `joinChannel(const char* token, const char*
+ * channelId, uid_t uid, const ChannelMediaOptions& options)`
* until this callback is triggered.
- */
+ *
+ */
virtual void onUserJoined(uid_t uid, int elapsed) {
(void)uid;
(void)elapsed;
}
/**
- * Occurs when a remote user or broadcaster goes offline.
+ * @brief Occurs when a remote user (in the communication profile)/ host (in the live streaming
+ * profile) leaves the channel.
*
- * There are two reasons for a user to go offline:
- * - Leave the channel: When the user leaves the channel, the user sends a goodbye message. When this
- * message is received, the SDK determines that the user leaves the channel.
- * - Drop offline: When no data packet of the user is received for a certain period of time, the SDK assumes
- * that the user drops offline. A poor network connection may lead to false detection, so we recommend using
- * the RTM SDK for reliable offline detection.
- * - The user switches the user role from a broadcaster to an audience.
+ * @details
+ * There are generally two reasons for users to become offline:
+ * - Leave the channel: When a user/host leaves the channel, the user/host sends a goodbye message.
+ * - Drop offline: When no data packet of the user or host is received for a certain period of time
+ * (20 seconds for the communication profile, and more for the live broadcast profile), the SDK
+ * assumes that the user/host drops offline. A poor network connection may lead to false detections.
+ * It is recommended to use the Agora RTM SDK for reliable offline detection.
+ * Call timing: This callback is triggered when a remote user (in the communication profile) or host
+ * (in the live streaming profile) leaves a channel.
+ *
+ * @param uid The ID of the user who leaves the channel or goes offline.
+ * @param reason Reasons why a remote user (in the communication profile) or host (in the live
+ * streaming profile) goes offline. See `USER_OFFLINE_REASON_TYPE`.
*
- * @param uid The ID of the remote user or broadcaster who leaves the channel or drops offline.
- * @param reason The reason why the remote user goes offline: #USER_OFFLINE_REASON_TYPE.
*/
virtual void onUserOffline(uid_t uid, USER_OFFLINE_REASON_TYPE reason) {
(void)uid;
(void)reason;
}
- /** Occurs when a remote user's audio stream playback pauses/resumes.
-
- The SDK triggers this callback when the remote user stops or resumes sending the audio stream by
- calling the `muteLocalAudioStream` method.
-
- @note This callback can be inaccurate when the number of users (in the `COMMUNICATION` profile) or hosts (in the `LIVE_BROADCASTING` profile) in the channel exceeds 17.
-
- @param uid The user ID.
- @param muted Whether the remote user's audio stream is muted/unmuted:
- - true: Muted.
- - false: Unmuted.
- */
+ /**
+ * @brief Occurs when a remote user (in the communication profile) or a host (in the live streaming
+ * profile) stops/resumes sending the audio stream.
+ *
+ * @details
+ * The SDK triggers this callback when the remote user stops or resumes sending the audio stream by
+ * calling the `muteLocalAudioStream` method.
+ *
+ * @note This callback does not work properly when the number of users (in the communication
+ * profile) or hosts (in the live streaming channel) in a channel exceeds 32.
+ *
+ * @param uid The user ID.
+ * @param muted Whether the remote user's audio stream is muted:
+ * - `true`: User's audio stream is muted.
+ * - `false`: User's audio stream is unmuted.
+ *
+ */
virtual void onUserMuteAudio(uid_t uid, bool muted) {
(void)uid;
(void)muted;
}
- /** Occurs when a remote user pauses or resumes sending the video stream.
+ /**
+ * @brief Occurs when a remote user stops or resumes publishing the video stream.
+ *
+ * @details
+ * When a remote user calls `muteLocalVideoStream` to stop or resume publishing the video stream,
+ * the SDK triggers this callback to report to the local user the state of the streams published by
+ * the remote user.
+ *
+ * @note This callback can be inaccurate when the number of users (in the communication profile) or
+ * hosts (in the live streaming profile) in a channel exceeds 32.
+ *
+ * @param uid The user ID of the remote user.
+ * @param muted Whether the remote user stops publishing the video stream:
+ * - `true`: The remote user stops publishing the video stream.
+ * - `false`: The remote user resumes publishing the video stream.
*
- * When a remote user calls `muteLocalVideoStream` to stop or resume publishing the video stream, the
- * SDK triggers this callback to report the state of the remote user's publishing stream to the local
- * user.
-
- @note This callback is invalid when the number of users or broadacasters in a
- channel exceeds 20.
-
- @param userId ID of the remote user.
- @param muted Whether the remote user stops publishing the video stream:
- - true: The remote user has paused sending the video stream.
- - false: The remote user has resumed sending the video stream.
*/
virtual void onUserMuteVideo(uid_t uid, bool muted) {
(void)uid;
(void)muted;
}
- /** Occurs when a remote user enables or disables the video module.
-
- Once the video function is disabled, the users cannot see any video.
-
- The SDK triggers this callback when a remote user enables or disables the video module by calling the
- `enableVideo` or `disableVideo` method.
-
- @param uid The ID of the remote user.
- @param enabled Whether the video of the remote user is enabled:
- - true: The remote user has enabled video.
- - false: The remote user has disabled video.
- */
+ /**
+ * @brief Occurs when a remote user enables or disables the video module.
+ *
+ * @details
+ * Once the video module is disabled, the user can only use a voice call. The user cannot send or
+ * receive any video.
+ * The SDK triggers this callback when a remote user enables or disables the video module by calling
+ * the `enableVideo` or `disableVideo` method.
+ *
+ * @param uid The user ID of the remote user.
+ * @param enabled
+ * - `true`: The video module is enabled.
+ * - `false`: The video module is disabled.
+ *
+ */
virtual void onUserEnableVideo(uid_t uid, bool enabled) {
(void)uid;
(void)enabled;
@@ -2072,89 +2498,108 @@ class IRtcEngineEventHandler {
(void)state;
}
- /** Occurs when a remote user enables or disables local video capturing.
-
- The SDK triggers this callback when the remote user resumes or stops capturing the video stream by
- calling the `enableLocalVideo` method.
-
- @param uid The ID of the remote user.
- @param enabled Whether the specified remote user enables/disables local video:
- - `true`: The remote user has enabled local video capturing.
- - `false`: The remote user has disabled local video capturing.
- */
+ /**
+ * @brief Occurs when a specific remote user enables/disables the local video capturing function.
+ *
+ * @details
+ * The SDK triggers this callback when the remote user resumes or stops capturing the video stream
+ * by calling the `enableLocalVideo` method.
+ *
+ * @param uid The user ID of the remote user.
+ * @param enabled Whether the specified remote user enables/disables local video capturing:
+ * - `true`: The video module is enabled. Other users in the channel can see the video of this
+ * remote user.
+ * - `false`: The video module is disabled. Other users in the channel can no longer receive the
+ * video stream from this remote user, while this remote user can still receive the video streams
+ * from other users.
+ *
+ */
virtual void onUserEnableLocalVideo(uid_t uid, bool enabled) __deprecated {
(void)uid;
(void)enabled;
}
- /** Reports the statistics of the audio stream from each remote user/host.
-
- The SDK triggers this callback once every two seconds for each remote user who is sending audio
- streams. If a channel includes multiple remote users, the SDK triggers this callback as many times.
-
- @param stats Statistics of the received remote audio streams. See RemoteAudioStats.
+ /**
+ * @brief Reports the transport-layer statistics of each remote audio stream.
+ *
+ * @details
+ * The SDK triggers this callback once every two seconds for each remote user who is sending audio
+ * streams. If a channel includes multiple remote users, the SDK triggers this callback as many
+ * times.
+ *
+ * @param stats The statistics of the received remote audio streams. See `RemoteAudioStats`.
+ *
*/
virtual void onRemoteAudioStats(const RemoteAudioStats& stats) {
(void)stats;
}
- /** Reports the statistics of the local audio stream.
+ /**
+ * @brief Reports the statistics of the local audio stream.
*
+ * @details
* The SDK triggers this callback once every two seconds.
*
- * @param stats The statistics of the local audio stream.
- * See LocalAudioStats.
+ * @param stats Local audio statistics. See `LocalAudioStats`.
+ *
*/
virtual void onLocalAudioStats(const LocalAudioStats& stats) {
(void)stats;
}
- /** Reports the statistics of the local video stream.
+ /**
+ * @brief Reports the statistics of the local video stream.
*
- * The SDK triggers this callback once every two seconds for each
- * user/host. If there are multiple users/hosts in the channel, the SDK
- * triggers this callback as many times.
+ * @details
+ * The SDK triggers this callback once every two seconds to report the statistics of the local video
+ * stream.
*
- * @note If you have called the `enableDualStreamMode`
- * method, this callback reports the statistics of the high-video
- * stream (high bitrate, and high-resolution video stream).
+ * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ * @param stats The statistics of the local video stream. See `LocalVideoStats`.
*
- * @param source The video source type. See #VIDEO_SOURCE_TYPE.
- * @param stats Statistics of the local video stream. See LocalVideoStats.
*/
virtual void onLocalVideoStats(VIDEO_SOURCE_TYPE source, const LocalVideoStats& stats) {
(void)source;
(void)stats;
}
- /** Reports the statistics of the video stream from each remote user/host.
+ /**
+ * @brief Reports the statistics of the video stream sent by each remote user.
*
- * The SDK triggers this callback once every two seconds for each remote user. If a channel has
- * multiple users/hosts sending video streams, the SDK triggers this callback as many times.
+ * @details
+ * Reports the statistics of the video stream from the remote users. The SDK triggers this callback
+ * once every two seconds for each remote user. If a channel has multiple users/hosts sending video
+ * streams, the SDK triggers this callback as many times.
+ *
+ * @param stats Statistics of the remote video stream. See `RemoteVideoStats`.
*
- * @param stats Statistics of the remote video stream. See
- * RemoteVideoStats.
*/
virtual void onRemoteVideoStats(const RemoteVideoStats& stats) {
(void)stats;
}
/**
- * Occurs when the camera turns on and is ready to capture the video.
+ * @brief Occurs when the camera turns on and is ready to capture the video.
+ *
* @deprecated Use `LOCAL_VIDEO_STREAM_STATE_CAPTURING(1)` in onLocalVideoStateChanged instead.
- * This callback indicates that the camera has been successfully turned on and you can start to capture video.
+ *
+ * @details
+ * This callback indicates that the camera has been successfully turned on and you can start to
+ * capture video.
+ *
*/
virtual void onCameraReady() __deprecated {}
/**
- * Occurs when the camera focus area changes.
+ * @brief Occurs when the camera focus area changes.
*
- * @note This method is for Andriod and iOS only.
+ * @note This callback is for Android and iOS only.
*
- * @param x The x coordinate of the changed camera focus area.
- * @param y The y coordinate of the changed camera focus area.
+ * @param x The x-coordinate of the changed camera focus area.
+ * @param y The y-coordinate of the changed camera focus area.
* @param width The width of the changed camera focus area.
* @param height The height of the changed camera focus area.
+ *
*/
virtual void onCameraFocusAreaChanged(int x, int y, int width, int height) {
(void)x;
@@ -2163,12 +2608,19 @@ class IRtcEngineEventHandler {
(void)height;
}
/**
- * Occurs when the camera exposure area changes.
+ * @brief Occurs when the camera exposure area changes.
+ *
+ * @details
+ * The SDK triggers this callback when the local user changes the camera exposure position by
+ * calling `setCameraExposurePosition`.
+ *
+ * @note This callback is for Android and iOS only.
*
* @param x The x coordinate of the changed camera exposure area.
* @param y The y coordinate of the changed camera exposure area.
* @param width The width of the changed camera exposure area.
* @param height The height of the changed exposure area.
+ *
*/
virtual void onCameraExposureAreaChanged(int x, int y, int width, int height) {
(void)x;
@@ -2178,34 +2630,42 @@ class IRtcEngineEventHandler {
}
#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__)
/**
- * Reports the face detection result of the local user.
+ * @brief Reports the face detection result of the local user.
*
- * Once you enable face detection by calling enableFaceDetection(true), you can get the following
- * information on the local user in real-time:
+ * @details
+ * Once you enable face detection by calling `enableFaceDetection(true)`, you can get the
+ * following information on the local user in real-time:
* - The width and height of the local video.
* - The position of the human face in the local view.
* - The distance between the human face and the screen.
- *
- * This value is based on the fitting calculation of the local video size and the position of the human face.
+ * This value is based on the fitting calculation of the local video size and the position of the
+ * human face.
*
* @note
* - This callback is for Android and iOS only.
* - When it is detected that the face in front of the camera disappears, the callback will be
- * triggered immediately. In the state of no face, the trigger frequency of the callback will be
- * reduced to save power consumption on the local device.
+ * triggered immediately. When no human face is detected, the frequency at which this callback is
+ * triggered will be decreased to reduce power consumption on the local device.
* - The SDK stops triggering this callback when a human face is in close proximity to the screen.
- * On Android, the value of `distance` reported in this callback may be slightly different from the
- * actual distance. Therefore, Agora does not recommend using it for accurate calculation.
+ * - On Android, the value of `distance` reported in this callback may be slightly different from
+ * the actual distance. Therefore, Agora does not recommend using it for accurate calculation.
*
* @param imageWidth The width (px) of the video image captured by the local camera.
* @param imageHeight The height (px) of the video image captured by the local camera.
- * @param vecRectangle A Rectangle array of length 'numFaces', which represents the position and size of the human face on the local video:
- * - x: The x-coordinate (px) of the human face in the local view. Taking the top left corner of the view as the origin, the x-coordinate represents the horizontal position of the human face relative to the origin.
- * - y: The y-coordinate (px) of the human face in the local view. Taking the top left corner of the view as the origin, the y-coordinate represents the vertical position of the human face relative to the origin.
- * - width: The width (px) of the human face in the captured view.
- * - height: The height (px) of the human face in the captured view.
- * @param vecDistance An int array of length 'numFaces', which represents distance (cm) between the human face and the screen.
- * @param numFaces The number of faces detected. If the value is 0, it means that no human face is detected.
+ * @param vecRectangle An array of `numFaces`, representing the detected face information:
+ * - `x`: The x-coordinate (px) of the human face in the local view. Taking the top left corner of
+ * the view as the origin, the x-coordinate represents the horizontal position of the human face
+ * relative to the origin.
+ * - `y`: The y-coordinate (px) of the human face in the local view. Taking the top left corner of
+ * the view as the origin, the y-coordinate represents the vertical position of the human face
+ * relative to the origin.
+ * - `width`: The width (px) of the human face in the captured view.
+ * - `height`: The height (px) of the human face in the captured view.
+ * @param vecDistance An array of `numFaces`, representing the distance (cm) between a face and the
+ * device screen.
+ * @param numFaces The number of faces detected. If the value is 0, it means that no human face is
+ * detected.
+ *
*/
virtual void onFacePositionChanged(int imageWidth, int imageHeight,
const Rectangle* vecRectangle, const int* vecDistance,
@@ -2218,33 +2678,48 @@ class IRtcEngineEventHandler {
}
#endif
/**
- * Occurs when the video stops playing.
- * @deprecated Use `LOCAL_VIDEO_STREAM_STATE_STOPPED(0)` in the onLocalVideoStateChanged callback instead.
+ * @brief Occurs when the video stops playing.
+ *
+ * @deprecated Use `LOCAL_VIDEO_STREAM_STATE_STOPPED(0)` in the onLocalVideoStateChanged callback
+ * instead.
+ *
+ * @details
+ * The application can use this callback to change the configuration of the `view` (for example,
+ * displaying other pictures in the view) after the video stops playing.
*
- * The app can use this callback to change the configuration of the view (for example, displaying
- * other pictures in the view) after the video stops playing.
*/
virtual void onVideoStopped() __deprecated {}
- /** Occurs when the playback state of the music file changes.
+ /**
+ * @brief Occurs when the playback state of the music file changes.
+ *
+ * @details
+ * This callback occurs when the playback state of the music file changes, and reports the current
+ * state and error code.
+ *
+ * @param state The playback state of the music file. See `AUDIO_MIXING_STATE_TYPE`.
+ * @param reason Error code. See `AUDIO_MIXING_REASON_TYPE`.
*
- * This callback occurs when the playback state of the music file changes, and reports the current state and error code.
-
- @param state The playback state of the music file. See #AUDIO_MIXING_STATE_TYPE.
- @param reason The reason for the change of the music file playback state. See #AUDIO_MIXING_REASON_TYPE.
*/
virtual void onAudioMixingStateChanged(AUDIO_MIXING_STATE_TYPE state, AUDIO_MIXING_REASON_TYPE reason) {
(void)state;
(void)reason;
}
- /** Occurs when the state of the rhythm player changes.
- When you call the \ref IRtcEngine::startRhythmPlayer "startRhythmPlayer"
- method and the state of rhythm player changes, the SDK triggers this
- callback.
-
- @param state The state code. See #RHYTHM_PLAYER_STATE_TYPE.
- @param reason The error code. See #RHYTHM_PLAYER_REASON.
+ /**
+ * @brief Occurs when the state of virtual metronome changes.
+ *
+ * @details
+ * When the state of the virtual metronome changes, the SDK triggers this callback to report the
+ * current state of the virtual metronome. This callback indicates the state of the local audio
+ * stream and enables you to troubleshoot issues when audio exceptions occur.
+ *
+ * @note This callback is for Android and iOS only.
+ *
+ * @param state For the current virtual metronome status, see `RHYTHM_PLAYER_STATE_TYPE`.
+ * @param reason For the error codes and error messages related to virtual metronome errors, see
+ * `RHYTHM_PLAYER_REASON`.
+ *
*/
virtual void onRhythmPlayerStateChanged(RHYTHM_PLAYER_STATE_TYPE state, RHYTHM_PLAYER_REASON reason) {
(void)state;
@@ -2252,43 +2727,62 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the SDK cannot reconnect to the server 10 seconds after its connection to the server is
- * interrupted.
+ * @brief Occurs when the SDK cannot reconnect to Agora's edge server 10 seconds after its
+ * connection to the server is interrupted.
+ *
+ * @details
+ * The SDK triggers this callback when it cannot connect to the server 10 seconds after calling the
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)` method, regardless of whether it is in the channel. If the SDK fails to
+ * rejoin the channel 20 minutes after being disconnected from Agora's edge server, the SDK stops
+ * rejoining the channel.
*
- * The SDK triggers this callback when it cannot connect to the server 10 seconds after calling
- * `joinChannel`, regardless of whether it is in the channel or not. If the SDK fails to rejoin
- * the channel 20 minutes after being disconnected from Agora's edge server, the SDK stops rejoining the channel.
*/
virtual void onConnectionLost() {}
- /** Occurs when the connection between the SDK and the server is interrupted.
+ /**
+ * @brief Occurs when the connection between the SDK and the server is interrupted.
+ *
* @deprecated Use `onConnectionStateChanged` instead.
-
- The SDK triggers this callback when it loses connection with the serer for more
- than 4 seconds after the connection is established. After triggering this
- callback, the SDK tries to reconnect to the server. If the reconnection fails
- within a certain period (10 seconds by default), the onConnectionLost()
- callback is triggered. If the SDK fails to rejoin the channel 20 minutes after
- being disconnected from Agora's edge server, the SDK stops rejoining the channel.
-
- */
+ *
+ * @details
+ * The SDK triggers this callback when it loses connection with the server for more than four
+ * seconds after the connection is established. After triggering this callback, the SDK tries to
+ * reconnect to the server. You can use this callback to implement pop-up reminders. The differences
+ * between this callback and `onConnectionLost` are as follows:
+ * - The SDK triggers the `onConnectionInterrupted` callback when it loses connection with the
+ * server for more than four seconds after it successfully joins the channel.
+ * - The SDK triggers the `onConnectionLost` callback when it loses connection with the server for
+ * more than 10 seconds, whether or not it joins the channel.
+ * If the SDK fails to rejoin the channel 20 minutes after being disconnected from Agora's edge
+ * server, the SDK stops rejoining the channel.
+ *
+ */
virtual void onConnectionInterrupted() __deprecated {}
- /** Occurs when your connection is banned by the Agora Server.
+ /**
+ * @brief Occurs when the connection is banned by the Agora server.
+ *
* @deprecated Use `onConnectionStateChanged` instead.
*/
virtual void onConnectionBanned() __deprecated {}
- /** Occurs when the local user receives the data stream from the remote user.
+ /**
+ * @brief Occurs when the local user receives the data stream from the remote user.
+ *
+ * @details
+ * The SDK triggers this callback when the local user receives the stream message that the remote
+ * user sends by calling the `sendStreamMessage` method.
+ *
+ * @note If you need a more comprehensive solution for low-latency, high-concurrency, and scalable
+ * real-time messaging and status synchronization, it is recommended to use `Signaling`.
*
- * The SDK triggers this callback when the user receives the data stream that another user sends
- * by calling the \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage" method.
+ * @param uid The ID of the remote user sending the message.
+ * @param streamId The stream ID of the received message.
+ * @param data The data received.
+ * @param length The data length (byte).
+ * @param sentTs The time when the data stream is sent.
*
- * @param uid ID of the user who sends the data stream.
- * @param streamId The ID of the stream data.
- * @param data The data stream.
- * @param length The length (byte) of the data stream.
- * @param sentTs The time when the data stream sent.
*/
virtual void onStreamMessage(uid_t uid, int streamId, const char* data, size_t length, uint64_t sentTs) {
(void)uid;
@@ -2298,17 +2792,22 @@ class IRtcEngineEventHandler {
(void)sentTs;
}
- /** Occurs when the local user does not receive the data stream from the remote user.
+ /**
+ * @brief Occurs when the local user does not receive the data stream from the remote user.
*
- * The SDK triggers this callback when the user fails to receive the data stream that another user sends
- * by calling the \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage" method.
+ * @details
+ * The SDK triggers this callback when the local user fails to receive the stream message that the
+ * remote user sends by calling the `sendStreamMessage` method.
*
- * @param uid ID of the user who sends the data stream.
- * @param streamId The ID of the stream data.
- * @param code The error code.
+ * @note If you need a more comprehensive solution for low-latency, high-concurrency, and scalable
+ * real-time messaging and status synchronization, it is recommended to use `Signaling`.
+ *
+ * @param uid The ID of the remote user sending the message.
+ * @param streamId The stream ID of the received message.
+ * @param code Error code.
* @param missed The number of lost messages.
- * @param cached The number of incoming cached messages when the data stream is
- * interrupted.
+ * @param cached Number of incoming cached messages when the data stream is interrupted.
+ *
*/
virtual void onStreamMessageError(uid_t uid, int streamId, int code, int missed, int cached) {
(void)uid;
@@ -2369,26 +2868,43 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the token expires.
- *
- * When the token expires during a call, the SDK triggers this callback to remind the app to renew the token.
+ * @brief Occurs when the token expires.
*
- * Upon receiving this callback, generate a new token at your app server and call
- * `joinChannel` to pass the new token to the SDK.
+ * @details
+ * The SDK triggers this callback if the token expires.
+ * When receiving this callback, you need to generate a new token on your token server and you can
+ * renew your token through one of the following ways:
+ * - In scenarios involving one channel:
+ * - Call `renewToken` to pass in the new token.
+ * - Call `leaveChannel(const LeaveChannelOptions& options)` to leave the current channel and then
+ * pass in the new token when
+ * you call `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to join a channel.
+ * - In scenarios involving multiple channels: Call `updateChannelMediaOptionsEx` to pass in the new
+ * token.
*
*/
virtual void onRequestToken() {}
/**
- * Occurs when the token will expire in 30 seconds.
+ * @brief Occurs when the token will expire in 30 seconds.
*
- * When the token is about to expire in 30 seconds, the SDK triggers this callback to remind the app to renew the token.
-
- * Upon receiving this callback, generate a new token at your app server and call
- * \ref IRtcEngine::renewToken "renewToken" to pass the new Token to the SDK.
+ * @details
+ * When receiving this callback, you need to generate a new token on your token server and you can
+ * renew your token through one of the following ways:
+ * - In scenarios involving one channel:
+ * - Call `renewToken` to pass in the new token.
+ * - Call `leaveChannel(const LeaveChannelOptions& options)` to leave the current channel and then
+ * pass in the new token when
+ * you call `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to join a channel.
+ * - In scenarios involving multiple channels: Call `updateChannelMediaOptionsEx` to pass in the new
+ * token.
+ * Call timing: The SDK triggers this callback 30 seconds before the token expires, reminding the
+ * app to update the token.
*
+ * @param token The token that is about to expire.
*
- * @param token The token that will expire in 30 seconds.
*/
virtual void onTokenPrivilegeWillExpire(const char* token) {
(void)token;
@@ -2403,87 +2919,108 @@ class IRtcEngineEventHandler {
(void)error;
}
- /** Occurs when the first local audio frame is published.
+ /**
+ * @brief Occurs when the first audio frame is published.
*
+ * @details
* The SDK triggers this callback under one of the following circumstances:
- * - The local client enables the audio module and calls `joinChannel` successfully.
- * - The local client calls `muteLocalAudioStream(true)` and `muteLocalAudioStream(false)` in sequence.
+ * - The local client enables the audio module and calls `joinChannel(const char* token, const char*
+ * channelId, uid_t uid, const ChannelMediaOptions& options)` successfully.
+ * - The local client calls `muteLocalAudioStream` (`true`) and `muteLocalAudioStream` (`false`) in
+ * sequence.
* - The local client calls `disableAudio` and `enableAudio` in sequence.
* - The local client calls `pushAudioFrame` to successfully push the audio frame to the SDK.
*
- * @param elapsed The time elapsed (ms) from the local user calling `joinChannel` to the SDK triggers this callback.
+ * @param elapsed Time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the SDK
+ * triggers this callback.
+ *
*/
virtual void onFirstLocalAudioFramePublished(int elapsed) {
(void)elapsed;
}
/**
- * Occurs when the SDK decodes the first remote audio frame for playback.
+ * @brief Occurs when the SDK decodes the first remote audio frame for playback.
*
* @deprecated Use `onRemoteAudioStateChanged` instead.
+ *
+ * @details
* The SDK triggers this callback under one of the following circumstances:
* - The remote user joins the channel and sends the audio stream for the first time.
- * - The remote user's audio is offline and then goes online to re-send audio. It means the local user cannot
- * receive audio in 15 seconds. Reasons for such an interruption include:
+ * - The remote user's audio is offline and then goes online to re-send audio. It means the local
+ * user cannot receive audio in 15 seconds. Reasons for such an interruption include:
* - The remote user leaves channel.
* - The remote user drops offline.
- * - The remote user calls muteLocalAudioStream to stop sending the audio stream.
- * - The remote user calls disableAudio to disable audio.
- * @param uid User ID of the remote user sending the audio stream.
- * @param elapsed The time elapsed (ms) from the loca user calling `joinChannel`
- * until this callback is triggered.
+ * - The remote user calls `muteLocalAudioStream` to stop sending the audio stream.
+ * - The remote user calls `disableAudio` to disable audio.
+ *
+ * @param uid The user ID of the remote user.
+ * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the
+ * SDK triggers this callback.
+ *
*/
virtual void onFirstRemoteAudioDecoded(uid_t uid, int elapsed) __deprecated {
(void)uid;
(void)elapsed;
}
- /** Occurs when the SDK receives the first audio frame from a specific remote user.
+ /**
+ * @brief Occurs when the SDK receives the first audio frame from a specific remote user.
+ *
* @deprecated Use `onRemoteAudioStateChanged` instead.
*
- * @param uid ID of the remote user.
- * @param elapsed The time elapsed (ms) from the loca user calling `joinChannel`
- * until this callback is triggered.
+ * @param uid The user ID of the remote user.
+ * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the
+ * SDK triggers this callback.
+ *
*/
virtual void onFirstRemoteAudioFrame(uid_t uid, int elapsed) __deprecated {
(void)uid;
(void)elapsed;
}
- /** Occurs when the local audio state changes.
+ /**
+ * @brief Occurs when the local audio stream state changes.
*
- * When the state of the local audio stream changes (including the state of the audio capture and encoding), the SDK
- * triggers this callback to report the current state. This callback indicates the state of the local audio stream,
- * and allows you to troubleshoot issues when audio exceptions occur.
+ * @details
+ * When the state of the local audio stream changes (including the state of the audio capture and
+ * encoding), the SDK triggers this callback to report the current state. This callback indicates
+ * the state of the local audio stream, and allows you to troubleshoot issues when audio exceptions
+ * occur.
*
- * @note
- * When the state is `LOCAL_AUDIO_STREAM_STATE_FAILED(3)`, see the `error`
- * parameter for details.
+ * @note When the state is `LOCAL_AUDIO_STREAM_STATE_FAILED` (3), you can view the error information
+ * in the `error` parameter.
+ *
+ * @param state The state of the local audio. See `LOCAL_AUDIO_STREAM_STATE`.
+ * @param reason Reasons for local audio state changes. See `LOCAL_AUDIO_STREAM_REASON`.
*
- * @param state State of the local audio. See #LOCAL_AUDIO_STREAM_STATE.
- * @param reason The reason information of the local audio.
- * See #LOCAL_AUDIO_STREAM_REASON.
*/
virtual void onLocalAudioStateChanged(LOCAL_AUDIO_STREAM_STATE state, LOCAL_AUDIO_STREAM_REASON reason) {
(void)state;
(void)reason;
}
- /** Occurs when the remote audio state changes.
+ /**
+ * @brief Occurs when the remote audio state changes.
*
- * When the audio state of a remote user (in the voice/video call channel) or host (in the live streaming channel)
- * changes, the SDK triggers this callback to report the current state of the remote audio stream.
+ * @details
+ * When the audio state of a remote user (in a voice/video call channel) or host (in a live
+ * streaming channel) changes, the SDK triggers this callback to report the current state of the
+ * remote audio stream.
*
- * @note This callback does not work properly when the number of users (in the voice/video call channel) or hosts
- * (in the live streaming channel) in the channel exceeds 17.
+ * @note This callback does not work properly when the number of users (in the communication
+ * profile) or hosts (in the live streaming channel) in a channel exceeds 32.
+ *
+ * @param uid The ID of the remote user whose audio state changes.
+ * @param state The state of the remote audio. See `REMOTE_AUDIO_STATE`.
+ * @param reason The reason of the remote audio state change. See `REMOTE_AUDIO_STATE_REASON`.
+ * @param elapsed Time elapsed (ms) from the local user calling the `joinChannel(const char* token,
+ * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` method until
+ * the SDK triggers this callback.
*
- * @param uid ID of the remote user whose audio state changes.
- * @param state State of the remote audio. See #REMOTE_AUDIO_STATE.
- * @param reason The reason of the remote audio state change.
- * See #REMOTE_AUDIO_STATE_REASON.
- * @param elapsed Time elapsed (ms) from the local user calling the
- * `joinChannel` method until the SDK
- * triggers this callback.
*/
virtual void onRemoteAudioStateChanged(uid_t uid, REMOTE_AUDIO_STATE state, REMOTE_AUDIO_STATE_REASON reason, int elapsed) {
(void)uid;
@@ -2493,17 +3030,21 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when an active speaker is detected.
+ * @brief Occurs when the most active remote speaker is detected.
*
- * After a successful call of `enableAudioVolumeIndication`, the SDK continuously detects which remote user has the
- * loudest volume. During the current period, the remote user, who is detected as the loudest for the most times,
- * is the most active user.
+ * @details
+ * After a successful call of `enableAudioVolumeIndication`, the SDK continuously detects which
+ * remote user has the loudest volume. During the current period, the remote user whose volume is
+ * detected as the loudest for the most times, is the most active user.
+ * When the number of users is no less than two and an active remote speaker exists, the SDK
+ * triggers this callback and reports the `uid` of the most active remote speaker.
+ * - If the most active remote speaker is always the same user, the SDK triggers the
+ * `onActiveSpeaker` callback only once.
+ * - If the most active remote speaker changes to another user, the SDK triggers this callback again
+ * and reports the `uid` of the new active remote speaker.
*
- * When the number of users is no less than two and an active remote speaker exists, the SDK triggers this callback and reports the uid of the most active remote speaker.
- * - If the most active remote speaker is always the same user, the SDK triggers the `onActiveSpeaker` callback only once.
- * - If the most active remote speaker changes to another user, the SDK triggers this callback again and reports the uid of the new active remote speaker.
+ * @param uid The user ID of the most active speaker.
*
- * @param userId The ID of the active speaker. A `uid` of 0 means the local user.
*/
virtual void onActiveSpeaker(uid_t uid) {
(void)uid;
@@ -2515,22 +3056,29 @@ class IRtcEngineEventHandler {
*/
virtual void onContentInspectResult(media::CONTENT_INSPECT_RESULT result) { (void)result; }
- /** Reports the result of taking a video snapshot.
+ /**
+ * @brief Reports the result of taking a video snapshot.
*
- * After a successful `takeSnapshot` method call, the SDK triggers this callback to report whether the snapshot is
- * successfully taken, as well as the details for that snapshot.
+ * @details
+ * After a successful `takeSnapshot(uid_t uid, const char* filePath)` method call, the SDK triggers
+ * this callback to report
+ * whether the snapshot is successfully taken as well as the details for the snapshot taken.
*
- * @param uid The user ID. A `uid` of 0 indicates the local user.
+ * @param uid The user ID. A `uid` of 0 indicates the local user.
* @param filePath The local path of the snapshot.
* @param width The width (px) of the snapshot.
* @param height The height (px) of the snapshot.
- * @param errCode The message that confirms success or gives the reason why the snapshot is not successfully taken:
+ * @param errCode The message that confirms success or gives the reason why the snapshot is not
+ * successfully taken:
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure:
* - -1: The SDK fails to write data to a file or encode a JPEG image.
- * - -2: The SDK does not find the video stream of the specified user within one second after the `takeSnapshot` method call succeeds.
- * - -3: Calling the `takeSnapshot` method too frequently. Call the `takeSnapshot` method after receiving the `onSnapshotTaken`
- * callback from the previous call.
+ * - -2: The SDK does not find the video stream of the specified user within one second after the
+ * `takeSnapshot(uid_t uid, const char* filePath)` method call succeeds. The possible reasons are:
+ * local capture stops, the remote end stops publishing, or video data processing is blocked.
+ * - -3: Calling the `takeSnapshot(uid_t uid, const char* filePath)` method too frequently.
+ *
*/
virtual void onSnapshotTaken(uid_t uid, const char* filePath, int width, int height, int errCode) {
(void)uid;
@@ -2541,11 +3089,27 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the user role switches in the interactive live streaming.
+ * @brief Occurs when the user role or the audience latency level changes.
+ *
+ * @details
+ * Call timing: This callback will be triggered in any of the following situations:
+ * - Calling `setClientRole(CLIENT_ROLE_TYPE role)` or `setClientRole(CLIENT_ROLE_TYPE role, const
+ * ClientRoleOptions& options)` to set the user role or audience latency
+ * level **after joining a channel**
+ * - Calling `setClientRole(CLIENT_ROLE_TYPE role)` or `setClientRole(CLIENT_ROLE_TYPE role, const
+ * ClientRoleOptions& options)` and set the user role to `AUDIENCE`
+ * **before joining a channel**.
+ *
+ * @note This callback will not be triggered when you call `setClientRole(CLIENT_ROLE_TYPE role)` or
+ * `setClientRole(CLIENT_ROLE_TYPE role, const ClientRoleOptions& options)`
+ * and set the user role to `BROADCASTER` **before joining a channel**.
+ *
+ * @param oldRole Role that the user switches from: `CLIENT_ROLE_TYPE`.
+ * @param newRole Role that the user switches to: `CLIENT_ROLE_TYPE`.
+ * @param newRoleOptions Properties of the role that the user switches to (since v4.1.0). See
+ * `ClientRoleOptions`.
*
- * @param oldRole The old role of the user: #CLIENT_ROLE_TYPE.
- * @param newRole The new role of the user: #CLIENT_ROLE_TYPE.
- * @param newRoleOptions The client role options of the new role: #ClientRoleOptions.
*/
virtual void onClientRoleChanged(CLIENT_ROLE_TYPE oldRole, CLIENT_ROLE_TYPE newRole, const ClientRoleOptions& newRoleOptions) {
(void)oldRole;
@@ -2554,23 +3118,39 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the user role in a Live-Broadcast channel fails to switch, for example, from a broadcaster
- * to an audience or vice versa.
+ * @brief Occurs when switching a user role fails.
+ *
+ * @details
+ * This callback informs you about the reason for failing to switch and your current user role.
+ * Call timing: The SDK triggers this callback when the local user calls
+ * `setClientRole(CLIENT_ROLE_TYPE role)` or
+ * `setClientRole(CLIENT_ROLE_TYPE role, const ClientRoleOptions& options)` after joining a channel
+ * to switch the user role but the switching fails.
+ *
+ * @param reason The reason for a user role switch failure. See `CLIENT_ROLE_CHANGE_FAILED_REASON`.
+ * @param currentRole Current user role. See `CLIENT_ROLE_TYPE`.
*
- * @param reason The reason for failing to change the client role: #CLIENT_ROLE_CHANGE_FAILED_REASON.
- * @param currentRole The current role of the user: #CLIENT_ROLE_TYPE.
*/
virtual void onClientRoleChangeFailed(CLIENT_ROLE_CHANGE_FAILED_REASON reason, CLIENT_ROLE_TYPE currentRole) {
(void)reason;
(void)currentRole;
}
- /** Occurs when the audio device volume changes.
- @param deviceType The device type, see #MEDIA_DEVICE_TYPE
- @param volume The volume of the audio device.
- @param muted Whether the audio device is muted:
- - true: The audio device is muted.
- - false: The audio device is not muted.
+ /**
+ * @brief Reports the volume change of the audio device or app.
+ *
+ * @details
+ * Occurs when the volume on the playback device, audio capture device, or the volume of the app
+ * changes.
+ *
+ * @note This callback is for Windows and macOS only.
+ *
+ * @param deviceType The device type. See `MEDIA_DEVICE_TYPE`.
+ * @param volume The volume value. The range is [0, 255].
+ * @param muted Whether the audio device is muted:
+ * - `true`: The audio device is muted.
+ * - `false`: The audio device is not muted.
+ *
*/
virtual void onAudioDeviceVolumeChanged(MEDIA_DEVICE_TYPE deviceType, int volume, bool muted) {
(void)deviceType;
@@ -2579,15 +3159,18 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the state of the RTMP streaming changes.
+ * @brief Occurs when the state of Media Push changes.
+ *
+ * @details
+ * When the state of Media Push changes, the SDK triggers this callback and reports the URL address
+ * and the current state of the Media Push. This callback indicates the state of the Media Push.
+ * When exceptions occur, you can troubleshoot issues by referring to the detailed error
+ * descriptions in the error code parameter.
*
- * When the media push state changes, the SDK triggers this callback and reports the URL address and the current state
- * of the media push. This callback indicates the state of the media push. When exceptions occur, you can troubleshoot
- * issues by referring to the detailed error descriptions in the error code.
+ * @param url The URL address where the state of the Media Push changes.
+ * @param state The current state of the Media Push. See `RTMP_STREAM_PUBLISH_STATE`.
+ * @param reason Reasons for the changes in the Media Push status. See `RTMP_STREAM_PUBLISH_REASON`.
*
- * @param url The URL address where the state of the media push changes.
- * @param state The current state of the media push: #RTMP_STREAM_PUBLISH_STATE.
- * @param reason The detailed error information for the media push: #RTMP_STREAM_PUBLISH_REASON.
*/
virtual void onRtmpStreamingStateChanged(const char* url, RTMP_STREAM_PUBLISH_STATE state,
RTMP_STREAM_PUBLISH_REASON reason) {
@@ -2596,10 +3179,12 @@ class IRtcEngineEventHandler {
(void)reason;
}
- /** Reports events during the media push.
+ /**
+ * @brief Reports events during the Media Push.
+ *
+ * @param url The URL for Media Push.
+ * @param eventCode The event code of Media Push. See `RTMP_STREAMING_EVENT`.
*
- * @param url The URL for media push.
- * @param eventCode The event code of media push. See RTMP_STREAMING_EVENT for details.
*/
virtual void onRtmpStreamingEvent(const char* url, RTMP_STREAMING_EVENT eventCode) {
(void)url;
@@ -2607,62 +3192,37 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the publisher's transcoding settings are updated.
+ * @brief Occurs when the publisher's transcoding is updated.
*
- * When the `LiveTranscoding` class in \ref IRtcEngine::setLiveTranscoding "setLiveTranscoding"
- * updates, the SDK triggers this callback to report the update information.
+ * @details
+ * When the `LiveTranscoding` class in the `startRtmpStreamWithTranscoding` method updates, the SDK
+ * triggers the `onTranscodingUpdated` callback to report the update information.
+ *
+ * @note If you call the `startRtmpStreamWithTranscoding` method to set the `LiveTranscoding` class
+ * for the first time, the SDK does not trigger this callback.
*
- * @note
- * If you call the `setLiveTranscoding` method to set the `LiveTranscoding` class for the first time, the SDK
- * does not trigger this callback.
*/
virtual void onTranscodingUpdated() {}
- /** Occurs when the local audio route changes (for Android, iOS, and macOS only).
-
- The SDK triggers this callback when the local audio route switches to an
- earpiece, speakerphone, headset, or Bluetooth device.
- @param routing The current audio output routing:
- - -1: Default.
- - 0: Headset.
- - 1: Earpiece.
- - 2: Headset with no microphone.
- - 3: Speakerphone.
- - 4: Loudspeaker.
- - 5: Bluetooth headset.
+ /**
+ * @brief Occurs when the local audio route changes.
+ *
+ * @note This callback is for Android, iOS, and macOS only.
+ *
+ * @param routing The current audio routing. See `AudioRoute`.
+ *
*/
virtual void onAudioRoutingChanged(int routing) { (void)routing; }
/**
- * Occurs when the state of the media stream relay changes.
+ * @brief Occurs when the state of the media stream relay changes.
*
- * The SDK reports the state of the current media relay and possible error messages in this
- * callback.
+ * @details
+ * The SDK returns the state of the current media relay with any error message.
+ *
+ * @param state The state code. See `CHANNEL_MEDIA_RELAY_STATE`.
+ * @param code The error code of the channel media relay. See `CHANNEL_MEDIA_RELAY_ERROR`.
*
- * @param state The state code:
- * - `RELAY_STATE_IDLE(0)`: The SDK is initializing.
- * - `RELAY_STATE_CONNECTING(1)`: The SDK tries to relay the media stream to the destination
- * channel.
- * - `RELAY_STATE_RUNNING(2)`: The SDK successfully relays the media stream to the destination
- * channel.
- * - `RELAY_STATE_FAILURE(3)`: A failure occurs. See the details in `code`.
- * @param code The error code:
- * - `RELAY_OK(0)`: The state is normal.
- * - `RELAY_ERROR_SERVER_ERROR_RESPONSE(1)`: An error occurs in the server response.
- * - `RELAY_ERROR_SERVER_NO_RESPONSE(2)`: No server response. You can call the leaveChannel method
- * to leave the channel.
- * - `RELAY_ERROR_NO_RESOURCE_AVAILABLE(3)`: The SDK fails to access the service, probably due to
- * limited resources of the server.
- * - `RELAY_ERROR_FAILED_JOIN_SRC(4)`: Fails to send the relay request.
- * - `RELAY_ERROR_FAILED_JOIN_DEST(5)`: Fails to accept the relay request.
- * - `RELAY_ERROR_FAILED_PACKET_RECEIVED_FROM_SRC(6)`: The server fails to receive the media
- * stream.
- * - `RELAY_ERROR_FAILED_PACKET_SENT_TO_DEST(7)`: The server fails to send the media stream.
- * - `RELAY_ERROR_SERVER_CONNECTION_LOST(8)`: The SDK disconnects from the server due to poor
- * network connections. You can call the leaveChannel method to leave the channel.
- * - `RELAY_ERROR_INTERNAL_ERROR(9)`: An internal error occurs in the server.
- * - `RELAY_ERROR_SRC_TOKEN_EXPIRED(10)`: The token of the source channel has expired.
- * - `RELAY_ERROR_DEST_TOKEN_EXPIRED(11)`: The token of the destination channel has expired.
*/
virtual void onChannelMediaRelayStateChanged(int state, int code) {
(void)state;
@@ -2670,41 +3230,51 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the remote media stream falls back to audio-only stream due to poor network conditions or
- * switches back to video stream after the network conditions improve.
+ * @brief Occurs when the remote media stream falls back to the audio-only stream due to poor
+ * network conditions or switches back to the video stream after the network conditions improve.
*
- * If you call `setRemoteSubscribeFallbackOption` and set `option` as `STREAM_FALLBACK_OPTION_AUDIO_ONLY(2)`, this
- * callback is triggered when the remotely subscribed media stream falls back to audio-only mode due to poor downlink
- * conditions, or when the remotely subscribed media stream switches back to the video after the downlink network
- * condition improves.
+ * @details
+ * If you call `setRemoteSubscribeFallbackOption` and set `option` to
+ * `STREAM_FALLBACK_OPTION_AUDIO_ONLY`, the SDK triggers this callback in the following situations:
+ * - The downstream network condition is poor, and the subscribed video stream is downgraded to
+ * audio-only stream.
+ * - The downstream network condition has improved, and the subscribed stream has been restored to
+ * video stream.
*
- * @note Once the remote media stream is switched to the low stream due to poor network conditions, you can monitor
- * the stream switch between a high and low stream in the `onRemoteVideoStats` callback.
+ * @note Once the remote media stream switches to the low-quality video stream due to weak network
+ * conditions, you can monitor the stream switch between a high-quality and low-quality stream in
+ * the `onRemoteVideoStats` callback.
+ *
+ * @param uid The user ID of the remote user.
+ * @param isFallbackOrRecover - `true`: The subscribed media stream falls back to audio-only due to
+ * poor network conditions.
+ * - `false`: The subscribed media stream switches back to the video stream after the network
+ * conditions improve.
*
- * @param uid ID of the remote user sending the stream.
- * @param isFallbackOrRecover Whether the remote media stream fell back to audio-only or switched back to the video:
- * - `true`: The remote media stream fell back to audio-only due to poor network conditions.
- * - `false`: The remote media stream switched back to the video stream after the network conditions improved.
*/
virtual void onRemoteSubscribeFallbackToAudioOnly(uid_t uid, bool isFallbackOrRecover) {
(void)uid;
(void)isFallbackOrRecover;
}
- /** Reports the transport-layer statistics of each remote audio stream.
+ /**
+ * @brief Reports the transport-layer statistics of each remote audio stream.
+ *
* @deprecated Use `onRemoteAudioStats` instead.
-
- This callback reports the transport-layer statistics, such as the packet loss rate and network time delay, once every
- two seconds after the local user receives an audio packet from a remote user. During a call, when the user receives
- the audio packet sent by the remote user/host, the callback is triggered every 2 seconds.
-
- @param uid ID of the remote user whose audio data packet is received.
- @param delay The network time delay (ms) from the sender to the receiver.
- @param lost The Packet loss rate (%) of the audio packet sent from the remote
- user.
- @param rxKBitRate Received bitrate (Kbps) of the audio packet sent from the
- remote user.
- */
+ *
+ * @details
+ * This callback reports the transport-layer statistics, such as the packet loss rate and network
+ * time delay after the local user receives an audio packet from a remote user. During a call, when
+ * the user receives the audio packet sent by the remote user, the callback is triggered every 2
+ * seconds.
+ *
+ * @param uid The ID of the remote user sending the audio streams.
+ * @param delay The network delay (ms) from the remote user to the receiver.
+ * @param lost The packet loss rate (%) of the audio packet sent from the remote user to the
+ * receiver.
+ * @param rxKBitRate The bitrate of the received audio (Kbps).
+ *
+ */
virtual void onRemoteAudioTransportStats(uid_t uid, unsigned short delay, unsigned short lost, unsigned short rxKBitRate) __deprecated {
(void)uid;
(void)delay;
@@ -2712,23 +3282,23 @@ class IRtcEngineEventHandler {
(void)rxKBitRate;
}
- /** Reports the transport-layer statistics of each remote video stream.
+ /**
+ * @brief Reports the transport-layer statistics of each remote video stream.
+ *
* @deprecated Use `onRemoteVideoStats` instead.
-
- This callback reports the transport-layer statistics, such as the packet loss rate and network time
- delay, once every two seconds after the local user receives a video packet from a remote user.
-
- During a call, when the user receives the video packet sent by the remote user/host, the callback is
- triggered every 2 seconds.
-
- @param uid ID of the remote user whose video packet is received.
- @param delay The network time delay (ms) from the remote user sending the
- video packet to the local user.
- @param lost The packet loss rate (%) of the video packet sent from the remote
- user.
- @param rxKBitRate The bitrate (Kbps) of the video packet sent from
- the remote user.
- */
+ *
+ * @details
+ * This callback reports the transport-layer statistics, such as the packet loss rate and network
+ * time delay after the local user receives a video packet from a remote user.
+ * During a call, when the user receives the video packet sent by the remote user/host, the callback
+ * is triggered every 2 seconds.
+ *
+ * @param uid The ID of the remote user sending the video packets.
+ * @param delay The network delay (ms) from the sender to the receiver.
+ * @param lost The packet loss rate (%) of the video packet sent from the remote user.
+ * @param rxKBitRate The bitrate of the received video (Kbps).
+ *
+ */
virtual void onRemoteVideoTransportStats(uid_t uid, unsigned short delay, unsigned short lost, unsigned short rxKBitRate) __deprecated {
(void)uid;
(void)delay;
@@ -2736,13 +3306,16 @@ class IRtcEngineEventHandler {
(void)rxKBitRate;
}
- /** Occurs when the network connection state changes.
+ /**
+ * @brief Occurs when the network connection state changes.
*
+ * @details
* When the network connection state changes, the SDK triggers this callback and reports the current
* connection state and the reason for the change.
-
- @param state The current connection state. See #CONNECTION_STATE_TYPE.
- @param reason The reason for a connection state change. See #CONNECTION_CHANGED_REASON_TYPE.
+ *
+ * @param state The current connection state. See `CONNECTION_STATE_TYPE`.
+ * @param reason The reason for a connection state change. See `CONNECTION_CHANGED_REASON_TYPE`.
+ *
*/
virtual void onConnectionStateChanged(
CONNECTION_STATE_TYPE state, CONNECTION_CHANGED_REASON_TYPE reason) {
@@ -2750,65 +3323,89 @@ class IRtcEngineEventHandler {
(void)reason;
}
- /** Occurs when the local network type changes.
+ /**
+ * @brief Occurs when the local network type changes.
*
+ * @details
* This callback occurs when the connection state of the local user changes. You can get the
- * connection state and reason for the state change in this callback. When the network connection
- * is interrupted, this callback indicates whether the interruption is caused by a network type
- * change or poor network conditions.
-
- @param type The type of the local network connection. See #NETWORK_TYPE.
+ * connection state and reason for the state change in this callback. When the network connection is
+ * interrupted, this callback indicates whether the interruption is caused by a network type change
+ * or poor network conditions.
+ *
+ * @param type The type of the local network connection. See `NETWORK_TYPE`.
+ *
*/
virtual void onNetworkTypeChanged(NETWORK_TYPE type) {
(void)type;
}
- /** Reports the built-in encryption errors.
+ /**
+ * @brief Reports the built-in encryption errors.
*
+ * @details
* When encryption is enabled by calling `enableEncryption`, the SDK triggers this callback if an
* error occurs in encryption or decryption on the sender or the receiver side.
-
- @param errorType The error type. See #ENCRYPTION_ERROR_TYPE.
+ *
+ * @param errorType Details about the error type. See `ENCRYPTION_ERROR_TYPE`.
+ *
*/
virtual void onEncryptionError(ENCRYPTION_ERROR_TYPE errorType) {
(void)errorType;
}
- /** Occurs when the SDK cannot get the device permission.
+ /**
+ * @brief Occurs when the SDK cannot get the device permission.
*
+ * @details
* When the SDK fails to get the device permission, the SDK triggers this callback to report which
* device permission cannot be got.
*
- * @note This method is for Android and iOS only.
-
- @param permissionType The type of the device permission. See #PERMISSION_TYPE.
- */
+ * @param permissionType The type of the device permission. See `PERMISSION_TYPE`.
+ *
+ */
virtual void onPermissionError(PERMISSION_TYPE permissionType) {
(void)permissionType;
}
- /** Occurs when the local user registers a user account.
+#if defined(__ANDROID__)
+ /**
+ * Reports the permission granted.
+ * @param permissionType The type of the device permission granted. See `PERMISSION_TYPE`.
+ */
+ virtual void onPermissionGranted(agora::rtc::PERMISSION_TYPE permissionType) {}
+#endif
+
+ /**
+ * @brief Occurs when the local user registers a user account.
*
+ * @details
* After the local user successfully calls `registerLocalUserAccount` to register the user account
- * or calls `joinChannelWithUserAccount` to join a channel, the SDK triggers the callback and
+ * or calls `joinChannelWithUserAccount(const char* token, const char* channelId, const char*
+ * userAccount, const ChannelMediaOptions& options)` to join a channel, the SDK triggers the
+ * callback and
* informs the local user's UID and User Account.
-
- @param uid The ID of the local user.
- @param userAccount The user account of the local user.
- */
+ *
+ * @param uid The ID of the local user.
+ * @param userAccount The user account of the local user.
+ *
+ */
virtual void onLocalUserRegistered(uid_t uid, const char* userAccount) {
(void)uid;
(void)userAccount;
}
- /** Occurs when the SDK gets the user ID and user account of the remote user.
-
- After a remote user joins the channel, the SDK gets the UID and user account of the remote user,
- caches them in a mapping table object (`userInfo`), and triggers this callback on the local client.
-
- @param uid The ID of the remote user.
- @param info The `UserInfo` object that contains the user ID and user account of the remote user.
- */
+ /**
+ * @brief Occurs when the SDK gets the user ID and user account of the remote user.
+ *
+ * @details
+ * After a remote user joins the channel, the SDK gets the UID and user account of the remote user,
+ * caches them in a mapping table object, and triggers this callback on the local client.
+ *
+ * @param uid The user ID of the remote user.
+ * @param info The UserInfo object that contains the user ID and user account of the remote user.
+ * See `UserInfo` for details.
+ *
+ */
virtual void onUserInfoUpdated(uid_t uid, const UserInfo& info) {
(void)uid;
(void)info;
@@ -2826,11 +3423,20 @@ class IRtcEngineEventHandler {
}
/**
- * Reports the tracing result of video rendering event of the user.
+ * @brief Video frame rendering event callback.
+ *
+ * @details
+ * After calling the `startMediaRenderingTracing` method or joining a channel, the SDK triggers this
+ * callback to report the events of video frame rendering and the indicators during the rendering
+ * process. Developers can optimize the indicators to improve the efficiency of the first video
+ * frame rendering.
*
* @param uid The user ID.
- * @param currentEvent The current event of the tracing result: #MEDIA_TRACE_EVENT.
- * @param tracingInfo The tracing result: #VideoRenderingTracingInfo.
+ * @param currentEvent The current video frame rendering event. See `MEDIA_TRACE_EVENT`.
+ * @param tracingInfo The indicators during the video frame rendering process. Developers need to
+ * reduce the value of indicators as much as possible in order to improve the efficiency of the
+ * first video frame rendering. See `VideoRenderingTracingInfo`.
+ *
*/
virtual void onVideoRenderingTracingResult(uid_t uid, MEDIA_TRACE_EVENT currentEvent, VideoRenderingTracingInfo tracingInfo) {
(void)uid;
@@ -2839,10 +3445,16 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when local video transcoder stream has an error.
+ * @brief Occurs when there's an error during the local video mixing.
+ *
+ * @details
+ * When you fail to call `startLocalVideoTranscoder` or `updateLocalTranscoderConfiguration`, the
+ * SDK triggers this callback to report the reason.
+ *
+ * @param stream The video streams that cannot be mixed during video mixing. See
+ * `TranscodingVideoStream`.
+ * @param error The reason for local video mixing error. See `VIDEO_TRANSCODER_ERROR`.
*
- * @param stream Stream type of TranscodingVideoStream.
- * @param error Error code of VIDEO_TRANSCODER_ERROR.
*/
virtual void onLocalVideoTranscoderError(const TranscodingVideoStream& stream, VIDEO_TRANSCODER_ERROR error){
(void)stream;
@@ -2862,13 +3474,14 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the audio subscribing state changes.
+ * @brief Occurs when the audio subscribing state changes.
*
- * @param channel The name of the channel.
- * @param uid The ID of the remote user.
- * @param oldState The previous subscribing status: #STREAM_SUBSCRIBE_STATE.
- * @param newState The current subscribing status: #STREAM_SUBSCRIBE_STATE.
+ * @param channel The channel name.
+ * @param uid The user ID of the remote user.
+ * @param oldState The previous subscribing status. See `STREAM_SUBSCRIBE_STATE`.
+ * @param newState The current subscribing status. See `STREAM_SUBSCRIBE_STATE`.
* @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state.
+ *
*/
virtual void onAudioSubscribeStateChanged(const char* channel, uid_t uid, STREAM_SUBSCRIBE_STATE oldState, STREAM_SUBSCRIBE_STATE newState, int elapseSinceLastState) {
(void)channel;
@@ -2879,13 +3492,14 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the video subscribing state changes.
+ * @brief Occurs when the video subscribing state changes.
*
- * @param channel The name of the channel.
- * @param uid The ID of the remote user.
- * @param oldState The previous subscribing status: #STREAM_SUBSCRIBE_STATE.
- * @param newState The current subscribing status: #STREAM_SUBSCRIBE_STATE.
+ * @param channel The channel name.
+ * @param uid The user ID of the remote user.
+ * @param oldState The previous subscribing status. See `STREAM_SUBSCRIBE_STATE`.
+ * @param newState The current subscribing status. See `STREAM_SUBSCRIBE_STATE`.
* @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state.
+ *
*/
virtual void onVideoSubscribeStateChanged(const char* channel, uid_t uid, STREAM_SUBSCRIBE_STATE oldState, STREAM_SUBSCRIBE_STATE newState, int elapseSinceLastState) {
(void)channel;
@@ -2896,12 +3510,13 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the audio publishing state changes.
+ * @brief Occurs when the audio publishing state changes.
*
- * @param channel The name of the channel.
- * @param oldState The previous publishing state: #STREAM_PUBLISH_STATE.
- * @param newState The current publishing state: #STREAM_PUBLISH_STATE.
+ * @param channel The channel name.
+ * @param oldState The previous publishing state. See `STREAM_PUBLISH_STATE`.
+ * @param newState The current publishing state. See `STREAM_PUBLISH_STATE`.
* @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state.
+ *
*/
virtual void onAudioPublishStateChanged(const char* channel, STREAM_PUBLISH_STATE oldState, STREAM_PUBLISH_STATE newState, int elapseSinceLastState) {
(void)channel;
@@ -2911,13 +3526,14 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the video publishing state changes.
+ * @brief Occurs when the video publishing state changes.
*
- * @param source The video source type.
- * @param channel The name of the channel.
- * @param oldState The previous publishing state: #STREAM_PUBLISH_STATE.
- * @param newState The current publishing state: #STREAM_PUBLISH_STATE.
+ * @param channel The channel name.
+ * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ * @param oldState The previous publishing state. See `STREAM_PUBLISH_STATE`.
+ * @param newState The current publishing state. See `STREAM_PUBLISH_STATE`.
* @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state.
+ *
*/
virtual void onVideoPublishStateChanged(VIDEO_SOURCE_TYPE source, const char* channel, STREAM_PUBLISH_STATE oldState, STREAM_PUBLISH_STATE newState, int elapseSinceLastState) {
(void)source;
@@ -2928,13 +3544,23 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when receive a video transcoder stream which has video layout info.
+ * @brief Occurs when the local user receives a mixed video stream carrying layout information.
+ *
+ * @details
+ * When the local user receives a mixed video stream sent by the video mixing server for the first
+ * time, or when there is a change in the layout information of the mixed stream, the SDK triggers
+ * this callback, reporting the layout information of each sub-video stream within the mixed video
+ * stream.
+ *
+ * @note This callback is for Android and iOS only.
+ *
+ * @param uid User ID who published this mixed video stream.
+ * @param width Width (px) of the mixed video stream.
+ * @param height Height (px) of the mixed video stream.
+ * @param layoutCount The number of layout information in the mixed video stream.
+ * @param layoutlist Layout information of a specific sub-video stream within the mixed stream. See
+ * `VideoLayout`.
*
- * @param uid user id of the transcoded stream.
- * @param width width of the transcoded stream.
- * @param height height of the transcoded stream.
- * @param layoutCount count of layout info in the transcoded stream.
- * @param layoutlist video layout info list of the transcoded stream.
*/
virtual void onTranscodedStreamLayoutInfo(uid_t uid, int width, int height, int layoutCount,const VideoLayout* layoutlist) {
(void)uid;
@@ -2959,13 +3585,15 @@ class IRtcEngineEventHandler {
}
/**
- * The event callback of the extension.
+ * @brief The event callback of the extension.
*
+ * @details
* To listen for events while the extension is running, you need to register this callback.
*
- * @param context The context of the extension.
+ * @param context The context information of the extension, see `ExtensionContext`.
* @param key The key of the extension.
* @param value The value of the extension key.
+ *
*/
virtual void onExtensionEventWithContext(const ExtensionContext &context, const char* key, const char* value) {
(void)context;
@@ -2974,36 +3602,44 @@ class IRtcEngineEventHandler {
}
/**
- * Occurs when the extension is enabled.
+ * @brief Occurs when the extension is enabled.
*
- * After a successful creation of filter , the extension triggers this callback.
+ * @details
+ * The callback is triggered after the extension is successfully enabled.
+ *
+ * @param context The context information of the extension, see `ExtensionContext`.
*
- * @param context The context of the extension.
*/
virtual void onExtensionStartedWithContext(const ExtensionContext &context) {
(void)context;
}
/**
- * Occurs when the extension is disabled.
+ * @brief Occurs when the extension is disabled.
+ *
+ * @details
+ * The callback is triggered after the extension is successfully disabled.
*
- * After a successful destroy filter, the extension triggers this callback.
+ * @param context The context information of the extension, see `ExtensionContext`.
*
- * @param context The context of the extension.
*/
virtual void onExtensionStoppedWithContext(const ExtensionContext &context) {
(void)context;
}
/**
- * Occurs when the extension runs incorrectly.
+ * @brief Occurs when the extension runs incorrectly.
*
- * When the extension runs in error, the extension triggers
- * this callback and reports the error code and reason.
+ * @details
+ * In case of extension enabling failure or runtime errors, the extension triggers this callback and
+ * reports the error code along with the reasons.
+ *
+ * @param context The context information of the extension, see `ExtensionContext`.
+ * @param error Error code. For details, see the extension documentation provided by the extension
+ * provider.
+ * @param message Reason. For details, see the extension documentation provided by the extension
+ * provider.
*
- * @param context The context of the extension.
- * @param error The error code. For details, see the extension documentation provided by the extension provider.
- * @param message The error message. For details, see the extension documentation provided by the extension provider.
*/
virtual void onExtensionErrorWithContext(const ExtensionContext &context, int error, const char* message) {
(void)context;
@@ -3034,16 +3670,17 @@ class IRtcEngineEventHandler {
}
/**
- * @brief Reports the result of calling renewToken.
+ * @brief Callback for `renewToken` call result.
+ *
* @since 4.6.0
- *
- * Occurs when a user renews the token.
*
- * This callback notifies the app of the result after the user calls `renewToken` to renew the token.
- * The app can obtain the result of the `renewToken` call from this callback.
+ * @details
+ * This callback is triggered after the user calls the `renewToken` method to update the token, and
+ * is used to notify the app of the result.
+ *
+ * @param token The token passed in the `renewToken` call.
+ * @param code Error code. See `RENEW_TOKEN_ERROR_CODE`.
*
- * @param token The token.
- * @param code The error code.
*/
virtual void onRenewTokenResult(const char* token, RENEW_TOKEN_ERROR_CODE code) {
(void)token;
@@ -3059,9 +3696,10 @@ class IVideoDeviceCollection {
virtual ~IVideoDeviceCollection() {}
/**
- * Gets the total number of the indexed video capture devices in the system.
+ * @brief Gets the total number of the indexed video devices in the system.
*
- * @return The total number of the indexed video capture devices.
+ * @return
+ * The total number of the indexed video devices in the system.
*/
virtual int getCount() = 0;
@@ -3090,7 +3728,7 @@ class IVideoDeviceCollection {
char deviceIdUTF8[MAX_DEVICE_ID_LENGTH]) = 0;
/**
- * Releases all the resources occupied by the IVideoDeviceCollection object.
+ * @brief Releases all the resources occupied by the `IVideoDeviceCollection` object.
*/
virtual void release() = 0;
};
@@ -3102,15 +3740,17 @@ class IVideoDeviceManager {
public:
virtual ~IVideoDeviceManager() {}
/**
- * Enumerates the video devices.
+ * @brief Enumerates the video devices.
*
+ * @details
* This method returns an `IVideoDeviceCollection` object including all video devices in the system.
* With the `IVideoDeviceCollection` object, the application can enumerate video devices. The
- * application must call the release method to release the returned object after using it.
+ * application must call the `release` method to release the returned object after using it.
+ *
+ * @note This method is for Windows and macOS only.
*
* @return
- * - Success: An `IVideoDeviceCollection` object including all video devices in the system.
- * - Failure: NULL.
+ * - Success: One `IVideoDeviceCollection` object including all video devices in the system.
*/
virtual IVideoDeviceCollection* enumerateVideoDevices() = 0;
@@ -3127,8 +3767,12 @@ class IVideoDeviceManager {
virtual int setDevice(const char deviceIdUTF8[MAX_DEVICE_ID_LENGTH]) = 0;
/**
- * Retrieves the current video capture device.
- * @param deviceIdUTF8 Output parameter. The device ID. The maximum length is #MAX_DEVICE_ID_LENGTH_TYPE.
+ * @brief Retrieves the current video capture device.
+ *
+ * @note This method is for Windows and macOS only.
+ *
+ * @param deviceIdUTF8 An output parameter. The device ID. The maximum length is
+ * `MAX_DEVICE_ID_LENGTH_TYPE`.
*
* @return
* - 0: Success.
@@ -3139,36 +3783,42 @@ class IVideoDeviceManager {
#if defined(_WIN32) || (defined(__linux__) && !defined(__ANDROID__) && !defined(__OHOS__)) || \
(defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE)
/**
- * Gets the number of video formats supported by the specified video capture device.
+ * @brief Gets the number of video formats supported by the specified video capture device.
*
+ * @details
* Video capture devices may support multiple video formats, and each format supports different
* combinations of video frame width, video frame height, and frame rate.
- *
* You can call this method to get how many video formats the specified video capture device can
* support, and then call `getCapability` to get the specific video frame information in the
* specified video format.
*
+ * @note This method is for Windows and macOS only.
+ *
* @param deviceIdUTF8 The ID of the video capture device.
*
* @return
- * - 0: Success. Returns the number of video formats supported by this device. For example: If the
+ * - > 0: Success. Returns the number of video formats supported by this device. For example: If the
* specified camera supports 10 different video formats, the return value is 10.
- * - < 0: Failure.
+ * - <= 0: Failure.
*/
virtual int numberOfCapabilities(const char* deviceIdUTF8) = 0;
/**
- * Gets the detailed video frame information of the video capture device in the specified video format.
+ * @brief Gets the detailed video frame information of the video capture device in the specified
+ * video format.
*
- * After calling `numberOfCapabilities` to get the number of video formats supported by the video capture
- * device, you can call this method to get the specific video frame information supported by the
- * specified index number.
+ * @details
+ * After calling `numberOfCapabilities` to get the number of video formats supported by the video
+ * capture device, you can call this method to get the specific video frame information supported by
+ * the specified index number.
*
- * @param deviceIdUTF8 ID of the video capture device.
- * @param deviceCapabilityNumber The index number of the video format. If the return value of `numberOfCapabilities`
- * is i, the value range of this parameter is [0,i).
- * @param capability Output parameter. Indicates the specific information of the specified video format,
- * including width (px), height (px), and frame rate (fps). See VideoFormat.
+ * @note This method is for Windows and macOS only.
+ *
+ * @param deviceIdUTF8 The ID of the video capture device.
+ * @param deviceCapabilityNumber The index number of the video format. If the return value of
+ * `numberOfCapabilities` is i, the value range of this parameter is [0,i).
+ * @param capability An output parameter. Indicates the specific information of the specified video
+ * format, including width (px), height (px), and frame rate (fps). See `VideoFormat`.
*
* @return
* - 0: Success.
@@ -3200,7 +3850,10 @@ class IVideoDeviceManager {
virtual int stopDeviceTest() = 0;
/**
- * Releases all the resources occupied by the `IVideoDeviceManager` object.
+ * @brief Releases all the resources occupied by the `IVideoDeviceManager` object.
+ *
+ * @note This method is for Windows and macOS only.
+ *
*/
virtual void release() = 0;
};
@@ -3213,65 +3866,80 @@ class IVideoDeviceManager {
class IVideoEffectObject : public RefCountInterface {
public:
virtual ~IVideoEffectObject() {}
-
+
/**
- * @brief Types of video effect nodes that can be applied.
+ * @brief Types of applicable video effect nodes.
*
* @since v4.6.0
*/
enum class VIDEO_EFFECT_NODE_ID : uint32_t {
- /** Beauty effect node. */
+ /**
+ * (1): Beauty effect node.
+ */
BEAUTY = 1U << 0,
- /** Style makeup effect node. */
+ /**
+ * (2): Style makeup effect node.
+ */
STYLE_MAKEUP = 1U << 1,
- /** Filter effect node. */
+ /**
+ * (4): Filter effect node.
+ */
FILTER = 1U << 2,
};
-
+
/**
* @brief Actions that can be performed on video effect nodes.
*
* @since v4.6.0
*/
enum VIDEO_EFFECT_ACTION {
- /** Save the current parameters of the video effect. */
+ /**
+ * (1): Save the current parameters of the video effect.
+ */
SAVE = 1,
- /** Reset the video effect to its default parameters. */
+ /**
+ * (2): Reset the video effect to default parameters.
+ */
RESET = 2,
};
/**
- * @brief Adds or updates video effects with specified node ID and template.
+ * @brief Adds or updates the video effect for the specified node ID and template.
*
* @since v4.6.0
*
- * @param nodeId The unique identifier or combination of video effect nodes. See #VIDEO_EFFECT_NODE_ID
- * Example:
- * - Single effect: `VIDEO_EFFECT_NODE_ID::BEAUTY`
- * - Combined effects: `VIDEO_EFFECT_NODE_ID::BEAUTY | VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP`
- *
- * @note Priority Rules:
- * - The `STYLE_MAKEUP` node takes precedence over `FILTER` parameters.
- * - To apply `FILTER` parameters, first remove the `STYLE_MAKEUP` node:
- * @code{.cpp}
- * removeVideoEffect(VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP);
- * addOrUpdateVideoEffect(VIDEO_EFFECT_NODE_ID::FILTER, "template name");
- * @endcode
+ * @note
+ * Priority rules:
+ * - The `VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP` node takes precedence over the
+ * `VIDEO_EFFECT_NODE_ID::FILTER` parameter.
+ * - To apply the `VIDEO_EFFECT_NODE_ID::FILTER` parameter, you must first remove the
+ * `VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP` node:
+ * ```
+ * removeVideoEffect(VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP);
+ * addOrUpdateVideoEffect(VIDEO_EFFECT_NODE_ID::FILTER, "template name");
+ * ```
*
- * @param templateName The name of the effect template. If set to null or an empty string, the SDK loads the default configuration from the resource bundle.
+ * @param nodeId The unique identifier or combination of identifiers for the video effect node. See
+ * `VIDEO_EFFECT_NODE_ID`.
+ * Examples:
+ * - Single effect: `VIDEO_EFFECT_NODE_ID::BEAUTY`
+ * - Combined effects: `VIDEO_EFFECT_NODE_ID::BEAUTY | VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP`
+ * @param templateName The name of the effect template. If set to null or an empty string, the SDK
+ * loads the default configuration from the resource package.
*
* @return
* - 0: Success.
- * - < 0: Failure. The specific error code can provide more details about the failure.
+ * - < 0: Failure.
*/
virtual int addOrUpdateVideoEffect(uint32_t nodeId, const char* templateName) = 0;
/**
- * @brief Removes a video effect with specified node ID.
+ * @brief Removes the video effect with the specified node ID.
*
* @since v4.6.0
*
- * @param nodeId The unique identifier of the video effect node to remove. See #VIDEO_EFFECT_NODE_ID
+ * @param nodeId The unique identifier of the video effect node to remove. See
+ * `VIDEO_EFFECT_NODE_ID`.
*
* @return
* - 0: Success.
@@ -3280,12 +3948,12 @@ class IVideoEffectObject : public RefCountInterface {
virtual int removeVideoEffect(uint32_t nodeId) = 0;
/**
- * @brief Performs an action on a specified video effect node.
+ * @brief Performs an action on the specified video effect node.
*
* @since v4.6.0
*
- * @param nodeId The unique identifier of the video effect node. See #VIDEO_EFFECT_NODE_ID
- * @param actionId The action to perform on the video effect. See #VIDEO_EFFECT_ACTION
+ * @param nodeId The unique identifier of the video effect node.
+ * @param actionId The action to perform. See `VIDEO_EFFECT_ACTION`.
*
* @return
* - 0: Success.
@@ -3294,11 +3962,11 @@ class IVideoEffectObject : public RefCountInterface {
virtual int performVideoEffectAction(uint32_t nodeId, VIDEO_EFFECT_ACTION actionId) = 0;
/**
- * @brief Sets a float parameter for the video effect.
+ * @brief Sets the float parameter for video effects.
*
* @since v4.6.0
*
- * @param option The option category of the parameter.
+ * @param option The category of the parameter option.
* @param key The key name of the parameter.
* @param param The float value to set.
*
@@ -3309,13 +3977,13 @@ class IVideoEffectObject : public RefCountInterface {
virtual int setVideoEffectFloatParam(const char* option, const char* key, float param) = 0;
/**
- * @brief Sets an integer parameter for the video effect.
+ * @brief Sets an integer parameter for video effects.
*
* @since v4.6.0
*
- * @param option The option category of the parameter.
+ * @param option The category of the option to which the parameter belongs.
* @param key The key name of the parameter.
- * @param param The integer value to set.
+ * @param param The integer parameter value to set.
*
* @return
* - 0: Success.
@@ -3324,15 +3992,15 @@ class IVideoEffectObject : public RefCountInterface {
virtual int setVideoEffectIntParam(const char* option, const char* key, int param) = 0;
/**
- * @brief Sets a boolean parameter for the video effect.
+ * @brief Sets the boolean parameter for video effects.
*
* @since v4.6.0
*
- * @param option The option category of the parameter.
+ * @param option The category of the parameter option.
* @param key The key name of the parameter.
* @param param The boolean value to set.
- * - true: Enable the option.
- * - false: Disable the option.
+ * - `true`: Enables the option.
+ * - `false`: Disables the option.
*
* @return
* - 0: Success.
@@ -3341,72 +4009,79 @@ class IVideoEffectObject : public RefCountInterface {
virtual int setVideoEffectBoolParam(const char* option, const char* key, bool param) = 0;
/**
- * @brief Gets a float parameter from the video effect.
+ * @brief Retrieves `float` type parameters in video effects.
*
* @since v4.6.0
*
- * @param option The option category of the parameter.
+ * @details
+ * Used to retrieve the value of a `float` type parameter corresponding to the specified option and
+ * key in video effects.
+ *
+ * @param option The category of the option to which the parameter belongs.
* @param key The key name of the parameter.
*
* @return
- * - The float value of the parameter if it exists.
- * - 0.0f if the parameter does not exist or an error occurs.
+ * - If the parameter exists, returns the corresponding `float` value.
+ * - If the parameter does not exist or an error occurs, returns 0.0f.
*/
virtual float getVideoEffectFloatParam(const char* option, const char* key) = 0;
/**
- * @brief Gets an integer parameter from the video effect.
+ * @brief Retrieves integer parameters in video effects.
*
* @since v4.6.0
*
- * @param option The option category of the parameter.
+ * @details
+ * Used to retrieve integer-type parameters in video effects.
+ *
+ * @param option The category of the parameter option.
* @param key The key name of the parameter.
*
* @return
- * - The integer value of the parameter if it exists.
- * - 0 if the parameter does not exist or an error occurs.
+ * - If the parameter exists, returns the corresponding integer value.
+ * - If the parameter does not exist or an error occurs, returns 0.
*/
virtual int getVideoEffectIntParam(const char* option, const char* key) = 0;
/**
- * @brief Gets a boolean parameter from the video effect.
+ * @brief Gets the boolean parameter in video effects.
*
* @since v4.6.0
*
- * @param option The option category of the parameter.
+ * @param option The option category to which the parameter belongs.
* @param key The key name of the parameter.
*
* @return
- * - true: The parameter is enabled.
- * - false: The parameter is disabled or does not exist.
+ * - `true`: The parameter is enabled.
+ * - `false`: The parameter is not enabled or does not exist.
*/
virtual bool getVideoEffectBoolParam(const char* option, const char* key) = 0;
};
/**
- * The context of IRtcEngine.
+ * @brief Configurations for the `RtcEngineContext` instance.
*/
struct RtcEngineContext {
/**
- * The event handler for IRtcEngine.
+ * The event handler for `IRtcEngine`. See `IRtcEngineEventHandler`.
*/
IRtcEngineEventHandler* eventHandler;
/**
* The App ID issued by Agora for your project. Only users in apps with the same App ID can join the
- * same channel and communicate with each other. An App ID can only be used to create one `IRtcEngine`
- * instance. To change your App ID, call release to destroy the current IRtcEngine instance, and then
- * create a new one.
+ * same channel and communicate with each other. An App ID can only be used to create one
+ * `IRtcEngine` instance. To change your App ID, call `release` to destroy the current `IRtcEngine`
+ * instance, and then create a new one.
*/
const char* appId;
/**
- * - For Android, it is the context of Activity or Application.
- * - For Windows, it is the window handle of app. Once set, this parameter enables you to plug
- * or unplug the video devices while they are powered.
+ * - For Windows, it is the window handle of the app. Once set, this parameter enables you to
+ * connect or disconnect the video devices while they are powered.
+ * - For Android, it is the context of Android Activity.
*/
void* context;
/**
- * The channel profile. See #CHANNEL_PROFILE_TYPE.
+ * The channel profile. See `CHANNEL_PROFILE_TYPE`.
*/
CHANNEL_PROFILE_TYPE channelProfile;
@@ -3416,37 +4091,35 @@ struct RtcEngineContext {
const char* license;
/**
- * The audio application scenario. See #AUDIO_SCENARIO_TYPE.
- *
- * @note Agora recommends the following scenarios:
- * - `AUDIO_SCENARIO_DEFAULT(0)`
- * - `AUDIO_SCENARIO_GAME_STREAMING(3)`
+ * The audio scenarios. Under different audio scenarios, the device uses different volume types. See
+ * `AUDIO_SCENARIO_TYPE`.
*/
AUDIO_SCENARIO_TYPE audioScenario;
/**
- * The region for connection. This is an advanced feature and applies to scenarios that have regional restrictions.
- *
- * For the regions that Agora supports, see #AREA_CODE. The area codes support bitwise operation.
- *
- * After specifying the region, the app integrated with the Agora SDK connects to the Agora servers
- * within that region.
+ * The region for connection. This is an advanced feature and applies to scenarios that have
+ * regional restrictions. For details on supported regions, see `AREA_CODE`. The area codes support
+ * bitwise operation.
*/
unsigned int areaCode;
/**
- * The log files that the SDK outputs. See LogConfig.
- *
- * By default, the SDK generates five SDK log files and five API call log files with the following rules:
- * - The SDK log files are: `agorasdk.log`, `agorasdk.1.log`, `agorasdk.2.log`, `agorasdk.3.log`, and `agorasdk.4.log`.
- * - The API call log files are: `agoraapi.log`, `agoraapi.1.log`, `agoraapi.2.log`, `agoraapi.3.log`, and `agoraapi.4.log`.
- * - The default size for each SDK log file is 1,024 KB; the default size for each API call log file is 2,048 KB. These log files are encoded in UTF-8.
+ * Sets the log file size. See `LogConfig`.
+ * By default, the SDK generates five SDK log files and five API call log files with the following
+ * rules:
+ * - The SDK log files are: `agorasdk.log`, `agorasdk.1.log`, `agorasdk.2.log`, `agorasdk.3.log`,
+ * and `agorasdk.4.log`.
+ * - The API call log files are: `agoraapi.log`, `agoraapi.1.log`, `agoraapi.2.log`,
+ * `agoraapi.3.log`, and `agoraapi.4.log`.
+ * - The default size of each SDK log file and API log file is 2,048 KB. These log files are encoded
+ * in UTF-8.
* - The SDK writes the latest logs in `agorasdk.log` or `agoraapi.log`.
- * - When `agorasdk.log` is full, the SDK processes the log files in the following order:
- * - Delete the `agorasdk.4.log` file (if any).
- * - Rename `agorasdk.3.log` to `agorasdk.4.log`.
- * - Rename `agorasdk.2.log` to `agorasdk.3.log`.
- * - Rename `agorasdk.1.log` to `agorasdk.2.log`.
- * - Create a new `agorasdk.log` file.
+ * - When `agorasdk.log` is full, the SDK processes the log files in the following order:
+ * 1. Delete the `agorasdk.4.log` file (if any).
+ * 2. Rename `agorasdk.3.log` to `agorasdk.4.log`.
+ * 3. Rename `agorasdk.2.log` to `agorasdk.3.log`.
+ * 4. Rename `agorasdk.1.log` to `agorasdk.2.log`.
+ * 5. Create a new `agorasdk.log` file.
+ * - The overwrite rules for the `agoraapi.log` file are the same as for `agorasdk.log`.
*/
commons::LogConfig logConfig;
@@ -3465,16 +4138,20 @@ struct RtcEngineContext {
bool useExternalEglContext;
/**
- * Determines whether to enable domain limit
- * -true: only connect to servers which already parsed by DNS
- * -false: (Default) connect to servers with no limit
+ * Whether to enable domain name restriction:
+ * - `true`: Enables the domain name restriction. This value is suitable for scenarios where IoT
+ * devices use IoT cards for network access. The SDK will only connect to servers in the domain name
+ * or IP whitelist that has been reported to the operator.
+ * - `false`: (Default) Disables the domain name restriction. This value is suitable for most common
+ * scenarios.
*/
bool domainLimit;
/**
- * Whether to automatically register Agora extensions when initializing RtcEngine.
- * -true: (Default) Automatically register Agora extensions.
- * -false: Do not automatically register Agora extensions. The user calls EnableExtension to manually register an Agora extension.
+ * Whether to automatically register the Agora extensions when initializing `IRtcEngine`:
+ * - `true`: (Default) Automatically register the Agora extensions when initializing `IRtcEngine`.
+ * - `false`: Do not register the Agora extensions when initializing `IRtcEngine`. You need to call
+ * `enableExtension` to register the Agora extensions.
*/
bool autoRegisterAgoraExtensions;
@@ -3490,16 +4167,17 @@ class IMetadataObserver {
public:
virtual ~IMetadataObserver() {}
- /** The metadata type.
- *
- * @note We only support video metadata for now.
+ /**
+ * @brief Metadata type of the observer. We only support video metadata for now.
*/
enum METADATA_TYPE
{
- /** -1: (Not supported) Unknown.
+ /**
+ * -1: The type of metadata is unknown.
*/
UNKNOWN_METADATA = -1,
- /** 0: (Supported) Video metadata.
+ /**
+ * 0: The type of metadata is video.
*/
VIDEO_METADATA = 0,
};
@@ -3513,133 +4191,191 @@ class IMetadataObserver {
MAX_METADATA_SIZE_IN_BYTE = 1024
};
- /** Metadata.
+ /**
+ * @brief Media metadata.
*/
struct Metadata
{
- /** The channel ID of the `metadata`.
+ /**
+ * The channel name.
*/
const char* channelId;
- /** The User ID that sent the metadata.
- * - For the receiver: The user ID of the user who sent the `metadata`.
- * - For the sender: Ignore this value.
+ /**
+ * The user ID.
+ * - For the recipient: The ID of the remote user who sent the `Metadata`.
+ * - For the sender: Ignore it.
*/
unsigned int uid;
- /** The buffer size of the sent or received `metadata`.
+ /**
+ * The buffer size of the sent or received `Metadata`.
*/
unsigned int size;
- /** The buffer address of the sent or received `metadata`.
+ /**
+ * The buffer address of the received `Metadata`.
*/
unsigned char *buffer;
- /** The NTP timestamp (ms) when the metadata is sent.
- * @note If the receiver is audience, the receiver cannot get the NTP timestamp (ms).
+ /**
+ * The timestamp (ms) of when the `Metadata` is sent.
*/
long long timeStampMs;
Metadata() : channelId(NULL), uid(0), size(0), buffer(NULL), timeStampMs(0) {}
};
- /** Occurs when the SDK requests the maximum size of the metadata.
- *
- *
- * After successfully complete the registration by calling `registerMediaMetadataObserver`, the SDK
- * triggers this callback once every video frame is sent. You need to specify the maximum size of
- * the metadata in the return value of this callback.
- *
- * @return The maximum size of the buffer of the metadata that you want to use. The highest value is
- * 1024 bytes. Ensure that you set the return value.
- */
+ /**
+ * @brief Occurs when the SDK requests the maximum size of the metadata.
+ *
+ * @details
+ * After successfully complete the registration by calling `registerMediaMetadataObserver`, the SDK
+ * triggers this callback once every video frame is sent. You need to specify the maximum size of
+ * the metadata in the return value of this callback.
+ *
+ * @return
+ * The maximum size of the `buffer` of the metadata that you want to use. The highest value is 1024
+ * bytes. Ensure that you set the return value.
+ */
virtual int getMaxMetadataSize() { return DEFAULT_METADATA_SIZE_IN_BYTE; }
- /** Occurs when the local user receives the metadata.
-
- @note Ensure that the size of the metadata does not exceed the value set in the `getMaxMetadataSize` callback.
-
- @param metadata The metadata that the user wants to send. For details, see Metadata.
- @param source_type The video data type: #VIDEO_SOURCE_TYPE.
- @return
- - true: Send.
- - false: Do not send.
+ /**
+ * @brief Occurs when the SDK is ready to send metadata.
+ *
+ * @details
+ * This callback is triggered when the SDK is ready to send metadata.
+ *
+ * @note Ensure that the size of the metadata does not exceed the value set in the
+ * `getMaxMetadataSize` callback.
+ *
+ * @param source_type Video data type. See `VIDEO_SOURCE_TYPE`.
+ * @param metadata The metadata that the user wants to send. See `Metadata`.
+ *
+ * @return
+ * - `true`: Send the video frame.
+ * - `false`: Do not send the video frame.
*/
virtual bool onReadyToSendMetadata(Metadata &metadata, VIDEO_SOURCE_TYPE source_type) = 0;
- /** Occurs when the local user receives the metadata.
+ /**
+ * @brief Occurs when the local user receives the metadata.
*
- * @param metadata The metadata received. See Metadata.
+ * @param metadata The metadata received. See `Metadata`.
*
- * @note If the receiver is audience, the receiver cannot get the NTP timestamp (ms)
- * that the metadata sends.
*/
virtual void onMetadataReceived(const Metadata& metadata) = 0;
};
-// The reason codes for media streaming
-// GENERATED_JAVA_ENUM_PACKAGE: io.agora.streaming
+/**
+ * @brief Reasons for the changes in CDN streaming status.
+ *
+ * @deprecated v4.6.0.
+ */
enum DIRECT_CDN_STREAMING_REASON {
// No error occurs.
+ /**
+ * 0: No error.
+ */
DIRECT_CDN_STREAMING_REASON_OK = 0,
// A general error occurs (no specified reason).
+ /**
+ * 1: A general error; no specific reason. You can try to push the media stream again.
+ */
DIRECT_CDN_STREAMING_REASON_FAILED = 1,
// Audio publication error.
+ /**
+ * 2: An error occurs when pushing audio streams. For example, the local audio capture device is not
+ * working properly, is occupied by another process, or does not get the permission required.
+ */
DIRECT_CDN_STREAMING_REASON_AUDIO_PUBLICATION = 2,
// Video publication error.
+ /**
+ * 3: An error occurs when pushing video streams. For example, the local video capture device is not
+ * working properly, is occupied by another process, or does not get the permission required.
+ */
DIRECT_CDN_STREAMING_REASON_VIDEO_PUBLICATION = 3,
+ /**
+ * 4: Fails to connect to the CDN.
+ */
DIRECT_CDN_STREAMING_REASON_NET_CONNECT = 4,
// Already exist stream name.
+ /**
+ * 5: The URL is already being used. Use a new URL for streaming.
+ */
DIRECT_CDN_STREAMING_REASON_BAD_NAME = 5,
};
-// The connection state of media streaming
-// GENERATED_JAVA_ENUM_PACKAGE: io.agora.streaming
+/**
+ * @brief The current CDN streaming state.
+ *
+ * @deprecated v4.6.0.
+ */
enum DIRECT_CDN_STREAMING_STATE {
+ /**
+ * 0: The initial state before the CDN streaming starts.
+ */
DIRECT_CDN_STREAMING_STATE_IDLE = 0,
+ /**
+ * 1: Streams are being pushed to the CDN. The SDK returns this value when you call the
+ * `startDirectCdnStreaming` method to push streams to the CDN.
+ */
DIRECT_CDN_STREAMING_STATE_RUNNING = 1,
+ /**
+ * 2: Stops pushing streams to the CDN. The SDK returns this value when you call the
+ * `stopDirectCdnStreaming` method to stop pushing streams to the CDN.
+ */
DIRECT_CDN_STREAMING_STATE_STOPPED = 2,
+ /**
+ * 3: Fails to push streams to the CDN. You can troubleshoot the issue with the information reported
+ * by the `onDirectCdnStreamingStateChanged` callback, and then push streams to the CDN again.
+ */
DIRECT_CDN_STREAMING_STATE_FAILED = 3,
+ /**
+ * 4: Tries to reconnect the Agora server to the CDN. The SDK attempts to reconnect a maximum of 10
+ * times; if the connection is not restored, the streaming state becomes
+ * DIRECT_CDN_STREAMING_STATE_FAILED.
+ */
DIRECT_CDN_STREAMING_STATE_RECOVERING = 4,
};
/**
- * The statistics of the Direct Cdn Streams.
- *
+ * @brief The statistics of the current CDN streaming.
+ *
* @deprecated v4.6.0.
*/
struct DirectCdnStreamingStats {
/**
- * Width of the video pushed by rtmp.
+ * The width (px) of the video frame.
*/
int videoWidth;
/**
- * Height of the video pushed by rtmp.
+ * The height (px) of the video frame.
*/
int videoHeight;
/**
- * The frame rate of the video pushed by rtmp.
+ * The frame rate (fps) of the current video frame.
*/
int fps;
/**
- * Real-time bit rate of the video streamed by rtmp.
+ * The bitrate (bps) of the current video frame.
*/
int videoBitrate;
/**
- * Real-time bit rate of the audio pushed by rtmp.
+ * The bitrate (bps) of the current audio frame.
*/
int audioBitrate;
};
/**
* The event handler for direct cdn streaming
- *
+ *
* @deprecated v4.6.0.
*
*/
@@ -3648,10 +4384,18 @@ class IDirectCdnStreamingEventHandler {
virtual ~IDirectCdnStreamingEventHandler() {}
/**
- * Event callback of direct cdn streaming
- * @param state Current status
- * @param reason Reason Code
- * @param message Message
+ * @brief Occurs when the CDN streaming state changes.
+ *
+ * @details
+ * When the host directly pushes streams to the CDN, if the streaming state changes, the SDK
+ * triggers this callback to report the changed streaming state, error codes, and other information.
+ * You can troubleshoot issues by referring to this callback.
+ *
+ * @param state The current CDN streaming state. See `DIRECT_CDN_STREAMING_STATE`.
+ * @param reason Reasons for changes in the status of CDN streaming. See
+ * `DIRECT_CDN_STREAMING_REASON`.
+ * @param message The information about the changed streaming state.
+ *
*/
virtual void onDirectCdnStreamingStateChanged(DIRECT_CDN_STREAMING_STATE state, DIRECT_CDN_STREAMING_REASON reason, const char* message) {
(void)state;
@@ -3659,39 +4403,49 @@ class IDirectCdnStreamingEventHandler {
(void)message;
};
+ /**
+ * @brief Reports the CDN streaming statistics.
+ *
+ * @details
+ * When the host directly pushes media streams to the CDN, the SDK triggers this callback every one
+ * second.
+ *
+ * @param stats The statistics of the current CDN streaming. See `DirectCdnStreamingStats`.
+ *
+ */
virtual void onDirectCdnStreamingStats(const DirectCdnStreamingStats& stats) {
(void)stats;
};
};
/**
- * The channel media options.
- *
+ * @brief The media setting options for the host.
+ *
* @deprecated v4.6.0.
*/
struct DirectCdnStreamingMediaOptions {
/**
- * Determines whether to publish the video of the camera track.
- * - true: Publish the video track of the camera capturer.
- * - false: (Default) Do not publish the video track of the camera capturer.
+ * Sets whether to publish the video captured by the camera:
+ * - `true`: Publish the video captured by the camera.
+ * - `false`: (Default) Do not publish the video captured by the camera.
*/
Optional publishCameraTrack;
/**
- * Determines whether to publish the recorded audio.
- * - true: Publish the recorded audio.
- * - false: (Default) Do not publish the recorded audio.
+ * Sets whether to publish the audio captured by the microphone:
+ * - `true`: Publish the audio captured by the microphone.
+ * - `false`: (Default) Do not publish the audio captured by the microphone.
*/
Optional publishMicrophoneTrack;
/**
- * Determines whether to publish the audio of the custom audio track.
- * - true: Publish the audio of the custom audio track.
- * - false: (Default) Do not publish the audio of the custom audio track.
+ * Sets whether to publish the captured audio from a custom source:
+ * - `true`: Publish the captured audio from a custom source.
+ * - `false`: (Default) Do not publish the captured audio from the custom source.
*/
Optional publishCustomAudioTrack;
/**
- * Determines whether to publish the video of the custom video track.
- * - true: Publish the video of the custom video track.
- * - false: (Default) Do not publish the video of the custom video track.
+ * Sets whether to publish the captured video from a custom source:
+ * - `true`: Publish the captured video from a custom source.
+ * - `false`: (Default) Do not publish the captured video from the custom source.
*/
Optional publishCustomVideoTrack;
/**
@@ -3706,8 +4460,8 @@ struct DirectCdnStreamingMediaOptions {
*/
Optional publishMediaPlayerId;
/**
- * The custom video track id which will used to publish.
- * You can get the VideoTrackId after calling createCustomVideoTrack() of IRtcEngine.
+ * The video track ID returned by calling the `createCustomVideoTrack` method. The default value is
+ * 0.
*/
Optional customVideoTrackId;
@@ -3805,9 +4559,16 @@ class IMediaPlayer;
class IMediaRecorder;
/**
+ * @brief Callback triggered when `IRtcEngine` is released.
+ *
* @since v4.6.0
- * @brief Occurs when the `IRtcEngine` is released.
- * @post This callback is triggered when the `release` method is called to asynchronously release the `IRtcEngine` object.
+ *
+ * @details
+ * This callback is triggered when the `release` method is called to asynchronously release the
+ * `IRtcEngine` object.
+ * Call timing: This callback is triggered when the `release` method is called to asynchronously
+ * release the `IRtcEngine` object.
+ *
*/
using RtcEngineReleaseCallback = void(*)();
@@ -3820,53 +4581,63 @@ using RtcEngineReleaseCallback = void(*)();
class IRtcEngine : public agora::base::IEngineBase {
public:
/**
- * Releases the IRtcEngine object.
+ * @brief Releases the `IRtcEngine` instance.
*
+ * @details
* This method releases all resources used by the Agora SDK. Use this method for apps in which users
- * occasionally make voice or video calls. When users do not make calls, you can free up resources for
- * other operations.
- *
+ * occasionally make voice or video calls. When users do not make calls, you can free up resources
+ * for other operations.
* After a successful method call, you can no longer use any method or callback in the SDK anymore.
- * If you want to use the real-time communication functions again, you must call `createAgoraRtcEngine`
- * and `initialize` to create a new `IRtcEngine` instance.
+ * If you want to use the real-time communication functions again, you must call
+ * `createAgoraRtcEngine` and `initialize` to create a new `IRtcEngine` instance.
*
- * @note If you want to create a new `IRtcEngine` instance after destroying the current one, ensure
- * that you wait till the `release` method execution to complete.
+ * @note Agora does not recommend you calling `release` in any callback of the SDK. Otherwise, the
+ * SDK cannot release the resources until the callbacks return results, which may result in a
+ * deadlock.
+ *
+ * @param callback (Optional) Callback function pointer for setting the destruction mode of the
+ * engine to either synchronous or asynchronous. See `RtcEngineReleaseCallback`.
+ * - Non `nullptr`: Destroy the engine asynchronously. The method will return immediately, at which
+ * point the engine resources may not have been fully released yet. After the engine is destroyed,
+ * the SDK triggers `RtcEngineReleaseCallback`.
+ * - `nullptr`: Destroy the engine synchronously. This method only returns after the engine
+ * resources have been fully released.
*
- * @param callback An optional function pointer of `RtcEngineReleaseCallback`. It determines
- * whether this method is a synchronous call.
- * - `non-nullptr`: This method is an asynchronous call. The result returns immediately even when the
- * `IRtcEngine` object resources are not released, and `onEngineReleased` callback will be triggered
- * when engine is released.
- * - `nullptr`: This method is a synchronous call, which means that the result of this method call
- * returns after the `IRtcEngine` object resources are released. Do not call this method
- * in any callback generated by the SDK, or it may result in a deadlock.
*/
AGORA_CPP_API static void release(RtcEngineReleaseCallback callback = nullptr);
/**
- * Initializes `IRtcEngine`.
+ * @brief Initializes `IRtcEngine`.
+ *
+ * @details
+ * Call timing: Before calling other APIs, you must call `createAgoraRtcEngine` and `initialize` to
+ * create and initialize the `IRtcEngine` object.
*
+ * @note
+ * The SDK supports creating only one `IRtcEngine` instance for an app.
* All called methods provided by the `IRtcEngine` class are executed asynchronously. Agora
* recommends calling these methods in the same thread.
*
- * @note
- * - Before calling other APIs, you must call `createAgoraRtcEngine` and `initialize `to create and
- * initialize the `IRtcEngine` object.
- * - The SDK supports creating only one `IRtcEngine` instance for an app.
+ * @param context Configurations for the `IRtcEngine` instance. See `RtcEngineContext`.
*
- * @param context The RtcEngineContext object.
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ * - -7: The SDK is not initialized.
+ * - -22: The resource request failed. The SDK fails to allocate resources because your app
+ * consumes too much system resource or the system resources are insufficient.
+ * - -101: The App ID is invalid.
*/
virtual int initialize(const RtcEngineContext& context) = 0;
/**
- * Gets the pointer to the specified interface.
+ * @brief Gets the pointer to the specified interface.
+ *
+ * @param iid The ID of the interface. See `INTERFACE_ID_TYPE`.
+ * @param inter An output parameter. The pointer to the specified interface.
*
- * @param iid The ID of the interface. See #INTERFACE_ID_TYPE for details.
- * @param inter Output parameter. The pointer to the specified interface.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -3875,313 +4646,436 @@ class IRtcEngine : public agora::base::IEngineBase {
/**
- * Gets the SDK version.
- * @param build The build number.
- * @return The version of the current SDK in the string format.
+ * @brief Gets the SDK version.
+ *
+ * @param build The SDK build index.
+ *
+ * @return
+ * The SDK version number. The format is a string.
*/
virtual const char* getVersion(int* build) = 0;
/**
- * Gets the warning or error description.
- * @param code The error code or warning code reported by the SDK.
- * @return The specific error or warning description.
+ * @brief Gets the warning or error description.
+ *
+ * @param code The error code reported by the SDK.
+ *
+ * @return
+ * The specific error description.
*/
virtual const char* getErrorDescription(int code) = 0;
/**
- * Queries the capacity of the current device codec.
+ * @brief Queries the video codec capabilities of the SDK.
+ *
+ * @param codecInfo Input and output parameter. An array representing the video codec capabilities
+ * of the SDK. See `CodecCapInfo`.
+ * - Input value: One `CodecCapInfo` defined by the user when executing this method, representing
+ * the video codec capability to be queried.
+ * - Output value: The `CodecCapInfo` after the method is executed, representing the actual video
+ * codec capabilities of the SDK.
+ * @param size Input and output parameter, represent the size of the `CodecCapInfo` array.
+ * - Input value: Size of the `CodecCapInfo` defined by the user when executing the method.
+ * - Output value: Size of the output `CodecCapInfo` after this method is executed.
*
- * @param codec_info An array of the codec cap information: CodecCapInfo.
- * @param size The array size.
* @return
- * 0: Success.
- * < 0: Failure.
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int queryCodecCapability(CodecCapInfo* codecInfo, int& size) = 0;
- /**
- * Queries the score of the current device.
+ /**
+ * @brief Queries device score.
*
- * @return
- * > 0: If the value is greater than 0, it means that the device score has been retrieved and represents the score value.
- * Most devices score between 60-100, with higher scores indicating better performance.
+ * @details
+ * Applicable scenarios: In high-definition or ultra-high-definition video scenarios, you can first
+ * call this method to query the device's score. If the returned score is low (for example, below
+ * 60), you need to lower the video resolution to avoid affecting the video experience. The minimum
+ * device score required for different business scenarios is varied. For specific score
+ * recommendations, please contact `technical support`.
*
- * < 0: Failure.
+ * @return
+ * - >0: The method call succeeds, the value is the current device's score, the range is [0,100],
+ * the larger the value, the stronger the device capability. Most devices are rated between 60 and
+ * 100.
+ * - < 0: Failure.
*/
virtual int queryDeviceScore() = 0;
/**
- * Preload a channel.
- *
- * This method enables users to preload a channel.
+ * @brief Preloads a channel with `token`, `channelId`, and `uid`.
*
- * A successful call of this method will reduce the time of joining the same channel.
- *
- * Note:
- * 1. The SDK supports preloading up to 20 channels. Once the preloaded channels exceed the limit, the SDK will keep the latest 20 available.
- * 2. Renew the token of the preloaded channel by calling this method with the same 'channelId' and 'uid'.
+ * @details
+ * When audience members need to switch between different channels frequently, calling the method
+ * can help shortening the time of joining a channel, thus reducing the time it takes for audience
+ * members to hear and see the host.
+ * If you join a preloaded channel, leave it and want to rejoin the same channel, you do not need to
+ * call this method unless the token for preloading the channel expires.
+ * Call timing: To improve the user experience of preloading channels, Agora recommends that before
+ * joining the channel, calling this method as early as possible once confirming the channel name
+ * and user information.
*
- * @param token The token generated on your server for authentication.
- * @param channelId The channel name. This parameter signifies the channel in which users engage in
- * real-time audio and video interaction. Under the premise of the same App ID, users who fill in
- * the same channel ID enter the same channel for audio and video interaction. The string length
- * must be less than 64 bytes. Supported character scopes are:
+ * @note
+ * - When calling this method, ensure you set the user role as audience and do not set the audio
+ * scenario as `AUDIO_SCENARIO_CHORUS`, otherwise, this method does not take effect.
+ * - You also need to make sure that the channel name, user ID and token passed in for preloading
+ * are the same as the values passed in when joining the channel, otherwise, this method does not
+ * take effect.
+ * - One `IRtcEngine` instance supports preloading 20 channels at most. When exceeding this limit,
+ * the latest 20 preloaded channels take effect.
+ * Failing to preload a channel does not mean that you can't join a channel, nor will it increase
+ * the time of joining a channel.
+ *
+ * @param token The token generated on your server for authentication. When the token for
+ * preloading channels expires, you can update the token based on the number of channels you
+ * preload.
+ * - When preloading one channel, calling this method to pass in the new token.
+ * - When preloading more than one channels:
+ * - If you use a wildcard token for all preloaded channels, call `updatePreloadChannelToken` to
+ * update the token. Note: When generating a wildcard token, ensure the user ID is not set as 0. See
+ * `Secure authentication with tokens`.
+ * - If you use different tokens to preload different channels, call this method to pass in your
+ * user ID, channel name and the new token.
+ * @param channelId The channel name that you want to preload. This parameter signifies the channel
+ * in which users engage in real-time audio and video interaction. Under the premise of the same App
+ * ID, users who fill in the same channel ID enter the same channel for audio and video interaction.
+ * The string length must be less than 64 bytes. Supported characters (89 characters in total):
* - All lowercase English letters: a to z.
* - All uppercase English letters: A to Z.
* - All numeric characters: 0 to 9.
- * - The space character.
- * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-",
- * ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",".
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
* @param uid The user ID. This parameter is used to identify the user in the channel for real-time
* audio and video interaction. You need to set and manage user IDs yourself, and ensure that each
* user ID in the same channel is unique. This parameter is a 32-bit unsigned integer. The value
- * range is 1 to 232-1. If the user ID is not assigned (or set to 0), the SDK assigns a random user
- * ID and returns it in the onJoinChannelSuccess callback. Your application must record and maintain
- * the returned user ID, because the SDK does not do so.
+ * range is 1 to 2^32-1. If the user ID is not assigned (or set to 0), the SDK assigns a random user
+ * ID and returns it in the `onJoinChannelSuccess` callback. Your application must record and
+ * maintain the returned user ID, because the SDK does not do so.
*
* @return
* - 0: Success.
* - < 0: Failure.
- * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
* object before calling this method.
- * - -102: The channel name is invalid. You need to pass in a valid channel name in channelId to
- * preload the channel again.
+ * - -102: The channel name is invalid. You need to pass in a valid channel name and join the
+ * channel again.
*/
virtual int preloadChannel(const char* token, const char* channelId, uid_t uid) = 0;
/**
- * Preload a channel.
- *
- * This method enables users to preload a channel.
- *
- * A successful call of this method will reduce the time of joining the same channel.
+ * @brief Preloads a channel with `token`, `channelId`, and `userAccount`.
*
- * Note:
- * 1. The SDK supports preloading up to 20 channels. Once the preloaded channels exceed the limit, the SDK will keep the latest 20 available.
- * 2. Renew the token of the preloaded channel by calling this method with the same 'channelId' and 'userAccount'.
+ * @details
+ * When audience members need to switch between different channels frequently, calling the method
+ * can help shortening the time of joining a channel, thus reducing the time it takes for audience
+ * members to hear and see the host.
+ * If you join a preloaded channel, leave it and want to rejoin the same channel, you do not need to
+ * call this method unless the token for preloading the channel expires.
+ * Call timing: To improve the user experience of preloading channels, Agora recommends that before
+ * joining the channel, calling this method as early as possible once confirming the channel name
+ * and user information.
*
- * @param token The token generated on your server for authentication.
- * @param channelId The channel name. This parameter signifies the channel in which users engage in
- * real-time audio and video interaction. Under the premise of the same App ID, users who fill in
- * the same channel ID enter the same channel for audio and video interaction. The string length
- * must be less than 64 bytes. Supported character scopes are:
+ * @note
+ * - When calling this method, ensure you set the user role as audience and do not set the audio
+ * scenario as `AUDIO_SCENARIO_CHORUS`, otherwise, this method does not take effect.
+ * - You also need to make sure that the User Account, channel ID and token passed in for preloading
+ * are the same as the values passed in when joining the channel, otherwise, this method does not
+ * take effect.
+ * - One `IRtcEngine` instance supports preloading 20 channels at most. When exceeding this limit,
+ * the latest 20 preloaded channels take effect.
+ * Failing to preload a channel does not mean that you can't join a channel, nor will it increase
+ * the time of joining a channel.
+ *
+ * @param token The token generated on your server for authentication. When the token for
+ * preloading channels expires, you can update the token based on the number of channels you
+ * preload.
+ * - When preloading one channel, call this method to pass in the new token.
+ * - When preloading more than one channel:
+ * - If you use a wildcard token for all preloaded channels, call `updatePreloadChannelToken` to
+ * update the token. Note: When generating a wildcard token, ensure the user ID is not set as 0. See
+ * `Secure authentication with tokens`.
+ * - If you use different tokens to preload different channels, call this method to pass in your
+ * user ID, channel name and the new token.
+ * @param channelId The channel name that you want to preload. This parameter signifies the channel
+ * in which users engage in real-time audio and video interaction. Under the premise of the same App
+ * ID, users who fill in the same channel ID enter the same channel for audio and video interaction.
+ * The string length must be less than 64 bytes. Supported characters (89 characters in total):
* - All lowercase English letters: a to z.
* - All uppercase English letters: A to Z.
* - All numeric characters: 0 to 9.
- * - The space character.
- * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-",
- * ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",".
- * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are:
- * - All lowercase English letters: a to z.
- * - All uppercase English letters: A to Z.
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
+ * @param userAccount The user account. This parameter is used to identify the user in the channel
+ * for real-time audio and video engagement. You need to set and manage user accounts yourself and
+ * ensure that each user account in the same channel is unique. The maximum length of this parameter
+ * is 255 bytes. Ensure that you set this parameter and do not set it as NULL. Supported characters
+ * are as follows (89 in total):
+ * - The 26 lowercase English letters: a to z.
+ * - The 26 uppercase English letters: A to Z.
* - All numeric characters: 0 to 9.
- * - The space character.
- * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",".
+ * - Space
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
*
* @return
* - 0: Success.
* - < 0: Failure.
- * - -2: The parameter is invalid. For example, the userAccount parameter is empty.
- * You need to pass in a valid parameter and preload the channel again.
- * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine
+ * - -2: The parameter is invalid. For example, the User Account is empty. You need to pass in a
+ * valid parameter and join the channel again.
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
* object before calling this method.
- * - -102: The channel name is invalid. You need to pass in a valid channel name in channelId to
- * preload the channel again.
+ * - -102: The channel name is invalid. You need to pass in a valid channel name and join the
+ * channel again.
*/
virtual int preloadChannelWithUserAccount(const char* token, const char* channelId, const char* userAccount) = 0;
/**
- * Update token of the preloaded channels.
+ * @brief Updates the wildcard token for preloading channels.
*
- * An easy way to update all preloaded channels' tokens, if all preloaded channels use the same token.
+ * @details
+ * You need to maintain the life cycle of the wildcard token by yourself. When the token expires,
+ * you need to generate a new wildcard token and then call this method to pass in the new token.
+ * Applicable scenarios: In scenarios involving multiple channels, such as switching between
+ * different channels, using a wildcard token means users do not need to apply for a new token every
+ * time joining a new channel, which can save users time for switching channels and reduce the
+ * pressure on your token server.
*
- * If preloaded channels use different tokens, we need to call the 'preloadChannel' method with the same 'channelId'
- * and 'uid' or 'userAccount' to update the corresponding token.
- *
- * @param token The token generated on your server for authentication.
+ * @param token The new token.
*
* @return
* - 0: Success.
* - < 0: Failure.
- * - -2: The token is invalid. You need to pass in a valid token and update the token again.
- * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine
+ * - -2: The parameter is invalid. For example, the token is invalid. You need to pass in a valid
+ * parameter and join the channel again.
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
* object before calling this method.
*/
virtual int updatePreloadChannelToken(const char* token) = 0;
/**
- * Joins a channel.
+ * @brief Joins a channel.
*
- * This method enables users to join a channel. Users in the same channel can talk to each other,
- * and multiple users in the same channel can start a group chat. Users with different App IDs
- * cannot call each other.
- *
- * A successful call of this method triggers the following callbacks:
+ * @details
+ * By default, the user subscribes to the audio and video streams of all the other users in the
+ * channel, giving rise to usage and **billings**. To stop subscribing to a specified stream or all
+ * remote streams, call the corresponding `mute` methods.
+ * Call timing: Call this method after `initialize`.
+ * Related callbacks: A successful call of this method triggers the following callbacks:
* - The local client: The `onJoinChannelSuccess` and `onConnectionStateChanged` callbacks.
- * - The remote client: `onUserJoined`, if the user joining the channel is in the Communication
- * profile or is a host in the Live-broadcasting profile.
- *
- * When the connection between the client and Agora's server is interrupted due to poor network
- * conditions, the SDK tries reconnecting to the server. When the local client successfully rejoins
- * the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client.
+ * - The remote client: The `onUserJoined` callback, if a user joins the channel in the
+ * COMMUNICATION profile, or a host joins a channel in the LIVE_BROADCASTING profile.
+ * When the connection between the local client and Agora's server is interrupted due to poor
+ * network conditions, the SDK tries reconnecting to the server. When the local client successfully
+ * rejoins the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client.
*
- * @note Once a user joins the channel, the user subscribes to the audio and video streams of all
- * the other users in the channel by default, giving rise to usage and billing calculation. To
- * stop subscribing to a specified stream or all remote streams, call the corresponding `mute` methods.
- *
- * @param token The token generated on your server for authentication.
+ * @note
+ * - This method only supports users joining one channel at a time.
+ * - Users with different App IDs cannot call each other.
+ * - Before joining a channel, ensure that the App ID you use to generate a token is the same as
+ * that you pass in the `initialize` method; otherwise, you may fail to join the channel with the
+ * token.
+ *
+ * @param token The token generated on your server for authentication. Note:
+ * - (Recommended) If your project has enabled the security mode (using APP ID and Token for
+ * authentication), this parameter is required.
+ * - If you have only enabled the testing mode (using APP ID for authentication), this parameter is
+ * optional. You will automatically exit the channel 24 hours after successfully joining it.
+ * - If you need to join different channels at the same time or switch between channels, Agora
+ * recommends using a wildcard token so that you don't need to apply for a new token every time
+ * joining a channel. See `Secure authentication with tokens`.
* @param channelId The channel name. This parameter signifies the channel in which users engage in
* real-time audio and video interaction. Under the premise of the same App ID, users who fill in
* the same channel ID enter the same channel for audio and video interaction. The string length
- * must be less than 64 bytes. Supported character scopes are:
+ * must be less than 64 bytes. Supported characters (89 characters in total):
* - All lowercase English letters: a to z.
* - All uppercase English letters: A to Z.
* - All numeric characters: 0 to 9.
- * - The space character.
- * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-",
- * ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",".
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
* @param info (Optional) Reserved for future use.
* @param uid The user ID. This parameter is used to identify the user in the channel for real-time
* audio and video interaction. You need to set and manage user IDs yourself, and ensure that each
* user ID in the same channel is unique. This parameter is a 32-bit unsigned integer. The value
- * range is 1 to 232-1. If the user ID is not assigned (or set to 0), the SDK assigns a random user
- * ID and returns it in the onJoinChannelSuccess callback. Your application must record and maintain
- * the returned user ID, because the SDK does not do so.
+ * range is 1 to 2^32-1. If the user ID is not assigned (or set to 0), the SDK assigns a random
+ * user ID and returns it in the `onJoinChannelSuccess` callback. Your application must record and
+ * maintain the returned user ID, because the SDK does not do so.
*
* @return
* - 0: Success.
* - < 0: Failure.
- * - -2: The parameter is invalid. For example, the token is invalid, the uid parameter is not set
- * to an integer, or the value of a member in the `ChannelMediaOptions` structure is invalid. You need
- * to pass in a valid parameter and join the channel again.
- * - -3: Failes to initialize the `IRtcEngine` object. You need to reinitialize the IRtcEngine object.
- * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine
+ * - -2: The parameter is invalid. For example, the token is invalid, the `uid` parameter is not
+ * set to an integer, or the value of a member in `ChannelMediaOptions` is invalid. You need to pass
+ * in a valid parameter and join the channel again.
+ * - -3: Fails to initialize the `IRtcEngine` object. You need to reinitialize the `IRtcEngine`
+ * object.
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
* object before calling this method.
- * - -8: The internal state of the IRtcEngine object is wrong. The typical cause is that you call
- * this method to join the channel without calling `stopEchoTest` to stop the test after calling
- * `startEchoTest` to start a call loop test. You need to call `stopEchoTest` before calling this method.
- * - -17: The request to join the channel is rejected. The typical cause is that the user is in the
- * channel. Agora recommends using the `onConnectionStateChanged` callback to get whether the user is
- * in the channel. Do not call this method to join the channel unless you receive the
- * `CONNECTION_STATE_DISCONNECTED(1)` state.
- * - -102: The channel name is invalid. You need to pass in a valid channel name in channelId to
+ * - -8: The internal state of the `IRtcEngine` object is wrong. The typical cause is that after
+ * calling `startEchoTest` to start a call loop test, you call this method to join the channel
+ * without calling `stopEchoTest` to stop the test. You need to call `stopEchoTest` before calling
+ * this method.
+ * - -17: The request to join the channel is rejected. The typical cause is that the user is
+ * already in the channel. Agora recommends that you use the `onConnectionStateChanged` callback to
+ * see whether the user is in the channel. Do not call this method to join the channel unless you
+ * receive the `CONNECTION_STATE_DISCONNECTED` (1) state.
+ * - -102: The channel name is invalid. You need to pass in a valid channel name in `channelId` to
* rejoin the channel.
- * - -121: The user ID is invalid. You need to pass in a valid user ID in uid to rejoin the channel.
+ * - -121: The user ID is invalid. You need to pass in a valid user ID in `uid` to rejoin the
+ * channel.
*/
virtual int joinChannel(const char* token, const char* channelId, const char* info, uid_t uid) = 0;
/**
- * Joins a channel with media options.
- *
- * This method enables users to join a channel. Users in the same channel can talk to each other,
- * and multiple users in the same channel can start a group chat. Users with different App IDs
- * cannot call each other.
+ * @brief Joins a channel with media options.
*
- * A successful call of this method triggers the following callbacks:
+ * @details
+ * Compared to `joinChannel(const char* token, const char* channelId, const char* info, uid_t uid)`,
+ * this method has the `options` parameter which is used to set
+ * media options, such as whether to publish audio and video streams within a channel, or whether to
+ * automatically subscribe to the audio and video streams of all remote users when joining a
+ * channel. By default, the user subscribes to the audio and video streams of all the other users in
+ * the channel, giving rise to usage and **billings**. To stop subscribing to other streams, set the
+ * `options` parameter or call the corresponding `mute` methods.
+ * Call timing: Call this method after `initialize`.
+ * Related callbacks: A successful call of this method triggers the following callbacks:
* - The local client: The `onJoinChannelSuccess` and `onConnectionStateChanged` callbacks.
- * - The remote client: `onUserJoined`, if the user joining the channel is in the Communication
- * profile or is a host in the Live-broadcasting profile.
- *
- * When the connection between the client and Agora's server is interrupted due to poor network
- * conditions, the SDK tries reconnecting to the server. When the local client successfully rejoins
- * the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client.
- *
- * Compared to `joinChannel`, this method adds the options parameter to configure whether to
- * automatically subscribe to all remote audio and video streams in the channel when the user
- * joins the channel. By default, the user subscribes to the audio and video streams of all
- * the other users in the channel, giving rise to usage and billings. To unsubscribe, set the
- * `options` parameter or call the `mute` methods accordingly.
+ * - The remote client: The `onUserJoined` callback, if a user joins the channel in the
+ * COMMUNICATION profile, or a host joins a channel in the LIVE_BROADCASTING profile.
+ * When the connection between the local client and Agora's server is interrupted due to poor
+ * network conditions, the SDK tries reconnecting to the server. When the local client successfully
+ * rejoins the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client.
*
* @note
- * - This method allows users to join only one channel at a time.
- * - Ensure that the app ID you use to generate the token is the same app ID that you pass in the
- * `initialize` method; otherwise, you may fail to join the channel by token.
- *
- * @param token The token generated on your server for authentication.
- *
+ * - This method only supports users joining one channel at a time.
+ * - Users with different App IDs cannot call each other.
+ * - Before joining a channel, ensure that the App ID you use to generate a token is the same as
+ * that you pass in the `initialize` method; otherwise, you may fail to join the channel with the
+ * token.
+ *
+ * @param token The token generated on your server for authentication. Note:
+ * - (Recommended) If your project has enabled the security mode (using APP ID and Token for
+ * authentication), this parameter is required.
+ * - If you have only enabled the testing mode (using APP ID for authentication), this parameter is
+ * optional. You will automatically exit the channel 24 hours after successfully joining it.
+ * - If you need to join different channels at the same time or switch between channels, Agora
+ * recommends using a wildcard token so that you don't need to apply for a new token every time
+ * joining a channel. See `Secure authentication with tokens`.
* @param channelId The channel name. This parameter signifies the channel in which users engage in
* real-time audio and video interaction. Under the premise of the same App ID, users who fill in
* the same channel ID enter the same channel for audio and video interaction. The string length
- * must be less than 64 bytes. Supported character scopes are:
+ * must be less than 64 bytes. Supported characters (89 characters in total):
* - All lowercase English letters: a to z.
* - All uppercase English letters: A to Z.
* - All numeric characters: 0 to 9.
- * - The space character.
- * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-",
- * ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",".
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
* @param uid The user ID. This parameter is used to identify the user in the channel for real-time
* audio and video interaction. You need to set and manage user IDs yourself, and ensure that each
* user ID in the same channel is unique. This parameter is a 32-bit unsigned integer. The value
- * range is 1 to 232-1. If the user ID is not assigned (or set to 0), the SDK assigns a random user
- * ID and returns it in the `onJoinChannelSuccess` callback. Your application must record and maintain
- * the returned user ID, because the SDK does not do so.
- * @param options The channel media options: ChannelMediaOptions.
+ * range is 1 to 2^32-1. If the user ID is not assigned (or set to 0), the SDK assigns a random
+ * user ID and returns it in the `onJoinChannelSuccess` callback. Your application must record and
+ * maintain the returned user ID, because the SDK does not do so.
+ * @param options The channel media options. See `ChannelMediaOptions`.
*
* @return
* - 0: Success.
* - < 0: Failure.
- * - -2: The parameter is invalid. For example, the token is invalid, the uid parameter is not set
- * to an integer, or the value of a member in the `ChannelMediaOptions` structure is invalid. You need
- * to pass in a valid parameter and join the channel again.
- * - -3: Failes to initialize the `IRtcEngine` object. You need to reinitialize the IRtcEngine object.
- * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine
+ * - -2: The parameter is invalid. For example, the token is invalid, the `uid` parameter is not
+ * set to an integer, or the value of a member in `ChannelMediaOptions` is invalid. You need to pass
+ * in a valid parameter and join the channel again.
+ * - -3: Fails to initialize the `IRtcEngine` object. You need to reinitialize the `IRtcEngine`
+ * object.
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
* object before calling this method.
- * - -8: The internal state of the IRtcEngine object is wrong. The typical cause is that you call
- * this method to join the channel without calling `stopEchoTest` to stop the test after calling
- * `startEchoTest` to start a call loop test. You need to call `stopEchoTest` before calling this method.
- * - -17: The request to join the channel is rejected. The typical cause is that the user is in the
- * channel. Agora recommends using the `onConnectionStateChanged` callback to get whether the user is
- * in the channel. Do not call this method to join the channel unless you receive the
- * `CONNECTION_STATE_DISCONNECTED(1)` state.
- * - -102: The channel name is invalid. You need to pass in a valid channel name in channelId to
+ * - -8: The internal state of the `IRtcEngine` object is wrong. The typical cause is that after
+ * calling `startEchoTest` to start a call loop test, you call this method to join the channel
+ * without calling `stopEchoTest` to stop the test. You need to call `stopEchoTest` before calling
+ * this method.
+ * - -17: The request to join the channel is rejected. The typical cause is that the user is
+ * already in the channel. Agora recommends that you use the `onConnectionStateChanged` callback to
+ * see whether the user is in the channel. Do not call this method to join the channel unless you
+ * receive the `CONNECTION_STATE_DISCONNECTED` (1) state.
+ * - -102: The channel name is invalid. You need to pass in a valid channel name in `channelId` to
* rejoin the channel.
- * - -121: The user ID is invalid. You need to pass in a valid user ID in uid to rejoin the channel.
+ * - -121: The user ID is invalid. You need to pass in a valid user ID in `uid` to rejoin the
+ * channel.
*/
virtual int joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& options) = 0;
/**
- * Updates the channel media options after joining the channel.
+ * @brief Updates the channel media options after joining the channel.
+ *
+ * @param options The channel media options. See `ChannelMediaOptions`.
*
- * @param options The channel media options: ChannelMediaOptions.
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -2: The value of a member in `ChannelMediaOptions` is invalid. For example, the token or the
+ * user ID is invalid. You need to fill in a valid parameter.
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
+ * object before calling this method.
+ * - -8: The internal state of the `IRtcEngine` object is wrong. The possible reason is that the
+ * user is not in the channel. Agora recommends that you use the `onConnectionStateChanged` callback
+ * to see whether the user is in the channel. If you receive the `CONNECTION_STATE_DISCONNECTED` (1)
+ * or `CONNECTION_STATE_FAILED` (5) state, the user is not in the channel. You need to call
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)` to join a channel before calling this method.
*/
virtual int updateChannelMediaOptions(const ChannelMediaOptions& options) = 0;
/**
- * Leaves the channel.
- *
- * This method allows a user to leave the channel, for example, by hanging up or exiting a call.
+ * @brief Leaves a channel.
*
- * This method is an asynchronous call, which means that the result of this method returns even before
- * the user has not actually left the channel. Once the user successfully leaves the channel, the
- * SDK triggers the \ref IRtcEngineEventHandler::onLeaveChannel "onLeaveChannel" callback.
+ * @details
+ * After calling this method, the SDK terminates the audio and video interaction, leaves the current
+ * channel, and releases all resources related to the session.
+ * After joining the channel, you must call this method to end the call; otherwise, the next call
+ * cannot be started.
+ * Call timing: Call this method after joining a channel.
+ * Related callbacks: A successful call of this method triggers the following callbacks:
+ * - The local client: The `onLeaveChannel` callback will be triggered.
+ * - The remote client: The `onUserOffline` callback will be triggered after the remote host leaves
+ * the channel.
*
* @note
- * If you call \ref release "release" immediately after this method, the leaveChannel process will be
- * interrupted, and the SDK will not trigger the `onLeaveChannel` callback.
+ * - If you call `release` immediately after calling this method, the SDK does not trigger the
+ * `onLeaveChannel` callback.
+ * - This method call is asynchronous. When this method returns, it does not necessarily mean that
+ * the user has left the channel.
+ * - If you have called `joinChannelEx` to join multiple channels, calling this method will leave
+ * all the channels you joined.
*
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ * - -7: The SDK is not initialized.
*/
virtual int leaveChannel() = 0;
/**
- * Leaves the channel.
- *
- * @param options The leave channel options.
+ * @brief Sets channel options and leaves the channel.
*
- * This method allows a user to leave the channel, for example, by hanging up or exiting a call.
- *
- * This method is an asynchronous call, which means that the result of this method returns even before
- * the user has not actually left the channel. Once the user successfully leaves the channel, the
- * SDK triggers the \ref IRtcEngineEventHandler::onLeaveChannel "onLeaveChannel" callback.
+ * @details
+ * After calling this method, the SDK terminates the audio and video interaction, leaves the current
+ * channel, and releases all resources related to the session.
+ * After joining a channel, you must call this method or `leaveChannel()` to end the call;
+ * otherwise, the next call cannot be started. If you have called `joinChannelEx` to join multiple
+ * channels, calling this method will leave all the channels you joined.
+ * Call timing: Call this method after joining a channel.
+ * Related callbacks: A successful call of this method triggers the following callbacks:
+ * - The local client: The `onLeaveChannel` callback will be triggered.
+ * - The remote client: The `onUserOffline` callback will be triggered after the remote host leaves
+ * the channel.
*
* @note
- * If you call \ref release "release" immediately after this method, the leaveChannel process will be
- * interrupted, and the SDK will not trigger the `onLeaveChannel` callback.
+ * If you call `release` immediately after calling this method, the SDK does not trigger the
+ * `onLeaveChannel` callback.
+ * This method call is asynchronous. When this method returns, it does not necessarily mean that the
+ * user has left the channel.
+ *
+ * @param options The options for leaving the channel. See `LeaveChannelOptions`.
*
* @return
* - 0: Success.
@@ -4190,97 +5084,176 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int leaveChannel(const LeaveChannelOptions& options) = 0;
/**
- * Renews the token.
- *
- * Once a token is enabled and used, it expires after a certain period of time.
+ * @brief Renews the token.
*
- * Under the following circumstances, generate a new token on your server, and then call this method to
- * renew it. Failure to do so results in the SDK disconnecting from the server.
- * The SDK triggers the \ref IRtcEngineEventHandler::onRenewTokenResult "onRenewTokenResult" callback after the token is renewed.
- * - The \ref IRtcEngineEventHandler onTokenPrivilegeWillExpire "onTokenPrivilegeWillExpire" callback is triggered;
- * - The \ref IRtcEngineEventHandler::onRequestToken "onRequestToken" callback is triggered;
- * - The `ERR_TOKEN_EXPIRED(-109)` error is reported.
+ * @details
+ * This method is used to update the token. After successfully calling this method, the SDK will
+ * trigger the `onRenewTokenResult` callback. A token will expire after a certain period of time, at
+ * which point the SDK will be unable to establish a connection with the server.
+ * Call timing: In any of the following cases, Agora recommends that you generate a new token on
+ * your server and then call this method to renew your token:
+ * - Receiving the `onTokenPrivilegeWillExpire` callback reporting the token is about to expire.
+ * - Receiving the `onRequestToken` callback reporting the token has expired.
+ * - Receiving the `onConnectionStateChanged` callback reporting `CONNECTION_CHANGED_TOKEN_EXPIRED`
+ * (9).
*
* @param token The new token.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -2: The parameter is invalid. For example, the token is empty.
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
+ * object before calling this method.
+ * - 110: Invalid token. Ensure the following:
+ * - The user ID specified when generating the token is consistent with the user ID used when
+ * joining the channel.
+ * - The generated token is the same as the token passed in to join the channel.
*/
virtual int renewToken(const char* token) = 0;
/**
- * Sets the channel profile.
+ * @brief Sets the channel profile.
*
- * The IRtcEngine differentiates channel profiles and applies different optimization algorithms accordingly.
- * For example, it prioritizes smoothness and low latency for a video call, and prioritizes video quality
- * for a video broadcast.
+ * @details
+ * You can call this method to set the channel profile. The SDK adopts different optimization
+ * strategies for different channel profiles. For example, in a live streaming scenario, the SDK
+ * prioritizes video quality. After initializing the SDK, the default channel profile is the live
+ * streaming profile.
+ * Call timing: Call this method before joining a channel.
*
* @note
- * - To ensure the quality of real-time communication, we recommend that all users in a channel use the
- * same channel profile.
- * - Call this method before calling `joinChannel`. You cannot set the channel profile
- * once you have joined the channel.
+ * To ensure the quality of real-time communication, Agora recommends that all users in a channel
+ * use the same channel profile.
+ * In different channel scenarios, the default audio routing of the SDK is different. See
+ * `setDefaultAudioRouteToSpeakerphone`.
+ *
+ * @param profile The channel profile. See `CHANNEL_PROFILE_TYPE`.
*
- * @param profile The channel profile: #CHANNEL_PROFILE_TYPE.
* @return
* - 0: Success.
* - < 0: Failure.
- * - -8(ERR_INVALID_STATE): The current status is invalid, only allowed to be called when the connection is disconnected.
+ * - -2: The parameter is invalid.
+ * - -7: The SDK is not initialized.
*/
virtual int setChannelProfile(CHANNEL_PROFILE_TYPE profile) = 0;
/**
- * Sets the role of a user.
- *
- * This method sets the user role as either BROADCASTER or AUDIENCE (default).
- * - The broadcaster sends and receives streams.
- * - The audience receives streams only.
- *
- * By default, all users are audience regardless of the channel profile.
- * Call this method to change the user role to BROADCASTER so that the user can
- * send a stream.
+ * @brief Sets the client role.
+ *
+ * @details
+ * By default, the SDK sets the user role as audience. You can call this method to set the user role
+ * as host. The user role (`role`) determines the users' permissions at the SDK level, including
+ * whether they can publish audio and video streams in a channel.
+ * Call timing: You can call this method either before or after joining a channel.
+ * If you call this method to set the user role as the host before joining the channel and set the
+ * local video property through the `setupLocalVideo` method, the local video preview is
+ * automatically enabled when the user joins the channel.
+ * If you call this method to set the user role after joining a channel, the SDK will automatically
+ * call the `muteLocalAudioStream` and `muteLocalVideoStream` method to change the state for
+ * publishing audio and video streams.
+ * Related callbacks: If you call this method to switch the user role after joining the channel, the
+ * SDK triggers the following callbacks:
+ * - Triggers `onClientRoleChanged` on the local client. Note: Calling this method before joining a
+ * channel and setting the `role` to `AUDIENCE` will trigger this callback as well.
+ * - Triggers `onUserJoined` or `onUserOffline` on the remote client.
+ * If you call this method to set the user role after joining a channel but encounter a failure, the
+ * SDK triggers the `onClientRoleChangeFailed` callback to report the reason for the failure and the
+ * current user role.
*
* @note
- * After calling the setClientRole() method to CLIENT_ROLE_AUDIENCE, the SDK stops audio recording.
- * However, CLIENT_ROLE_AUDIENCE will keep audio recording with AUDIO_SCENARIO_CHATROOM(5).
- * Normally, app developer can also use mute api to achieve the same result, and we implement
- * this 'non-orthogonal' behavior only to make API backward compatible.
+ * When calling this method before joining a channel and setting the user role to `BROADCASTER`, the
+ * `onClientRoleChanged` callback will not be triggered on the local client.
+ * Calling this method before joining a channel and setting the `role` to `AUDIENCE` will trigger
+ * this callback as well.
*
- * @param role The role of the client: #CLIENT_ROLE_TYPE.
+ * @param role The user role. See `CLIENT_ROLE_TYPE`.
+ * Note: If you set the user role as an audience member, you cannot publish audio and video streams
+ * in the channel. If you want to publish media streams in a channel during live streaming, ensure
+ * you set the user role as broadcaster.
*
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ * - -7: The SDK is not initialized.
*/
virtual int setClientRole(CLIENT_ROLE_TYPE role) = 0;
- /** Sets the role of the user, such as a host or an audience (default), before joining a channel in the live interactive streaming.
- *
- * This method can be used to switch the user role in the live interactive streaming after the user joins a channel.
- *
- * In the `LIVE_BROADCASTING` profile, when a user switches user roles after joining a channel, a successful \ref agora::rtc::IRtcEngine::setClientRole "setClientRole" method call triggers the following callbacks:
- * - The local client: \ref agora::rtc::IRtcEngineEventHandler::onClientRoleChanged "onClientRoleChanged"
- * - The remote client: \ref agora::rtc::IRtcEngineEventHandler::onUserJoined "onUserJoined" or \ref agora::rtc::IRtcEngineEventHandler::onUserOffline "onUserOffline" (BECOME_AUDIENCE)
- *
- * @note
- * This method applies only to the `LIVE_BROADCASTING` profile.
- *
- * @param role Sets the role of the user. See #CLIENT_ROLE_TYPE.
- * @param options Sets the audience latency level of the user. See #ClientRoleOptions.
- *
- * @return
- * - 0(ERR_OK): Success.
- * - < 0: Failure.
- * - -1(ERR_FAILED): A general error occurs (no specified reason).
- * - -2(ERR_INALID_ARGUMENT): The parameter is invalid.
- * - -7(ERR_NOT_INITIALIZED): The SDK is not initialized.
- * - -8(ERR_INVALID_STATE): The channel profile is not `LIVE_BROADCASTING`.
- */
+ /**
+ * @brief Sets the user role and the audience latency level in a live streaming scenario.
+ *
+ * @details
+ * By default, the SDK sets the user role as audience. You can call this method to set the user role
+ * as host. The user role (`role`) determines the users' permissions at the SDK level, including
+ * whether they can publish audio and video streams in a channel.
+ * The difference between this method and `setClientRole(CLIENT_ROLE_TYPE role)` is that, this
+ * method supports
+ * setting the `audienceLatencyLevel`. `audienceLatencyLevel` needs to be used together with `role`
+ * to determine the level of service that users can enjoy within their permissions. For example, an
+ * audience member can choose to receive remote streams with low latency or ultra-low latency.
+ * Call timing: You can call this method either before or after joining a channel.
+ * If you call this method to set the user role as the host before joining the channel and set the
+ * local video property through the `setupLocalVideo` method, the local video preview is
+ * automatically enabled when the user joins the channel.
+ * If you call this method to set the user role after joining a channel, the SDK will automatically
+ * call the `muteLocalAudioStream` and `muteLocalVideoStream` method to change the state for
+ * publishing audio and video streams.
+ * Related callbacks: If you call this method to switch the user role after joining the channel, the
+ * SDK triggers the following callbacks:
+ * - Triggers `onClientRoleChanged` on the local client. Note: Calling this method before joining a
+ * channel and setting the `role` to `AUDIENCE` will trigger this callback as well.
+ * - Triggers `onUserJoined` or `onUserOffline` on the remote client.
+ * If you call this method to set the user role after joining a channel but encounter a failure, the
+ * SDK triggers the `onClientRoleChangeFailed` callback to report the reason for the failure and the
+ * current user role.
+ *
+ * @note
+ * When the user role is set to host, the audience latency level can only be set to
+ * AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY.
+ * When calling this method before joining a channel and setting the `role` to `BROADCASTER`, the
+ * `onClientRoleChanged` callback will not be triggered on the local client.
+ * Calling this method before joining a channel and setting the `role` to `AUDIENCE` will trigger
+ * this callback as well.
+ *
+ * @param role The user role. See `CLIENT_ROLE_TYPE`.
+ * Note: If you set the user role as an audience member, you cannot publish audio and video streams
+ * in the channel. If you want to publish media streams in a channel during live streaming, ensure
+ * you set the user role as broadcaster.
+ * @param options The detailed options of a user, including the user level. See `ClientRoleOptions`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ * - -5: The request is rejected.
+ * - -7: The SDK is not initialized.
+ */
virtual int setClientRole(CLIENT_ROLE_TYPE role, const ClientRoleOptions& options) = 0;
- /** Starts a video call test.
+ /**
+ * @brief Starts an audio and video call loopback test.
*
- * @param config: configuration for video call test.
+ * @details
+ * To test whether the user's local sending and receiving streams are normal, you can call this
+ * method to perform an audio and video call loop test, which tests whether the audio and video
+ * devices and the user's upstream and downstream networks are working properly.
+ * After starting the test, the user needs to make a sound or face the camera. The audio or video is
+ * output after about two seconds. If the audio playback is normal, the audio device and the user's
+ * upstream and downstream networks are working properly; if the video playback is normal, the video
+ * device and the user's upstream and downstream networks are working properly.
+ * Call timing: You can call this method either before or after joining a channel.
+ *
+ * @note
+ * - When calling in a channel, make sure that no audio or video stream is being published.
+ * - After calling this method, call `stopEchoTest` to end the test; otherwise, the user cannot
+ * perform the next audio and video call loop test and cannot join the channel.
+ * - In live streaming scenarios, this method only applies to hosts.
+ *
+ * @param config The configuration of the audio and video call loop test. See
+ * `EchoTestConfiguration`.
*
* @return
* - 0: Success.
@@ -4288,20 +5261,59 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int startEchoTest(const EchoTestConfiguration& config) = 0;
- /** Stops the audio call test.
- @return int
-
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Stops the audio and video call loopback test.
+ *
+ * @details
+ * After calling `startEchoTest`, you must call this method to end the test; otherwise, the user
+ * cannot perform the next audio and video call loop test and cannot join the channel.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -5(ERR_REFUSED): Failed to stop the echo test. The echo test may not be running.
+ */
virtual int stopEchoTest() = 0;
#if defined(__APPLE__) && TARGET_OS_IOS
- /** Enables the SDK use AVCaptureMultiCamSession or AVCaptureSession. Applies to iOS 13.0+ only.
- * @param enabled Whether to enable multi-camera when capturing video:
- * - true: Enable multi-camera, and the SDK uses AVCaptureMultiCamSession.
- * - false: Disable multi-camera, and the SDK uses AVCaptureSession.
- * @param config The config for secondary camera capture session. See #CameraCapturerConfiguration.
+ /**
+ * @brief Enables or disables multi-camera capture.
+ *
+ * @details
+ * In scenarios where there are existing cameras to capture video, Agora recommends that you use the
+ * following steps to capture and publish video with multiple cameras: 1. Call this method to enable
+ * multi-channel camera capture.
+ * 2. Call `startPreview(VIDEO_SOURCE_TYPE sourceType)` to start the local video preview.
+ * 3. Call `startCameraCapture`, and set `sourceType` to start video capture with the second camera.
+ * 4. Call `joinChannelEx`, and set `publishSecondaryCameraTrack` to `true` to publish the video
+ * stream captured by the second camera in the channel.
+ * If you want to disable multi-channel camera capture, use the following steps: 1. Call
+ * `stopCameraCapture`.
+ * 2. Call this method with `enabled` set to `false`.
+ * This method applies to iOS only.
+ * When using this function, ensure that the system version is 13.0 or later.
+ * The minimum iOS device types that support multi-camera capture are as follows:
+ * - iPhone XR
+ * - iPhone XS
+ * - iPhone XS Max
+ * - iPad Pro 3rd generation and later
+ *
+ * @note
+ * You can call this method before and after `startPreview(VIDEO_SOURCE_TYPE sourceType)` to enable
+ * multi-camera capture:
+ * - If it is enabled before `startPreview(VIDEO_SOURCE_TYPE sourceType)`, the local video preview
+ * shows the image captured
+ * by the two cameras at the same time.
+ * - If it is enabled after `startPreview(VIDEO_SOURCE_TYPE sourceType)`, the SDK stops the current
+ * camera capture first,
+ * and then enables the primary camera and the second camera. The local video preview appears black
+ * for a short time, and then automatically returns to normal.
+ *
+ * @param enabled Whether to enable multi-camera video capture mode:
+ * - `true`: Enable multi-camera capture mode; the SDK uses multiple cameras to capture video.
+ * - `false`: Disable multi-camera capture mode; the SDK uses a single camera to capture video.
+ * @param config Capture configuration for the second camera. See `CameraCapturerConfiguration`.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -4309,15 +5321,28 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int enableMultiCamera(bool enabled, const CameraCapturerConfiguration& config) = 0;
#endif
/**
- * Enables the video.
+ * @brief Enables the video module.
*
- * You can call this method either before joining a channel or during a call.
- * If you call this method before entering a channel, the service starts the video; if you call it
- * during a call, the audio call switches to a video call.
+ * @details
+ * The video module is disabled by default, call this method to enable it. If you need to disable
+ * the video module later, you need to call `disableVideo`.
+ * Call timing: This method can be called either before joining a channel or while in the channel:
+ * - If called before joining a channel, it enables the video module.
+ * - If called during an audio-only call, the audio call automatically switches to a video call.
+ * Related callbacks: A successful call of this method triggers the `onRemoteVideoStateChanged`
+ * callback on the remote client.
*
* @note
- * This method controls the underlying states of the Engine. It is still
- * valid after one leaves the channel.
+ * - This method enables the internal engine and is valid after leaving the channel.
+ * - Calling this method will reset the entire engine, resulting in a slow response time. You can
+ * use the following methods to independently control a specific function of the video module based
+ * on your actual needs:
+ * - `enableLocalVideo`: Whether to enable the camera to create the local video stream.
+ * - `muteLocalVideoStream`: Whether to publish the local video stream.
+ * - `muteRemoteVideoStream`: Whether to subscribe to and play the remote video stream.
+ * - `muteAllRemoteVideoStreams`: Whether to subscribe to and play all remote video streams.
+ * - A successful call of this method resets `enableLocalVideo`, `muteRemoteVideoStream`, and
+ * `muteAllRemoteVideoStreams`. Proceed it with caution.
*
* @return
* - 0: Success.
@@ -4326,22 +5351,44 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int enableVideo() = 0;
/**
- * Disables the video.
+ * @brief Disables the video module.
*
- * This method stops capturing the local video and receiving any remote video.
- * To enable the local preview function, call \ref enableLocalVideo "enableLocalVideo" (true).
- * @return int
+ * @details
+ * This method is used to disable the video module.
+ * Call timing: This method can be called either before or after joining the channel.
+ * - If it is called before joining the channel, the audio-only mode is enabled.
+ * - If it is called after joining the channel, it switches from video mode to audio-only mode.
+ * Then, calling `enableVideo` can switch to video mode again.
+ * Related callbacks: A successful call of this method triggers the `onUserEnableVideo` (`false`)
+ * callback on the remote client.
+ *
+ * @note
+ * - This method affects the internal engine and can be called after leaving the channel.
+ * - Calling this method will reset the entire engine, resulting in a slow response time. You can
+ * use the following methods to independently control a specific function of the video module based
+ * on your actual needs:
+ * - `enableLocalVideo`: Whether to enable the camera to create the local video stream.
+ * - `muteLocalVideoStream`: Whether to publish the local video stream.
+ * - `muteRemoteVideoStream`: Whether to subscribe to and play the remote video stream.
+ * - `muteAllRemoteVideoStreams`: Whether to subscribe to and play all remote video streams.
+ *
+ * @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int disableVideo() = 0;
/**
- * Starts the local video preview before joining a channel.
+ * @brief Enables the local video preview.
*
- * Once you call this method to start the local video preview, if you leave
- * the channel by calling \ref leaveChannel "leaveChannel", the local video preview remains until
- * you call \ref stopPreview "stopPreview" to disable it.
+ * @details
+ * You can call this method to enable local video preview.
+ * Call timing: This method must be called after `enableVideo` and `setupLocalVideo`.
+ *
+ * @note
+ * - The local preview enables the mirror mode by default.
+ * - After leaving the channel, local preview remains enabled. You need to call `stopPreview()`
+ * to disable local preview.
*
* @return
* - 0: Success.
@@ -4350,8 +5397,20 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int startPreview() = 0;
/**
- * Starts the local video preview for specific source type.
- * @param sourceType - The video source type.
+ * @brief Enables the local video preview and specifies the video source for the preview.
+ *
+ * @details
+ * This method is used to start local video preview and specify the video source that appears in the
+ * preview screen.
+ * Call timing: This method must be called after `enableVideo` and `setupLocalVideo`.
+ *
+ * @note
+ * - The local preview enables the mirror mode by default.
+ * - After leaving the channel, local preview remains enabled. You need to call `stopPreview()`
+ * to disable local preview.
+ *
+ * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -4359,7 +5418,12 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int startPreview(VIDEO_SOURCE_TYPE sourceType) = 0;
/**
- * Stops the local video preview and the video.
+ * @brief Stops the local video preview.
+ *
+ * @details
+ * Applicable scenarios: After calling `startPreview()` to start the preview, if you want to
+ * stop the local video preview, call this method.
+ * Call timing: Call this method before joining a channel or after leaving a channel.
*
* @return
* - 0: Success.
@@ -4368,134 +5432,260 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int stopPreview() = 0;
/**
- * Stops the local video preview for specific source type.
- * @param sourceType - The video source type.
+ * @brief Stops the local video preview.
+ *
+ * @details
+ * Applicable scenarios: After calling `startPreview(VIDEO_SOURCE_TYPE sourceType)` to start the
+ * preview, if you want to
+ * stop the local video preview, call this method.
+ * Call timing: Call this method before joining a channel or after leaving a channel.
+ *
+ * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int stopPreview(VIDEO_SOURCE_TYPE sourceType) = 0;
- /** Starts the last-mile network probe test.
-
- This method starts the last-mile network probe test before joining a channel
- to get the uplink and downlink last-mile network statistics, including the
- bandwidth, packet loss, jitter, and round-trip time (RTT).
-
- Call this method to check the uplink network quality before users join a
- channel or before an audience switches to a host. Once this method is
- enabled, the SDK returns the following callbacks:
- - \ref IRtcEngineEventHandler::onLastmileQuality "onLastmileQuality": the
- SDK triggers this callback depending on the network
- conditions. This callback rates the network conditions and is more closely
- linked to the user experience.
- - \ref IRtcEngineEventHandler::onLastmileProbeResult "onLastmileProbeResult":
- the SDK triggers this callback within 30 seconds depending on the network
- conditions. This callback returns the real-time statistics of the network
- conditions and is more objective.
-
- @note
- - Do not call other methods before receiving the
- \ref IRtcEngineEventHandler::onLastmileQuality "onLastmileQuality" and
- \ref IRtcEngineEventHandler::onLastmileProbeResult "onLastmileProbeResult"
- callbacks. Otherwise, the callbacks may be interrupted.
- - In the Live-broadcast profile, a host should not call this method after
- joining a channel.
-
- @param config Sets the configurations of the last-mile network probe test. See
- LastmileProbeConfig.
-
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Starts the last mile network probe test.
+ *
+ * @details
+ * This method starts the last-mile network probe test before joining a channel to get the uplink
+ * and downlink last mile network statistics, including the bandwidth, packet loss, jitter, and
+ * round-trip time (RTT).
+ * Call timing: Do not call other methods before receiving the `onLastmileQuality` and
+ * `onLastmileProbeResult` callbacks. Otherwise, the callbacks may be interrupted.
+ * Related callbacks: After successfully calling this method, the SDK sequentially triggers the
+ * following 2 callbacks:
+ * - `onLastmileQuality`: The SDK triggers this callback within two seconds depending on the network
+ * conditions. This callback rates the network conditions and is more closely linked to the user
+ * experience.
+ * - `onLastmileProbeResult`: The SDK triggers this callback within 30 seconds depending on the
+ * network conditions. This callback returns the real-time statistics of the network conditions and
+ * is more objective.
+ *
+ * @param config The configurations of the last-mile network probe test. See `LastmileProbeConfig`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int startLastmileProbeTest(const LastmileProbeConfig& config) = 0;
- /** Stops the last-mile network probe test. */
+ /**
+ * @brief Stops the last mile network probe test.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int stopLastmileProbeTest() = 0;
/**
- * Sets the video encoder configuration.
+ * @brief Sets the video encoder configuration.
+ *
+ * @details
+ * Sets the encoder configuration for the local video. Each configuration profile corresponds to a
+ * set of video parameters, including the resolution, frame rate, and bitrate.
+ * Call timing: You can call this method either before or after joining a channel. If the user does
+ * not need to reset the video encoding properties after joining the channel, Agora recommends
+ * calling this method before `enableVideo` to reduce the time to render the first video frame.
*
- * Each configuration profile corresponds to a set of video parameters, including
- * the resolution, frame rate, and bitrate.
+ * @note
+ * - Both this method and the `getMirrorApplied` method support setting the mirroring effect. Agora
+ * recommends that you only choose one method to set it up. Using both methods at the same time
+ * causes the mirroring effect to overlap, and the mirroring settings fail.
+ * - The `config` specified in this method is the maximum value under ideal network conditions. If
+ * the video engine cannot render the video using the specified `config` due to unreliable network
+ * conditions, the parameters further down the list are considered until a successful configuration
+ * is found.
*
- * The parameters specified in this method are the maximum values under ideal network conditions.
- * If the video engine cannot render the video using the specified parameters due
- * to poor network conditions, the parameters further down the list are considered
- * until a successful configuration is found.
+ * @param config Video profile. See `VideoEncoderConfiguration`.
*
- * @param config The local video encoder configuration: VideoEncoderConfiguration.
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int setVideoEncoderConfiguration(const VideoEncoderConfiguration& config) = 0;
- /** Enables/Disables image enhancement and sets the options.
+ /**
+ * @brief Sets the image enhancement options.
+ *
+ * @details
+ * Enables or disables image enhancement, and sets the options.
+ * Call timing: Call this method after calling `enableVideo` or `startPreview(VIDEO_SOURCE_TYPE
+ * sourceType)`.
+ *
+ * @note
+ * - This method relies on the image enhancement dynamic library
+ * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ * - This feature has high requirements on device performance. When calling this method, the SDK
+ * automatically checks the capabilities of the current device.
*
- * @note Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method.
+ * @param enabled Whether to enable the image enhancement function:
+ * - `true`: Enable the image enhancement function.
+ * - `false`: (Default) Disable the image enhancement function.
+ * @param options The image enhancement options. See `BeautyOptions`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`. Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
*
- * @param enabled Sets whether or not to enable image enhancement:
- * - true: enables image enhancement.
- * - false: disables image enhancement.
- * @param options Sets the image enhancement option. See BeautyOptions.
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -4: The current device does not support this feature. Possible reasons include:
+ * - The current device capabilities do not meet the requirements for image enhancement. Agora
+ * recommends you replace it with a high-performance device.
*/
virtual int setBeautyEffectOptions(bool enabled, const BeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
- /** Enables/Disables face shape and sets the beauty options.
+ /**
+ * @brief Sets the face shape options and specifies the media source.
+ *
+ * @details
+ * Calling this method allows for modifying various parts of the face, achieving slimming, enlarging
+ * eyes, slimming nose, and other minor cosmetic effects all at once using preset parameters,
+ * supporting fine-tuning the overall modification intensity.
+ * Call timing: Call this method after calling `enableVideo`.
+ *
+ * @note
+ * - This method only applies to Android 4.4 or later.
+ * - This method relies on the image enhancement dynamic library
+ * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ * - This feature has high requirements on device performance. When calling this method, the SDK
+ * automatically checks the capabilities of the current device.
*
- * @note Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method.
+ * @param enabled Whether to enable the face shape effect:
+ * - `true`: Enable the face shape effect.
+ * - `false`: (Default) Disable the face shape effect.
+ * @param options Face shaping style options, see `FaceShapeBeautyOptions`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`. Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
*
- * @param enabled Sets whether or not to enable face shape:
- * - true: enables face shape.
- * - false: disables face shape.
- * @param options Sets the face shape beauty option. See FaceShapeBeautyOptions.
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -4: The current device does not support this feature. Possible reasons include:
+ * - The current device capabilities do not meet the requirements for image enhancement. Agora
+ * recommends you replace it with a high-performance device.
+ * - The current device version is lower than Android 4.4 and does not support this feature.
+ * Agora recommends you replace the device or upgrade the operating system.
*/
virtual int setFaceShapeBeautyOptions(bool enabled, const FaceShapeBeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
- /** Enables/Disables face shape and sets the area options.
+ /**
+ * @brief Sets the image enhancement options for facial areas and specifies the media source.
*
- * @note Call this method after calling the \ref IRtcEngine::setFaceShapeBeautyOptions "setFaceShapeBeautyOptions" method.
+ * @details
+ * If the preset beauty effects implemented in the `setFaceShapeBeautyOptions` method do not meet
+ * expectations, you can use this method to set beauty area options, individually fine-tune each
+ * part of the face, and achieve a more refined beauty effect.
+ * Call timing: Call this method after calling `setFaceShapeBeautyOptions`.
*
- * @param options Sets the face shape area option. See FaceShapeAreaOptions.
+ * @note
+ * - This method only applies to Android 4.4 or later.
+ * - This method relies on the image enhancement dynamic library
+ * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ * - This feature has high requirements on device performance. When calling this method, the SDK
+ * automatically checks the capabilities of the current device.
+ *
+ * @param options Facial enhancement areas, see `FaceShapeAreaOptions`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`. Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -4: The current device does not support this feature. Possible reasons include:
+ * - The current device capabilities do not meet the requirements for image enhancement. Agora
+ * recommends you replace it with a high-performance device.
+ * - The current device version is lower than Android 4.4 and does not support this feature.
+ * Agora recommends you replace the device or upgrade the operating system.
*/
virtual int setFaceShapeAreaOptions(const FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
- /** Gets the face shape beauty options.
+ /**
+ * @brief Gets the beauty effect options.
+ *
+ * @details
+ * Calling this method can retrieve the current settings of the beauty effect.
+ * Applicable scenarios: When the user opens the beauty style and style intensity menu in the app,
+ * you can call this method to get the current beauty effect options, then refresh the menu in the
+ * user interface according to the results, and update the UI.
+ * Call timing: Call this method after calling `enableVideo`.
*
- * @note Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method.
+ * @param options Face shaping style options, see `FaceShapeBeautyOptions`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`. Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
*
- * @param options Gets the face shape beauty option. See FaceShapeBeautyOptions.
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int getFaceShapeBeautyOptions(FaceShapeBeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
- /** Gets the face shape area options.
+ /**
+ * @brief Gets the facial beauty area options.
*
- * @note Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method.
+ * @details
+ * Calling this method can retrieve the current settings of the beauty effect.
+ * Applicable scenarios: When the user opens the facial beauty area and shaping intensity menu in
+ * the app, you can call this method to get the current beauty effect options, then refresh the menu
+ * in the user interface according to the results, and update the UI.
+ * Call timing: Call this method after calling `enableVideo`.
*
- * @param shapeArea The face area. See FaceShapeAreaOptions::FACE_SHAPE_AREA.
- * @param options Gets the face area beauty option. See FaceShapeAreaOptions.
+ * @param shapeArea Facial enhancement areas. See `FACE_SHAPE_AREA`.
+ * @param options Facial enhancement areas, see `FaceShapeAreaOptions`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`. Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int getFaceShapeAreaOptions(agora::rtc::FaceShapeAreaOptions::FACE_SHAPE_AREA shapeArea, FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
/**
- * Sets filter effect options.
+ * @brief Sets the filter effect options and specifies the media source.
*
* @since v4.4.1
- * You can call this method to enable the filter effect feature and set the options of the filter effect.
+ *
+ * @details
+ * Call timing: Call this method after calling `enableVideo`.
*
* @note
- * - Before calling this method, ensure that you have integrated the following dynamic library into your project:
- * - Android: `libagora_clear_vision_extension.so`
- * - iOS/macOS: `AgoraClearVisionExtension.xcframework`
- * - Windows: `libagora_clear_vision_extension.dll`
- * - Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method.
- * - You can call this method either before or after joining a channel.
- * - The filter effect feature has specific performance requirements for devices. If your device overheats after enabling the filter effect, Agora recommends disabling it entirely.
+ * - This method relies on the image enhancement dynamic library
+ * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ * - This feature has high requirements on device performance. When calling this method, the SDK
+ * automatically checks the capabilities of the current device.
*
- * @param enabled. Whether to enable filter effect:
- * - `true`: Enable.
- * - `false`: (Default) Disable.
- * @param options. Set the filter effect options. See FilterEffectOptions.
+ * @param enabled Whether to enable the filter effect:
+ * - `true`: Yes.
+ * - `false`: (Default) No.
+ * @param options The filter effect options. See `FilterEffectOptions`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`. Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
*
* @return
* - 0: Success.
@@ -4505,54 +5695,77 @@ class IRtcEngine : public agora::base::IEngineBase {
/**
- * @brief Creates a video effect object and returns its pointer.
+ * @brief Creates a video effect object.
*
* @since v4.6.0
*
- * @param bundlePath The path of the video effect bundle.
- * @param type The media source type. See #MEDIA_SOURCE_TYPE.
+ * @details
+ * Creates an `IVideoEffectObject` video effect object and returns its pointer.
*
- * @return
- * - The pointer to \ref rtc::IVideoEffectObject "IVideoEffectObject", if the method call succeeds.
- * - A null pointer, if the method call fails.
+ * @param bundlePath The path to the video effect bundle.
+ * @param type The media source type. See `MEDIA_SOURCE_TYPE`.
+ *
+ * @return
+ * - The `IVideoEffectObject` object pointer, if the method call succeeds.
+ * - A null pointer, if the method call fails.
*/
virtual agora_refptr createVideoEffectObject(const char* bundlePath, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
-
+
/**
* @brief Destroys a video effect object.
*
* @since v4.6.0
*
- * @param videoEffectObject The pointer to \ref rtc::IVideoEffectObject.
+ * @param videoEffectObject The video effect object to be destroyed. See `IVideoEffectObject`.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int destroyVideoEffectObject(agora_refptr videoEffectObject) = 0;
-
+
/**
- * Sets low-light enhancement.
+ * @brief Sets low-light enhancement.
*
* @since v4.0.0
*
- * The low-light enhancement feature can adaptively adjust the brightness value of the video captured in situations with low or uneven lighting, such as backlit, cloudy, or dark scenes. It restores or highlights the image details and improves the overall visual effect of the video.
- *
- * You can call this method to enable the low-light enhancement feature and set the options of the low-light enhancement effect.
+ * @details
+ * You can call this method to enable the low-light enhancement feature and set the options of the
+ * low-light enhancement effect.
+ * Applicable scenarios: The low-light enhancement feature can adaptively adjust the brightness
+ * value of the video captured in situations with low or uneven lighting, such as backlit, cloudy,
+ * or dark scenes. It restores or highlights the image details and improves the overall visual
+ * effect of the video.
+ * Call timing: Call this method after calling `enableVideo`.
*
* @note
- * - Before calling this method, ensure that you have integrated the following dynamic library into your project:
- * - Android: `libagora_clear_vision_extension.so`
- * - iOS/macOS: `AgoraClearVisionExtension.xcframework`
- * - Windows: `libagora_clear_vision_extension.dll`
- * - Call this method after \ref IRtcEngine::enableVideo "enableVideo".
- * - The low-light enhancement feature has certain performance requirements on devices. If your device overheats after you enable low-light enhancement, Agora recommends modifying the low-light enhancement options to a less performance-consuming level or disabling low-light enhancement entirely.
- *
- * @param enabled Sets whether to enable low-light enhancement:
- * - `true`: Enable.
- * - `false`: (Default) Disable.
- * @param options The low-light enhancement options. See LowlightEnhanceOptions.
+ * - This method relies on the image enhancement dynamic library
+ * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ * - The low-light enhancement feature has certain performance requirements on devices. If your
+ * device overheats after you enable low-light enhancement, Agora recommends modifying the
+ * low-light enhancement options to a less performance-consuming level or disabling low-light
+ * enhancement entirely.
+ * - If you want to prioritize image quality ( LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY ) when using the
+ * low-light enhancement function, you need to first call `setVideoDenoiserOptions` to achieve video
+ * noise reduction, the specific corresponding relationship is as follows:
+ * - When low light enhancement is set to automatic mode ( LOW_LIGHT_ENHANCE_AUTO ), video noise
+ * reduction needs to be set to prioritize image quality ( VIDEO_DENOISER_LEVEL_HIGH_QUALITY ) and
+ * automatic mode ( VIDEO_DENOISER_AUTO ).
+ * - When low-light enhancement is set to manual mode ( LOW_LIGHT_ENHANCE_MANUAL ), video noise
+ * reduction needs to be set to prioritize image quality ( VIDEO_DENOISER_LEVEL_HIGH_QUALITY ) and
+ * manual mode ( VIDEO_DENOISER_MANUAL ).
+ *
+ * @param enabled Whether to enable low-light enhancement:
+ * - `true`: Enable low-light enhancement.
+ * - `false`: (Default) Disable low-light enhancement.
+ * @param options The low-light enhancement options. See `LowlightEnhanceOptions`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
*
* @return
* - 0: Success.
@@ -4560,26 +5773,45 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int setLowlightEnhanceOptions(bool enabled, const LowlightEnhanceOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
/**
- * Sets video noise reduction.
+ * @brief Sets video noise reduction.
*
* @since v4.0.0
*
- * Underlit environments and low-end video capture devices can cause video images to contain significant noise, which affects video quality. In real-time interactive scenarios, video noise also consumes bitstream resources and reduces encoding efficiency during encoding.
- *
- * You can call this method to enable the video noise reduction feature and set the options of the video noise reduction effect.
+ * @details
+ * You can call this method to enable the video noise reduction feature and set the options of the
+ * video noise reduction effect.
+ * Applicable scenarios: dark environments and low-end video capture devices can cause video images
+ * to contain significant noise, which affects video quality. In real-time interactive scenarios,
+ * video noise also consumes bitstream resources and reduces encoding efficiency during encoding.
+ * Call timing: Call this method after calling `enableVideo`.
*
* @note
- * - Before calling this method, ensure that you have integrated the following dynamic library into your project:
- * - Android: `libagora_clear_vision_extension.so`
- * - iOS/macOS: `AgoraClearVisionExtension.xcframework`
- * - Windows: `libagora_clear_vision_extension.dll`
- * - Call this method after \ref IRtcEngine::enableVideo "enableVideo".
- * - The video noise reduction feature has certain performance requirements on devices. If your device overheats after you enable video noise reduction, Agora recommends modifying the video noise reduction options to a less performance-consuming level or disabling video noise reduction entirely.
- *
- * @param enabled Sets whether to enable video noise reduction:
- * - `true`: Enable.
- * - `false`: (Default) Disable.
- * @param options The video noise reduction options. See VideoDenoiserOptions.
+ * - This method relies on the image enhancement dynamic library
+ * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ * - Video noise reduction has certain requirements for equipment performance. If your device
+ * overheats after you enable video noise reduction, Agora recommends modifying the video noise
+ * reduction options to a less performance-consuming level or disabling video noise reduction
+ * entirely.
+ * If the noise reduction implemented by this method does not meet your needs, Agora recommends that
+ * you call the `setBeautyEffectOptions` method to enable the beauty and skin smoothing function to
+ * achieve better video noise reduction effects. The recommended `BeautyOptions` settings for
+ * intense noise reduction effect are as follows:
+ * - `lighteningContrastLevel`: LIGHTENING_CONTRAST_NORMAL
+ * - `lighteningLevel`: 0.0
+ * - `smoothnessLevel`: 0.5
+ * - `rednessLevel`: 0.0
+ * - `sharpnessLevel`: 0.1
+ *
+ * @param enabled Whether to enable video noise reduction:
+ * - `true`: Enable video noise reduction.
+ * - `false`: (Default) Disable video noise reduction.
+ * @param options The video noise reduction options. See `VideoDenoiserOptions`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`. Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
*
* @return
* - 0: Success.
@@ -4587,26 +5819,36 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int setVideoDenoiserOptions(bool enabled, const VideoDenoiserOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
/**
- * Sets color enhancement.
+ * @brief Sets color enhancement.
*
* @since v4.0.0
*
- * The video images captured by the camera can have color distortion. The color enhancement feature intelligently adjusts video characteristics such as saturation and contrast to enhance the video color richness and color reproduction, making the video more vivid.
- *
- * You can call this method to enable the color enhancement feature and set the options of the color enhancement effect.
+ * @details
+ * The video images captured by the camera can have color distortion. The color enhancement feature
+ * intelligently adjusts video characteristics such as saturation and contrast to enhance the video
+ * color richness and color reproduction, making the video more vivid.
+ * You can call this method to enable the color enhancement feature and set the options of the color
+ * enhancement effect.
*
* @note
- * - Before calling this method, ensure that you have integrated the following dynamic library into your project:
- * - Android: `libagora_clear_vision_extension.so`
- * - iOS/macOS: `AgoraClearVisionExtension.xcframework`
- * - Windows: `libagora_clear_vision_extension.dll`
- * - Call this method after \ref IRtcEngine::enableVideo "enableVideo".
- * - The color enhancement feature has certain performance requirements on devices. If your device overheats after you enable color enhancement, Agora recommends modifying the color enhancement options to a less performance-consuming level or disabling color enhancement entirely.
+ * - Call this method after calling `enableVideo`.
+ * - The color enhancement feature has certain performance requirements on devices. With color
+ * enhancement turned on, Agora recommends that you change the color enhancement level to one that
+ * consumes less performance or turn off color enhancement if your device is experiencing severe
+ * heat problems.
+ * - This method relies on the image enhancement dynamic library
+ * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
*
- * @param enabled Sets whether to enable color enhancement:
- * - `true`: Enable.
- * - `false`: (Default) Disable.
- * @param options The color enhancement options. See ColorEnhanceOptions.
+ * @param enabled Whether to enable color enhancement:
+ * - `true`: Enable color enhancement.
+ * - `false`: (Default) Disable color enhancement.
+ * @param options The color enhancement options. See `ColorEnhanceOptions`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`. Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
*
* @return
* - 0: Success.
@@ -4615,75 +5857,124 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setColorEnhanceOptions(bool enabled, const ColorEnhanceOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
/**
- * Enables/Disables the virtual background. (beta function)
+ * @brief Enables/Disables the virtual background.
*
* @since v3.7.200
*
- * After enabling the virtual background function, you can replace the original background image of the local user
- * with a custom background image. After the replacement, all users in the channel can see the custom background
- * image.
+ * @details
+ * The virtual background feature enables the local user to replace their original background with a
+ * static image, dynamic video, blurred background, or portrait-background segmentation to achieve
+ * picture-in-picture effect. Once the virtual background feature is enabled, all users in the
+ * channel can see the custom background.
+ * Call this method after calling `enableVideo` or `startPreview(VIDEO_SOURCE_TYPE sourceType)`.
*
* @note
- * - Before calling this method, ensure that you have integrated the
- * `libagora_segmentation_extension.dll` (Windows)/`AgoraVideoSegmentationExtension.framework` (macOS) dynamic
- * library into the project folder.
- * - Call this method after \ref IRtcEngine::enableVideo "enableVideo".
- * - This function requires a high-performance device. Agora recommends that you use this function on devices with
- * an i5 CPU and better.
- * - Agora recommends that you use this function in scenarios that meet the following conditions:
- * - A high-definition camera device is used, and the environment is uniformly lit.
- * - The captured video image is uncluttered, the user's portrait is half-length and largely unobstructed, and the
- * background is a single color that differs from the color of the user's clothing.
- *
- * @param enabled Sets whether to enable the virtual background:
- * - true: Enable.
- * - false: Disable.
- * @param backgroundSource The custom background image. See VirtualBackgroundSource. **Note**: To adapt the
- * resolution of the custom background image to the resolution of the SDK capturing video, the SDK scales and crops
- * the custom background image while ensuring that the content of the custom background image is not distorted.
- *
- * @return
- * - 0: Success.
- * - < 0: Failure.
+ * - Using a video as your virtual background will lead to a continuous increase in memory usage,
+ * which may cause issues such as app crashes. Therefore, it is recommended to reduce the resolution
+ * and frame rate of the video when using it.
+ * - This feature has high requirements on device performance. When calling this method, the SDK
+ * automatically checks the capabilities of the current device. Agora recommends you use virtual
+ * background on devices with the following processors:
+ * - Snapdragon 700 series 750G and later
+ * - Snapdragon 800 series 835 and later
+ * - Dimensity 700 series 720 and later
+ * - Kirin 800 series 810 and later
+ * - Kirin 900 series 980 and later
+ * - Devices with an i5 CPU and better
+ * - Devices with an A9 chip and better, as follows:
+ * - iPhone 6S and later
+ * - iPad Air 3rd generation and later
+ * - iPad 5th generation and later
+ * - iPad Pro 1st generation and later
+ * - iPad mini 5th generation and later
+ * - Agora recommends that you use this feature in scenarios that meet the following conditions:
+ * - A high-definition camera device is used, and the environment is uniformly lit.
+ * - There are few objects in the captured video. Portraits are half-length and unobstructed.
+ * Ensure that the background is a solid color that is different from the color of the user's
+ * clothing.
+ * - This method relies on the virtual background dynamic library
+ * `libagora_segmentation_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ *
+ * @param enabled Whether to enable virtual background:
+ * - `true`: Enable virtual background.
+ * - `false`: Disable virtual background.
+ * @param backgroundSource The custom background. See `VirtualBackgroundSource`. To adapt the
+ * resolution of the custom background image to that of the video captured by the SDK, the SDK
+ * scales and crops the custom background image while ensuring that the content of the custom
+ * background image is not distorted.
+ * @param segproperty Processing properties for background images. See `SegmentationProperty`.
+ * @param type The type of the media source to which the filter effect is applied. See
+ * `MEDIA_SOURCE_TYPE`. Attention: In this method, this parameter supports only the following two
+ * settings:
+ * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video.
+ * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -4: The device capabilities do not meet the requirements for the virtual background feature.
+ * Agora recommends you try it on devices with higher performance.
*/
virtual int enableVirtualBackground(bool enabled, VirtualBackgroundSource backgroundSource, SegmentationProperty segproperty, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0;
/**
- * Initializes the video view of a remote user.
- *
- * This method initializes the video view of a remote stream on the local device. It affects only the
- * video view that the local user sees.
- *
- * Usually the app should specify the `uid` of the remote video in the method call before the
- * remote user joins the channel. If the remote `uid` is unknown to the app, set it later when the
- * app receives the \ref IRtcEngineEventHandler::onUserJoined "onUserJoined" callback.
+ * @brief Initializes the video view of a remote user.
*
- * To unbind the remote user from the view, set `view` in VideoCanvas as `null`.
+ * @details
+ * This method initializes the video view of a remote stream on the local device. It affects only
+ * the video view that the local user sees. Call this method to bind the remote video stream to a
+ * video view and to set the rendering and mirror modes of the video view.
+ * You need to specify the ID of the remote user in this method. If the remote user ID is unknown to
+ * the application, set it after the app receives the `onUserJoined` callback.
+ * To unbind the remote user from the view, set the `view` parameter to NULL.
+ * Once the remote user leaves the channel, the SDK unbinds the remote user.
+ * In the scenarios of custom layout for mixed videos on the mobile end, you can call this method
+ * and set a separate `view` for rendering each sub-video stream of the mixed video stream.
*
* @note
- * Ensure that you call this method in the UI thread.
+ * - To update the rendering or mirror mode of the remote video view during a call, use the
+ * `setRemoteRenderMode` method.
+ * - When using the recording service, the app does not need to bind a view, as it does not send a
+ * video stream. If your app does not recognize the recording service, bind the remote user to the
+ * view when the SDK triggers the `onFirstRemoteVideoDecoded` callback.
*
- * @param canvas The remote video view settings: VideoCanvas.
- * @return int
- * VIRTUAL_BACKGROUND_SOURCE_STATE_REASON_SUCCESS = 0,
- * VIRTUAL_BACKGROUND_SOURCE_STATE_REASON_IMAGE_NOT_EXIST = -1,
- * VIRTUAL_BACKGROUND_SOURCE_STATE_REASON_COLOR_FORMAT_NOT_SUPPORTED = -2,
- * VIRTUAL_BACKGROUND_SOURCE_STATE_REASON_DEVICE_NOT_SUPPORTED = -3,
+ * @param canvas The remote video view and settings. See `VideoCanvas`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setupRemoteVideo(const VideoCanvas& canvas) = 0;
/**
- * Initializes the local video view.
+ * @brief Initializes the local video view.
*
- * This method initializes the video view of the local stream on the local device. It affects only
- * the video view that the local user sees, not the published local video stream.
+ * @details
+ * This method initializes the video view of a local stream on the local device. It only affects the
+ * video seen by the local user and does not impact the publishing of the local video. Call this
+ * method to bind the local video stream to a video view ( `view` ) and to set the rendering and
+ * mirror modes of the video view.
+ * The binding remains valid after leaving the channel. To stop rendering or unbind the local video
+ * from the view, set `view` as NULL.
+ * Applicable scenarios: After initialization, call this method to set the local video and then join
+ * the channel.
+ * In real-time interactive scenarios, if you need to simultaneously view multiple preview frames in
+ * the local video preview, and each frame is at a different observation position along the video
+ * link, you can repeatedly call this method to set different `view`s and set different observation
+ * positions for each `view`. For example, by setting the video source to the camera and then
+ * configuring two `view`s with `position` set to POSITION_POST_CAPTURER_ORIGIN and
+ * POSITION_POST_CAPTURER, you can simultaneously preview the raw, unprocessed video frame and the
+ * video frame that has undergone preprocessing (image enhancement effects, virtual background,
+ * watermark) in the local video preview.
+ * Call timing: You can call this method either before or after joining a channel.
*
- * To unbind the local video from the view, set `view` in VideoCanvas as `null`.
+ * @note To update only the rendering or mirror mode of the local video view during a call, call
+ * `setLocalRenderMode(media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode)`
+ * instead.
*
- * @note
- * Call this method before joining a channel.
+ * @param canvas The local video view and settings. See `VideoCanvas`.
*
- * @param canvas The local video view setting: VideoCanvas.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -4691,22 +5982,71 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setupLocalVideo(const VideoCanvas& canvas) = 0;
/**
- * Sets the Video application scenario.
+ * @brief Sets video application scenarios.
*
* @since v4.2.0
*
- * You can call this method to set the expected video scenario.
- * The SDK will optimize the video experience for each scenario you set.
- *
- *
- * @param scenarioType The video application scenario. See #ApplicationScenarioType.
- *
- * @return
- * - 0: Success.
- * - < 0: Failure.
- * - ERR_FAILED (1): A general error occurs (no specified reason).
- * - ERR_NOT_SUPPORTED (4): Unable to set video application scenario.
- * - ERR_NOT_INITIALIZED (7): The SDK is not initialized.
+ * @details
+ * After successfully calling this method, the SDK will automatically enable the best practice
+ * strategies and adjust key performance metrics based on the specified scenario, to optimize the
+ * video experience.
+ *
+ * @note Call this method before joining a channel.
+ *
+ * @param scenarioType The type of video application scenario. See
+ * `VIDEO_APPLICATION_SCENARIO_TYPE`.`APPLICATION_SCENARIO_MEETING` (1) is suitable for meeting
+ * scenarios. The SDK automatically enables the following strategies:
+ * - In meeting scenarios where low-quality video streams are required to have a high bitrate, the
+ * SDK automatically enables multiple technologies used to deal with network congestions, to enhance
+ * the performance of the low-quality streams and to ensure the smooth reception by subscribers.
+ * - The SDK monitors the number of subscribers to the high-quality video stream in real time and
+ * dynamically adjusts its configuration based on the number of subscribers.
+ * - If nobody subscribes to the high-quality stream, the SDK automatically reduces its bitrate
+ * and frame rate to save upstream bandwidth.
+ * - If someone subscribes to the high-quality stream, the SDK resets the high-quality stream to
+ * the `VideoEncoderConfiguration` configuration used in the most recent calling of
+ * `setVideoEncoderConfiguration`. If no configuration has been set by the user previously, the
+ * following values are used:
+ * - Resolution: (Windows and macOS) 1280 × 720; (Android and iOS) 960 × 540
+ * - Frame rate: 15 fps
+ * - Bitrate: (Windows and macOS) 1600 Kbps; (Android and iOS) 1000 Kbps
+ * - The SDK monitors the number of subscribers to the low-quality video stream in real time and
+ * dynamically enables or disables it based on the number of subscribers. Note: If the user has
+ * called `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`
+ * to set that never send low-quality video stream (
+ * `DISABLE_SIMULCAST_STREAM` ), the dynamic adjustment of the low-quality stream in meeting
+ * scenarios will not take effect.
+ * - If nobody subscribes to the low-quality stream, the SDK automatically disables it to save
+ * upstream bandwidth.
+ * - If someone subscribes to the low-quality stream, the SDK enables the low-quality stream and
+ * resets it to the `SimulcastStreamConfig` configuration used in the most recent calling of
+ * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`. If no
+ * configuration has been set by the user previously, the following
+ * values are used:
+ * - Resolution: 480 × 272
+ * - Frame rate: 15 fps
+ * - Bitrate: 500 Kbps
+ * `APPLICATION_SCENARIO_1V1` (2) This is applicable to the `one to one live` scenario. To meet the
+ * requirements for low latency and high-quality video in this scenario, the SDK optimizes its
+ * strategies, improving performance in terms of video quality, first frame rendering, latency on
+ * mid-to-low-end devices, and smoothness under weak network conditions. Attention: This enumeration
+ * value is only applicable to the broadcaster vs. broadcaster scenario.
+ * `APPLICATION_SCENARIO_LIVESHOW` (3) This is applicable to the `show room` scenario. In this
+ * scenario, fast video rendering and high image quality are crucial. The SDK implements several
+ * performance optimizations, including automatically enabling accelerated audio and video frame
+ * rendering to minimize first-frame latency (no need to call `enableInstantMediaRendering` ), and
+ * B-frame encoding to achieve better image quality and bandwidth efficiency. The SDK also provides
+ * enhanced video quality and smooth playback, even in poor network conditions or on lower-end
+ * devices.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -4: Video application scenarios are not supported. Possible reasons include that you use the
+ * Voice SDK instead of the Video SDK.
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
+ * object before calling this method.
*/
virtual int setVideoScenario(VIDEO_APPLICATION_SCENARIO_TYPE scenarioType) = 0;
@@ -4731,13 +6071,24 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setVideoQoEPreference(VIDEO_QOE_PREFERENCE_TYPE qoePreference) = 0;
/**
- * Enables the audio.
+ * @brief Enables the audio module.
*
- * The audio is enabled by default.
+ * @details
+ * The audio module is enabled by default. After calling `disableAudio` to disable the audio module,
+ * you can call this method to re-enable it.
+ * Call timing: This method can be called either before or after joining the channel. It is still
+ * valid after one leaves channel.
*
* @note
- * This method controls the underlying states of the Engine. It is still
- * valid after one leaves channel.
+ * - Calling this method will reset the entire engine, resulting in a slow response time. You can
+ * use the following methods to independently control a specific function of the audio module based
+ * on your actual needs:
+ * - `enableLocalAudio`: Whether to enable the microphone to create the local audio stream.
+ * - `muteLocalAudioStream`: Whether to publish the local audio stream.
+ * - `muteRemoteAudioStream`: Whether to subscribe and play the remote audio stream.
+ * - `muteAllRemoteAudioStreams`: Whether to subscribe to and play all remote audio streams.
+ * - A successful call of this method resets `enableLocalAudio`, `muteRemoteAudioStream`, and
+ * `muteAllRemoteAudioStreams`. Proceed with caution.
*
* @return
* - 0: Success.
@@ -4746,12 +6097,22 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int enableAudio() = 0;
/**
- * Disables the audio.
+ * @brief Disables the audio module.
*
- * @note
- * This method controls the underlying states of the Engine. It is still
+ * @details
+ * The audio module is enabled by default, and you can call this method to disable the audio module.
+ * Call timing: This method can be called either before or after joining the channel. It is still
* valid after one leaves channel.
*
+ * @note
+ * This method resets the internal engine and takes some time to take effect. Agora recommends using
+ * the following API methods to control the audio modules separately:
+ * - `enableLocalAudio`: Whether to enable the microphone to create the local audio stream.
+ * - `enableLoopbackRecording`: Whether to enable loopback audio capturing.
+ * - `muteLocalAudioStream`: Whether to publish the local audio stream.
+ * - `muteRemoteAudioStream`: Whether to subscribe and play the remote audio stream.
+ * - `muteAllRemoteAudioStreams`: Whether to subscribe to and play all remote audio streams.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -4759,21 +6120,28 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int disableAudio() = 0;
/**
- * Sets the audio parameters and application scenarios.
+ * @brief Sets the audio profile and audio scenario.
*
- * @deprecated This method is deprecated. You can use the
- * \ref IRtcEngine::setAudioProfile(AUDIO_PROFILE_TYPE) "setAudioProfile"
- * method instead. To set the audio scenario, call the \ref IRtcEngine::initialize "initialize"
- * method and pass value in the `audioScenario` member in the RtcEngineContext struct.
+ * @deprecated This method is deprecated. You can use the `setAudioProfile(AUDIO_PROFILE_TYPE profile) = 0`
+ * method instead. To set the audio scenario, call the `initialize` method and pass value in the
+ * `audioScenario` member in the RtcEngineContext struct.
*
- * @note
- * - Call this method before calling the `joinChannel` method.
- * - In scenarios requiring high-quality audio, we recommend setting `profile` as `MUSIC_HIGH_QUALITY`(4)
- * and `scenario` as `AUDIO_SCENARIO_GAME_STREAMING`(3).
+ * @details
+ * Applicable scenarios: This method is suitable for various audio scenarios. You can choose as
+ * needed. For example, in scenarios with high audio quality requirements such as music teaching, it
+ * is recommended to set `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY`(4) and `scenario` to
+ * `AUDIO_SCENARIO_GAME_STREAMING`(3).
+ * Call timing: You can call this method either before or after joining a channel.
+ *
+ * @note Due to iOS system restrictions, some audio routes cannot be recognized in call volume mode.
+ * Therefore, if you need to use an external sound card, it is recommended to set the audio scenario
+ * to `AUDIO_SCENARIO_GAME_STREAMING`(3). In this scenario, the SDK will switch to media volume to
+ * avoid this issue.
*
- * @param profile Sets the sample rate, bitrate, encoding mode, and the number of channels:
- * #AUDIO_PROFILE_TYPE.
- * @param scenario Sets the audio application scenarios: #AUDIO_SCENARIO_TYPE.
+ * @param profile The audio profile, including the sampling rate, bitrate, encoding mode, and the
+ * number of channels. See `AUDIO_PROFILE_TYPE`.
+ * @param scenario The audio scenarios. Under different audio scenarios, the device uses different
+ * volume types. See `AUDIO_SCENARIO_TYPE`.
*
* @return
* - 0: Success.
@@ -4782,15 +6150,18 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setAudioProfile(AUDIO_PROFILE_TYPE profile, AUDIO_SCENARIO_TYPE scenario) __deprecated = 0;
/**
- * Sets the audio profile.
+ * @brief Sets audio profiles.
*
- * @note
- * - Call this method before calling the `joinChannel` method.
- * - In scenarios requiring high-quality audio, Agora recommends setting `profile` as `MUSIC_HIGH_QUALITY`(4).
- * - To set the audio scenario, call the \ref IRtcEngine::initialize "initialize"
- * method and pass value in the `audioScenario` member in the RtcEngineContext struct.
+ * @details
+ * If you need to set the audio scenario, you can either call `setAudioScenario`, or `initialize`
+ * and set the `audioScenario` in `RtcEngineContext`.
+ * Applicable scenarios: This method is suitable for various audio scenarios. You can choose as
+ * needed. For example, in scenarios with high audio quality requirements such as music teaching, it
+ * is recommended to set `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY`(4).
+ * Call timing: You can call this method either before or after joining a channel.
*
- * @param profile The audio profile, such as the sample rate, bitrate and codec type: #AUDIO_PROFILE_TYPE.
+ * @param profile The audio profile, including the sampling rate, bitrate, encoding mode, and the
+ * number of channels. See `AUDIO_PROFILE_TYPE`.
*
* @return
* - 0: Success.
@@ -4798,31 +6169,53 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int setAudioProfile(AUDIO_PROFILE_TYPE profile) = 0;
/**
- * Set the audio scenario.
+ * @brief Sets audio scenarios.
+ *
+ * @details
+ * Applicable scenarios: This method is suitable for various audio scenarios. You can choose as
+ * needed. For example, in scenarios such as music teaching that require high sound quality, it is
+ * recommended to set `scenario` to `AUDIO_SCENARIO_GAME_STREAMING`(3).
+ * Call timing: You can call this method either before or after joining a channel.
+ *
+ * @note Due to iOS system restrictions, some audio routes cannot be recognized in call volume mode.
+ * Therefore, if you need to use an external sound card, it is recommended to set the audio scenario
+ * to `AUDIO_SCENARIO_GAME_STREAMING`(3). In this scenario, the SDK will switch to media volume to
+ * avoid this issue.
+ *
+ * @param scenario The audio scenarios. Under different audio scenarios, the device uses different
+ * volume types. See `AUDIO_SCENARIO_TYPE`.
*
- * @param scenario The audio scenario: #AUDIO_SCENARIO_TYPE.
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int setAudioScenario(AUDIO_SCENARIO_TYPE scenario) = 0;
/**
- * Enables or disables the local audio capture.
- *
- * The audio function is enabled by default. This method disables or re-enables the
- * local audio function, that is, to stop or restart local audio capture and
- * processing.
- *
- * This method does not affect receiving or playing the remote audio streams,
- * and `enableLocalAudio` (false) is applicable to scenarios where the user wants
- * to receive remote audio streams without sending any audio stream to other users
- * in the channel.
+ * @brief Enables or disables the local audio capture.
+ *
+ * @details
+ * The audio function is enabled by default when users join a channel. This method disables or
+ * re-enables the local audio function to stop or restart local audio capturing.
+ * The differences between this method and `muteLocalAudioStream` are as follows:
+ * - `enableLocalAudio`: Disables or re-enables the local audio capturing and processing. If you
+ * disable or re-enable local audio capturing using the `enableLocalAudio` method, the local user
+ * might hear a pause in the remote audio playback.
+ * - `muteLocalAudioStream`: Sends or stops sending the local audio streams without affecting the
+ * audio capture status.
+ * Applicable scenarios: This method does not affect receiving the remote audio streams.
+ * `enableLocalAudio` `(false)` is suitable for scenarios where the user wants to receive remote
+ * audio streams without sending locally captured audio.
+ * Call timing: You can call this method either before or after joining a channel. Calling it before
+ * joining a channel only sets the device state, and it takes effect immediately after you join the
+ * channel.
+ * Related callbacks: Once the local audio function is disabled or re-enabled, the SDK triggers the
+ * `onLocalAudioStateChanged` callback, which reports `LOCAL_AUDIO_STREAM_STATE_STOPPED` (0) or
+ * `LOCAL_AUDIO_STREAM_STATE_RECORDING` (1).
*
- * @param enabled Determines whether to disable or re-enable the local audio function:
- * - true: (Default) Re-enable the local audio function, that is, to start local
- * audio capture and processing.
- * - false: Disable the local audio function, that is, to stop local audio
- * capture and processing.
+ * @param enabled
+ * - `true`: (Default) Re-enable the local audio function, that is, to start the
+ * local audio capturing device (for example, the microphone).
+ * - `false`: Disable the local audio function, that is, to stop local audio capturing.
*
* @return
* - 0: Success.
@@ -4831,29 +6224,24 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int enableLocalAudio(bool enabled) = 0;
/**
- Stops or resumes sending the local audio stream.
-
- After calling this method successfully, the SDK triggers the
- \ref IRtcEngineEventHandler::onRemoteAudioStateChanged "onRemoteAudioStateChanged"
- callback with the following parameters:
- - REMOTE_AUDIO_STATE_STOPPED(0) and REMOTE_AUDIO_REASON_REMOTE_MUTED(5).
- - REMOTE_AUDIO_STATE_DECODING(2) and REMOTE_AUDIO_REASON_REMOTE_UNMUTED(6).
-
- @note
- - When `mute` is set as `true`, this method does not disable the
- microphone, which does not affect any ongoing recording.
- - If you call \ref IRtcEngine::setChannelProfile "setChannelProfile" after
- this method, the SDK resets whether or not to mute the local audio
- according to the channel profile and user role. Therefore, we recommend
- calling this method after the `setChannelProfile` method.
-
- @param mute Determines whether to send or stop sending the local audio stream:
- - true: Stop sending the local audio stream.
- - false: (Default) Send the local audio stream.
-
- @return
- - 0: Success.
- - < 0: Failure.
+ * @brief Stops or resumes publishing the local audio stream.
+ *
+ * @details
+ * This method is used to control whether to publish the locally captured audio stream. If you call
+ * this method to stop publishing locally captured audio streams, the audio capturing device will
+ * still work and won't be affected.
+ * Call timing: This method can be called either before or after joining the channel.
+ * Related callbacks: After successfully calling this method, the local end triggers callback
+ * `onAudioPublishStateChanged`; the remote end triggers `onUserMuteAudio` and
+ * `onRemoteAudioStateChanged` callbacks.
+ *
+ * @param mute Whether to stop publishing the local audio stream:
+ * - `true`: Stops publishing the local audio stream.
+ * - `false`: (Default) Resumes publishing the local audio stream.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int muteLocalAudioStream(bool mute) = 0;
@@ -4898,19 +6286,46 @@ class IRtcEngine : public agora::base::IEngineBase {
- 0: Success.
- < 0: Failure.
*/
+ /**
+ * @brief Stops or resumes subscribing to the audio streams of all remote users.
+ *
+ * @details
+ * After successfully calling this method, the local user stops or resumes subscribing to the audio
+ * streams of all remote users, including all subsequent users.
+ * Call timing: Call this method after joining a channel.
+ *
+ * @note
+ * If you call this method and then call `enableAudio` or `disableAudio`, the latest call will
+ * prevail.
+ * By default, the SDK subscribes to the audio streams of all remote users when joining a channel.
+ * To modify this behavior, you can set `autoSubscribeAudio` to `false` when calling
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)`
+ * to join the channel, which will cancel the subscription to the audio streams of all users
+ * upon joining the channel.
+ *
+ * @param mute Whether to stop subscribing to the audio streams of all remote users:
+ * - `true`: Stops subscribing to the audio streams of all remote users.
+ * - `false`: (Default) Subscribes to the audio streams of all remote users by default.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int muteAllRemoteAudioStreams(bool mute) = 0;
/**
- * Stops or resumes receiving the audio stream of a specified user.
+ * @brief Stops or resumes subscribing to the audio stream of a specified user.
*
- * @note
- * You can call this method before or after joining a channel. If a user
- * leaves a channel, the settings in this method become invalid.
+ * @details
+ * Call timing: Call this method after joining a channel.
+ * Related callbacks: After a successful method call, the SDK triggers the
+ * `onAudioSubscribeStateChanged` callback.
*
- * @param uid The ID of the specified user.
- * @param mute Whether to stop receiving the audio stream of the specified user:
- * - true: Stop receiving the audio stream of the specified user.
- * - false: (Default) Resume receiving the audio stream of the specified user.
+ * @param uid The user ID of the specified user.
+ * @param mute Whether to subscribe to the specified remote user's audio stream.
+ * - `true`: Stop subscribing to the audio stream of the specified user.
+ * - `false`: (Default) Subscribe to the audio stream of the specified user.
*
* @return
* - 0: Success.
@@ -4919,11 +6334,22 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int muteRemoteAudioStream(uid_t uid, bool mute) = 0;
/**
- * Stops or resumes sending the local video stream.
+ * @brief Stops or resumes publishing the local video stream.
+ *
+ * @details
+ * This method is used to control whether to publish the locally captured video stream. If you call
+ * this method to stop publishing locally captured video streams, the video capturing device will
+ * still work and won't be affected.
+ * Compared to `enableLocalVideo` (`false`), which can also cancel the publishing of local video
+ * stream by turning off the local video stream capture, this method responds faster.
+ * Call timing: This method can be called either before or after joining the channel.
+ * Related callbacks: After successfully calling this method, the local end triggers callback
+ * `onVideoPublishStateChanged`; the remote end triggers `onUserMuteVideo` and
+ * `onRemoteVideoStateChanged` callbacks.
*
- * @param mute Determines whether to send or stop sending the local video stream:
- * - true: Stop sending the local video stream.
- * - false: (Default) Send the local video stream.
+ * @param mute Whether to stop publishing the local video stream.
+ * - `true`: Stop publishing the local video stream.
+ * - `false`: (Default) Publish the local video stream.
*
* @return
* - 0: Success.
@@ -4932,24 +6358,29 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int muteLocalVideoStream(bool mute) = 0;
/**
- * Disables or re-enables the local video capture.
+ * @brief Enables/Disables the local video capture.
*
- * Once you enable the video using \ref enableVideo "enableVideo", the local video is enabled
- * by default. This method disables or re-enables the local video capture.
- *
- * `enableLocalVideo(false)` applies to scenarios when the user wants to watch the remote video
- * without sending any video stream to the other user.
+ * @details
+ * This method disables or re-enables the local video capture, and does not affect receiving the
+ * remote video stream.
+ * After calling `enableVideo`, the local video capture is enabled by default.
+ * If you call `enableLocalVideo` (`false`) to disable local video capture within the channel, it
+ * also simultaneously stops publishing the video stream within the channel. If you want to restart
+ * video capture, you can call `enableLocalVideo` (`true`) and then call `updateChannelMediaOptions`
+ * to set the `options` parameter to publish the locally captured video stream in the channel.
+ * After the local video capturer is successfully disabled or re-enabled, the SDK triggers the
+ * `onRemoteVideoStateChanged` callback on the remote client.
*
* @note
- * Call this method after `enableVideo`. Otherwise, this method may not work properly.
+ * - You can call this method either before or after joining a channel. However, if you call it
+ * before joining, the settings will only take effect once you have joined the channel.
+ * - This method enables the internal engine and is valid after leaving the channel.
*
- * @param enabled Determines whether to disable or re-enable the local video, including
- * the capturer, renderer, and sender:
- * - true: (Default) Re-enable the local video.
- * - false: Disable the local video. Once the local video is disabled, the remote
- * users can no longer receive the video stream of this user, while this user
- * can still receive the video streams of other remote users. When you set
- * `enabled` as `false`, this method does not require a local camera.
+ * @param enabled Whether to enable the local video capture.
+ * - `true`: (Default) Enable the local video capture.
+ * - `false`: Disable the local video capture. Once the local video is disabled, the remote users
+ * cannot receive the video stream of the local user, while the local user can still receive the
+ * video streams of remote users. When set to `false`, this method does not require a local camera.
*
* @return
* - 0: Success.
@@ -4996,12 +6427,66 @@ class IRtcEngine : public agora::base::IEngineBase {
- 0: Success.
- < 0: Failure.
*/
+ /**
+ * @brief Stops or resumes subscribing to the video streams of all remote users.
+ *
+ * @details
+ * After successfully calling this method, the local user stops or resumes subscribing to the video
+ * streams of all remote users, including all subsequent users.
+ * Call timing: Call this method after joining a channel.
+ *
+ * @note
+ * If you call this method and then call `enableVideo` or `disableVideo`, the latest call will
+ * prevail.
+ * By default, the SDK subscribes to the video streams of all remote users when joining a channel.
+ * To modify this behavior, you can set `autoSubscribeVideo` to `false` when calling
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)`
+ * to join the channel, which will cancel the subscription to the video streams of all users
+ * upon joining the channel.
+ *
+ * @param mute Whether to stop subscribing to the video streams of all remote users.
+ * - `true`: Stop subscribing to the video streams of all remote users.
+ * - `false`: (Default) Subscribe to the video streams of all remote users by default.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int muteAllRemoteVideoStreams(bool mute) = 0;
/**
- * Sets the default stream type of the remote video if the remote user has enabled dual-stream.
+ * @brief Sets the default video stream type to subscribe to.
+ *
+ * @details
+ * Depending on the default behavior of the sender and the specific settings when calling
+ * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`, the
+ * scenarios for the receiver calling this method are as follows:
+ * - The SDK enables low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` ) on the
+ * sender side by default, meaning only the high-quality video stream is transmitted. Only the
+ * receiver with the role of the **host** can call this method to initiate a low-quality video stream
+ * request. Once the sender receives the request, it starts automatically sending the low-quality
+ * video stream. At this point, all users in the channel can call this method to switch to
+ * low-quality video stream subscription mode.
+ * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig&
+ * streamConfig)` and sets `mode` to `DISABLE_SIMULCAST_STREAM`
+ * (never send low-quality video stream), then calling this method will have no effect.
+ * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig&
+ * streamConfig)` and sets `mode` to `ENABLE_SIMULCAST_STREAM`
+ * (always send low-quality video stream), both the host and audience receivers can call this method
+ * to switch to low-quality video stream subscription mode.
+ * The SDK will dynamically adjust the size of the corresponding video stream based on the size of
+ * the video window to save bandwidth and computing resources. The default aspect ratio of the
+ * low-quality video stream is the same as that of the high-quality video stream. According to the
+ * current aspect ratio of the high-quality video stream, the system will automatically allocate the
+ * resolution, frame rate, and bitrate of the low-quality video stream.
+ * Call timing: Call this method before joining a channel. The SDK does not support changing the
+ * default subscribed video stream type after joining a channel.
*
- * @param streamType Sets the default video stream type: #VIDEO_STREAM_TYPE.
+ * @note If you call both this method and `setRemoteVideoStreamType`, the setting of
+ * `setRemoteVideoStreamType` takes effect.
+ *
+ * @param streamType The default video-stream type. See `VIDEO_STREAM_TYPE`.
*
* @return
* - 0: Success.
@@ -5010,16 +6495,17 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setRemoteDefaultVideoStreamType(VIDEO_STREAM_TYPE streamType) = 0;
/**
- * Stops or resumes receiving the video stream of a specified user.
+ * @brief Stops or resumes subscribing to the video stream of a specified user.
*
- * @note
- * You can call this method before or after joining a channel. If a user
- * leaves a channel, the settings in this method become invalid.
+ * @details
+ * Call timing: Call this method after joining a channel.
+ * Related callbacks: After a successful method call, the SDK triggers the
+ * `onVideoSubscribeStateChanged` callback.
*
- * @param uid The ID of the specified user.
- * @param mute Whether to stop receiving the video stream of the specified user:
- * - true: Stop receiving the video stream of the specified user.
- * - false: (Default) Resume receiving the video stream of the specified user.
+ * @param uid The user ID of the specified user.
+ * @param mute Whether to subscribe to the specified remote user's video stream.
+ * - `true`: Stop subscribing to the video streams of the specified user.
+ * - `false`: (Default) Subscribe to the video stream of the specified user.
*
* @return
* - 0: Success.
@@ -5028,17 +6514,38 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int muteRemoteVideoStream(uid_t uid, bool mute) = 0;
/**
- * Sets the remote video stream type.
- *
- * If the remote user has enabled the dual-stream mode, by default the SDK receives the high-stream video by
- * Call this method to switch to the low-stream video.
+ * @brief Sets the video stream type to subscribe to.
+ *
+ * @details
+ * Depending on the default behavior of the sender and the specific settings when calling
+ * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`, the
+ * scenarios for the receiver calling this method are as follows:
+ * - The SDK enables low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` ) on the
+ * sender side by default, meaning only the high-quality video stream is transmitted. Only the
+ * receiver with the role of the **host** can call this method to initiate a low-quality video stream
+ * request. Once the sender receives the request, it starts automatically sending the low-quality
+ * video stream. At this point, all users in the channel can call this method to switch to
+ * low-quality video stream subscription mode.
+ * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig&
+ * streamConfig)` and sets `mode` to `DISABLE_SIMULCAST_STREAM`
+ * (never send low-quality video stream), then calling this method will have no effect.
+ * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig&
+ * streamConfig)` and sets `mode` to `ENABLE_SIMULCAST_STREAM`
+ * (always send low-quality video stream), both the host and audience receivers can call this method
+ * to switch to low-quality video stream subscription mode.
+ * The SDK will dynamically adjust the size of the corresponding video stream based on the size of
+ * the video window to save bandwidth and computing resources. The default aspect ratio of the
+ * low-quality video stream is the same as that of the high-quality video stream. According to the
+ * current aspect ratio of the high-quality video stream, the system will automatically allocate the
+ * resolution, frame rate, and bitrate of the low-quality video stream.
*
* @note
- * This method applies to scenarios where the remote user has enabled the dual-stream mode using
- * \ref enableDualStreamMode "enableDualStreamMode"(true) before joining the channel.
+ * - You can call this method either before or after joining a channel.
+ * - If you call both this method and `setRemoteDefaultVideoStreamType`, the setting of this method
+ * takes effect.
*
- * @param uid ID of the remote user sending the video stream.
- * @param streamType Sets the video stream type: #VIDEO_STREAM_TYPE.
+ * @param uid The user ID.
+ * @param streamType The video stream type, see `VIDEO_STREAM_TYPE`.
*
* @return
* - 0: Success.
@@ -5047,11 +6554,25 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setRemoteVideoStreamType(uid_t uid, VIDEO_STREAM_TYPE streamType) = 0;
/**
- * Sets the remote video subscription options
+ * @brief Options for subscribing to remote video streams.
+ *
+ * @details
+ * When a remote user has enabled dual-stream mode, you can call this method to choose the option
+ * for subscribing to the video streams sent by the remote user. The default subscription behavior
+ * of the SDK for remote video streams depends on the type of registered video observer:
+ * - If the `IVideoFrameObserver` observer is registered, the default is to subscribe to both raw
+ * data and encoded data.
+ * - If the `IVideoEncodedFrameObserver` observer is registered, the default is to subscribe only to
+ * the encoded data.
+ * - If both types of observers are registered, the default behavior follows the last registered
+ * video observer. For example, if the last registered observer is the `IVideoFrameObserver`
+ * observer, the default is to subscribe to both raw data and encoded data.
+ * If you want to modify the default behavior, or set different subscription options for different
+ * `uids`, you can call this method to set it.
*
+ * @param uid The user ID of the remote user.
+ * @param options The video subscription options. See `VideoSubscriptionOptions`.
*
- * @param uid ID of the remote user sending the video stream.
- * @param options Sets the video subscription options.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -5059,14 +6580,27 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setRemoteVideoSubscriptionOptions(uid_t uid, const VideoSubscriptionOptions &options) = 0;
/**
- * Sets the blocklist of subscribe remote stream audio.
+ * @brief Sets the blocklist of subscriptions for audio streams.
*
- * @param uidList The id list of users whose audio you do not want to subscribe to.
- * @param uidNumber The number of uid in uidList.
+ * @details
+ * You can call this method to specify the audio streams of a user that you do not want to subscribe
+ * to.
*
* @note
- * If uid is in uidList, the remote user's audio will not be subscribed,
- * even if muteRemoteAudioStream(uid, false) and muteAllRemoteAudioStreams(false) are operated.
+ * - You can call this method either before or after joining a channel.
+ * - The blocklist is not affected by the setting in `muteRemoteAudioStream`,
+ * `muteAllRemoteAudioStreams`, and `autoSubscribeAudio` in `ChannelMediaOptions`.
+ * - Once the blocklist of subscriptions is set, it is effective even if you leave the current
+ * channel and rejoin the channel.
+ * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes
+ * effect.
+ *
+ * @param uidList The user ID list of users that you do not want to subscribe to.
+ * If you want to specify the audio streams of a user that you do not want to subscribe to, add the
+ * user ID in this list. If you want to remove a user from the blocklist, you need to call the
+ * `setSubscribeAudioBlocklist` method to update the user ID list; this means you only add the `uid`
+ * of users that you do not want to subscribe to in the new user ID list.
+ * @param uidNumber The number of users in the user ID list.
*
* @return
* - 0: Success.
@@ -5075,16 +6609,26 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setSubscribeAudioBlocklist(uid_t* uidList, int uidNumber) = 0;
/**
- * Sets the allowlist of subscribe remote stream audio.
+ * @brief Sets the allowlist of subscriptions for audio streams.
*
- * @param uidList The id list of users whose audio you want to subscribe to.
- * @param uidNumber The number of uid in uidList.
+ * @details
+ * You can call this method to specify the audio streams of a user that you want to subscribe to.
*
* @note
- * If uid is in uidList, the remote user's audio will be subscribed,
- * even if muteRemoteAudioStream(uid, true) and muteAllRemoteAudioStreams(true) are operated.
+ * - You can call this method either before or after joining a channel.
+ * - The allowlist is not affected by the setting in `muteRemoteAudioStream`,
+ * `muteAllRemoteAudioStreams` and `autoSubscribeAudio` in `ChannelMediaOptions`.
+ * - Once the allowlist of subscriptions is set, it is effective even if you leave the current
+ * channel and rejoin the channel.
+ * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes
+ * effect.
*
- * If a user is in the blocklist and allowlist at the same time, only the blocklist takes effect.
+ * @param uidList The user ID list of users that you want to subscribe to.
+ * If you want to specify the audio streams of a user for subscription, add the user ID in this
+ * list. If you want to remove a user from the allowlist, you need to call the
+ * `setSubscribeAudioAllowlist` method to update the user ID list; this means you only add the `uid`
+ * of users that you want to subscribe to in the new user ID list.
+ * @param uidNumber The number of users in the user ID list.
*
* @return
* - 0: Success.
@@ -5093,14 +6637,27 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setSubscribeAudioAllowlist(uid_t* uidList, int uidNumber) = 0;
/**
- * Sets the blocklist of subscribe remote stream video.
+ * @brief Sets the blocklist of subscriptions for video streams.
*
- * @param uidList The id list of users whose video you do not want to subscribe to.
- * @param uidNumber The number of uid in uidList.
+ * @details
+ * You can call this method to specify the video streams of a user that you do not want to subscribe
+ * to.
*
* @note
- * If uid is in uidList, the remote user's video will not be subscribed,
- * even if muteRemoteVideoStream(uid, false) and muteAllRemoteVideoStreams(false) are operated.
+ * - You can call this method either before or after joining a channel.
+ * - The blocklist is not affected by the setting in `muteRemoteVideoStream`,
+ * `muteAllRemoteVideoStreams` and `autoSubscribeVideo` in `ChannelMediaOptions`.
+ * - Once the blocklist of subscriptions is set, it is effective even if you leave the current
+ * channel and rejoin the channel.
+ * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes
+ * effect.
+ *
+ * @param uidList The user ID list of users that you do not want to subscribe to.
+ * If you want to specify the video streams of a user that you do not want to subscribe to, add the
+ * user ID of that user in this list. If you want to remove a user from the blocklist, you need to
+ * call the `setSubscribeVideoBlocklist` method to update the user ID list; this means you only add
+ * the `uid` of users that you do not want to subscribe to in the new user ID list.
+ * @param uidNumber The number of users in the user ID list.
*
* @return
* - 0: Success.
@@ -5109,16 +6666,26 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setSubscribeVideoBlocklist(uid_t* uidList, int uidNumber) = 0;
/**
- * Sets the allowlist of subscribe remote stream video.
+ * @brief Sets the allowlist of subscriptions for video streams.
*
- * @param uidList The id list of users whose video you want to subscribe to.
- * @param uidNumber The number of uid in uidList.
+ * @details
+ * You can call this method to specify the video streams of a user that you want to subscribe to.
*
* @note
- * If uid is in uidList, the remote user's video will be subscribed,
- * even if muteRemoteVideoStream(uid, true) and muteAllRemoteVideoStreams(true) are operated.
+ * - You can call this method either before or after joining a channel.
+ * - The allowlist is not affected by the setting in `muteRemoteVideoStream`,
+ * `muteAllRemoteVideoStreams` and `autoSubscribeVideo` in `ChannelMediaOptions`.
+ * - Once the allowlist of subscriptions is set, it is effective even if you leave the current
+ * channel and rejoin the channel.
+ * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes
+ * effect.
*
- * If a user is in the blocklist and allowlist at the same time, only the blocklist takes effect.
+ * @param uidList The user ID list of users that you want to subscribe to.
+ * If you want to specify the video streams of a user for subscription, add the user ID of that user
+ * in this list. If you want to remove a user from the allowlist, you need to call the
+ * `setSubscribeVideoAllowlist` method to update the user ID list; this means you only add the `uid`
+ * of users that you want to subscribe to in the new user ID list.
+ * @param uidNumber The number of users in the user ID list.
*
* @return
* - 0: Success.
@@ -5127,26 +6694,32 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setSubscribeVideoAllowlist(uid_t* uidList, int uidNumber) = 0;
/**
- * Enables the `onAudioVolumeIndication` callback to report on which users are speaking
- * and the speakers' volume.
+ * @brief Enables the reporting of users' volume indication.
*
- * Once the \ref IRtcEngineEventHandler::onAudioVolumeIndication "onAudioVolumeIndication"
- * callback is enabled, the SDK returns the volume indication in the at the time interval set
- * in `enableAudioVolumeIndication`, regardless of whether any user is speaking in the channel.
+ * @details
+ * This method enables the SDK to regularly report the volume information to the app of the local
+ * user who sends a stream and remote users (three users at most) whose instantaneous volumes are
+ * the highest.
+ * Call timing: This method can be called either before or after joining the channel.
+ * Related callbacks: The SDK triggers the `onAudioVolumeIndication` callback according to the
+ * interval you set if this method is successfully called and there are users publishing streams in
+ * the channel.
*
* @param interval Sets the time interval between two consecutive volume indications:
- * - <= 0: Disables the volume indication.
- * - > 0: Time interval (ms) between two consecutive volume indications,
- * and should be integral multiple of 200 (less than 200 will be set to 200).
- * @param smooth The smoothing factor that sets the sensitivity of the audio volume
- * indicator. The value range is [0, 10]. The greater the value, the more sensitive the
- * indicator. The recommended value is 3.
- * @param reportVad
- * - `true`: Enable the voice activity detection of the local user. Once it is enabled, the `vad` parameter of the
- * `onAudioVolumeIndication` callback reports the voice activity status of the local user.
- * - `false`: (Default) Disable the voice activity detection of the local user. Once it is disabled, the `vad` parameter
- * of the `onAudioVolumeIndication` callback does not report the voice activity status of the local user, except for
- * the scenario where the engine automatically detects the voice activity of the local user.
+ * - ≤ 0: Disables the volume indication.
+ * - > 0: Time interval (ms) between two consecutive volume indications. Ensure this parameter is
+ * set to a value greater than 10, otherwise you will not receive the `onAudioVolumeIndication`
+ * callback. Agora recommends that this value is set as greater than 100.
+ * @param smooth The smoothing factor that sets the sensitivity of the audio volume indicator. The
+ * value ranges between 0 and 10. The recommended value is 3. The greater the value, the more
+ * sensitive the indicator.
+ * @param reportVad - `true`: Enables the voice activity detection of the local user. Once it is
+ * enabled, the `vad` parameter of the `onAudioVolumeIndication` callback reports the voice activity
+ * status of the local user.
+ * - `false`: (Default) Disables the voice activity detection of the local user. Once it is
+ * disabled, the `vad` parameter of the `onAudioVolumeIndication` callback does not report the voice
+ * activity status of the local user, except for the scenario where the engine automatically detects
+ * the voice activity of the local user.
*
* @return
* - 0: Success.
@@ -5154,49 +6727,62 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int enableAudioVolumeIndication(int interval, int smooth, bool reportVad) = 0;
- /** Starts an audio recording.
-
- The SDK allows recording during a call, which supports either one of the
- following two formats:
-
- - .wav: Large file size with high sound fidelity
- - .aac: Small file size with low sound fidelity
-
- Ensure that the directory to save the recording file exists and is writable.
- This method is usually called after the joinChannel() method.
- The recording automatically stops when the leaveChannel() method is
- called.
-
- @param filePath Full file path of the recording file. The string of the file
- name is in UTF-8 code.
- @param quality Sets the audio recording quality: #AUDIO_RECORDING_QUALITY_TYPE.
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Starts client-side audio recording.
+ *
+ * @details
+ * The SDK supports recording on the client during a call. After calling this method, you can record
+ * the audio of users in the channel and obtain a recording file. The recording file supports the
+ * following formats only:
+ * - WAV: Higher audio fidelity, larger file size. For example, with a sample rate of 32000 Hz, a
+ * 10-minute recording is about 73 MB.
+ * - AAC: Lower audio fidelity, smaller file size. For example, with a sample rate of 32000 Hz and
+ * recording quality set to AUDIO_RECORDING_QUALITY_MEDIUM, a 10-minute recording is about 2 MB.
+ * Recording automatically stops when the user leaves the channel.
+ * Call timing: This method must be called after joining a channel.
+ *
+ * @param filePath The absolute path where the recording file is saved locally, including the file
+ * name and extension. Ensure that the specified directory exists and is writable.
+ * @param quality Recording quality. See `AUDIO_RECORDING_QUALITY_TYPE`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
+ */
virtual int startAudioRecording(const char* filePath,
AUDIO_RECORDING_QUALITY_TYPE quality) = 0;
- /** Starts an audio recording.
-
- The SDK allows recording during a call, which supports either one of the
- following two formats:
-
- - .wav: Large file size with high sound fidelity
- - .aac: Small file size with low sound fidelity
-
- Ensure that the directory to save the recording file exists and is writable.
- This method is usually called after the joinChannel() method.
- The recording automatically stops when the leaveChannel() method is
- called.
-
- @param filePath Full file path of the recording file. The string of the file
- name is in UTF-8 code.
- @param sampleRate Sample rate, value should be 16000, 32000, 44100, or 48000.
- @param quality Sets the audio recording quality: #AUDIO_RECORDING_QUALITY_TYPE.
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Starts client-side audio recording and sets the recording sample rate.
+ *
+ * @details
+ * The SDK supports recording on the client during a call. After calling this method, you can record
+ * the audio of all users in the channel and obtain a recording file that includes all voices. The
+ * recording file supports the following formats only:
+ * - .wav: Large file size, higher audio fidelity.
+ * - .aac: Smaller file size, lower audio fidelity.
+ *
+ * @note
+ * - Make sure the path you specify in this method exists and is writable.
+ * - This method must be called after `joinChannel(const char* token, const char* channelId, uid_t
+ * uid, const ChannelMediaOptions& options)`. If `leaveChannel(const LeaveChannelOptions& options)`
+ * is called while recording is in progress, the recording will automatically stop.
+ * - To ensure recording quality, when `sampleRate` is set to 44.1 kHz or 48 kHz, it is recommended
+ * to set `quality` to `AUDIO_RECORDING_QUALITY_MEDIUM`
+ * or `AUDIO_RECORDING_QUALITY_HIGH`.
+ *
+ * @param filePath The absolute path where the recording file will be saved locally, including the
+ * file name and extension. For example: `C:\music\audio.aac`.
+ * Note:
+ * Make sure the specified path exists and is writable.
+ * @param sampleRate Recording sample rate (Hz). You can set it to one of the following values:
+ * - 16000
+ * - 32000 (default)
+ * - 44100
+ * - 48000
+ * @param quality Recording quality. See `AUDIO_RECORDING_QUALITY_TYPE`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
+ */
virtual int startAudioRecording(const char* filePath,
int sampleRate,
AUDIO_RECORDING_QUALITY_TYPE quality) = 0;
@@ -5221,68 +6807,94 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int startAudioRecording(const AudioRecordingConfiguration& config) = 0;
- /** register encoded audio frame observer
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Registers an encoded audio observer.
+ *
+ * @note
+ * - Call this method after joining a channel.
+ * - You can call this method or `startAudioRecording [3/3]` to set the recording type and quality
+ * of audio files, but Agora does not recommend using this method and `startAudioRecording [3/3]` at
+ * the same time. Only the method called later will take effect.
+ *
+ * @param config Observer settings for the encoded audio. See `AudioEncodedFrameObserverConfig`.
+ * @param observer The encoded audio observer. See `IAudioEncodedFrameObserver`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int registerAudioEncodedFrameObserver(const AudioEncodedFrameObserverConfig& config, IAudioEncodedFrameObserver *observer) = 0;
- /** Stops the audio recording on the client.
-
- The recording automatically stops when the leaveChannel() method is called.
-
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Stops client-side audio recording.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
+ */
virtual int stopAudioRecording() = 0;
- /**
- * Creates a media player source object and return its pointer. If full featured
- * media player source is supported, it will create it, or it will create a simple
- * media player.
+ /**
+ * @brief Creates a media player object.
+ *
+ * @details
+ * Before calling any APIs in the `IMediaPlayer` class, you need to call this method to create an
+ * instance of the media player. If you need to create multiple instances, you can call this method
+ * multiple times.
+ * Call timing: You can call this method either before or after joining a channel.
*
* @return
- * - The pointer to \ref rtc::IMediaPlayerSource "IMediaPlayerSource",
- * if the method call succeeds.
- * - The empty pointer NULL, if the method call fails.
+ * - An `IMediaPlayer` object, if the method call succeeds.
+ * - An empty pointer, if the method call fails.
*/
virtual agora_refptr<IMediaPlayer> createMediaPlayer() = 0;
/**
- * Destroy a media player source instance.
- * If a media player source instance is destroyed, the video and audio of it cannot
- * be published.
+ * @brief Destroys the media player instance.
*
- * @param media_player The pointer to \ref rtc::IMediaPlayerSource.
+ * @param media_player `IMediaPlayer` object.
*
* @return
- * - >0: The id of media player source instance.
+ * - ≥ 0: Success. Returns the ID of media player instance.
* - < 0: Failure.
*/
virtual int destroyMediaPlayer(agora_refptr<IMediaPlayer> media_player) = 0;
/**
- * Creates a media recorder object and return its pointer.
+ * @brief Creates an audio and video recording object.
+ *
+ * @details
+ * Before starting to record audio and video streams, you need to call this method to create a
+ * recording object. The SDK supports recording multiple audio and video streams from local or
+ * remote users. You can call this method multiple times to create recording objects, and use the
+ * `info`
+ * parameter to specify the channel name and the user ID of the stream to be recorded.
+ * After successful creation, you need to call `setMediaRecorderObserver` to register an observer
+ * for the recording object to listen for related callbacks, and then call `startRecording` to begin
+ * recording.
*
- * @param info The RecorderStreamInfo object. It contains the user ID and the channel name.
+ * @param info Information about the audio and video stream to be recorded. See
+ * `RecorderStreamInfo`.
*
* @return
- * - The pointer to \ref rtc::IMediaRecorder "IMediaRecorder",
- * if the method call succeeds.
- * - The empty pointer NULL, if the method call fails.
+ * - If the method call succeeds: Returns an `IMediaRecorder` object.
+ * - If the method call fails: Returns a null pointer.
*/
virtual agora_refptr<IMediaRecorder> createMediaRecorder(const RecorderStreamInfo& info) = 0;
/**
- * Destroy a media recorder object.
+ * @brief Destroys an audio and video recording object.
*
- * @param mediaRecorder The pointer to \ref rtc::IMediaRecorder.
+ * @details
+ * When you no longer need to record audio and video streams, you can call this method to destroy
+ * the corresponding recording object. If recording is in progress, call `stopRecording` first, then
+ * call this method to destroy the recording object.
+ *
+ * @param mediaRecorder The `IMediaRecorder` object to be destroyed.
*
* @return
* - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
*/
virtual int destroyMediaRecorder(agora_refptr<IMediaRecorder> mediaRecorder) = 0;
@@ -5327,6 +6939,57 @@ class IRtcEngine : public agora::base::IEngineBase {
- 0: Success.
- < 0: Failure.
*/
+ /**
+ * @brief Starts playing the music file.
+ *
+ * @details
+ * For the audio file formats supported by this method, see `What formats of audio files does the
+ * Agora RTC SDK support`. If the local music file does not exist, the SDK does not support the file
+ * format, or the SDK cannot access the music file URL, the SDK reports
+ * AUDIO_MIXING_REASON_CAN_NOT_OPEN.
+ * Call timing: You can call this method either before or after joining a channel.
+ * Related callbacks: A successful method call triggers the `onAudioMixingStateChanged`
+ * (`AUDIO_MIXING_STATE_PLAYING`) callback. When the audio mixing file playback finishes, the SDK
+ * triggers the `onAudioMixingStateChanged` (`AUDIO_MIXING_STATE_STOPPED`) callback on the local
+ * client.
+ *
+ * @note
+ * - If you call this method to play short sound effect files, you may encounter playback failure.
+ * Agora recommends using `playEffect` instead to play such files.
+ * - If you need to call this method multiple times, ensure that the time interval between calling
+ * this method is more than 500 ms.
+ * - On Android, there are following considerations:
+ * - To use this method, ensure that the Android device is v4.2 or later, and the API version is
+ * v16 or later.
+ * - If you need to play an online music file, Agora does not recommend using the redirected URL
+ * address. Some Android devices may fail to open a redirected URL address.
+ * - If you call this method on an emulator, ensure that the music file is in the `/sdcard/`
+ * directory and the format is MP3.
+ *
+ * @param filePath The file path. The SDK supports URLs and absolute path of local files. The
+ * absolute path needs to be accurate to the file name and extension. Supported audio formats
+ * include MP3, AAC, M4A, MP4, WAV, and 3GP. See `Supported Audio Formats`.
+ * Attention: If you have preloaded an audio effect into memory by calling `preloadEffect`, ensure
+ * that the value of this parameter is the same as that of `filePath` in `preloadEffect`.
+ * @param loopback Whether to only play music files on the local client:
+ * - `true`: Only play music files on the local client so that only the local user can hear the
+ * music.
+ * - `false`: Publish music files to remote clients so that both the local user and remote users can
+ * hear the music.
+ * @param cycle The number of times the music file plays.
+ * - >0: The number of times for playback. For example, 1 represents playing 1 time.
+ * - -1: Play the audio file in an infinite loop.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ * - -3: The SDK is not ready.
+ * - The audio module is disabled.
+ * - The program is not complete.
+ * - The initialization of `IRtcEngine` fails. Reinitialize the `IRtcEngine`.
+ */
virtual int startAudioMixing(const char* filePath, bool loopback, int cycle) = 0;
/** Starts playing and mixing the music file.
@@ -5372,164 +7035,324 @@ class IRtcEngine : public agora::base::IEngineBase {
- 0: Success.
- < 0: Failure.
*/
+ /**
+ * @brief Starts playing the music file.
+ *
+ * @details
+ * For the audio file formats supported by this method, see `What formats of audio files does the
+ * Agora RTC SDK support`. If the local music file does not exist, the SDK does not support the file
+ * format, or the SDK cannot access the music file URL, the SDK reports
+ * AUDIO_MIXING_REASON_CAN_NOT_OPEN.
+ * Call timing: You can call this method either before or after joining a channel.
+ * Related callbacks: A successful method call triggers the `onAudioMixingStateChanged`
+ * (`AUDIO_MIXING_STATE_PLAYING`) callback. When the audio mixing file playback finishes, the SDK
+ * triggers the `onAudioMixingStateChanged` (`AUDIO_MIXING_STATE_STOPPED`) callback on the local
+ * client.
+ *
+ * @note
+ * - If you call this method to play short sound effect files, you may encounter playback failure.
+ * Agora recommends using `playEffect` instead to play such files.
+ * - If you need to call this method multiple times, ensure that the time interval between calling
+ * this method is more than 500 ms.
+ * - On Android, there are following considerations:
+ * - To use this method, ensure that the Android device is v4.2 or later, and the API version is
+ * v16 or later.
+ * - If you need to play an online music file, Agora does not recommend using the redirected URL
+ * address. Some Android devices may fail to open a redirected URL address.
+ * - If you call this method on an emulator, ensure that the music file is in the `/sdcard/`
+ * directory and the format is MP3.
+ *
+ * @param filePath File path:
+ * - Android: The file path, which needs to be accurate to the file name and suffix. Agora supports
+ * URL addresses, absolute paths, or file paths that start with `/assets/`. You might encounter
+ * permission issues if you use an absolute path to access a local file, so Agora recommends using a
+ * URI address instead. For example:
+ * `content://com.android.providers.media.documents/document/audio%3A14441`
+ * - Windows: The absolute path or URL address (including the suffixes of the filename) of the audio
+ * effect file. For example: `C:\music\audio.mp4`.
+ * @param loopback Whether to only play music files on the local client:
+ * - `true`: Only play music files on the local client so that only the local user can hear the
+ * music.
+ * - `false`: Publish music files to remote clients so that both the local user and remote users can
+ * hear the music.
+ * @param cycle The number of times the music file plays.
+ * - >0: The number of times for playback. For example, 1 represents playing 1 time.
+ * - -1: Play the audio file in an infinite loop.
+ * @param startPos The playback position (ms) of the music file.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ * - -3: The SDK is not ready.
+ * - The audio module is disabled.
+ * - The program is not complete.
+ * - The initialization of `IRtcEngine` fails. Reinitialize the `IRtcEngine`.
+ */
virtual int startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos) = 0;
- /** Stops playing and mixing the music file.
-
- Call this method when you are in a channel.
+ /**
+ * @brief Stops playing the music file.
+ *
+ * @details
+ * After calling `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)` to
+ * play a music file, you can call this method to stop the
+ * playing. If you only need to pause the playback, call `pauseAudioMixing`.
+ * Call timing: Call this method after joining a channel.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
+ virtual int stopAudioMixing() = 0;
- @return
- - 0: Success.
- - < 0: Failure.
- */
- virtual int stopAudioMixing() = 0;
-
- /** Pauses playing and mixing the music file.
-
- Call this method when you are in a channel.
-
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Pauses playing and mixing the music file.
+ *
+ * @details
+ * After calling `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)` to
+ * play a music file, you can call this method to pause
+ * the playing. If you need to stop the playback, call `stopAudioMixing`.
+ * Call timing: Call this method after joining a channel.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int pauseAudioMixing() = 0;
- /** Resumes playing and mixing the music file.
-
- Call this method when you are in a channel.
-
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Resumes playing and mixing the music file.
+ *
+ * @details
+ * After calling `pauseAudioMixing` to pause the playback, you can call this method to resume the
+ * playback.
+ * Call timing: Call this method after joining a channel.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int resumeAudioMixing() = 0;
- /** Select audio track for the music file.
-
- Call this method when you are in a channel.
-
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Selects the audio track used during playback.
+ *
+ * @details
+ * After getting the track index of the audio file, you can call this method to specify any track to
+ * play. For example, if different tracks of a multi-track file store songs in different languages,
+ * you can call this method to set the playback language.
+ *
+ * @note
+ * - For the supported formats of audio files, see
+ * `https://docs.agora.io/en/help/general-product-inquiry/audio_format#extended-audio-file-formats`.
+ * - You need to call this method after calling `startAudioMixing(const char* filePath, bool
+ * loopback, int cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ *
+ * @param index The audio track you want to specify. The value should be greater than 0 and less
+ * than the value returned by `getAudioTrackCount`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int selectAudioTrack(int index) = 0;
- /** Get audio track count of the music file.
-
- Call this method when you are in a channel.
-
- @return
- - ≥ 0: Audio track count of the music file, if the method call is successful.
- - < 0: Failure.
+ /**
+ * @brief Gets the index of audio tracks of the current music file.
+ *
+ * @note You need to call this method after calling `startAudioMixing(const char* filePath, bool
+ * loopback, int cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ *
+ * @return
+ * - The SDK returns the index of the audio tracks if the method call succeeds.
+ * - < 0: Failure.
*/
virtual int getAudioTrackCount() = 0;
- /** Adjusts the volume during audio mixing.
-
- Call this method when you are in a channel.
-
- @note This method does not affect the volume of audio effect file playback
- invoked by the \ref IRtcEngine::playEffect "playEffect" method.
-
- @param volume The audio mixing volume. The value ranges between 0 and 100
- (default).
-
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Adjusts the volume during audio mixing.
+ *
+ * @details
+ * This method adjusts the audio mixing volume on both the local client and remote clients.
+ * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int
+ * cycle, int startPos)`.
+ *
+ * @note This method does not affect the volume of the audio file set in the `playEffect` method.
+ *
+ * @param volume Audio mixing volume. The value ranges between 0 and 100. The default value is 100,
+ * which means the original volume.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int adjustAudioMixingVolume(int volume) = 0;
- /** Adjusts the audio mixing volume for publishing (for remote users).
- @note Call this method when you are in a channel.
- @param volume Audio mixing volume for publishing. The value ranges between 0 and 100 (default).
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Adjusts the volume of audio mixing for publishing.
+ *
+ * @details
+ * This method adjusts the volume of audio mixing for publishing (sending to other users).
+ * Call timing: Call this method after calling `startAudioMixing(const char* filePath, bool
+ * loopback, int cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ *
+ * @param volume The volume of audio mixing for publishing. The value ranges between 0 and 100
+ * (default). 100 represents the original volume.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int adjustAudioMixingPublishVolume(int volume) = 0;
- /** Retrieves the audio mixing volume for publishing.
- This method helps troubleshoot audio volume related issues.
- @note Call this method when you are in a channel.
- @return
- - ≥ 0: The audio mixing volume for publishing, if this method call succeeds. The value range is [0,100].
- - < 0: Failure.
+ /**
+ * @brief Retrieves the audio mixing volume for publishing.
+ *
+ * @details
+ * This method helps troubleshoot audio volume-related issues.
+ *
+ * @note You need to call this method after calling `startAudioMixing(const char* filePath, bool
+ * loopback, int cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ *
+ * @return
+ * - ≥ 0: The audio mixing volume, if this method call succeeds. The value range is [0,100].
+ * - < 0: Failure.
*/
virtual int getAudioMixingPublishVolume() = 0;
- /** Adjusts the audio mixing volume for local playback.
- @note Call this method when you are in a channel.
- @param volume Audio mixing volume for local playback. The value ranges between 0 and 100 (default).
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Adjusts the volume of audio mixing for local playback.
+ *
+ * @details
+ * Call timing: You need to call this method after calling `startAudioMixing(const char* filePath,
+ * bool loopback, int cycle, int startPos)` and receiving
+ * the `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ *
+ * @param volume The volume of audio mixing for local playback. The value ranges between 0 and 100
+ * (default). 100 represents the original volume.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int adjustAudioMixingPlayoutVolume(int volume) = 0;
- /** Retrieves the audio mixing volume for local playback.
- This method helps troubleshoot audio volume related issues.
- @note Call this method when you are in a channel.
- @return
- - ≥ 0: The audio mixing volume, if this method call succeeds. The value range is [0,100].
- - < 0: Failure.
+ /**
+ * @brief Retrieves the audio mixing volume for local playback.
+ *
+ * @details
+ * You can call this method to get the local playback volume of the mixed audio file, which helps in
+ * troubleshooting volume-related issues.
+ * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int
+ * cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ *
+ * @return
+ * - ≥ 0: The audio mixing volume, if this method call succeeds. The value range is [0,100].
+ * - < 0: Failure.
*/
virtual int getAudioMixingPlayoutVolume() = 0;
- /** Gets the duration (ms) of the music file.
-
- Call this API when you are in a channel.
-
- @return
- - Returns the audio mixing duration, if the method call is successful.
- - < 0: Failure.
+ /**
+ * @brief Retrieves the duration (ms) of the music file.
+ *
+ * @details
+ * Retrieves the total duration (ms) of the audio.
+ * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int
+ * cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ *
+ * @return
+ * - ≥ 0: The audio mixing duration, if this method call succeeds.
+ * - < 0: Failure.
*/
virtual int getAudioMixingDuration() = 0;
- /** Gets the playback position (ms) of the music file.
-
- Call this method when you are in a channel.
-
- @return
- - ≥ 0: The current playback position of the audio mixing, if this method
- call succeeds.
- - < 0: Failure.
+ /**
+ * @brief Retrieves the playback position (ms) of the music file.
+ *
+ * @details
+ * Retrieves the playback position (ms) of the audio.
+ *
+ * @note
+ * - You need to call this method after calling `startAudioMixing(const char* filePath, bool
+ * loopback, int cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ * - If you need to call `getAudioMixingCurrentPosition` multiple times, ensure that the time
+ * interval between calling this method is more than 500 ms.
+ *
+ * @return
+ * - ≥ 0: The current playback position (ms) of the audio mixing, if this method call succeeds. 0
+ * indicates that the current music file has not started playing.
+ * - < 0: Failure.
*/
virtual int getAudioMixingCurrentPosition() = 0;
- /** Sets the playback position of the music file to a different starting
- position (the default plays from the beginning).
-
- @param pos The playback starting position (ms) of the audio mixing file.
-
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Sets the audio mixing position.
+ *
+ * @details
+ * Call this method to set the playback position of the music file to a different starting position
+ * (the default plays from the beginning).
+ * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int
+ * cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ *
+ * @param pos Integer. The playback position (ms).
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setAudioMixingPosition(int pos /*in ms*/) = 0;
- /** In dual-channel music files, different audio data can be stored on the left and right channels.
- * According to actual needs, you can set the channel mode as the original mode,
- * the left channel mode, the right channel mode or the mixed mode
-
- @param mode The mode of channel mode
-
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Sets the channel mode of the current audio file.
+ *
+ * @details
+ * In a stereo music file, the left and right channels can store different audio data. According to
+ * your needs, you can set the channel mode to original mode, left channel mode, right channel mode,
+ * or mixed channel mode.
+ * Applicable scenarios: For example, in the KTV scenario, the left channel of the music file stores
+ * the musical accompaniment, and the right channel stores the original singer's vocals. You can set
+ * according to actual needs:
+ * - If you only want to hear the accompaniment, use this method to set the audio file's channel
+ * mode to left channel mode.
+ * - If you need to hear both the accompaniment and the original vocals simultaneously, call this
+ * method to set the channel mode to mixed mode.
+ * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int
+ * cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
+ *
+ * @note This method only applies to stereo audio files.
+ *
+ * @param mode The channel mode. See `AUDIO_MIXING_DUAL_MONO_MODE`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setAudioMixingDualMonoMode(media::AUDIO_MIXING_DUAL_MONO_MODE mode) = 0;
- /** Sets the pitch of the local music file.
- *
- * When a local music file is mixed with a local human voice, call this method to set the pitch of the local music file only.
+ /**
+ * @brief Sets the pitch of the local music file.
*
- * @note Call this method after calling \ref IRtcEngine::startAudioMixing "startAudioMixing" and
- * receiving the \ref IRtcEngineEventHandler::onAudioMixingStateChanged "onAudioMixingStateChanged" (AUDIO_MIXING_STATE_PLAYING) callback.
+ * @details
+ * When a local music file is mixed with a local human voice, call this method to set the pitch of
+ * the local music file only.
+ * Call timing: You need to call this method after calling `startAudioMixing(const char* filePath,
+ * bool loopback, int cycle, int startPos)` and receiving
+ * the `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback.
*
- * @param pitch Sets the pitch of the local music file by chromatic scale. The default value is 0,
- * which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value between
- * consecutive values is a chromatic value. The greater the absolute value of this parameter, the
- * higher or lower the pitch of the local music file.
+ * @param pitch Sets the pitch of the local music file by the chromatic scale. The default value is
+ * 0, which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value
+ * between consecutive values is a chromatic value. The greater the absolute value of this
+ * parameter, the higher or lower the pitch of the local music file.
*
* @return
* - 0: Success.
@@ -5538,12 +7361,15 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setAudioMixingPitch(int pitch) = 0;
/**
- * Sets the playback speed of the current music file.
+ * @brief Sets the playback speed of the current audio file.
*
- * @note Call this method after calling \ref IRtcEngine::startAudioMixing(const char*,bool,bool,int,int) "startAudioMixing" [2/2]
- * and receiving the \ref IRtcEngineEventHandler::onAudioMixingStateChanged "onAudioMixingStateChanged" (AUDIO_MIXING_STATE_PLAYING) callback.
+ * @details
+ * Ensure you call this method after calling `startAudioMixing(const char* filePath, bool loopback,
+ * int cycle, int startPos)` and receiving the
+ * `onAudioMixingStateChanged` callback reporting the state as `AUDIO_MIXING_STATE_PLAYING`.
*
- * @param speed The playback speed. Agora recommends that you limit this value to between 50 and 400, defined as follows:
+ * @param speed The playback speed. Agora recommends that you set this to a value between 50 and
+ * 400, defined as follows:
* - 50: Half the original speed.
* - 100: The original speed.
* - 400: 4 times the original speed.
@@ -5555,114 +7381,143 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setAudioMixingPlaybackSpeed(int speed) = 0;
/**
- * Gets the volume of audio effects.
+ * @brief Retrieves the volume of the audio effects.
+ *
+ * @details
+ * The volume is an integer ranging from 0 to 100. The default value is 100, which means the
+ * original volume.
+ *
+ * @note Call this method after `playEffect`.
*
* @return
- * - ≥ 0: The volume of audio effects. The value ranges between 0 and 100 (original volume).
+ * - Volume of the audio effects, if this method call succeeds.
* - < 0: Failure.
*/
virtual int getEffectsVolume() = 0;
- /** Sets the volume of audio effects.
+ /**
+ * @brief Sets the volume of the audio effects.
+ *
+ * @details
+ * Call timing: Call this method after `playEffect`.
*
- * @param volume The volume of audio effects. The value ranges between 0
- * and 100 (original volume).
+ * @param volume The playback volume. The value range is [0, 100]. The default value is 100, which
+ * represents the original volume.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int setEffectsVolume(int volume) = 0;
- /** Preloads a specified audio effect.
- *
- * This method preloads only one specified audio effect into the memory each time
- * it is called. To preload multiple audio effects, call this method multiple times.
- *
- * After preloading, you can call \ref IRtcEngine::playEffect "playEffect"
- * to play the preloaded audio effect or call
- * \ref IRtcEngine::playAllEffects "playAllEffects" to play all the preloaded
- * audio effects.
+ /**
+ * @brief Preloads a specified audio effect file into the memory.
*
+ * @details
+ * Ensure the size of all preloaded files does not exceed the limit.
+ * For the audio file formats supported by this method, see `What formats of audio files does the
+ * Agora RTC SDK support`.
+ * Call timing: Agora recommends that you call this method before joining a channel.
+ *
* @note
- * - To ensure smooth communication, limit the size of the audio effect file.
- * - Agora recommends calling this method before joining the channel.
- *
- * @param soundId The ID of the audio effect.
- * @param filePath The absolute path of the local audio effect file or the URL
- * of the online audio effect file. Supported audio formats: mp3, mp4, m4a, aac,
- * 3gp, mkv, and wav.
+ * - If preloadEffect is called before playEffect is executed, the file resource will not be closed after playEffect.
+ * The next time playEffect is executed, it will directly seek to play at the beginning.
+ * - If preloadEffect is not called before playEffect is executed, the resource will be destroyed after playEffect.
+ * The next time playEffect is executed, it will try to reopen the file and play it from the beginning.
+ *
+ * @param soundId The audio effect ID. The ID of each audio effect file is unique.
+ * @param filePath File path:
+ * - Android: The file path, which needs to be accurate to the file name and suffix. Agora supports
+ * URL addresses, absolute paths, or file paths that start with `/assets/`. You might encounter
+ * permission issues if you use an absolute path to access a local file, so Agora recommends using a
+ * URI address instead. For example:
+ * `content://com.android.providers.media.documents/document/audio%3A14441`
+ * - Windows: The absolute path or URL address (including the suffixes of the filename) of the audio
+ * effect file. For example: `C:\music\audio.mp4`.
+ * - iOS or macOS: The absolute path or URL address (including the suffixes of the filename) of the audio effect file. For example: `/var/mobile/Containers/Data/audio.mp4`.
+ * @param startPos The playback position (ms) of the audio effect file.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int preloadEffect(int soundId, const char* filePath, int startPos = 0) = 0;
- /** Plays a specified audio effect.
- *
- * After calling \ref IRtcEngine::preloadEffect "preloadEffect", you can call
- * this method to play the specified audio effect for all users in
- * the channel.
- *
- * This method plays only one specified audio effect each time it is called.
- * To play multiple audio effects, call this method multiple times.
- *
- * @note
- * - Agora recommends playing no more than three audio effects at the same time.
- * - The ID and file path of the audio effect in this method must be the same
- * as that in the \ref IRtcEngine::preloadEffect "preloadEffect" method.
- *
- * @param soundId The ID of the audio effect.
- * @param filePath The absolute path of the local audio effect file or the URL
- * of the online audio effect file. Supported audio formats: mp3, mp4, m4a, aac,
- * 3gp, mkv, and wav.
- * @param loopCount The number of times the audio effect loops:
- * - `-1`: Play the audio effect in an indefinite loop until
- * \ref IRtcEngine::stopEffect "stopEffect" or
- * \ref IRtcEngine::stopAllEffects "stopAllEffects"
- * - `0`: Play the audio effect once.
- * - `1`: Play the audio effect twice.
- * @param pitch The pitch of the audio effect. The value ranges between 0.5 and 2.0.
- * The default value is `1.0` (original pitch). The lower the value, the lower the pitch.
+ /**
+ * @brief Plays the specified local or online audio effect file.
+ *
+ * @details
+ * To play multiple audio effect files at the same time, call this method multiple times with
+ * different `soundId` and `filePath`. To achieve the optimal user experience, Agora recommends that
+ * you do not play more than three audio files at the same time.
+ * Call timing: You can call this method either before or after joining a channel.
+ * Related callbacks: After the playback of an audio effect file completes, the SDK triggers the
+ * `onAudioEffectFinished` callback.
+ *
+ * @note
+ * - If you need to play an online audio effect file, Agora recommends that you cache the online
+ * audio effect file to your local device, call `preloadEffect` to preload the file into memory, and
+ * then call this method to play the audio effect. Otherwise, you might encounter playback failures
+ * or no sound during playback due to loading timeouts or failures.
+ * - If preloadEffect is called before playEffect is executed, the file resource will not be closed after playEffect.
+ * The next time playEffect is executed, it will directly seek to play at the beginning.
+ * - If preloadEffect is not called before playEffect is executed, the resource will be destroyed after playEffect.
+ * The next time playEffect is executed, it will try to reopen the file and play it from the beginning.
+ *
+ * @param soundId The audio effect ID. The ID of each audio effect file is unique. Attention: If you
+ * have preloaded an audio effect into memory by calling `preloadEffect`, ensure that the value of
+ * this parameter is the same as that of `soundId` in `preloadEffect`.
+ * @param filePath The file path. The SDK supports URLs and absolute path of local files. The
+ * absolute path needs to be accurate to the file name and extension. Supported audio formats
+ * include MP3, AAC, M4A, MP4, WAV, and 3GP. See `Supported Audio Formats`.
+ * Attention: If you have preloaded an audio effect into memory by calling `preloadEffect`, ensure
+ * that the value of this parameter is the same as that of `filePath` in `preloadEffect`.
+ * @param loopCount The number of times the audio effect loops.
+ * - ≥ 0: The number of playback times. For example, 1 means looping one time, which means playing
+ * the audio effect two times in total.
+ * - -1: Play the audio file in an infinite loop.
+ * @param pitch The pitch of the audio effect. The value range is 0.5 to 2.0. The default value is
+ * 1.0, which means the original pitch. The lower the value, the lower the pitch.
* @param pan The spatial position of the audio effect. The value ranges between -1.0 and 1.0:
- * - `-1.0`: The audio effect displays to the left.
- * - `0.0`: The audio effect displays ahead.
- * - `1.0`: The audio effect displays to the right.
- * @param gain The volume of the audio effect. The value ranges between 0 and 100.
- * The default value is `100` (original volume). The lower the value, the lower
- * the volume of the audio effect.
- * @param publish Sets whether to publish the audio effect to the remote:
- * - true: Publish the audio effect to the remote.
- * - false: (Default) Do not publish the audio effect to the remote.
+ * - -1.0: The audio effect is heard on the left of the user.
+ * - 0.0: The audio effect is heard in front of the user.
+ * - 1.0: The audio effect is heard on the right of the user.
+ * @param gain The volume of the audio effect. The value range is 0.0 to 100.0. The default value is
+ * 100.0, which means the original volume. The smaller the value, the lower the volume.
+ * @param publish Whether to publish the audio effect to the remote users:
+ * - `true`: Publish the audio effect to the remote users. Both the local user and remote users can
+ * hear the audio effect.
+ * - `false`: Do not publish the audio effect to the remote users. Only the local user can hear the
+ * audio effect.
+ * @param startPos The playback position (ms) of the audio effect file.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int playEffect(int soundId, const char* filePath, int loopCount, double pitch, double pan, int gain, bool publish = false, int startPos = 0) = 0;
- /** Plays all audio effects.
+ /**
+ * @brief Plays all audio effect files.
*
- * After calling \ref IRtcEngine::preloadEffect "preloadEffect" multiple times
- * to preload multiple audio effects into the memory, you can call this
- * method to play all the specified audio effects for all users in
- * the channel.
+ * @details
+ * After calling `preloadEffect` multiple times to preload multiple audio effects into the memory,
+ * you can call this method to play all the specified audio effects for all users in the channel.
*
* @param loopCount The number of times the audio effect loops:
- * - `-1`: Play the audio effect in an indefinite loop until
- * \ref IRtcEngine::stopEffect "stopEffect" or
- * \ref IRtcEngine::stopAllEffects "stopAllEffects"
- * - `0`: Play the audio effect once.
- * - `1`: Play the audio effect twice.
- * @param pitch The pitch of the audio effect. The value ranges between 0.5 and 2.0.
- * The default value is `1.0` (original pitch). The lower the value, the lower the pitch.
+ * - -1: Play the audio effect files in an indefinite loop until you call `stopEffect` or
+ * `stopAllEffects`.
+ * - 0: Play the audio effect once.
+ * - 1: Play the audio effect twice.
+ * @param pitch The pitch of the audio effect. The value ranges between 0.5 and 2.0. The default
+ * value is 1.0 (original pitch). The lower the value, the lower the pitch.
* @param pan The spatial position of the audio effect. The value ranges between -1.0 and 1.0:
- * - `-1.0`: The audio effect displays to the left.
- * - `0.0`: The audio effect displays ahead.
- * - `1.0`: The audio effect displays to the right.
- * @param gain The volume of the audio effect. The value ranges between 0 and 100.
- * The default value is `100` (original volume). The lower the value, the lower
- * the volume of the audio effect.
- * @param publish Sets whether to publish the audio effect to the remote:
- * - true: Publish the audio effect to the remote.
- * - false: (Default) Do not publish the audio effect to the remote.
+ * - -1.0: The audio effect shows on the left.
+ * - 0.0: The audio effect shows ahead.
+ * - 1.0: The audio effect shows on the right.
+ * @param gain The volume of the audio effect. The value range is [0, 100]. The default value is 100
+ * (original volume). The smaller the value, the lower the volume.
+ * @param publish Whether to publish the audio effect to the remote users:
+ * - `true`: Publish the audio effect to the remote users. Both the local user and remote users can
+ * hear the audio effect.
+ * - `false`: (Default) Do not publish the audio effect to the remote users. Only the local user can
+ * hear the audio effect.
*
* @return
* - 0: Success.
@@ -5670,86 +7525,119 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int playAllEffects(int loopCount, double pitch, double pan, int gain, bool publish = false) = 0;
- /** Gets the volume of the specified audio effect.
+ /**
+ * @brief Gets the volume of a specified audio effect file.
*
- * @param soundId The ID of the audio effect.
+ * @param soundId The ID of the audio effect file.
*
* @return
- * - ≥ 0: The volume of the specified audio effect. The value ranges
- * between 0 and 100 (original volume).
+ * - ≥ 0: Returns the volume of the specified audio effect, if the method call is successful. The
+ * value ranges between 0 and 100. 100 represents the original volume.
* - < 0: Failure.
*/
virtual int getVolumeOfEffect(int soundId) = 0;
- /** Sets the volume of the specified audio effect.
+ /**
+ * @brief Sets the volume of a specified audio effect file.
*
- * @param soundId The ID of the audio effect.
- * @param volume The volume of the specified audio effect. The value ranges
- * between 0 and 100 (original volume).
+ * @details
+ * Call timing: Call this method after `playEffect`.
+ *
+ * @param soundId The ID of the audio effect. The unique ID of each audio effect file.
+ * @param volume The playback volume. The value range is [0, 100]. The default value is 100, which
+ * represents the original volume.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int setVolumeOfEffect(int soundId, int volume) = 0;
- /** Pauses playing the specified audio effect.
+ /**
+ * @brief Pauses a specified audio effect file.
*
- * @param soundId The ID of the audio effect.
+ * @param soundId The audio effect ID. The ID of each audio effect file is unique.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int pauseEffect(int soundId) = 0;
- /** Pauses playing audio effects.
+ /**
+ * @brief Pauses all audio effects.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int pauseAllEffects() = 0;
- /** Resumes playing the specified audio effect.
+ /**
+ * @brief Resumes playing a specified audio effect.
*
- * @param soundId The ID of the audio effect.
+ * @param soundId The audio effect ID. The ID of each audio effect file is unique.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int resumeEffect(int soundId) = 0;
- /** Resumes playing audio effects.
+ /**
+ * @brief Resumes playing all audio effect files.
+ *
+ * @details
+ * After you call `pauseAllEffects` to pause the playback, you can call this method to resume the
+ * playback.
+ * Call timing: Call this method after `pauseAllEffects`.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int resumeAllEffects() = 0;
- /** Stops playing the specified audio effect.
+ /**
+ * @brief Stops playing a specified audio effect.
+ *
+ * @details
+ * When you no longer need to play the audio effect, you can call this method to stop the playback.
+ * If you only need to pause the playback, call `pauseEffect`.
+ * Call timing: Call this method after `playEffect`.
*
- * @param soundId The ID of the audio effect.
+ * @param soundId The ID of the audio effect. Each audio effect has a unique ID.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int stopEffect(int soundId) = 0;
- /** Stops playing audio effects.
+ /**
+ * @brief Stops playing all audio effects.
+ *
+ * @details
+ * When you no longer need to play the audio effect, you can call this method to stop the playback.
+ * If you only need to pause the playback, call `pauseAllEffects`.
+ * Call timing: Call this method after `playEffect`.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int stopAllEffects() = 0;
- /** Releases the specified preloaded audio effect from the memory.
+ /**
+ * @brief Releases a specified preloaded audio effect from the memory.
+ *
+ * @details
+ * After loading the audio effect file into memory using `preloadEffect`, if you need to release the
+ * audio effect file, call this method.
+ * Call timing: You can call this method either before or after joining a channel.
*
- * @param soundId The ID of the audio effect.
+ * @param soundId The ID of the audio effect. Each audio effect has a unique ID.
*
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int unloadEffect(int soundId) = 0;
- /** Releases preloaded audio effects from the memory.
+ /**
+ * @brief Releases all preloaded audio effects from the memory.
*
* @return
* - 0: Success.
@@ -5757,146 +7645,176 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int unloadAllEffects() = 0;
/**
- * Gets the duration of the audio effect file.
- * @note
- * - Call this method after joining a channel.
- * - For the audio file formats supported by this method, see [What formats of audio files does the Agora RTC SDK support](https://docs.agora.io/en/faq/audio_format).
+ * @brief Retrieves the duration of the audio effect file.
+ *
+ * @note Call this method after joining a channel.
*
- * @param filePath The absolute path or URL address (including the filename extensions)
- * of the music file. For example: `C:\music\audio.mp4`.
- * When you access a local file on Android, Agora recommends passing a URI address or the path starts
- * with `/assets/` in this parameter.
+ * @param filePath File path:
+ * - Android: The file path, which needs to be accurate to the file name and suffix. Agora supports
+ * URL addresses, absolute paths, or file paths that start with `/assets/`. You might encounter
+ * permission issues if you use an absolute path to access a local file, so Agora recommends using a
+ * URI address instead. For example:
+ * `content://com.android.providers.media.documents/document/audio%3A14441`
+ * - Windows: The absolute path or URL address (including the suffixes of the filename) of the audio
+ * effect file. For example: `C:\music\audio.mp4`.
+ * - iOS or macOS: The absolute path or URL address (including the suffixes of the filename) of the audio effect file. For example: `/var/mobile/Containers/Data/audio.mp4`.
*
* @return
- * - ≥ 0: A successful method call. Returns the total duration (ms) of
- * the specified audio effect file.
+ * - The total duration (ms) of the specified audio effect file, if the method call succeeds.
* - < 0: Failure.
- * - `-22(ERR_RESOURCE_LIMITED)`: Cannot find the audio effect file. Please
- * set a correct `filePath`.
*/
virtual int getEffectDuration(const char* filePath) = 0;
/**
- * Sets the playback position of an audio effect file.
+ * @brief Sets the playback position of an audio effect file.
+ *
+ * @details
* After a successful setting, the local audio effect file starts playing at the specified position.
*
- * @note Call this method after \ref IRtcEngine::playEffect(int,const char*,int,double,double,int,bool,int) "playEffect" .
+ * @note Call this method after `playEffect`.
*
- * @param soundId Audio effect ID. Ensure that this parameter is set to the
- * same value as in \ref IRtcEngine::playEffect(int,const char*,int,double,double,int,bool,int) "playEffect" .
+ * @param soundId The audio effect ID. The ID of each audio effect file is unique.
* @param pos The playback position (ms) of the audio effect file.
*
* @return
* - 0: Success.
* - < 0: Failure.
- * - `-22(ERR_RESOURCE_LIMITED)`: Cannot find the audio effect file. Please
- * set a correct `soundId`.
*/
virtual int setEffectPosition(int soundId, int pos) = 0;
/**
- * Gets the playback position of the audio effect file.
- * @note Call this method after \ref IRtcEngine::playEffect(int,const char*,int,double,double,int,bool,int) "playEffect" .
+ * @brief Retrieves the playback position of the audio effect file.
*
- * @param soundId Audio effect ID. Ensure that this parameter is set to the
- * same value as in \ref IRtcEngine::playEffect(int,const char*,int,double,double,int,bool,int) "playEffect" .
+ * @note Call this method after `playEffect`.
+ *
+ * @param soundId The audio effect ID. The ID of each audio effect file is unique.
*
* @return
- * - ≥ 0: A successful method call. Returns the playback position (ms) of
- * the specified audio effect file.
+ * - The playback position (ms) of the specified audio effect file, if the method call succeeds.
* - < 0: Failure.
- * - `-22(ERR_RESOURCE_LIMITED)`: Cannot find the audio effect file. Please
- * set a correct `soundId`.
*/
virtual int getEffectCurrentPosition(int soundId) = 0;
- /** Enables/Disables stereo panning for remote users.
-
- Ensure that you call this method before joinChannel to enable stereo panning for remote users so that the local user can track the position of a remote user by calling \ref agora::rtc::IRtcEngine::setRemoteVoicePosition "setRemoteVoicePosition".
-
- @param enabled Sets whether or not to enable stereo panning for remote users:
- - true: enables stereo panning.
- - false: disables stereo panning.
-
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Enables or disables stereo panning for remote users.
+ *
+ * @details
+ * Ensure that you call this method before joining a channel to enable stereo panning for remote
+ * users so that the local user can track the position of a remote user by calling
+ * `setRemoteVoicePosition`.
+ *
+ * @param enabled Whether to enable stereo panning for remote users:
+ * - `true`: Enable stereo panning.
+ * - `false`: Disable stereo panning.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int enableSoundPositionIndication(bool enabled) = 0;
- /** Sets the sound position and gain of a remote user.
-
- When the local user calls this method to set the sound position of a remote user, the sound difference between the left and right channels allows the local user to track the real-time position of the remote user, creating a real sense of space. This method applies to massively multiplayer online games, such as Battle Royale games.
-
- @note
- - For this method to work, enable stereo panning for remote users by calling the \ref agora::rtc::IRtcEngine::enableSoundPositionIndication "enableSoundPositionIndication" method before joining a channel.
- - This method requires hardware support. For the best sound positioning, we recommend using a wired headset.
- - Ensure that you call this method after joining a channel.
-
- @param uid The ID of the remote user.
- @param pan The sound position of the remote user. The value ranges from -1.0 to 1.0:
- - 0.0: the remote sound comes from the front.
- - -1.0: the remote sound comes from the left.
- - 1.0: the remote sound comes from the right.
- @param gain Gain of the remote user. The value ranges from 0.0 to 100.0. The default value is 100.0 (the original gain of the remote user). The smaller the value, the less the gain.
-
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Sets the 2D position (the position on the horizontal plane) of the remote user's voice.
+ *
+ * @details
+ * This method sets the 2D position and volume of a remote user, so that the local user can easily
+ * hear and identify the remote user's position.
+ * When the local user calls this method to set the voice position of a remote user, the voice
+ * difference between the left and right channels allows the local user to track the real-time
+ * position of the remote user, creating a sense of space. This method applies to massively
+ * multiplayer online games, such as Battle Royale games.
+ *
+ * @note
+ * - For this method to work, enable stereo panning for remote users by calling the
+ * `enableSoundPositionIndication` method before joining a channel.
+ * - For the best voice positioning, Agora recommends using a wired headset.
+ * - Call this method after joining a channel.
+ *
+ * @param uid The user ID of the remote user.
+ * @param pan The voice position of the remote user. The value ranges from -1.0 to 1.0:
+ * - 0.0: (Default) The remote voice comes from the front.
+ * - -1.0: The remote voice comes from the left.
+ * - 1.0: The remote voice comes from the right.
+ * @param gain The volume of the remote user. The value ranges from 0.0 to 100.0. The default value
+ * is 100.0 (the original volume of the remote user). The smaller the value, the lower the volume.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setRemoteVoicePosition(uid_t uid, double pan, double gain) = 0;
- /** enable spatial audio
-
- @param enabled enable/disable spatial audio:
- - true: enable spatial audio.
- - false: disable spatial audio.
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Enables or disables the spatial audio effect.
+ *
+ * @details
+ * After enabling the spatial audio effect, you can call `setRemoteUserSpatialAudioParams` to set
+ * the spatial audio effect parameters of the remote user.
+ *
+ * @note
+ * - You can call this method either before or after joining a channel.
+ * - This method relies on the spatial audio dynamic library `libagora_spatial_audio_extension.dll`.
+ * If the dynamic library is deleted, the function cannot be enabled normally.
+ *
+ * @param enabled Whether to enable the spatial audio effect:
+ * - `true`: Enable the spatial audio effect.
+ * - `false`: Disable the spatial audio effect.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int enableSpatialAudio(bool enabled) = 0;
- /** Sets remote user parameters for spatial audio
-
- @param uid The ID of the remote user.
- @param param spatial audio parameters: SpatialAudioParams.
-
- @return int
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Sets the spatial audio effect parameters of the remote user.
+ *
+ * @details
+ * Call this method after `enableSpatialAudio`. After successfully setting the spatial audio effect
+ * parameters of the remote user, the local user can hear the remote user with a sense of space.
+ *
+ * @param uid The user ID. This parameter must be the same as the user ID passed in when the user
+ * joined the channel.
+ * @param params The spatial audio parameters. See `SpatialAudioParams`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setRemoteUserSpatialAudioParams(uid_t uid, const agora::SpatialAudioParams& params) = 0;
- /** Sets an SDK preset voice beautifier effect.
- *
- * Call this method to set an SDK preset voice beautifier effect for the local user who sends an
- * audio stream. After setting a voice beautifier effect, all users in the channel can hear the
- * effect.
- *
- * You can set different voice beautifier effects for different scenarios. See *Set the Voice
- * Beautifier and Audio Effects*.
+ /**
+ * @brief Sets a preset voice beautifier effect.
*
- * To achieve better audio effect quality, Agora recommends calling \ref
- * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `scenario` parameter to
- * `AUDIO_SCENARIO_GAME_STREAMING(3)` and the `profile` parameter to
- * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before
- * calling this method.
+ * @details
+ * Call this method to set a preset voice beautifier effect for the local user who sends an audio
+ * stream. After setting a voice beautifier effect, all users in the channel can hear the effect.
+ * You can set different voice beautifier effects for different scenarios.
+ * Call timing: This method can be called either before or after joining the channel.
+ * To achieve better vocal effects, it is recommended that you call the following APIs before
+ * calling this method:
+ * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely
+ * `AUDIO_SCENARIO_GAME_STREAMING` (3).
+ * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to
+ * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5).
*
* @note
- * - You can call this method either before or after joining a channel.
- * - Do not set the `profile` parameter of \ref IRtcEngine::setAudioProfile "setAudioProfile" to
- * `AUDIO_PROFILE_SPEECH_STANDARD(1)` or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call
- * fails.
- * - This method works best with the human voice. Agora does not recommend using this method for
- * audio containing music.
- * - After calling this method, Agora recommends not calling the following methods, because they
- * can override \ref IRtcEngine::setAudioEffectParameters "setAudioEffectParameters":
- * - \ref IRtcEngine::setAudioEffectPreset "setAudioEffectPreset"
- * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset"
- * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch"
- * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization"
- * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb"
- * - \ref IRtcEngine::setVoiceBeautifierParameters "setVoiceBeautifierParameters"
+ * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to
+ * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take
+ * effect.
+ * - This method has the best effect on human voice processing, and Agora does not recommend calling
+ * this method to process audio data containing music.
+ * - After calling `setVoiceBeautifierPreset`, Agora does not recommend calling the following
+ * methods, otherwise the effect set by `setVoiceBeautifierPreset` will be overwritten:
+ * - `setAudioEffectPreset`
+ * - `setAudioEffectParameters`
+ * - `setLocalVoicePitch`
+ * - `setLocalVoiceEqualization`
+ * - `setLocalVoiceReverb`
+ * - `setVoiceBeautifierParameters`
+ * - `setVoiceConversionPreset`
+ * - This method relies on the voice beautifier dynamic library
+ * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
*
- * @param preset The options for SDK preset voice beautifier effects: #VOICE_BEAUTIFIER_PRESET.
+ * @param preset The preset voice beautifier effect options: `VOICE_BEAUTIFIER_PRESET`.
*
* @return
* - 0: Success.
@@ -5904,38 +7822,41 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int setVoiceBeautifierPreset(VOICE_BEAUTIFIER_PRESET preset) = 0;
- /** Sets an SDK preset audio effect.
- *
- * Call this method to set an SDK preset audio effect for the local user who sends an audio
- * stream. This audio effect does not change the gender characteristics of the original voice.
- * After setting an audio effect, all users in the channel can hear the effect.
- *
- * You can set different audio effects for different scenarios. See *Set the Voice Beautifier and
- * Audio Effects*.
+ /**
+ * @brief Sets an SDK preset audio effect.
*
- * To achieve better audio effect quality, Agora recommends calling \ref
- * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `scenario` parameter to
- * `AUDIO_SCENARIO_GAME_STREAMING(3)` before calling this method.
+ * @details
+ * Call this method to set an SDK preset audio effect for the local user who sends an audio stream.
+ * This audio effect does not change the gender characteristics of the original voice. After setting
+ * an audio effect, all users in the channel can hear the effect.
+ * Call timing: This method can be called either before or after joining the channel.
+ * To achieve better vocal effects, it is recommended that you call the following APIs before
+ * calling this method:
+ * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely
+ * `AUDIO_SCENARIO_GAME_STREAMING` (3).
+ * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to
+ * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5).
*
* @note
- * - You can call this method either before or after joining a channel.
- * - Do not set the profile `parameter` of `setAudioProfile` to `AUDIO_PROFILE_SPEECH_STANDARD(1)`
- * or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call fails.
- * - This method works best with the human voice. Agora does not recommend using this method for
- * audio containing music.
- * - If you call this method and set the `preset` parameter to enumerators except
- * `ROOM_ACOUSTICS_3D_VOICE` or `PITCH_CORRECTION`, do not call \ref
- * IRtcEngine::setAudioEffectParameters "setAudioEffectParameters"; otherwise,
- * `setAudioEffectParameters` overrides this method.
- * - After calling this method, Agora recommends not calling the following methods, because they
- * can override `setAudioEffectPreset`:
- * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset"
- * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch"
- * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization"
- * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb"
- * - \ref IRtcEngine::setVoiceBeautifierParameters "setVoiceBeautifierParameters"
+ * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to
+ * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take
+ * effect.
+ * - If you call `setAudioEffectPreset` and set enumerators except for `ROOM_ACOUSTICS_3D_VOICE` or
+ * `PITCH_CORRECTION`, do not call `setAudioEffectParameters`; otherwise, `setAudioEffectPreset` is
+ * overridden.
+ * - After calling `setAudioEffectPreset`, Agora does not recommend you to call the following
+ * methods, otherwise the effect set by `setAudioEffectPreset` will be overwritten:
+ * - `setVoiceBeautifierPreset`
+ * - `setLocalVoicePitch`
+ * - `setLocalVoiceEqualization`
+ * - `setLocalVoiceReverb`
+ * - `setVoiceBeautifierParameters`
+ * - `setVoiceConversionPreset`
+ * - This method relies on the voice beautifier dynamic library
+ * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
*
- * @param preset The options for SDK preset audio effects. See #AUDIO_EFFECT_PRESET.
+ * @param preset The options for SDK preset audio effects. See `AUDIO_EFFECT_PRESET`.
*
* @return
* - 0: Success.
@@ -5943,37 +7864,43 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int setAudioEffectPreset(AUDIO_EFFECT_PRESET preset) = 0;
- /** Sets an SDK preset voice conversion.
- *
- * Call this method to set an SDK preset voice conversion for the local user who sends an audio
- * stream. After setting an voice conversion, all users in the channel can hear the effect.
- *
- * You can set different voice conversion for different scenarios. See *Set the Voice Beautifier and
- * Audio Effects*.
+ /**
+ * @brief Sets a preset voice conversion effect.
*
- * To achieve better voice conversion quality, Agora recommends calling \ref
- * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `scenario` parameter to
- * `AUDIO_SCENARIO_GAME_STREAMING(3)` before calling this method.
+ * @details
+ * Call this method to set a preset voice changing effect for the local user who publishes an audio
+ * stream in a channel. After setting the voice changing effect, all users in the channel can hear
+ * the effect. You can set different voice changing effects for the user depending on different
+ * scenarios.
+ * Call timing: This method can be called either before or after joining the channel.
+ * To achieve better vocal effects, it is recommended that you call the following APIs before
+ * calling this method:
+ * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely
+ * `AUDIO_SCENARIO_GAME_STREAMING` (3).
+ * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to
+ * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5).
*
* @note
- * - You can call this method either before or after joining a channel.
- * - Do not set the profile `parameter` of `setAudioProfile` to `AUDIO_PROFILE_SPEECH_STANDARD(1)`
- * or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call fails.
- * - This method works best with the human voice. Agora does not recommend using this method for
- * audio containing music.
- * - If you call this method and set the `preset` parameter to enumerators,
- * - After calling this method, Agora recommends not calling the following methods, because they
- * can override `setVoiceConversionPreset`:
- * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset"
- * - \ref IRtcEngine::setAudioEffectPreset "setAudioEffectPreset"
- * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch"
- * - \ref IRtcEngine::setLocalVoiceFormant "setLocalVoiceFormant"
- * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization"
- * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb"
- * - \ref IRtcEngine::setVoiceBeautifierParameters "setVoiceBeautifierParameters"
- * - \ref IRtcEngine::setAudioEffectParameters "setAudioEffectParameters"
+ * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to
+ * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take
+ * effect.
+ * - This method has the best effect on human voice processing, and Agora does not recommend calling
+ * this method to process audio data containing music.
+ * - After calling `setVoiceConversionPreset`, Agora does not recommend you to call the following
+ * methods, otherwise the effect set by `setVoiceConversionPreset` will be overwritten:
+ * - `setAudioEffectPreset`
+ * - `setAudioEffectParameters`
+ * - `setVoiceBeautifierPreset`
+ * - `setVoiceBeautifierParameters`
+ * - `setLocalVoicePitch`
+ * - `setLocalVoiceFormant`
+ * - `setLocalVoiceEqualization`
+ * - `setLocalVoiceReverb`
+ * - This method relies on the voice beautifier dynamic library
+ * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
*
- * @param preset The options for SDK preset voice conversion. See #VOICE_CONVERSION_PRESET.
+ * @param preset The options for the preset voice conversion effects: `VOICE_CONVERSION_PRESET`.
*
* @return
* - 0: Success.
@@ -5981,76 +7908,75 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int setVoiceConversionPreset(VOICE_CONVERSION_PRESET preset) = 0;
- /** Sets parameters for SDK preset audio effects.
+ /**
+ * @brief Sets parameters for SDK preset audio effects.
*
- * Call this method to set the following parameters for the local user who send an audio stream:
+ * @details
+ * Call this method to set the following parameters for the local user who sends an audio stream:
* - 3D voice effect: Sets the cycle period of the 3D voice effect.
* - Pitch correction effect: Sets the basic mode and tonic pitch of the pitch correction effect.
* Different songs have different modes and tonic pitches. Agora recommends bounding this method
* with interface elements to enable users to adjust the pitch correction interactively.
- *
- * After setting parameters, all users in the channel can hear the relevant effect.
- *
- * You can call this method directly or after \ref IRtcEngine::setAudioEffectPreset
- * "setAudioEffectPreset". If you call this method after \ref IRtcEngine::setAudioEffectPreset
- * "setAudioEffectPreset", ensure that you set the preset parameter of `setAudioEffectPreset` to
- * `ROOM_ACOUSTICS_3D_VOICE` or `PITCH_CORRECTION` and then call this method to set the same
- * enumerator; otherwise, this method overrides `setAudioEffectPreset`.
+ * After setting the audio parameters, all users in the channel can hear the effect.
+ * To achieve better vocal effects, it is recommended that you call the following APIs before
+ * calling this method:
+ * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely
+ * `AUDIO_SCENARIO_GAME_STREAMING` (3).
+ * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to
+ * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5).
*
* @note
* - You can call this method either before or after joining a channel.
- * - To achieve better audio effect quality, Agora recommends calling \ref
- * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `scenario` parameter to
- * `AUDIO_SCENARIO_GAME_STREAMING(3)` before calling this method.
- * - Do not set the `profile` parameter of \ref IRtcEngine::setAudioProfile "setAudioProfile" to
- * `AUDIO_PROFILE_SPEECH_STANDARD(1)` or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call
- * fails.
- * - This method works best with the human voice. Agora does not recommend using this method for
- * audio containing music.
- * - After calling this method, Agora recommends not calling the following methods, because they
- * can override `setAudioEffectParameters`:
- * - \ref IRtcEngine::setAudioEffectPreset "setAudioEffectPreset"
- * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset"
- * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch"
- * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization"
- * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb"
- * - \ref IRtcEngine::setVoiceBeautifierParameters "setVoiceBeautifierParameters"
+ * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to
+ * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take
+ * effect.
+ * - This method has the best effect on human voice processing, and Agora does not recommend calling
+ * this method to process audio data containing music.
+ * - After calling `setAudioEffectParameters`, Agora does not recommend you to call the following
+ * methods, otherwise the effect set by `setAudioEffectParameters` will be overwritten:
+ * - `setAudioEffectPreset`
+ * - `setVoiceBeautifierPreset`
+ * - `setLocalVoicePitch`
+ * - `setLocalVoiceEqualization`
+ * - `setLocalVoiceReverb`
+ * - `setVoiceBeautifierParameters`
+ * - `setVoiceConversionPreset`
+ * - This method relies on the voice beautifier dynamic library
+ * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ *
* @param preset The options for SDK preset audio effects:
- * - 3D voice effect: `ROOM_ACOUSTICS_3D_VOICE`.
- * - Call \ref IRtcEngine::setAudioProfile "setAudioProfile" and set the `profile` parameter to
- * `AUDIO_PROFILE_MUSIC_STANDARD_STEREO(3)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before
+ * - `ROOM_ACOUSTICS_3D_VOICE`, 3D voice effect:
+ * - You need to set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to
+ * `AUDIO_PROFILE_MUSIC_STANDARD_STEREO` (3) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5) before
* setting this enumerator; otherwise, the enumerator setting does not take effect.
- * - If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear
+ * - If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear
* the anticipated voice effect.
- * - Pitch correction effect: `PITCH_CORRECTION`. To achieve better audio effect quality, Agora
- * recommends calling \ref IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile`
- * parameter to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or
- * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before setting this enumerator.
- * @param param1
- * - If you set `preset` to `ROOM_ACOUSTICS_3D_VOICE`, the `param1` sets the cycle period of the
- * 3D voice effect. The value range is [1,60] and the unit is a second. The default value is 10
- * seconds, indicating that the voice moves around you every 10 seconds.
- * - If you set `preset` to `PITCH_CORRECTION`, `param1` sets the basic mode of the pitch
+ * - `PITCH_CORRECTION`: Pitch correction effect.
+ * @param param1 - If you set `preset` to `ROOM_ACOUSTICS_3D_VOICE`, `param1` sets the cycle period
+ * of the 3D voice effect. The value range is [1,60] and the unit is seconds. The default value is
+ * 10, indicating that the voice moves around you every 10 seconds.
+ * - If you set `preset` to `PITCH_CORRECTION`, `param1` indicates the basic mode of the pitch
* correction effect:
- * - `1`: (Default) Natural major scale.
- * - `2`: Natural minor scale.
- * - `3`: Japanese pentatonic scale.
- * @param param2
- * - If you set `preset` to `ROOM_ACOUSTICS_3D_VOICE`, you need to set `param2` to `0`.
- * - If you set `preset` to `PITCH_CORRECTION`, `param2` sets the tonic pitch of the pitch
+ * - `1`: (Default) Natural major scale.
+ * - `2`: Natural minor scale.
+ * - `3`: Japanese pentatonic scale.
+ * @param param2 - If you set `preset` to `ROOM_ACOUSTICS_3D_VOICE`, you need to set `param2` to
+ * `0`.
+ * - If you set `preset` to `PITCH_CORRECTION`, `param2` indicates the tonic pitch of the pitch
* correction effect:
- * - `1`: A
- * - `2`: A#
- * - `3`: B
- * - `4`: (Default) C
- * - `5`: C#
- * - `6`: D
- * - `7`: D#
- * - `8`: E
- * - `9`: F
- * - `10`: F#
- * - `11`: G
- * - `12`: G#
+ * - `1`: A
+ * - `2`: A#
+ * - `3`: B
+ * - `4`: (Default) C
+ * - `5`: C#
+ * - `6`: D
+ * - `7`: D#
+ * - `8`: E
+ * - `9`: F
+ * - `10`: F#
+ * - `11`: G
+ * - `12`: G#
*
* @return
* - 0: Success.
@@ -6058,40 +7984,46 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int setAudioEffectParameters(AUDIO_EFFECT_PRESET preset, int param1, int param2) = 0;
- /** Sets parameters for SDK preset voice beautifier effects.
+ /**
+ * @brief Sets parameters for the preset voice beautifier effects.
*
+ * @details
* Call this method to set a gender characteristic and a reverberation effect for the singing
* beautifier effect. This method sets parameters for the local user who sends an audio stream.
- *
- * After you call this method successfully, all users in the channel can hear the relevant effect.
- *
- * To achieve better audio effect quality, before you call this method, Agora recommends calling
- * \ref IRtcEngine::setAudioProfile "setAudioProfile", and setting the `scenario` parameter as
- * `AUDIO_SCENARIO_GAME_STREAMING(3)` and the `profile` parameter as
- * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)`.
+ * After setting the audio parameters, all users in the channel can hear the effect.
+ * To achieve better vocal effects, it is recommended that you call the following APIs before
+ * calling this method:
+ * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely
+ * `AUDIO_SCENARIO_GAME_STREAMING` (3).
+ * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to
+ * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5).
*
* @note
* - You can call this method either before or after joining a channel.
- * - Do not set the `profile` parameter of \ref IRtcEngine::setAudioProfile "setAudioProfile" as
- * `AUDIO_PROFILE_SPEECH_STANDARD(1)` or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call does
- * not take effect.
- * - This method works best with the human voice. Agora does not recommend using this method for
- * audio containing music.
- * - After you call this method, Agora recommends not calling the following methods, because they
- * can override `setVoiceBeautifierParameters`:
- * - \ref IRtcEngine::setAudioEffectPreset "setAudioEffectPreset"
- * - \ref IRtcEngine::setAudioEffectParameters "setAudioEffectParameters"
- * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset"
- * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch"
- * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization"
- * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb"
- *
- * @param preset The options for SDK preset voice beautifier effects:
- * - `SINGING_BEAUTIFIER`: Singing beautifier effect.
+ * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to
+ * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take
+ * effect.
+ * - This method has the best effect on human voice processing, and Agora does not recommend calling
+ * this method to process audio data containing music.
+ * - After calling `setVoiceBeautifierParameters`, Agora does not recommend calling the following
+ * methods, otherwise the effect set by `setVoiceBeautifierParameters` will be overwritten:
+ * - `setAudioEffectPreset`
+ * - `setAudioEffectParameters`
+ * - `setVoiceBeautifierPreset`
+ * - `setLocalVoicePitch`
+ * - `setLocalVoiceEqualization`
+ * - `setLocalVoiceReverb`
+ * - `setVoiceConversionPreset`
+ * - This method relies on the voice beautifier dynamic library
+ * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ *
+ * @param preset The option for the preset audio effect:
+ * - `SINGING_BEAUTIFIER`: The singing beautifier effect.
* @param param1 The gender characteristics options for the singing voice:
* - `1`: A male-sounding voice.
* - `2`: A female-sounding voice.
- * @param param2 The reverberation effects options:
+ * @param param2 The reverberation effect options for the singing voice:
* - `1`: The reverberation effect sounds like singing in a small room.
* - `2`: The reverberation effect sounds like singing in a large room.
* - `3`: The reverberation effect sounds like singing in a hall.
@@ -6118,99 +8050,149 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setVoiceConversionParameters(VOICE_CONVERSION_PRESET preset,
int param1, int param2) = 0;
- /** Changes the voice pitch of the local speaker.
-
- @param pitch The voice pitch. The value ranges between 0.5 and 2.0. The lower
- the value, the lower the voice pitch. The default value is 1.0 (no change to
- the local voice pitch).
-
- @return
- - 0: Success.
- - -1: Failure.
- */
+ /**
+ * @brief Changes the voice pitch of the local speaker.
+ *
+ * @details
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @param pitch The local voice pitch. The value range is [0.5,2.0]. The lower the value, the lower
+ * the pitch. The default value is 1.0 (no change to the pitch).
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int setLocalVoicePitch(double pitch) = 0;
- /** Changes the voice formant ratio for local speaker.
-
- @param formantRatio The voice formant ratio. The value ranges between -1.0 and 1.0.
- The lower the value, the deeper the sound, and the higher the value, the more it
- sounds like a child. The default value is 0.0 (the local user's voice will not be changed).
-
- @return
- - 0: Success.
- - -1: Failure.
- */
+ /**
+ * @brief Sets the formant ratio to change the timbre of human voice.
+ *
+ * @details
+ * Formant ratio affects the timbre of voice. The smaller the value, the deeper the sound will be,
+ * and the larger, the sharper. After you set the formant ratio, all users in the channel can hear
+ * the changed voice. If you want to change the timbre and pitch of voice at the same time, Agora
+ * recommends using this method together with `setLocalVoicePitch`.
+ * Applicable scenarios: You can call this method to set the formant ratio of local audio to change
+ * the timbre of human voice.
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @param formantRatio The formant ratio. The value range is [-1.0, 1.0]. The default value is 0.0,
+ * which means do not change the timbre of the voice. Note: Agora recommends setting this value
+ * within the range of [-0.4, 0.6]. Otherwise, the voice may be seriously distorted.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int setLocalVoiceFormant(double formantRatio) = 0;
- /** Sets the local voice equalization effect.
-
- @param bandFrequency The band frequency ranging from 0 to 9, representing the
- respective 10-band center frequencies of the voice effects, including 31, 62,
- 125, 500, 1k, 2k, 4k, 8k, and 16k Hz.
- @param bandGain Gain of each band in dB. The value ranges from -15 to 15. The
- default value is 0.
- @return
- - 0: Success.
- - -1: Failure.
- */
+ /**
+ * @brief Sets the local voice equalization effect.
+ *
+ * @details
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @param bandFrequency The band frequency. The value ranges between 0 and 9; representing the
+ * respective 10-band center frequencies of the voice effects, including 31, 62, 125, 250, 500, 1k,
+ * 2k, 4k, 8k, and 16k Hz. See `AUDIO_EQUALIZATION_BAND_FREQUENCY`.
+ * @param bandGain The gain of each band in dB. The value ranges between -15 and 15. The default
+ * value is 0.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int setLocalVoiceEqualization(AUDIO_EQUALIZATION_BAND_FREQUENCY bandFrequency, int bandGain) = 0;
- /** Sets the local voice reverberation.
-
- @param reverbKey The reverberation key: #AUDIO_REVERB_TYPE.
- @param value The value of the reverberation key: #AUDIO_REVERB_TYPE.
- @return
- - 0: Success.
- - -1: Failure.
- */
+ /**
+ * @brief Sets the local voice reverberation.
+ *
+ * @details
+ * The SDK provides an easier-to-use method, `setAudioEffectPreset`, to directly implement preset
+ * reverb effects for such as pop, R&B, and KTV.
+ *
+ * @note You can call this method either before or after joining a channel.
+ *
+ * @param reverbKey The reverberation key. Agora provides five reverberation keys, see
+ * `AUDIO_REVERB_TYPE`.
+ * @param value The value of the reverberation key.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int setLocalVoiceReverb(AUDIO_REVERB_TYPE reverbKey, int value) = 0;
- /** Sets preset audio playback effect for remote headphones after remote audio is mixed.
-
- @param preset The preset key: #HEADPHONE_EQUALIZER_PRESET.
- - HEADPHONE_EQUALIZER_OFF = 0x00000000 : Turn off the eualizer effect for headphones.
- - HEADPHONE_EQUALIZER_OVEREAR = 0x04000001 : For over-ear headphones only.
- - HEADPHONE_EQUALIZER_INEAR = 0x04000002 : For in-ear headphones only.
- @return
- - 0: Success.
- - < 0: Failure.
- - -1(ERR_FAILED): A general error occurs (no specified reason).
- */
+ /**
+ * @brief Sets the preset headphone equalization effect.
+ *
+ * @details
+ * This method is mainly used in spatial audio effect scenarios. You can select the preset headphone
+ * equalizer to listen to the audio to achieve the expected audio experience.
+ *
+ * @note If the headphones you use already have a good equalization effect, you may not get a
+ * significant improvement when you call this method, and could even diminish the experience.
+ *
+ * @param preset The preset headphone equalization effect. See `HEADPHONE_EQUALIZER_PRESET`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ */
virtual int setHeadphoneEQPreset(HEADPHONE_EQUALIZER_PRESET preset) = 0;
- /** Sets the parameters of audio playback effect for remote headphones after remote audio is mixed.
-
- @param lowGain The higher the parameter value, the deeper the sound. The value range is [-10,10].
- @param highGain The higher the parameter value, the sharper the sound. The value range is [-10,10].
- @return
- - 0: Success.
- - < 0: Failure.
- - -1(ERR_FAILED): A general error occurs (no specified reason).
- */
+ /**
+ * @brief Sets the low- and high-frequency parameters of the headphone equalizer.
+ *
+ * @details
+ * In a spatial audio effect scenario, if the preset headphone equalization effect is not achieved
+ * after calling the `setHeadphoneEQPreset` method, you can further adjust the headphone
+ * equalization effect by calling this method.
+ *
+ * @param lowGain The low-frequency parameters of the headphone equalizer. The value range is
+ * [-10,10]. The larger the value, the deeper the sound.
+ * @param highGain The high-frequency parameters of the headphone equalizer. The value range is
+ * [-10,10]. The larger the value, the sharper the sound.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ */
virtual int setHeadphoneEQParameters(int lowGain, int highGain) = 0;
- /** Enables or disables the voice AI tuner.
+ /**
+ * @brief Enables or disables the voice AI tuner.
+ *
+ * @details
+ * The voice AI tuner supports enhancing sound quality and adjusting tone style.
+ * Applicable scenarios: Social entertainment scenes including online KTV, online podcast and live
+ * streaming in showrooms, where high sound quality is required.
+ * Call timing: This method can be called either before or after joining the channel.
*
- * @param enabled Determines whether to enable the voice AI tuner:
- * - true: Enable the voice AI tuner
- * - false: (default) Disable the voice AI tuner.
+ * @param enabled Whether to enable the voice AI tuner:
+ * - `true`: Enables the voice AI tuner.
+ * - `false`: (Default) Disables the voice AI tuner.
+ * @param type Voice AI tuner sound types, see `VOICE_AI_TUNER_TYPE`.
*
- * @param type. The options for SDK voice AI tuner types. See #VOICE_AI_TUNER_TYPE.
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int enableVoiceAITuner(bool enabled, VOICE_AI_TUNER_TYPE type) = 0;
- /** **DEPRECATED** Specifies an SDK output log file.
+ /**
+ * @brief Sets the log file.
*
- * The log file records all log data for the SDK's operation. Ensure that the
- * directory for the log file exists and is writable.
+ * @details
+ * Specifies an SDK output log file. The log file records all log data for the SDK’s operation.
+ * Call timing: This method needs to be called immediately after `initialize`, otherwise the output
+ * log may be incomplete.
*
- * @note
- * Ensure that you call this method immediately after \ref initialize "initialize",
- * or the output log may not be complete.
+ * @note Ensure that the directory for the log file exists and is writable.
*
- * @param filePath File path of the log file. The string of the log file is in UTF-8.
+ * @param filePath The complete path of the log files. These log files are encoded in UTF-8.
*
* @return
* - 0: Success.
@@ -6219,22 +8201,17 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setLogFile(const char* filePath) = 0;
/**
- * Sets the output log filter level of the SDK.
+ * @brief Sets the log output level of the SDK.
*
- * You can use one or a combination of the filters. The log filter level follows the
- * sequence of `OFF`, `CRITICAL`, `ERROR`, `WARNING`, `INFO`, and `DEBUG`. Choose a filter level
- * and you will see logs preceding that filter level. For example, if you set the log filter level to
- * `WARNING`, you see the logs within levels `CRITICAL`, `ERROR`, and `WARNING`.
+ * @details
+ * This method sets the output log level of the SDK. You can use one or a combination of the log
+ * filter levels. The log level follows the sequence of `LOG_FILTER_OFF`, `LOG_FILTER_CRITICAL`,
+ * `LOG_FILTER_ERROR`, `LOG_FILTER_WARN`, `LOG_FILTER_INFO`, and `LOG_FILTER_DEBUG`. Choose a level
+ * to see the logs preceding that level.
+ * If, for example, you set the log level to `LOG_FILTER_WARN`, you see the logs within levels
+ * `LOG_FILTER_CRITICAL`, `LOG_FILTER_ERROR` and `LOG_FILTER_WARN`.
*
- * @param filter The log filter level:
- * - `LOG_FILTER_DEBUG(0x80f)`: Output all API logs. Set your log filter as DEBUG
- * if you want to get the most complete log file.
- * - `LOG_FILTER_INFO(0x0f)`: Output logs of the CRITICAL, ERROR, WARNING, and INFO
- * level. We recommend setting your log filter as this level.
- * - `LOG_FILTER_WARNING(0x0e)`: Output logs of the CRITICAL, ERROR, and WARNING level.
- * - `LOG_FILTER_ERROR(0x0c)`: Output logs of the CRITICAL and ERROR level.
- * - `LOG_FILTER_CRITICAL(0x08)`: Output logs of the CRITICAL level.
- * - `LOG_FILTER_OFF(0)`: Do not output any log.
+ * @param filter The output log level of the SDK. See `LOG_FILTER_TYPE`.
*
* @return
* - 0: Success.
@@ -6243,16 +8220,12 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setLogFilter(unsigned int filter) = 0;
/**
- * Sets the output log level of the SDK.
+ * @brief Sets the output log level of the SDK.
*
- * You can set the SDK to ouput the log files of the specified level.
+ * @details
+ * Choose a level to see the logs preceding that level.
*
- * @param level The log level:
- * - `LOG_LEVEL_NONE (0x0000)`: Do not output any log file.
- * - `LOG_LEVEL_INFO (0x0001)`: (Recommended) Output log files of the INFO level.
- * - `LOG_LEVEL_WARN (0x0002)`: Output log files of the WARN level.
- * - `LOG_LEVEL_ERROR (0x0004)`: Output log files of the ERROR level.
- * - `LOG_LEVEL_FATAL (0x0008)`: Output log files of the FATAL level.
+ * @param level The log level. See `LOG_LEVEL`.
*
* @return
* - 0: Success.
@@ -6261,15 +8234,34 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setLogLevel(commons::LOG_LEVEL level) = 0;
/**
- * Sets the log file size (KB).
+ * @brief Sets the log file size.
*
- * The SDK has two log files, each with a default size of 512 KB. If you set
- * `fileSizeInBytes` as 1024 KB, the SDK outputs log files with a total
- * maximum size of 2 MB.
- * If the total size of the log files exceed the set value,
- * the new output log files overwrite the old output log files.
+ * @details
+ * By default, the SDK generates five SDK log files and five API call log files with the following
+ * rules:
+ * - The SDK log files are: `agorasdk.log`, `agorasdk.1.log`, `agorasdk.2.log`, `agorasdk.3.log`,
+ * and `agorasdk.4.log`.
+ * - The API call log files are: `agoraapi.log`, `agoraapi.1.log`, `agoraapi.2.log`,
+ * `agoraapi.3.log`, and `agoraapi.4.log`.
+ * - The default size of each SDK log file and API log file is 2,048 KB. These log files are encoded
+ * in UTF-8.
+ * - The SDK writes the latest logs in `agorasdk.log` or `agoraapi.log`.
+ * - When `agorasdk.log` is full, the SDK processes the log files in the following order:
+ * 1. Delete the `agorasdk.4.log` file (if any).
+ * 2. Rename `agorasdk.3.log` to `agorasdk.4.log`.
+ * 3. Rename `agorasdk.2.log` to `agorasdk.3.log`.
+ * 4. Rename `agorasdk.1.log` to `agorasdk.2.log`.
+ * 5. Create a new `agorasdk.log` file.
+ * - The overwrite rules for the `agoraapi.log` file are the same as for `agorasdk.log`.
+ *
+ * @note This method is used to set the size of the `agorasdk.log` file only and does not affect the
+ * `agoraapi.log` file.
+ *
+ * @param fileSizeInKBytes The size (KB) of an `agorasdk.log` file. The value range is [128,20480].
+ * The default value is 2,048 KB. If you set `fileSizeInKBytes` smaller than 128 KB, the SDK
+ * automatically adjusts it to 128 KB; if you set `fileSizeInKBytes` greater than 20,480 KB, the SDK
+ * automatically adjusts it to 20,480 KB.
*
- * @param fileSizeInKBytes The SDK log file size (KB).
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6304,18 +8296,25 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int writeLog(commons::LOG_LEVEL level, const char* fmt, ...) = 0;
/**
- * Updates the display mode of the local video view.
+ * @brief Updates the display mode of the local video view.
*
- * After initializing the local video view, you can call this method to update its rendering mode.
- * It affects only the video view that the local user sees, not the published local video stream.
+ * @details
+ * After initializing the local video view, you can call this method to update its rendering and
+ * mirror modes. It affects only the video view that the local user sees and does not impact the
+ * publishing of the local video.
+ * Call timing: - Ensure that you have called the `setupLocalVideo` method to initialize the local
+ * video view before calling this method.
+ * - During a call, you can call this method as many times as necessary to update the display mode
+ * of the local video view.
*
- * @note
- * - Ensure that you have called \ref setupLocalVideo "setupLocalVideo" to initialize the local video
- * view before this method.
- * - During a call, you can call this method as many times as necessary to update the local video view.
+ * @note This method only takes effect on the primary camera `(PRIMARY_CAMERA_SOURCE)`. In scenarios
+ * involving custom video capture or the use of alternative video sources, you need to use
+ * `setupLocalVideo` instead of this method.
*
- * @param renderMode Sets the local display mode. See #RENDER_MODE_TYPE.
- * @param mirrorMode Sets the local mirror mode. See #VIDEO_MIRROR_MODE_TYPE.
+ * @param renderMode The local video display mode. See `RENDER_MODE_TYPE`.
+ * @param mirrorMode The mirror mode of the local video view. See `VIDEO_MIRROR_MODE_TYPE`.
+ * Attention: If you use a front camera, the SDK enables the mirror mode by default; if you use a
+ * rear camera, the SDK disables the mirror mode by default.
*
* @return
* - 0: Success.
@@ -6324,20 +8323,21 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setLocalRenderMode(media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode) = 0;
/**
- * Updates the display mode of the video view of a remote user.
+ * @brief Updates the display mode of the video view of a remote user.
*
+ * @details
* After initializing the video view of a remote user, you can call this method to update its
* rendering and mirror modes. This method affects only the video view that the local user sees.
*
* @note
- * - Ensure that you have called \ref setupRemoteVideo "setupRemoteVideo" to initialize the remote video
- * view before calling this method.
+ * - Call this method after initializing the remote view by calling the `setupRemoteVideo` method.
* - During a call, you can call this method as many times as necessary to update the display mode
* of the video view of a remote user.
*
- * @param uid ID of the remote user.
- * @param renderMode Sets the remote display mode. See #RENDER_MODE_TYPE.
- * @param mirrorMode Sets the mirror type. See #VIDEO_MIRROR_MODE_TYPE.
+ * @param uid The user ID of the remote user.
+ * @param renderMode The rendering mode of the remote user view. For details, see
+ * `RENDER_MODE_TYPE`.
+ * @param mirrorMode The mirror mode of the remote user view. See `VIDEO_MIRROR_MODE_TYPE`.
*
* @return
* - 0: Success.
@@ -6346,10 +8346,20 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setRemoteRenderMode(uid_t uid, media::base::RENDER_MODE_TYPE renderMode,
VIDEO_MIRROR_MODE_TYPE mirrorMode) = 0;
/**
- * Sets the target frames per second (FPS) for the local render target.
+ * @brief Sets the maximum frame rate for rendering local video.
+ *
+ * @details
+ * Applicable scenarios: In scenarios where the requirements for video rendering frame rate are not
+ * high (such as screen sharing or online education), you can call this method to set the maximum
+ * frame rate for local video rendering. The SDK will attempt to keep the actual frame rate of local
+ * rendering close to this value, which helps to reduce CPU consumption and improve system
+ * performance.
+ * Call timing: You can call this method either before or after joining a channel.
*
- * @param sourceType The type of video source.
- * @param targetFps The target frames per second to be set.
+ * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`.
+ * @param targetFps The capture frame rate (fps) of the local video. Supported values are: 1, 7, 10,
+ * 15, 24, 30, 60. CAUTION: Set this parameter to a value lower than the actual video frame rate;
+ * otherwise, the settings do not take effect.
*
* @return
* - 0: Success.
@@ -6357,9 +8367,20 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int setLocalRenderTargetFps(VIDEO_SOURCE_TYPE sourceType, int targetFps) = 0;
/**
- * Sets the target frames per second (FPS) for the remote render target.
+ * @brief Sets the maximum frame rate for rendering remote video.
+ *
+ * @details
+ * Applicable scenarios: In scenarios where the video rendering frame rate is not critical (e.g.,
+ * screen sharing, online education) or when the remote users are using mid-to-low-end devices, you
+ * can call this method to set the maximum frame rate for video rendering on the remote client. The
+ * SDK will attempt to render the actual frame rate as close as possible to this value, which helps
+ * to reduce CPU consumption and improve system performance.
+ * Call timing: You can call this method either before or after joining a channel.
+ *
+ * @param targetFps The maximum frame rate (fps) for rendering the remote video. Supported values
+ * are: 1, 7, 10, 15, 24, 30, 60. CAUTION: Set this parameter to a value lower than the actual video frame rate;
+ * otherwise, the settings do not take effect.
*
- * @param targetFps The target frames per second to be set for the remote render target.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6387,11 +8408,9 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setLocalRenderMode(media::base::RENDER_MODE_TYPE renderMode) __deprecated = 0;
/**
- * Sets the local video mirror mode.
+ * @brief Sets the local video mirror mode.
*
- * Use this method before calling the \ref startPreview "startPreview" method, or the mirror mode
- * does not take effect until you call the `startPreview` method again.
- * @param mirrorMode Sets the local video mirror mode. See #VIDEO_MIRROR_MODE_TYPE.
+ * @param mirrorMode The local video mirror mode. See `VIDEO_MIRROR_MODE_TYPE`.
*
* @return
* - 0: Success.
@@ -6400,15 +8419,26 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setLocalVideoMirrorMode(VIDEO_MIRROR_MODE_TYPE mirrorMode) __deprecated = 0;
/**
- * Enables or disables the dual video stream mode.
+ * @brief Enables or disables dual-stream mode on the sender side.
*
- * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream
- * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video stream)
- * video using \ref setRemoteVideoStreamType "setRemoteVideoStreamType".
+ * @details
+ * Dual streams are a pairing of a high-quality video stream and a low-quality video stream:
+ * - High-quality video stream: High bitrate, high resolution.
+ * - Low-quality video stream: Low bitrate, low resolution.
+ * After you enable dual-stream mode, you can call `setRemoteVideoStreamType` to choose to receive
+ * either the high-quality video stream or the low-quality video stream on the subscriber side.
+ *
+ * @note
+ * - This method is applicable to all types of streams from the sender, including but not limited to
+ * video streams collected from cameras, screen sharing streams, and custom-collected video streams.
+ * - If you need to enable dual video streams in a multi-channel scenario, you can call the
+ * `enableDualStreamModeEx` method.
+ * - You can call this method either before or after joining a channel.
+ *
+ * @param enabled Whether to enable dual-stream mode:
+ * - `true`: Enable dual-stream mode.
+ * - `false`: (Default) Disable dual-stream mode.
*
- * @param enabled
- * - true: Enable the dual-stream mode.
- * - false: (default) Disable the dual-stream mode.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6416,17 +8446,30 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int enableDualStreamMode(bool enabled) __deprecated = 0;
/**
- * Enables or disables the dual video stream mode.
+ * @brief Sets the dual-stream mode on the sender side and the low-quality video stream.
*
- * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream
- * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video stream)
- * video using \ref setRemoteVideoStreamType "setRemoteVideoStreamType".
+ * @details
+ * You can call this method to enable or disable the dual-stream mode on the publisher side. Dual
+ * streams are a pairing of a high-quality video stream and a low-quality video stream:
+ * - High-quality video stream: High bitrate, high resolution.
+ * - Low-quality video stream: Low bitrate, low resolution.
+ * After you enable dual-stream mode, you can call `setRemoteVideoStreamType` to choose to receive
+ * either the high-quality video stream or the low-quality video stream on the subscriber side.
+ *
+ * @note
+ * - This method is applicable to all types of streams from the sender, including but not limited to
+ * video streams collected from cameras, screen sharing streams, and custom-collected video streams.
+ * - If you need to enable dual video streams in a multi-channel scenario, you can call the
+ * `enableDualStreamModeEx` method.
+ * - You can call this method either before or after joining a channel.
+ *
+ * @param enabled Whether to enable dual-stream mode:
+ * - `true`: Enable dual-stream mode.
+ * - `false`: (Default) Disable dual-stream mode.
+ * @param streamConfig The configuration of the low-quality video stream. See
+ * `SimulcastStreamConfig`. Note: When setting `mode` to `DISABLE_SIMULCAST_STREAM`, setting
+ * `streamConfig` will not take effect.
*
- * @param enabled
- * - true: Enable the dual-stream mode.
- * - false: (default) Disable the dual-stream mode.
- * @param streamConfig
- * - The minor stream config
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6435,14 +8478,32 @@ class IRtcEngine : public agora::base::IEngineBase {
/**
- * Enables, disables or auto enable the dual video stream mode.
+ * @brief Sets the dual-stream mode on the sender side.
+ *
+ * @details
+ * The SDK defaults to enabling low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` )
+ * on the sender side, which means the sender does not actively send low-quality video stream. The
+ * receiving end with the role of the **host** can initiate a low-quality video stream request by
+ * calling `setRemoteVideoStreamType`, and upon receiving the request, the sending end automatically
+ * starts sending low-quality stream.
+ * - If you want to modify this behavior, you can call this method and set `mode` to
+ * `DISABLE_SIMULCAST_STREAM` (never send low-quality video streams) or `ENABLE_SIMULCAST_STREAM`
+ * (always send low-quality video streams).
+ * - If you want to restore the default behavior after making changes, you can call this method
+ * again with `mode` set to `AUTO_SIMULCAST_STREAM`.
*
- * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream
- * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video stream)
- * video using \ref setRemoteVideoStreamType "setRemoteVideoStreamType".
+ * @note
+ * The difference and connection between this method and `enableDualStreamMode(bool enabled)` is as
+ * follows:
+ * - When calling this method and setting `mode` to `DISABLE_SIMULCAST_STREAM`, it has the same
+ * effect as `enableDualStreamMode(bool enabled)` `(false)`.
+ * - When calling this method and setting `mode` to `ENABLE_SIMULCAST_STREAM`, it has the same
+ * effect as `enableDualStreamMode(bool enabled)` `(true)`.
+ * - Both methods can be called before and after joining a channel. If both methods are used, the
+ * settings in the method called later take precedence.
+ *
+ * @param mode The mode in which the video stream is sent. See `SIMULCAST_STREAM_MODE`.
*
- * @param mode
- * - The dual stream mode
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6450,38 +8511,61 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setDualStreamMode(SIMULCAST_STREAM_MODE mode) = 0;
/**
- * Sets the multi-layer video stream configuration.
+ * @brief Sets the simulcast video stream configuration.
*
- * When users expect the same UID to send multiple streams of different resolutions, they can achieve this by calling setSimulcastConfig
- *
- * If multi-layer is configured, the subscriber can choose to receive the corresponding layer
- * of video stream using {@link setRemoteVideoStreamType setRemoteVideoStreamType}.
- *
- * @details This method allows a broadcaster to simultaneously transmit multiple video streams
- * with different resolutions. The configuration supports enabling up to four layers
- * simultaneously: one major stream (highest resolution) and three additional simulcast
- * streams.
- *
- * @param simulcastConfig
- * - The configuration for multi-layer video stream. It includes seven layers, ranging from
- * STREAM_LAYER_1 to STREAM_LOW. A maximum of 3 layers can be enabled simultaneously.
+ * @since v4.6.0
+ *
+ * @details
+ * You can call this method to set video streams with different resolutions for the same video
+ * source. The subscribers can call `setRemoteVideoStreamType` to select which stream layer to
+ * receive. The broadcaster can publish up to four layers of video streams: one main stream (highest
+ * resolution) and three additional streams of different quality levels.
+ *
+ * @param simulcastConfig This configuration includes seven layers, from STREAM_LAYER_1 to
+ * STREAM_LOW, with a maximum of three layers enabled simultaneously. See `SimulcastConfig`.
*
* @return
* - 0: Success.
* - < 0: Failure.
- * @since v4.6.0
*/
virtual int setSimulcastConfig(const SimulcastConfig& simulcastConfig) = 0;
/**
- * Enables, disables or auto enable the dual video stream mode.
+ * @brief Sets dual-stream mode configuration on the sender side.
+ *
+ * @details
+ * The SDK defaults to enabling low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` )
+ * on the sender side, which means the sender does not actively send low-quality video stream. The
+ * receiving end with the role of the **host** can initiate a low-quality video stream request by
+ * calling `setRemoteVideoStreamType`, and upon receiving the request, the sending end automatically
+ * starts sending low-quality stream.
+ * - If you want to modify this behavior, you can call this method and set `mode` to
+ * `DISABLE_SIMULCAST_STREAM` (never send low-quality video streams) or `ENABLE_SIMULCAST_STREAM`
+ * (always send low-quality video streams).
+ * - If you want to restore the default behavior after making changes, you can call this method
+ * again with `mode` set to `AUTO_SIMULCAST_STREAM`.
+ * The difference between this method and `setDualStreamMode(SIMULCAST_STREAM_MODE mode)` is that
+ * this method can also
+ * configure the low-quality video stream, and the SDK sends the stream according to the
+ * configuration in `streamConfig`.
+ *
+ * @note
+ * The difference and connection between this method and `enableDualStreamMode(bool enabled, const
+ * SimulcastStreamConfig& streamConfig)` is as follows:
+ * - When calling this method and setting `mode` to `DISABLE_SIMULCAST_STREAM`, it has the same
+ * effect as calling `enableDualStreamMode(bool enabled, const SimulcastStreamConfig& streamConfig)`
+ * and setting `enabled` to `false`.
+ * - When calling this method and setting `mode` to `ENABLE_SIMULCAST_STREAM`, it has the same
+ * effect as calling `enableDualStreamMode(bool enabled, const SimulcastStreamConfig& streamConfig)`
+ * and setting `enabled` to `true`.
+ * - Both methods can be called before and after joining a channel. If both methods are used, the
+ * settings in the method called later take precedence.
*
- * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream
- * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video stream)
- * video using \ref setRemoteVideoStreamType "setRemoteVideoStreamType".
+ * @param mode The mode in which the video stream is sent. See `SIMULCAST_STREAM_MODE`.
+ * @param streamConfig The configuration of the low-quality video stream. See
+ * `SimulcastStreamConfig`. Note: When setting `mode` to `DISABLE_SIMULCAST_STREAM`, setting
+ * `streamConfig` will not take effect.
*
- * @param mode Dual stream mode: #SIMULCAST_STREAM_MODE.
- * @param streamConfig Configurations of the low stream: SimulcastStreamConfig.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6489,15 +8573,23 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig) = 0;
/**
- * Sets the external audio track.
+ * @brief Sets whether to enable the local playback of external audio source.
*
- * @note
- * Ensure that you call this method before joining the channel.
+ * @details
+ * After calling this method to enable the local playback of external audio source, if you need to
+ * stop local playback, you can call this method again and set `enabled` to `false`.
+ * You can call `adjustCustomAudioPlayoutVolume` to adjust the local playback volume of the custom
+ * audio track.
+ *
+ * @note Ensure you have called the `createCustomAudioTrack` method to create a custom audio track
+ * before calling this method.
+ *
+ * @param trackId The audio track ID. Set this parameter to the custom audio track ID returned in
+ * `createCustomAudioTrack`.
+ * @param enabled Whether to play the external audio source:
+ * - `true`: Play the external audio source.
+ * - `false`: (Default) Do not play the external source.
*
- * @param trackId custom audio track id.
- * @param enabled Determines whether to local playback the external audio track:
- * - true: Local playback the external audio track.
- * - false: Local don`t playback the external audio track.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6505,19 +8597,23 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int enableCustomAudioLocalPlayback(track_id_t trackId, bool enabled) = 0;
/**
- * Sets the audio recording format for the
- * \ref agora::media::IAudioFrameObserver::onRecordAudioFrame "onRecordAudioFrame" callback.
+ * @brief Sets the format of the captured raw audio data.
*
- * @param sampleRate The sample rate (Hz) of the audio data returned in the `onRecordAudioFrame` callback, which can set be
- * as 8000, 16000, 32000, 44100, or 48000.
- * @param channel The number of audio channels of the audio data returned in the `onRecordAudioFrame` callback, which can
- * be set as 1 or 2:
+ * @details
+ * The SDK calculates the sampling interval based on the `samplesPerCall`, `sampleRate` and
+ * `channel` parameters set in this method. Sample interval (sec) = `samplesPerCall` / (`sampleRate` ×
+ * `channel` ). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the
+ * `onRecordAudioFrame` callback according to the sampling interval.
+ * Call timing: Call this method before joining a channel.
+ *
+ * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000,
+ * 32000, 44100, or 48000 Hz.
+ * @param channel The number of audio channels. You can set the value as 1 or 2.
* - 1: Mono.
* - 2: Stereo.
- * @param mode This mode is deprecated.
- * @param samplesPerCall not support. Sampling points in the called data returned in
- * onRecordAudioFrame(). For example, it is usually set as 1024 for stream
- * pushing.
+ * @param mode The use mode of the audio frame. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`.
+ * @param samplesPerCall The number of data samples, such as 1024 for the Media Push.
+ *
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6527,20 +8623,23 @@ class IRtcEngine : public agora::base::IEngineBase {
int samplesPerCall) = 0;
/**
- * Sets the audio playback format for the
- * \ref agora::media::IAudioFrameObserver::onPlaybackAudioFrame "onPlaybackAudioFrame" callback.
+ * @brief Sets the format of the raw audio playback data.
+ *
+ * @details
+ * The SDK calculates the sampling interval based on the `samplesPerCall`, `sampleRate` and
+ * `channel` parameters set in this method. Sample interval (sec) = `samplesPerCall` / (`sampleRate` ×
+ * `channel` ). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the
+ * `onPlaybackAudioFrame` callback according to the sampling interval.
+ * Call timing: Call this method before joining a channel.
+ *
+ * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000,
+ * 24000, 32000, 44100, or 48000 Hz.
+ * @param channel The number of audio channels. You can set the value as 1 or 2.
+ * - 1: Mono.
+ * - 2: Stereo.
+ * @param mode The use mode of the audio frame. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`.
+ * @param samplesPerCall The number of data samples, such as 1024 for the Media Push.
*
- * @param sampleRate Sets the sample rate (Hz) of the audio data returned in the `onPlaybackAudioFrame` callback,
- * which can set be as 8000, 16000, 32000, 44100, or 48000.
- * @param channel The number of channels of the audio data returned in the `onPlaybackAudioFrame` callback, which
- * can be set as 1 or 2:
- * - 1: Mono
- * - 2: Stereo
- * @param mode Deprecated. The use mode of the onPlaybackAudioFrame() callback:
- * agora::rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE.
- * @param samplesPerCall not support. Sampling points in the called data returned in
- * onPlaybackAudioFrame(). For example, it is usually set as 1024 for stream
- * pushing.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6550,37 +8649,53 @@ class IRtcEngine : public agora::base::IEngineBase {
int samplesPerCall) = 0;
/**
- * Sets the mixed audio format for the
- * \ref agora::media::IAudioFrameObserver::onMixedAudioFrame "onMixedAudioFrame" callback.
+ * @brief Sets the format of the raw audio data after mixing for audio capture and playback.
*
- * @param sampleRate The sample rate (Hz) of the audio data returned in the `onMixedAudioFrame` callback, which can set
- * be as 8000, 16000, 32000, 44100, or 48000.
- * @param channel The number of channels of the audio data in `onMixedAudioFrame` callback, which can be set as 1 or 2:
- * - 1: Mono
- * - 2: Stereo
- * @param samplesPerCall not support. Sampling points in the called data returned in
- * `onMixedAudioFrame`. For example, it is usually set as 1024 for stream pushing.
- * @return
- * - 0: Success.
- * - < 0: Failure.
- */
+ * @details
+ * The SDK calculates the sampling interval based on the `samplesPerCall`, `sampleRate` and
+ * `channel` parameters set in this method. Sample interval (sec) = `samplesPerCall` / (`sampleRate` ×
+ * `channel` ). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the `onMixedAudioFrame`
+ * callback according to the sampling interval.
+ * Call timing: Call this method before joining a channel.
+ *
+ * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000,
+ * 32000, 44100, or 48000 Hz.
+ * @param channel The number of audio channels. You can set the value as 1 or 2.
+ * - 1: Mono.
+ * - 2: Stereo.
+ * @param samplesPerCall The number of data samples, such as 1024 for the Media Push.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int setMixedAudioFrameParameters(int sampleRate, int channel, int samplesPerCall) = 0;
/**
- * Sets the audio ear monitoring format for the
- * \ref agora::media::IAudioFrameObserver::onEarMonitoringAudioFrame "onEarMonitoringAudioFrame" callback.
+ * @brief Sets the format of the in-ear monitoring raw audio data.
+ *
+ * @details
+ * This method is used to set the in-ear monitoring audio data format reported by the
+ * `onEarMonitoringAudioFrame` callback.
+ *
+ * @note
+ * - Before calling this method, you need to call `enableInEarMonitoring`, and set
+ * `includeAudioFilters` to `EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS` or
+ * `EAR_MONITORING_FILTER_NOISE_SUPPRESSION`.
+ * - The SDK calculates the sampling interval based on the `samplesPerCall`, `sampleRate` and
+ * `channel` parameters set in this method. Sample interval (sec) = `samplesPerCall` / (`sampleRate` ×
+ * `channel` ). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the
+ * `onEarMonitoringAudioFrame` callback according to the sampling interval.
+ *
+ * @param sampleRate The sample rate of the audio data reported in the `onEarMonitoringAudioFrame`
+ * callback, which can be set as 8,000, 16,000, 32,000, 44,100, or 48,000 Hz.
+ * @param channel The number of audio channels reported in the `onEarMonitoringAudioFrame` callback.
+ * - 1: Mono.
+ * - 2: Stereo.
+ * @param mode The use mode of the audio frame. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`.
+ * @param samplesPerCall The number of data samples reported in the `onEarMonitoringAudioFrame`
+ * callback, such as 1,024 for the Media Push.
*
- * @param sampleRate Sets the sample rate (Hz) of the audio data returned in the `onEarMonitoringAudioFrame` callback,
- * which can set be as 8000, 16000, 32000, 44100, or 48000.
- * @param channel The number of channels of the audio data returned in the `onEarMonitoringAudioFrame` callback, which
- * can be set as 1 or 2:
- * - 1: Mono
- * - 2: Stereo
- * @param mode Deprecated. The use mode of the onEarMonitoringAudioFrame() callback:
- * agora::rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE.
- * @param samplesPerCall not support. Sampling points in the called data returned in
- * onEarMonitoringAudioFrame(). For example, it is usually set as 1024 for stream
- * pushing.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6590,16 +8705,19 @@ class IRtcEngine : public agora::base::IEngineBase {
int samplesPerCall) = 0;
/**
- * Sets the audio playback format before mixing in the
- * \ref agora::media::IAudioFrameObserver::onPlaybackAudioFrameBeforeMixing "onPlaybackAudioFrameBeforeMixing"
- * callback.
+ * @brief Sets the format of the raw audio playback data before mixing.
+ *
+ * @details
+ * The SDK triggers the `onPlaybackAudioFrameBeforeMixing` callback according to the sampling
+ * interval.
+ * Call timing: Call this method before joining a channel.
+ *
+ * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000,
+ * 32000, 44100, or 48000 Hz.
+ * @param channel The number of audio channels. You can set the value as 1 or 2.
+ * - 1: Mono.
+ * - 2: Stereo.
*
- * @param sampleRate The sample rate (Hz) of the audio data returned in
- * `onPlaybackAudioFrameBeforeMixing`, which can set be as 8000, 16000, 32000, 44100, or 48000.
- * @param channel Number of channels of the audio data returned in `onPlaybackAudioFrameBeforeMixing`,
- * which can be set as 1 or 2:
- * - 1: Mono
- * - 2: Stereo
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6607,18 +8725,22 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setPlaybackAudioFrameBeforeMixingParameters(int sampleRate, int channel) = 0;
/**
- * Sets the audio playback format before mixing in the
- * \ref agora::media::IAudioFrameObserver::onPlaybackAudioFrameBeforeMixing "onPlaybackAudioFrameBeforeMixing"
- * callback.
+ * @brief Sets the format of audio data in the `onPlaybackAudioFrameBeforeMixing` callback.
+ *
+ * @details
+ * Used to set the sample rate, number of channels, and number of samples per callback for the audio
+ * data returned in the `onPlaybackAudioFrameBeforeMixing` callback.
+ *
+ * @param sampleRate Set the sample rate returned in the `onPlaybackAudioFrameBeforeMixing`
+ * callback. It can be set as the following values: 8000, 16000, 32000, 44100, or 48000.
+ * @param channel Set the number of channels for the audio data returned in the
+ * `onPlaybackAudioFrameBeforeMixing` callback. It can be set to:
+ * - 1: Mono.
+ * - 2: Stereo.
+ * @param samplesPerCall Set the number of samples of the audio data returned in the
+ * `onPlaybackAudioFrameBeforeMixing` callback. In the RTMP streaming scenario, it is recommended to
+ * set it to 1024.
*
- * @param sampleRate The sample rate (Hz) of the audio data returned in
- * `onPlaybackAudioFrameBeforeMixing`, which can set be as 8000, 16000, 32000, 44100, or 48000.
- * @param channel Number of channels of the audio data returned in `onPlaybackAudioFrameBeforeMixing`,
- * which can be set as 1 or 2:
- * - 1: Mono
- * - 2: Stereo
- * @param samplesPerCall Sampling points in the called data returned in
- * `onPlaybackAudioFrameBeforeMixing`. For example, it is usually set as 1024 for stream pushing.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6626,18 +8748,32 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setPlaybackAudioFrameBeforeMixingParameters(int sampleRate, int channel, int samplesPerCall) = 0;
/**
- * Enable the audio spectrum monitor.
+ * @brief Turns on audio spectrum monitoring.
+ *
+ * @details
+ * If you want to obtain the audio spectrum data of local or remote users, you can register the
+ * audio spectrum observer and enable audio spectrum monitoring.
*
- * @param intervalInMS Sets the time interval(ms) between two consecutive audio spectrum callback.
- * The default value is 100. This param should be larger than 10.
+ * @note You can call this method either before or after joining a channel.
+ *
+ * @param intervalInMS The interval (in milliseconds) at which the SDK triggers the
+ * `onLocalAudioSpectrum` and `onRemoteAudioSpectrum` callbacks. The default value is 100. Do not
+ * set this parameter to a value less than 10, otherwise calling this method would fail.
*
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -2: Invalid parameters.
*/
virtual int enableAudioSpectrumMonitor(int intervalInMS = 100) = 0;
/**
- * Disalbe the audio spectrum monitor.
+ * @brief Disables audio spectrum monitoring.
+ *
+ * @details
+ * After calling `enableAudioSpectrumMonitor`, if you want to disable audio spectrum monitoring, you
+ * can call this method.
+ *
+ * @note You can call this method either before or after joining a channel.
*
* @return
* - 0: Success.
@@ -6646,52 +8782,73 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int disableAudioSpectrumMonitor() = 0;
/**
- * Registers an audio spectrum observer.
+ * @brief Registers an audio spectrum observer.
+ *
+ * @details
+ * After successfully registering the audio spectrum observer and calling
+ * `enableAudioSpectrumMonitor` to enable the audio spectrum monitoring, the SDK reports the
+ * callback that you implement in the `IAudioSpectrumObserver` class according to the time interval
+ * you set.
*
- * You need to implement the `IAudioSpectrumObserver` class in this method, and register the following callbacks
- * according to your scenario:
- * - \ref agora::media::IAudioSpectrumObserver::onAudioSpectrumComputed "onAudioSpectrumComputed": Occurs when
- * the SDK receives the audio data and at set intervals.
+ * @note You can call this method either before or after joining a channel.
+ *
+ * @param observer The audio spectrum observer. See `IAudioSpectrumObserver`.
*
- * @param observer A pointer to the audio spectrum observer: \ref agora::media::IAudioSpectrumObserver
- * "IAudioSpectrumObserver".
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int registerAudioSpectrumObserver(agora::media::IAudioSpectrumObserver * observer) = 0;
/**
- * Releases the audio spectrum observer.
+ * @brief Unregisters the audio spectrum observer.
+ *
+ * @details
+ * After calling `registerAudioSpectrumObserver`, if you want to disable audio spectrum monitoring,
+ * you can call this method.
+ *
+ * @note You can call this method either before or after joining a channel.
+ *
+ * @param observer The audio spectrum observer. See `IAudioSpectrumObserver`.
*
- * @param observer The pointer to the audio spectrum observer: \ref agora::media::IAudioSpectrumObserver
- * "IAudioSpectrumObserver".
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int unregisterAudioSpectrumObserver(agora::media::IAudioSpectrumObserver * observer) = 0;
- /** Adjusts the recording volume.
-
- @param volume The recording volume, which ranges from 0 to 400:
-
- - 0: Mute the recording volume.
- - 100: The Original volume.
- - 400: (Maximum) Four times the original volume with signal clipping
- protection.
-
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Adjusts the capturing signal volume.
+ *
+ * @details
+ * If you only need to mute the audio signal, Agora recommends that you use `muteRecordingSignal`
+ * instead.
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @param volume The volume of the user. The value range is [0,400].
+ * - 0: Mute.
+ * - 100: (Default) The original volume.
+ * - 400: Four times the original volume (amplifying the audio signals by four times).
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int adjustRecordingSignalVolume(int volume) = 0;
/**
- * Mute or resume recording signal volume.
+ * @brief Whether to mute the recording signal.
*
- * @param mute Determines whether to mute or resume the recording signal volume.
- * - true: Mute the recording signal volume.
- * - false: (Default) Resume the recording signal volume.
+ * @details
+ * If you have already called `adjustRecordingSignalVolume` to adjust the recording signal volume,
+ * when you call this method and set it to `true`, the SDK behaves as follows: 1. Records the
+ * adjusted volume.
+ * 2. Mutes the recording signal.
+ * When you call this method again and set it to `false`, the recording signal volume will be
+ * restored to the volume recorded by the SDK before muting.
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @param mute - `true`: Mute the recording signal.
+ * - `false`: (Default) Do not mute the recording signal.
*
* @return
* - 0: Success.
@@ -6699,57 +8856,67 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int muteRecordingSignal(bool mute) = 0;
- /** Adjusts the playback volume.
-
- @param volume The playback volume, which ranges from 0 to 400:
-
- - 0: Mute the recoridng volume.
- - 100: The Original volume.
- - 400: (Maximum) Four times the original volume with signal clipping
- protection.
-
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Adjusts the playback signal volume of all remote users.
+ *
+ * @details
+ * This method is used to adjust the signal volume of all remote users mixed and played locally. If
+ * you need to adjust the signal volume of a specified remote user played locally, it is recommended
+ * that you call `adjustUserPlaybackSignalVolume` instead.
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @param volume The volume of the user. The value range is [0,400].
+ * - 0: Mute.
+ * - 100: (Default) The original volume.
+ * - 400: Four times the original volume (amplifying the audio signals by four times).
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int adjustPlaybackSignalVolume(int volume) = 0;
- /*
- * Adjust the playback volume of the user specified by uid.
+ /**
+ * @brief Adjusts the playback signal volume of a specified remote user.
*
- * You can call this method to adjust the playback volume of the user specified by uid
- * in call. If you want to adjust playback volume of the multi user, you can call this
- * this method multi times.
+ * @details
+ * You can call this method to adjust the playback volume of a specified remote user. To adjust the
+ * playback volume of different remote users, call the method as many times, once for each remote
+ * user.
+ * Call timing: Call this method after joining a channel.
*
- * @note
- * Please call this method after join channel.
- * This method adjust the playback volume of specified user.
+ * @param uid The user ID of the remote user.
+ * @param volume The volume of the user. The value range is [0,400].
+ * - 0: Mute.
+ * - 100: (Default) The original volume.
+ * - 400: Four times the original volume (amplifying the audio signals by four times).
*
- * @param uid Remote user ID.
- * @param volume The playback volume of the specified remote user. The value ranges between 0 and 400, including the following:
- * 0: Mute.
- * 100: (Default) Original volume.
- * 400: Four times the original volume with signal-clipping protection.
* @return
* - 0: Success.
* - < 0: Failure.
*/
virtual int adjustUserPlaybackSignalVolume(uid_t uid, int volume) = 0;
- /** Sets the fallback option for the remotely subscribed video stream based on the network conditions.
-
- The default setting for `option` is #STREAM_FALLBACK_OPTION_VIDEO_STREAM_LOW (1), where the remotely subscribed video stream falls back to the low-stream video (low resolution and low bitrate) under poor downlink network conditions.
-
- If `option` is set as #STREAM_FALLBACK_OPTION_AUDIO_ONLY (2), the SDK automatically switches the video from a high-stream to a low-stream, or disables the video when the downlink network conditions cannot support both audio and video to guarantee the quality of the audio. The SDK monitors the network quality and restores the video stream when the network conditions improve.
-
- When the remotely subscribed video stream falls back to audio only or when the audio-only stream switches back to the video stream, the SDK triggers the \ref agora::rtc::IRtcEngineEventHandler::onRemoteSubscribeFallbackToAudioOnly "onRemoteSubscribeFallbackToAudioOnly" callback.
-
- @note Ensure that you call this method before joining a channel.
-
- @param option Sets the fallback option for the remotely subscribed video stream. See #STREAM_FALLBACK_OPTIONS.
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Sets the fallback option for the subscribed video stream based on the network conditions.
+ *
+ * @details
+ * An unstable network affects the audio and video quality in a video call or interactive live video
+ * streaming. If `option` is set as `STREAM_FALLBACK_OPTION_VIDEO_STREAM_LOW` or
+ * `STREAM_FALLBACK_OPTION_AUDIO_ONLY`, the SDK automatically switches the video from a high-quality
+ * stream to a low-quality stream or disables the video when the downlink network conditions cannot
+ * support both audio and video to guarantee the quality of the audio. Meanwhile, the SDK
+ * continuously monitors network quality and resumes subscribing to audio and video streams when the
+ * network quality improves.
+ * When the subscribed video stream falls back to an audio-only stream, or recovers from an
+ * audio-only stream to an audio-video stream, the SDK triggers the
+ * `onRemoteSubscribeFallbackToAudioOnly` callback.
+ *
+ * @param option Fallback options for the subscribed stream. See `STREAM_FALLBACK_OPTIONS`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setRemoteSubscribeFallbackOption(STREAM_FALLBACK_OPTIONS option) = 0;
@@ -6821,20 +8988,30 @@ class IRtcEngine : public agora::base::IEngineBase {
*/
virtual int getExtensionProperty(const char* provider, const char* extension, const ExtensionInfo& extensionInfo, const char* key, char* value, int buf_len) = 0;
- /** Enables loopback recording.
+ /**
+ * @brief Enables loopback audio capturing.
+ *
+ * @details
+ * If you enable loopback audio capturing, the output of the sound card is mixed into the audio
+ * stream sent to the other end.
*
- * If you enable loopback recording, the output of the default sound card is mixed into
- * the audio stream sent to the other end.
+ * @note
+ * - This method applies to the macOS and Windows only.
+ * - You can call this method either before or after joining a channel.
+ * - If you call the `disableAudio` method to disable the audio module, audio capturing will be
+ * disabled as well. If you need to enable audio capturing, call the `enableAudio` method to enable
+ * the audio module and then call the `enableLoopbackRecording` method.
*
- * @note This method is for Windows only.
+ * @param enabled Sets whether to enable loopback audio capturing.
+ * - `true`: Enable sound card capturing. You can find the name of the virtual sound card in your
+ * system's **Audio Devices > Output**.
+ * - `false`: Disable sound card capturing. The name of the virtual sound card will not be shown in
+ * your system's **Audio Devices > Output**.
+ * @param deviceName - macOS: The device name of the virtual sound card. The default value is set to
+ * NULL, which means using AgoraALD for loopback audio capturing.
+ * - Windows: The device name of the sound card. The default is set to NULL, which means the SDK
+ * uses the sound card of your device for loopback audio capturing.
*
- * @param enabled Sets whether to enable/disable loopback recording.
- * - true: Enable loopback recording.
- * - false: (Default) Disable loopback recording.
- * @param deviceName Pointer to the device name of the sound card. The default value is NULL (the default sound card).
- * - This method is for macOS and Windows only.
- * - macOS does not support loopback capturing of the default sound card. If you need to use this method,
- * please use a virtual sound card and pass its name to the deviceName parameter. Agora has tested and recommends using soundflower.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6842,18 +9019,20 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int enableLoopbackRecording(bool enabled, const char* deviceName = NULL) = 0;
- /** Adjusts the loopback recording volume.
-
- @param volume The loopback volume, which ranges from 0 to 100:
-
- - 0: Mute the recoridng volume.
- - 100: The Original volume.
- protection.
-
- @return
- - 0: Success.
- - < 0: Failure.
- */
+ /**
+ * @brief Adjusts the volume of the signal captured by the sound card.
+ *
+ * @details
+ * After calling `enableLoopbackRecording` to enable loopback audio capturing, you can call this
+ * method to adjust the volume of the signal captured by the sound card.
+ *
+ * @param volume Audio mixing volume. The value ranges between 0 and 100. The default value is 100,
+ * which means the original volume.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int adjustLoopbackSignalVolume(int volume) = 0;
/** Retrieves the audio volume for recording loopback.
@@ -6865,40 +9044,87 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int getLoopbackRecordingVolume() = 0;
/**
- * Enables in-ear monitoring.
+ * @brief Enables in-ear monitoring.
+ *
+ * @details
+ * This method enables or disables in-ear monitoring.
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @note Users must use earphones (wired or Bluetooth) to hear the in-ear monitoring effect.
+ *
+ * @param enabled Enables or disables in-ear monitoring.
+ * - `true`: Enables in-ear monitoring.
+ * - `false`: (Default) Disables in-ear monitoring.
+ * @param includeAudioFilters The audio filter types of in-ear monitoring. See
+ * `EAR_MONITORING_FILTER_TYPE`.
*
- * @param enabled Determines whether to enable in-ear monitoring.
- * - true: Enable.
- * - false: (Default) Disable.
- * @param includeAudioFilters The type of the ear monitoring: #EAR_MONITORING_FILTER_TYPE
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -8: Make sure the current audio routing is Bluetooth or headset.
*/
virtual int enableInEarMonitoring(bool enabled, int includeAudioFilters) = 0;
/**
- * Sets the volume of the in-ear monitor.
+ * @brief Sets the volume of the in-ear monitor.
+ *
+ * @details
+ * Call timing: This method can be called either before or after joining the channel.
*
- * @param volume Sets the volume of the in-ear monitor. The value ranges
- * between 0 and 100 (default).
+ * @param volume The volume of the user. The value range is [0,400].
+ * - 0: Mute.
+ * - 100: (Default) The original volume.
+ * - 400: Four times the original volume (amplifying the audio signals by four times).
*
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -2: Invalid parameter settings, such as in-ear monitoring volume exceeding the valid range (<
+ * 0 or > 400).
*/
virtual int setInEarMonitoringVolume(int volume) = 0;
#if defined(_WIN32) || defined(__linux__) || defined(__ANDROID__)
+ /**
+ * @brief Loads an extension.
+ *
+ * @details
+ * This method is used to add extensions external to the SDK (such as those from Extensions
+ * Marketplace and SDK extensions) to the SDK.
+ * Call timing: Make sure the `IRtcEngine` is initialized before you call this method.
+ *
+ * @note
+ * If you want to load multiple extensions, you need to call this method multiple times.
+ * (For Windows, Linux, and Android only)
+ *
+ * @param path The extension library path and name. For example:
+ * `/library/libagora_segmentation_extension.dll`.
+ * @param unload_after_use Whether to uninstall the current extension when you no longer use it:
+ * - `true`: Uninstall the extension when the `IRtcEngine` is destroyed.
+ * - `false`: (Recommended) Do not uninstall the extension until the process terminates.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
virtual int loadExtensionProvider(const char* path, bool unload_after_use = false) = 0;
#endif
/**
- * Sets the provider property of an extension.
+ * @brief Sets the properties of the extension provider.
*
- * @param provider The name of the extension provider, e.g. agora.io.
+ * @details
+ * You can call this method to set the attributes of the extension provider and initialize the
+ * relevant parameters according to the type of the provider.
+ * Call timing: Call this method before `enableExtension` and after `registerExtension`.
+ *
+ * @note If you want to set the properties of the extension provider for multiple extensions, you
+ * need to call this method multiple times.
+ *
+ * @param provider The name of the extension provider.
* @param key The key of the extension.
- * @param value The JSON formatted value of the extension key.
+ * @param value The value of the extension key.
*
* @return
* - 0: Success.
@@ -6907,48 +9133,82 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setExtensionProviderProperty(const char* provider, const char* key, const char* value) = 0;
/**
- * Registers an extension. Normally you should call this function immediately after engine initialization.
- * Once an extension is registered, the SDK will automatically create and add it to the pipeline.
+ * @brief Registers an extension.
*
- * @param provider The name of the extension provider, e.g. agora.io.
- * @param extension The name of the extension, e.g. agora.beauty.
- * @param type The source type of the extension, e.g. PRIMARY_CAMERA_SOURCE. The default is UNKNOWN_MEDIA_SOURCE.
+ * @details
+ * For extensions external to the SDK (such as those from Extensions Marketplace and SDK
+ * Extensions), you need to load them before calling this method. Extensions internal to the SDK
+ * (those included in the full SDK package) are automatically loaded and registered after the
+ * initialization of `IRtcEngine`.
+ * Call timing: - Agora recommends you call this method after the initialization of `IRtcEngine` and
+ * before joining a channel.
+ * - For video extensions (such as the image enhancement extension), you need to call this method
+ * after enabling the video module by calling `enableVideo` or `enableLocalVideo`.
+ * - Before calling this method, you need to call `loadExtensionProvider` to load the extension
+ * first.
+ *
+ * @note
+ * - If you want to register multiple extensions, you need to call this method multiple times.
+ * - The data processing order of different extensions in the SDK is determined by the order in
+ * which the extensions are registered. That is, the extension that is registered first will process
+ * the data first.
+ *
+ * @param provider The name of the extension provider.
+ * @param extension The name of the extension.
+ * @param type Source type of the extension. See `MEDIA_SOURCE_TYPE`.
*
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -3: The extension library is not loaded. Agora recommends that you check the storage location
+ * or the name of the dynamic library.
*/
virtual int registerExtension(const char* provider, const char* extension, agora::media::MEDIA_SOURCE_TYPE type = agora::media::UNKNOWN_MEDIA_SOURCE) = 0;
/**
- * Enable/Disable an extension.
- * By calling this function, you can dynamically enable/disable the extension without changing the pipeline.
- * For example, enabling/disabling Extension_A means the data will be adapted/bypassed by Extension_A.
+ * @brief Enables or disables extensions.
*
- * NOTE: For compatibility reasons, if you haven't call registerExtension,
- * enableExtension will automatically register the specified extension.
- * We suggest you call registerExtension explicitly.
+ * @details
+ * Call timing: Agora recommends that you call this method after joining a channel.
+ * Related callbacks: When this method is successfully called within the channel, it triggers
+ * `onExtensionStartedWithContext` or `onExtensionStoppedWithContext`.
*
- * @param provider The name of the extension provider, e.g. agora.io.
- * @param extension The name of the extension, e.g. agora.beauty.
+ * @note
+ * - If you want to enable multiple extensions, you need to call this method multiple times.
+ * - After a successful call of this method, you cannot load other extensions.
+ *
+ * @param provider The name of the extension provider.
+ * @param extension The name of the extension.
* @param enable Whether to enable the extension:
- * - true: (Default) Enable the extension.
- * - false: Disable the extension.
- * @param type The source type of the extension, e.g. PRIMARY_CAMERA_SOURCE. The default is UNKNOWN_MEDIA_SOURCE.
+ * - `true`: Enable the extension.
+ * - `false`: Disable the extension.
+ * @param type Source type of the extension. See `MEDIA_SOURCE_TYPE`.
*
* @return
* - 0: Success.
* - < 0: Failure.
+ * - -3: The extension library is not loaded. Agora recommends that you check the storage location
+ * or the name of the dynamic library.
*/
virtual int enableExtension(const char* provider, const char* extension, bool enable=true, agora::media::MEDIA_SOURCE_TYPE type = agora::media::UNKNOWN_MEDIA_SOURCE) = 0;
/**
- * Sets the properties of an extension.
+ * @brief Sets the properties of the extension.
*
- * @param provider The name of the extension provider, e.g. agora.io.
- * @param extension The name of the extension, e.g. agora.beauty.
+ * @details
+ * After enabling the extension, you can call this method to set the properties of the extension.
+ * Call timing: Call this method after calling `enableExtension`.
+ * Related callbacks: After calling this method, it may trigger the `onExtensionEventWithContext`
+ * callback, and the specific triggering logic is related to the extension itself.
+ *
+ * @note If you want to set properties for multiple extensions, you need to call this method
+ * multiple times.
+ *
+ * @param provider The name of the extension provider.
+ * @param extension The name of the extension.
* @param key The key of the extension.
- * @param value The JSON formatted value of the extension key.
+ * @param value The value of the extension key.
+ * @param type Source type of the extension. See `MEDIA_SOURCE_TYPE`.
*
* @return
* - 0: Success.
@@ -6959,13 +9219,18 @@ class IRtcEngine : public agora::base::IEngineBase {
const char* key, const char* value, agora::media::MEDIA_SOURCE_TYPE type = agora::media::UNKNOWN_MEDIA_SOURCE) = 0;
/**
- * Gets the properties of an extension.
+ * @brief Gets detailed information on the extensions.
*
- * @param provider The name of the extension provider, e.g. agora.io.
- * @param extension The name of the extension, e.g. agora.beauty.
- * @param key The key of the extension.
- * @param value The value of the extension key.
- * @param buf_len Maximum length of the JSON string indicating the extension property.
+ * @details
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @param provider An output parameter. The name of the extension provider.
+ * @param extension An output parameter. The name of the extension.
+ * @param key An output parameter. The key of the extension.
+ * @param value An output parameter. The value of the extension key.
+ * @param type Source type of the extension. See `MEDIA_SOURCE_TYPE`.
+ * @param buf_len Maximum length of the JSON string indicating the extension property. The maximum
+ * value is 512 bytes.
*
* @return
* - 0: Success.
@@ -6975,13 +9240,24 @@ class IRtcEngine : public agora::base::IEngineBase {
const char* provider, const char* extension,
const char* key, char* value, int buf_len, agora::media::MEDIA_SOURCE_TYPE type = agora::media::UNKNOWN_MEDIA_SOURCE) = 0;
- /** Sets the camera capture configuration.
- * @note Call this method before enabling the local camera.
- * That said, you can call this method before calling \ref IRtcEngine::joinChannel "joinChannel",
- * \ref IRtcEngine::enableVideo "enableVideo", or \ref IRtcEngine::enableLocalVideo "enableLocalVideo",
- * depending on which method you use to turn on your local camera.
+ /**
+ * @brief Sets the camera capture configuration.
+ *
+ * @details
+ * Call timing: Call this method before enabling local camera capture, such as before calling
+ * `startPreview(VIDEO_SOURCE_TYPE sourceType)` and `joinChannel(const char* token, const char*
+ * channelId, uid_t uid, const ChannelMediaOptions& options)`.
+ *
+ * @note
+ * To adjust the camera focal length configuration, it is recommended to call
+ * `queryCameraFocalLengthCapability` first to check the device's focal length capabilities, and
+ * then configure based on the query results.
+ * Due to limitations on some Android devices, even if you set the focal length type according to
+ * the results returned in `queryCameraFocalLengthCapability`, the settings may not take effect.
+ *
+ * @param config The camera capture configuration. See `CameraCapturerConfiguration`. Attention: In
+ * this method, you do not need to set the `deviceId` parameter.
*
- * @param config Sets the camera capturer configuration. See CameraCapturerConfiguration.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -6989,11 +9265,22 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int setCameraCapturerConfiguration(const CameraCapturerConfiguration& config) = 0;
/**
- * Get an custom video track id created by internal,which could used to publish or preview
+ * @brief Creates a custom video track.
+ *
+ * @details
+ * To publish a custom video source, see the following steps: 1. Call this method to create a video
+ * track and get the video track ID.
+ * 2. Call `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to join the channel. In `ChannelMediaOptions`, set
+ * `customVideoTrackId` to the video track ID that you want to publish, and set
+ * `publishCustomVideoTrack` to `true`.
+ * 3. Call `pushVideoFrame` and specify `videoTrackId` as the video track ID set in step 2. You can
+ * then publish the corresponding custom video source in the channel.
*
* @return
- * - > 0: the useable video track id.
- * - < 0: Failure.
+ * - If the method call is successful, the video track ID is returned as the unique identifier of
+ * the video track.
+ * - If the method call fails, 0xffffffff is returned.
*/
virtual video_track_id_t createCustomVideoTrack() = 0;
@@ -7007,9 +9294,10 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual video_track_id_t createCustomEncodedVideoTrack(const SenderOptions& sender_option) = 0;
/**
- * destroy a created custom video track id
+ * @brief Destroys the specified video track.
+ *
+ * @param video_track_id The video track ID returned by calling the `createCustomVideoTrack` method.
*
- * @param video_track_id The video track id which was created by createCustomVideoTrack
* @return
* - 0: Success.
* - < 0: Failure.
@@ -7028,9 +9316,21 @@ class IRtcEngine : public agora::base::IEngineBase {
#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__)
/**
- * Switches between front and rear cameras.
+ * @brief Switches between front and rear cameras.
+ *
+ * @details
+ * You can call this method to dynamically switch cameras based on the actual camera availability
+ * during the app's runtime, without having to restart the video stream or reconfigure the video
+ * source.
+ * Call timing: This method must be called after the camera is successfully enabled, that is, after
+ * the SDK triggers the `onLocalVideoStateChanged` callback and returns the local video state as
+ * `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ *
+ * @note
+ * - This method only switches the camera for the video stream captured by the first camera, that is,
+ * the video source set to `VIDEO_SOURCE_CAMERA` (0) when calling `startCameraCapture`.
+ * - This method is for Android and iOS only.
*
- * @note This method applies to Android and iOS only.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -7038,65 +9338,127 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int switchCamera() = 0;
/**
- * Checks whether the camera zoom function is supported.
+ * @brief Checks whether the device supports camera zoom.
+ *
+ * @details
+ * Call timing: This method must be called after the SDK triggers the `onLocalVideoStateChanged`
+ * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ *
+ * @note This method is for Android and iOS only.
*
* @return
- * - true: The camera zoom function is supported.
- * - false: The camera zoom function is not supported.
+ * - `true`: The device supports camera zoom.
+ * - `false`: The device does not support camera zoom.
*/
virtual bool isCameraZoomSupported() = 0;
/**
- * Checks whether the camera face detect is supported.
+ * @brief Checks whether the device camera supports face detection.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and
+ * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
*
* @return
- * - true: The camera face detect is supported.
- * - false: The camera face detect is not supported.
+ * - `true`: The device camera supports face detection.
+ * - `false`: The device camera does not support face detection.
*/
virtual bool isCameraFaceDetectSupported() = 0;
/**
- * Checks whether the camera flash function is supported.
+ * @brief Checks whether the device supports camera flash.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and
+ * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ * - The app enables the front camera by default. If your front camera does not support flash, this
+ * method returns false. If you want to check whether the rear camera supports the flash function,
+ * call `switchCamera` before this method.
+ * - On iPads with system version 15, even if `isCameraTorchSupported` returns `true`, you might
+ * fail to successfully enable the flash by calling `setCameraTorchOn` due to system issues.
*
* @return
- * - true: The camera flash function is supported.
- * - false: The camera flash function is not supported.
+ * - `true`: The device supports camera flash.
+ * - `false`: The device does not support camera flash.
*/
virtual bool isCameraTorchSupported() = 0;
/**
- * Checks whether the camera manual focus function is supported.
+ * @brief Check whether the device supports the manual focus function.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and
+ * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
*
* @return
- * - true: The camera manual focus function is supported.
- * - false: The camera manual focus function is not supported.
+ * - `true`: The device supports the manual focus function.
+ * - `false`: The device does not support the manual focus function.
*/
virtual bool isCameraFocusSupported() = 0;
/**
- * Checks whether the camera auto focus function is supported.
+ * @brief Checks whether the device supports the face auto-focus function.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and
+ * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
*
* @return
- * - true: The camera auto focus function is supported.
- * - false: The camera auto focus function is not supported.
+ * - `true`: The device supports the face auto-focus function.
+ * - `false`: The device does not support the face auto-focus function.
*/
virtual bool isCameraAutoFocusFaceModeSupported() = 0;
/**
- * Sets the camera zoom ratio.
+ * @brief Sets the camera zoom factor.
+ *
+ * @details
+ * For iOS devices equipped with multi-lens rear cameras, such as those featuring dual-camera
+ * (wide-angle and ultra-wide-angle) or triple-camera (wide-angle, ultra-wide-angle, and telephoto),
+ * you can call `setCameraCapturerConfiguration` first to set the `cameraFocalLengthType` as
+ * `CAMERA_FOCAL_LENGTH_DEFAULT` (0) (standard lens). Then, adjust the camera zoom factor to a value
+ * less than 1.0. This configuration allows you to capture video with an ultra-wide-angle
+ * perspective.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - You must call this method after `enableVideo`. The setting result will take effect after the
+ * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged`
+ * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ *
+ * @param factor Camera zoom factor. For devices that do not support ultra-wide-angle, the value
+ * ranges from 1.0 to the maximum zoom factor; for devices that support ultra-wide-angle, the value
+ * ranges from 0.5 to the maximum zoom factor. You can get the maximum zoom factor supported by the
+ * device by calling the `getCameraMaxZoomFactor` method.
*
- * @param factor The camera zoom factor. It ranges from 1.0 to the maximum zoom
- * supported by the camera.
* @return
- * - 0: Success.
- * - < 0: Failure.
+ * - The camera zoom `factor` value, if successful.
+ * - < 0: if the method call failed.
*/
virtual int setCameraZoomFactor(float factor) = 0;
/**
- * Sets the camera face detection.
+ * @brief Enables or disables face detection for the local user.
+ *
+ * @details
+ * Call timing: This method needs to be called after the camera is started (for example, by calling
+ * `startPreview(VIDEO_SOURCE_TYPE sourceType)` or `enableVideo` ).
+ * Related callbacks: Once face detection is enabled, the SDK triggers the `onFacePositionChanged`
+ * callback to report the face information of the local user, which includes the following:
+ * - The width and height of the local video.
+ * - The position of the human face in the local view.
+ * - The distance between the human face and the screen.
+ *
+ * @note This method is for Android and iOS only.
+ *
+ * @param enabled Whether to enable face detection for the local user:
+ * - `true`: Enable face detection.
+ * - `false`: (Default) Disable face detection.
*
- * @param enabled The camera face detection enabled.
* @return
* - 0: Success.
* - < 0: Failure.
@@ -7104,89 +9466,161 @@ class IRtcEngine : public agora::base::IEngineBase {
virtual int enableFaceDetection(bool enabled) = 0;
/**
- * Gets the maximum zoom ratio supported by the camera.
- * @return The maximum zoom ratio supported by the camera.
+ * @brief Gets the maximum zoom ratio supported by the camera.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and
+ * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ *
+ * @return
+ * The maximum zoom ratio supported by the camera.
*/
virtual float getCameraMaxZoomFactor() = 0;
/**
- * Sets the manual focus position.
+ * @brief Sets the camera manual focus position.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - You must call this method after `enableVideo`. The setting result will take effect after the
+ * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged`
+ * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ * - After a successful method call, the SDK triggers the `onCameraFocusAreaChanged` callback.
+ *
+ * @param positionX The horizontal coordinate of the touchpoint in the view.
+ * @param positionY The vertical coordinate of the touchpoint in the view.
*
- * @param positionX The horizontal coordinate of the touch point in the view.
- * @param positionY The vertical coordinate of the touch point in the view.
* @return
* - 0: Success.
* - < 0: Failure.
- */
+ */
virtual int setCameraFocusPositionInPreview(float positionX, float positionY) = 0;
/**
- * Enables the camera flash.
+ * @brief Enables the camera flash.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - You must call this method after `enableVideo`. The setting result will take effect after the
+ * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged`
+ * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ *
+ * @param isOn Whether to turn on the camera flash:
+ * - `true`: Turn on the flash.
+ * - `false`: (Default) Turn off the flash.
*
- * @param isOn Determines whether to enable the camera flash.
- * - true: Enable the flash.
- * - false: Do not enable the flash.
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setCameraTorchOn(bool isOn) = 0;
/**
- * Enables the camera auto focus face function.
+ * @brief Enables the camera auto-face focus function.
+ *
+ * @details
+ * By default, the SDK disables face autofocus on Android and enables face autofocus on iOS. To set
+ * face autofocus, call this method.
+ * Call timing: This method must be called after the SDK triggers the `onLocalVideoStateChanged`
+ * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ *
+ * @note This method is for Android and iOS only.
*
- * @param enabled Determines whether to enable the camera auto focus face mode.
- * - true: Enable the auto focus face function.
- * - false: Do not enable the auto focus face function.
+ * @param enabled Whether to enable face autofocus:
+ * - `true`: Enable face autofocus.
+ * - `false`: Disable face autofocus.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
*/
virtual int setCameraAutoFocusFaceModeEnabled(bool enabled) = 0;
- /** Checks whether the camera exposure function is supported.
- *
- * Ensure that you call this method after the camera starts, for example, by calling `startPreview` or `joinChannel`.
+ /**
+ * @brief Checks whether the device supports manual exposure.
*
* @since v2.3.2.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and
+ * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ *
* @return
- *
- *
true: The device supports the camera exposure function.
- *
false: The device does not support the camera exposure function.
- *
+ * - `true`: The device supports manual exposure.
+ * - `false`: The device does not support manual exposure.
*/
virtual bool isCameraExposurePositionSupported() = 0;
- /** Sets the camera exposure position.
- *
- * Ensure that you call this method after the camera starts, for example, by calling `startPreview` or `joinChannel`.
+ /**
+ * @brief Sets the camera exposure position.
*
- * A successful setCameraExposurePosition method call triggers the {@link IRtcEngineEventHandler#onCameraExposureAreaChanged onCameraExposureAreaChanged} callback on the local client.
* @since v2.3.2.
- * @param positionXinView The horizontal coordinate of the touch point in the view.
- * @param positionYinView The vertical coordinate of the touch point in the view.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - You must call this method after `enableVideo`. The setting result will take effect after the
+ * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged`
+ * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ * - After a successful method call, the SDK triggers the `onCameraExposureAreaChanged` callback.
+ *
+ * @param positionXinView The horizontal coordinate of the touchpoint in the view.
+ * @param positionYinView The vertical coordinate of the touchpoint in the view.
*
* @return
- *