Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[AUTO] Generate comments by iris-doc #781

Merged
merged 1 commit into from
Apr 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 24 additions & 20 deletions src/AgoraBase.ts
Original file line number Diff line number Diff line change
Expand Up @@ -327,7 +327,7 @@ export enum ErrorCodeType {
*/
ErrInvalidUserId = 121,
/**
* @ignore
* 122: Data streams decryption fails. The user might use an incorrect password to join the channel. Check the entered password, or tell the user to try rejoining the channel.
*/
ErrDatastreamDecryptionFailed = 122,
/**
Expand Down Expand Up @@ -893,23 +893,23 @@ export enum VideoCodecType {
}

/**
* @ignore
* The camera focal length types.
*/
export enum CameraFocalLengthType {
/**
* @ignore
* 0: (Default) Standard lens.
*/
CameraFocalLengthDefault = 0,
/**
* @ignore
* 1: Wide-angle lens.
*/
CameraFocalLengthWideAngle = 1,
/**
* @ignore
*/
CameraFocalLengthUrltraWide = 2,
/**
* @ignore
* 3: (For iOS only) Telephoto lens.
*/
CameraFocalLengthTelephoto = 3,
}
Expand Down Expand Up @@ -1353,15 +1353,15 @@ export class CodecCapInfo {
}

/**
* @ignore
* Focal length information supported by the camera, including the camera direction and focal length type.
*/
export class FocalLengthInfo {
/**
* @ignore
* The camera direction. See CameraDirection.
*/
cameraDirection?: number;
/**
* @ignore
* The focal length type. See CameraFocalLengthType.
*/
focalLengthType?: CameraFocalLengthType;
}
Expand Down Expand Up @@ -1985,27 +1985,29 @@ export enum CaptureBrightnessLevelType {
}

/**
* @ignore
* Camera stabilization modes.
*
* The camera stabilization effect increases in the order of 1 < 2 < 3, and the latency will also increase accordingly.
*/
export enum CameraStabilizationMode {
/**
* @ignore
* -1: (Default) Camera stabilization mode off.
*/
CameraStabilizationModeOff = -1,
/**
* @ignore
* 0: Automatic camera stabilization. The system automatically selects a stabilization mode based on the status of the camera. However, the latency is relatively high in this mode, so it is recommended not to use this enumeration.
*/
CameraStabilizationModeAuto = 0,
/**
* @ignore
* 1: (Recommended) Level 1 camera stabilization.
*/
CameraStabilizationModeLevel1 = 1,
/**
* @ignore
* 2: Level 2 camera stabilization.
*/
CameraStabilizationModeLevel2 = 2,
/**
* @ignore
* 3: Level 3 camera stabilization.
*/
CameraStabilizationModeLevel3 = 3,
/**
Expand Down Expand Up @@ -2157,11 +2159,13 @@ export enum LocalVideoStreamReason {
*/
LocalVideoStreamReasonDeviceInvalidId = 10,
/**
* @ignore
* 14: (Android only) Video capture is interrupted. Possible reasons include the following:
* The camera is being used by another app. Prompt the user to check if the camera is being used by another app.
* The current app has been switched to the background. You can use foreground services to notify the operating system and ensure that the app can still collect video when it switches to the background.
*/
LocalVideoStreamReasonDeviceInterrupt = 14,
/**
* @ignore
* 15: (Android only) The video capture device encounters an error. Prompt the user to close and restart the camera to restore functionality. If this operation does not solve the problem, check if the camera has a hardware failure.
*/
LocalVideoStreamReasonDeviceFatalError = 15,
/**
Expand Down Expand Up @@ -3864,7 +3868,7 @@ export enum AudioEffectPreset {
*/
RoomAcousticsVirtualSurroundSound = 0x02010900,
/**
* @ignore
* The audio effect of chorus. Agora recommends using this effect in chorus scenarios to enhance the sense of depth and dimension in the vocals.
*/
RoomAcousticsChorus = 0x02010d00,
/**
Expand Down Expand Up @@ -4541,11 +4545,11 @@ export enum EncryptionErrorType {
*/
EncryptionErrorEncryptionFailure = 2,
/**
* @ignore
* 3: Data stream decryption error. Ensure that the receiver and the sender use the same encryption mode and key.
*/
EncryptionErrorDatastreamDecryptionFailure = 3,
/**
* @ignore
* 4: Data stream encryption error.
*/
EncryptionErrorDatastreamEncryptionFailure = 4,
}
Expand Down Expand Up @@ -4705,7 +4709,7 @@ export enum EarMonitoringFilterType {
*/
EarMonitoringFilterNoiseSuppression = 1 << 2,
/**
* @ignore
* 1<<15: Reuse the audio filter that has been processed on the sending end for in-ear monitoring. This enumerator reduces CPU usage while increasing in-ear monitoring latency, which is suitable for latency-tolerant scenarios requiring low CPU consumption.
*/
EarMonitoringFilterReusePostProcessingFilter = 1 << 15,
}
Expand Down
50 changes: 43 additions & 7 deletions src/AgoraMediaBase.ts
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,11 @@ export enum VideoSourceType {
*/
VideoSourceTranscoded = 10,
/**
* @ignore
* 11: (For Android only) The third camera.
*/
VideoSourceCameraThird = 11,
/**
* @ignore
* 12: (For Android only) The fourth camera.
*/
VideoSourceCameraFourth = 12,
/**
Expand Down Expand Up @@ -951,6 +951,8 @@ export interface IAudioFrameObserver extends IAudioFrameObserverBase {
/**
* Retrieves the audio frame of a specified user before mixing.
*
* Due to framework limitations, this callback does not support sending processed audio data back to the SDK.
*
* @param channelId The channel ID.
* @param uid The user ID of the specified user.
* @param audioFrame The raw audio data. See AudioFrame.
Expand Down Expand Up @@ -1060,9 +1062,7 @@ export interface IVideoFrameObserver {
/**
* Occurs each time the SDK receives a video frame captured by local devices.
*
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data captured by local devices. You can then pre-process the data according to your scenarios. Once the pre-processing is complete, you can directly modify videoFrame in this callback, and set the return value to true to send the modified video data to the SDK.
* The video data that this callback gets has not been pre-processed such as watermarking, cropping, and rotating.
* If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel.
* You can get raw video data collected by the local device through this callback.
*
* @param sourceType Video source types, including cameras, screens, or media player. See VideoSourceType.
* @param videoFrame The video frame. See VideoFrame. The default value of the video frame data format obtained through this callback is as follows:
Expand All @@ -1078,6 +1078,7 @@ export interface IVideoFrameObserver {
* Occurs each time the SDK receives a video frame before encoding.
*
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data before encoding and then process the data according to your particular scenarios. After processing, you can send the processed video data back to the SDK in this callback.
* Due to framework limitations, this callback does not support sending processed video data back to the SDK.
* The video data that this callback gets has been preprocessed, with its content cropped and rotated, and the image enhanced.
*
* @param sourceType The type of the video source. See VideoSourceType.
Expand All @@ -1100,6 +1101,7 @@ export interface IVideoFrameObserver {
*
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data sent from the remote end before rendering, and then process it according to the particular scenarios.
* If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel.
* Due to framework limitations, this callback does not support sending processed video data back to the SDK.
*
* @param channelId The channel ID.
* @param remoteUid The user ID of the remote user who sends the current video frame.
Expand Down Expand Up @@ -1232,11 +1234,45 @@ export class MediaRecorderConfiguration {
}

/**
* @ignore
* Facial information observer.
*
* You can call registerFaceInfoObserver to register or unregister the IFaceInfoObserver object.
*/
export interface IFaceInfoObserver {
/**
* @ignore
* Occurs when the facial information processed by speech driven extension is received.
*
 * @param outFaceInfo Output parameter, the JSON string of the facial information processed by the speech driven extension, including the following fields:
* faces: Object sequence. The collection of facial information, with each face corresponding to an object.
 * blendshapes: Object. The collection of face capture coefficients, named according to ARKit standards, with each key-value pair representing a blendshape coefficient. The blendshape coefficient is a floating point number with a range of [0.0, 1.0].
* rotation: Object sequence. The rotation of the head, which includes the following three key-value pairs, with values as floating point numbers ranging from -180.0 to 180.0:
 * pitch: Head pitch angle. A positive value means looking down, while a negative value means looking up.
 * yaw: Head yaw angle. A positive value means turning left, while a negative value means turning right.
 * roll: Head roll angle. A positive value means tilting to the right, while a negative value means tilting to the left.
* timestamp: String. The timestamp of the output result, in milliseconds. Here is an example of JSON:
* {
* "faces":[{
* "blendshapes":{
* "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0,
* "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0,
* "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0,
* "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0,
* "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0,
* "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0,
* "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0,
* "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0,
* "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0,
* "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0,
* "tongueOut":0.0
* },
* "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5},
*
* }],
* "timestamp":"654879876546"
* }
*
* @returns
* true : Facial information JSON parsing successful. false : Facial information JSON parsing failed.
*/
onFaceInfo?(outFaceInfo: string): void;
}
Expand Down
20 changes: 18 additions & 2 deletions src/IAgoraMediaEngine.ts
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,17 @@ export abstract class IMediaEngine {
): number;

/**
* @ignore
* Registers a facial information observer.
*
* You can call this method to register the onFaceInfo callback to receive the facial information processed by Agora speech driven extension. When calling this method to register a facial information observer, you can register callbacks in the IFaceInfoObserver class as needed. After successfully registering the facial information observer, the SDK triggers the callback you have registered when it captures the facial information converted by the speech driven extension.
* Ensure that you call this method before joining a channel.
* Before calling this method, you need to make sure that the speech driven extension has been enabled by calling enableExtension.
*
* @param observer Facial information observer, see IFaceInfoObserver.
*
* @returns
* 0: Success.
* < 0: Failure.
*/
abstract registerFaceInfoObserver(observer: IFaceInfoObserver): number;

Expand Down Expand Up @@ -313,7 +323,13 @@ export abstract class IMediaEngine {
): number;

/**
* @ignore
* Unregisters a facial information observer.
*
* @param observer Facial information observer, see IFaceInfoObserver.
*
* @returns
* 0: Success.
* < 0: Failure.
*/
abstract unregisterFaceInfoObserver(observer: IFaceInfoObserver): number;
}
Loading
Loading