import { AgoraRTCError } from '@agora-js/shared';
import { CheckVisibleResult } from '@agora-js/shared';
import { ElectronDesktopCapturerSource } from '@agora-js/shared';
import { EventEmitter } from '@agora-js/shared';
import { IAudioProcessor } from 'agora-rte-extension';
import type { IAudioProcessorContext } from 'agora-rte-extension';
import { IBaseProcessor } from 'agora-rte-extension';
import type { IProcessorContext } from 'agora-rte-extension';
import { isElectron } from '@agora-js/shared';
import type { Kind } from 'agora-rte-extension';
import type { NetworkQuality } from '@agora-js/shared';
import type { ProcessorStats } from 'agora-rte-extension';
import { PromiseMutex } from '@agora-js/shared';
import { RequiredOnlyOneOf } from '@agora-js/shared';
import { SDKStore } from '@agora-js/shared';
import { UID } from '@agora-js/shared';
import { Usage } from 'agora-rte-extension';
import type { UsageWithDirection } from 'agora-rte-extension';

export declare const __TRACK_LIST__: Track[];

export declare function addTrack(track: Track): void;

export declare interface AgoraRTCCompatibility {
    getDisplayMedia: boolean;
    getStreamFromExtension: boolean;
    supportUnifiedPlan: boolean;
    supportMinBitrate: boolean;
    supportSetRtpSenderParameters: boolean;
    supportDualStream: boolean;
    webAudioMediaStreamDest: boolean;
    supportReplaceTrack: boolean;
    supportWebGL: boolean;
    webAudioWithAEC: boolean;
    supportRequestFrame: boolean;
    supportShareAudio: boolean;
    supportDualStreamEncoding: boolean;
    supportDataChannel: boolean;
    supportPCSetConfiguration: boolean;
    supportWebRTCEncodedTransform: boolean;
    supportWebRTCInsertableStream: boolean;
    supportRequestVideoFrameCallback: boolean;
    supportWebCrypto: boolean;
}

declare class AgoraRTCPlayer extends VideoPlayer {
    private container?;
    private slot;
    constructor(config: PlayerConfig);
    updateConfig(config: PlayerConfig): void;
    updateVideoTrack(track?: MediaStreamTrack): void;
    play(sessionId?: string): void;
    getCurrentFrame(): ImageData;
    getCurrentFrameToUint8Array(type: string, quality?: number): Promise<Uint8Array>;
    destroy(): void;
    private createElements;
    private mountedVideoElement;
    private unmountedVideoElement;
    protected resetVideoElement(): void;
    getContainerElement(): HTMLDivElement | undefined;
}

/**
 * Statistics of the call, which can be retrieved by calling [AgoraRTCClient.getRTCStats]{@link IAgoraRTCClient.getRTCStats}.
 */
export declare interface AgoraRTCStats {
    /**
     * Call duration in seconds.
     */
    Duration: number;
    /**
     * The total bitrate (bps) of the received audio and video, represented by an instantaneous value.
     */
    RecvBitrate: number;
    /**
     * The total number of bytes received, represented by an aggregate value.
     */
    RecvBytes: number;
    /**
     * The total bitrate (bps) of the sent audio and video, represented by an instantaneous value.
     */
    SendBitrate: number;
    /**
     * The total number of bytes sent, represented by an aggregate value.
     */
    SendBytes: number;
    /**
     * The number of users in the channel.
     *
     * - Communication profile: The number of users in the channel.
     * - Live Broadcast profile:
     *   - If the local user is an audience: The number of users in the channel = The number of hosts in the channel + 1.
     *   - If the local user is a host: The number of users in the channel = The number of hosts in the channel.
     */
    UserCount: number;
    /**
     * RTT (Round-Trip Time) between the SDK and Agora's edge server, in ms.
     */
    RTT: number;
    /**
     * The estimated bandwidth (Kbps) of the uplink network.
     */
    OutgoingAvailableBandwidth: number;
}
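/*
 * Usage sketch (illustrative, not part of the declarations): reading call
 * statistics from a joined client. Assumes `client` is an `IAgoraRTCClient`
 * that has already joined a channel; `getRTCStats()` is the public API this
 * interface documents.
 *
 *   const stats: AgoraRTCStats = client.getRTCStats();
 *   console.log(`in call for ${stats.Duration}s with ${stats.UserCount} user(s)`);
 *   console.log(`RTT to edge server: ${stats.RTT} ms`);
 *   console.log(`uplink estimate: ${stats.OutgoingAvailableBandwidth} Kbps`);
 */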
export declare enum AUDIO_CONTEXT_EVENT {
    IOS_15_16_INTERRUPTION_START = "ios15_16-interruption-start",
    IOS_15_16_INTERRUPTION_END = "ios15_16-interruption-end",
    IOS_INTERRUPTION_START = "ios-interruption-start",
    IOS_INTERRUPTION_END = "ios-interruption-end",
    STATE_CHANGE = "state-change"
}

/**
 * @ignore
 */
export declare const AUDIO_ENCODER_CONFIG_SETTINGS: {
    speech_low_quality: AudioEncoderConfiguration;
    speech_standard: AudioEncoderConfiguration;
    music_standard: AudioEncoderConfiguration;
    standard_stereo: AudioEncoderConfiguration;
    high_quality: AudioEncoderConfiguration;
    high_quality_stereo: AudioEncoderConfiguration;
};

export declare enum AUDIO_TRACK_EVENT {
    UPDATE_TRACK_SOURCE = "update-track-source"
}

declare class AudioBufferSource extends AudioSource {
    private audioBuffer;
    protected sourceNode?: AudioBufferSourceNode;
    private startPlayTime;
    private startPlayOffset;
    private pausePlayTime;
    private options;
    private currentLoopCount;
    private currentPlaybackSpeed;
    set currentState(state: AudioSourceState);
    get currentState(): AudioSourceState;
    private _currentState;
    constructor(buffer: AudioBuffer, options?: AudioSourceOptions);
    createWebAudioDiagram(): GainNode;
    get duration(): number;
    get playbackSpeed(): number;
    get currentTime(): number;
    updateOptions(options: AudioSourceOptions): void;
    startProcessAudioBuffer(): void;
    pauseProcessAudioBuffer(): void;
    seekAudioBuffer(time: number): void;
    resumeProcessAudioBuffer(): void;
    stopProcessAudioBuffer(): void;
    destroy(): void;
    setAudioBufferPlaybackSpeed(speed: number): void;
    private startSourceNode;
    private createSourceNode;
    private handleSourceNodeEnded;
    private reset;
}

export declare const audioContextState: AudioState;

declare class AudioElementPlayCenter {
    onAutoplayFailed?: () => void;
    private elementMap;
    private elementStateMap;
    private elementsNeedToResume;
    private sinkIdMap;
    constructor();
    setSinkID(trackId: string, deviceID: string): Promise<void>;
    play(track: MediaStreamTrack, trackId: string, volume: number, sessionId?: string): void;
    updateTrack(trackId: string, track: MediaStreamTrack): void;
    isPlaying(trackId: string): boolean;
    setVolume(trackId: string, volume: number): void;
    stop(trackId: string): void;
    private bindAudioElementEvents;
    getPlayerState(trackId: string): string;
    private autoResumeAudioElement;
    autoResumeAfterInterruption: (force?: boolean) => void;
    private autoResumeAfterInterruptionOnIOS15_16;
}

export declare const audioElementPlayCenter: AudioElementPlayCenter;

/**
 * `AudioEncoderConfiguration` is the interface that defines the audio encoder configurations.
 *
 * You can customize the audio encoder configurations when calling [AgoraRTC.createCustomAudioTrack]{@link IAgoraRTC.createCustomAudioTrack}, [AgoraRTC.createMicrophoneAudioTrack]{@link IAgoraRTC.createMicrophoneAudioTrack} or [AgoraRTC.createBufferSourceAudioTrack]{@link IAgoraRTC.createBufferSourceAudioTrack}.
 */
export declare interface AudioEncoderConfiguration {
    /**
     * Sample rate of the audio (Hz).
     */
    sampleRate?: number;
    /**
     * Sample size of the audio.
     */
    sampleSize?: number;
    /**
     * Whether to enable stereo.
     */
    stereo?: boolean;
    /**
     * Bitrate of the audio (Kbps).
     */
    bitrate?: number;
}
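/*
 * Usage sketch (illustrative, not part of the declarations): passing a custom
 * `AudioEncoderConfiguration` when creating a microphone track. Assumes the
 * SDK entry point `AgoraRTC` from `agora-rtc-sdk-ng`; the field values are
 * arbitrary examples.
 *
 *   const micTrack = await AgoraRTC.createMicrophoneAudioTrack({
 *       encoderConfig: {
 *           sampleRate: 48000, // Hz
 *           stereo: true,
 *           bitrate: 128,      // Kbps
 *       },
 *   });
 */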
/**
 * The preset audio encoder configurations.
 *
 * You can pass the preset audio encoder configurations when calling the following methods:
 * - [AgoraRTC.createCustomAudioTrack]{@link IAgoraRTC.createCustomAudioTrack}
 * - [AgoraRTC.createMicrophoneAudioTrack]{@link IAgoraRTC.createMicrophoneAudioTrack}
 * - [AgoraRTC.createBufferSourceAudioTrack]{@link IAgoraRTC.createBufferSourceAudioTrack}
 *
 * The following table lists all the preset audio profiles. The SDK uses `"music_standard"` by default.
 *
 * | Audio Profile | Configurations |
 * | -------- | --------------- |
 * |`"speech_low_quality"`|Sample rate 16 kHz, mono, encoding bitrate 24 Kbps|
 * |`"speech_standard"`|Sample rate 32 kHz, mono, encoding bitrate 24 Kbps|
 * |`"music_standard"`|Sample rate 48 kHz, mono, encoding bitrate 32 Kbps|
 * |`"standard_stereo"`|Sample rate 48 kHz, stereo, encoding bitrate 64 Kbps|
 * |`"high_quality"`|Sample rate 48 kHz, mono, encoding bitrate 128 Kbps|
 * |`"high_quality_stereo"`|Sample rate 48 kHz, stereo, encoding bitrate 192 Kbps|
 * @public
 */
export declare type AudioEncoderConfigurationPreset = keyof typeof AUDIO_ENCODER_CONFIG_SETTINGS;

export declare interface AudioPlaybackOptions {
    origin?: boolean;
    mixing?: boolean;
    effect?: boolean;
}

export declare class AudioProcessorContext extends EventEmitter implements IAudioProcessorContext {
    private constraintsMap;
    private statsRegistry;
    private readonly audioContext;
    private readonly trackId;
    private readonly direction;
    private usageRegistry;
    private _chained;
    set chained(chained: boolean);
    get chained(): boolean;
    constructor(audioContext: AudioContext, trackId: string, direction: "local" | "remote");
    getConstraints(): Promise<MediaTrackConstraints>;
    getAudioContext(): AudioContext;
    requestApplyConstraints(constraints: MediaTrackConstraints, processor: IBaseProcessor): Promise<void>;
    requestRevertConstraints(processor: IBaseProcessor): Promise<void>;
    registerStats(processor: IBaseProcessor, type: string, cb: () => any): void;
    unregisterStats(processor: IBaseProcessor, type: string): void;
    gatherStats(): ProcessorStats[];
    registerUsage(processor: IBaseProcessor, cb: () => Usage): void;
    unregisterUsage(processor: IBaseProcessor): void;
    gatherUsage(): Promise<UsageWithDirection[]>;
    getDirection(): "local" | "remote";
}

export declare class AudioProcessorDestination extends EventEmitter implements IAudioProcessor {
    name: string;
    ID: string;
    private inputTrack?;
    private inputNode?;
    private readonly audioProcessorContext;
    _source?: IAudioProcessor;
    constructor(audioProcessorContext: AudioProcessorContext);
    get kind(): Kind;
    get enabled(): boolean;
    pipe(): IAudioProcessor;
    unpipe(): void;
    enable(): void;
    disable(): void;
    reset(): void;
    updateInput(inputOptions: {
        track?: MediaStreamTrack;
        node?: AudioNode;
        context: IAudioProcessorContext;
    }): void;
}
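/*
 * Usage sketch (illustrative, not part of the declarations): how an audio
 * extension processor is chained into a local track's pipeline; the
 * `AudioProcessorDestination` above is the terminal node of that chain.
 * Assumes `track` is an `ILocalAudioTrack` and `processor` is an
 * `IAudioProcessor` obtained from an `agora-rte-extension`-based extension.
 *
 *   track.pipe(processor).pipe(track.processorDestination);
 *   processor.enable();
 *
 * Unpiping restores the direct source-to-destination wiring:
 *
 *   track.unpipe();
 *   track.pipe(track.processorDestination);
 */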
declare abstract class AudioSource extends EventEmitter {
    outputNode: GainNode;
    outputTrack?: MediaStreamTrack;
    isPlayed: boolean;
    protected abstract sourceNode?: AudioNode;
    context: AudioContext;
    private audioBufferNode?;
    private destNode?;
    private audioOutputLevel;
    protected volumeLevelAnalyser: VolumeLevelAnalyser;
    private _processedNode;
    get processSourceNode(): AudioNode | undefined;
    set processedNode(node: AudioNode | undefined);
    get processedNode(): AudioNode | undefined;
    protected playNode: AudioNode;
    protected isDestroyed: boolean;
    protected onNoAudioInput?: () => void;
    protected isNoAudioInput: boolean;
    private _noAudioInputCount;
    constructor();
    startGetAudioBuffer(bufferSize: number): void;
    stopGetAudioBuffer(): void;
    createOutputTrack(): MediaStreamTrack;
    play(dest?: AudioNode): void;
    stop(): void;
    getAccurateVolumeLevel(): number;
    checkHasAudioInput(times?: number): Promise<boolean>;
    getAudioVolume(): number;
    setVolume(level: number): void;
    destroy(): void;
    protected disconnect(): void;
    protected connect(): void;
}

export declare enum AudioSourceEvents {
    AUDIO_SOURCE_STATE_CHANGE = "audio_source_state_change",
    RECEIVE_TRACK_BUFFER = "receive_track_buffer",
    ON_AUDIO_BUFFER = "on_audio_buffer",
    UPDATE_SOURCE = "update_source"
}

/**
 * Options for processing the audio buffer. You need to set the options for processing the audio buffer when calling [startProcessAudioBuffer]{@link IBufferSourceAudioTrack.startProcessAudioBuffer}.
 */
export declare interface AudioSourceOptions {
    /**
     * How many times the audio loops.
     */
    cycle?: number;
    /**
     * Whether to loop the audio infinitely.
     */
    loop?: boolean;
    /**
     * The playback position (seconds).
     */
    startPlayTime?: number;
}

/**
 * Processing state of the audio buffer:
 * - `"stopped"`: The SDK stops processing the audio buffer. Reasons may include:
 *   - The SDK finishes processing the audio buffer.
 *   - The user manually stops the processing of the audio buffer.
 * - `"playing"`: The SDK is processing the audio buffer.
 * - `"paused"`: The SDK pauses processing the audio buffer.
 *
 * You can get the state with [BufferSourceAudioTrack.on("source-state-change")]{@link IBufferSourceAudioTrack.event_source_state_change}.
 */
export declare type AudioSourceState = "stopped" | "playing" | "paused";
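/*
 * Usage sketch (illustrative, not part of the declarations): controlling
 * buffer playback with `AudioSourceOptions` and observing `AudioSourceState`
 * transitions. Assumes `bufferTrack` is an `IBufferSourceAudioTrack` created
 * elsewhere via `AgoraRTC.createBufferSourceAudioTrack`.
 *
 *   bufferTrack.on("source-state-change", (state: AudioSourceState) => {
 *       console.log("buffer source state:", state); // "playing" | "paused" | "stopped"
 *   });
 *
 *   bufferTrack.startProcessAudioBuffer({ cycle: 2, startPlayTime: 10 });
 *   bufferTrack.pauseProcessAudioBuffer();
 *   bufferTrack.resumeProcessAudioBuffer();
 *   bufferTrack.stopProcessAudioBuffer();
 */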
declare class AudioState extends EventEmitter {
    prevState: AudioContextState | "interrupted" | undefined;
    curState: AudioContextState | "interrupted" | undefined;
    currentTime?: number;
    currentTimeStuckAt?: number;
    private interruptDetectorTrack?;
    get duringInterruption(): boolean;
    private onLocalAudioTrackMute;
    private onLocalAudioTrackUnmute;
    bindInterruptDetectorTrack(track: MicrophoneAudioTrack): void;
    unbindInterruptDetectorTrack(track: MicrophoneAudioTrack): void;
}

export declare function audioTimerLoop(callback: (time: number) => any, frequency: number): () => void;

declare class AudioTrackSource extends AudioSource {
    protected sourceNode: MediaStreamAudioSourceNode;
    track: MediaStreamTrack;
    clonedTrack?: MediaStreamTrack;
    private audioElement;
    private isCurrentTrackCloned;
    private isRemoteTrack;
    private originVolumeLevelAnalyser?;
    get isFreeze(): boolean;
    constructor(track: MediaStreamTrack, isRemoteTrack?: boolean, originTrack?: MediaStreamTrack);
    private rebuildWebAudio;
    updateTrack(track: MediaStreamTrack): void;
    destroy(): void;
    createMediaStreamSourceNode(track: MediaStreamTrack): MediaStreamAudioSourceNode;
    updateOriginTrack(originTrack: MediaStreamTrack): void;
    getOriginVolumeLevel(): number;
}

declare class AutoPlayGestureEventEmitter extends EventEmitter {
    onAutoplayFailed?: () => void;
    onAudioAutoplayFailed?: () => void;
}

export declare const autoPlayGestureEventEmitter: AutoPlayGestureEventEmitter;

/**
 * @ignore
 *
 * Image enhancement options. You need to set the image enhancement options when calling [setBeautyEffect]{@link ILocalVideoTrack.setBeautyEffect}.
 */
export declare interface BeautyEffectOptions {
    /**
     * The smoothness level.
     *
     * The value range is [0.0, 1.0]. The original smoothness level is 0.0. The default value is 0.5. This parameter is usually used to remove blemishes.
     */
    smoothnessLevel?: number;
    /**
     * The brightness level.
     *
     * The value range is [0.0, 1.0]. The original brightness level is 0.0. The default value is 0.7.
     */
    lighteningLevel?: number;
    /**
     * The redness level.
     *
     * The value range is [0.0, 1.0]. The original redness level is 0.0. The default value is 0.1. This parameter adjusts the red saturation level.
     */
    rednessLevel?: number;
    /**
     * The contrast level. Use this together with {@link lighteningLevel}.
     * - 0: Low contrast level.
     * - 1: (Default) The original contrast level.
     * - 2: High contrast level.
     */
    lighteningContrastLevel?: 0 | 1 | 2;
}
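/*
 * Usage sketch (illustrative, not part of the declarations): applying
 * `BeautyEffectOptions` through `ILocalVideoTrack.setBeautyEffect`, the method
 * the doc comment above references. Assumes `videoTrack` is a local video
 * track on which the beauty extension is available.
 *
 *   await videoTrack.setBeautyEffect(true, {
 *       smoothnessLevel: 0.6,
 *       lighteningLevel: 0.7,
 *       rednessLevel: 0.1,
 *       lighteningContrastLevel: 1,
 *   });
 *
 *   // Pass `false` to turn the effect off again.
 *   await videoTrack.setBeautyEffect(false);
 */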
export declare interface BeautyWebGLParameters {
    denoiseLevel?: number;
    lightLevel?: number;
    rednessLevel?: number;
    lighteningContrastLevel?: number;
}

export declare const blob2Uint8Array: (blob: Blob) => Promise<Uint8Array>;

export declare class BufferSourceAudioTrack extends LocalAudioTrack implements IBufferSourceAudioTrack {
    source: string | File | AudioBuffer | null;
    private _bufferSource;
    get __className__(): string;
    constructor(source: string | File | AudioBuffer, bufferSource: AudioBufferSource, encodingConfig?: AudioEncoderConfiguration, trackId?: string);
    get currentState(): AudioSourceState;
    get duration(): number;
    get playbackSpeed(): number;
    getCurrentTime(): number;
    startProcessAudioBuffer(options?: AudioSourceOptions): void;
    pauseProcessAudioBuffer(): void;
    seekAudioBuffer(time: number): void;
    resumeProcessAudioBuffer(): void;
    stopProcessAudioBuffer(): void;
    close(): void;
    setAudioBufferPlaybackSpeed(speed: number): void;
}

/**
 * Configurations for the audio track from an audio file or `AudioBuffer` object. Set these configurations when calling [AgoraRTC.createBufferSourceAudioTrack]{@link IAgoraRTC.createBufferSourceAudioTrack}.
 */
export declare interface BufferSourceAudioTrackInitConfig {
    /**
     * The type of the audio source:
     * - `File`: A [File](https://developer.mozilla.org/en-US/docs/Web/API/File) object, representing a local audio file.
     * - `string`: The online audio file retrieved from an HTTPS address. Ensure the address supports HTTPS and CORS.
     * - `AudioBuffer`: An [AudioBuffer](https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer) object, representing the raw data in PCM format.
     */
    source: File | string | AudioBuffer;
    /**
     * Whether to cache the online file:
     * - `true`: Cache the online file.
     * - `false`: (default) Do not cache the online file.
     */
    cacheOnlineFile?: boolean;
    /**
     * The audio encoder configurations.
     *
     * You can set the audio encoder configurations in either of the following ways:
     * - Pass the preset audio encoder configurations by using [[AudioEncoderConfigurationPreset]].
     * - Pass your customized audio encoder configurations by using [[AudioEncoderConfiguration]].
     *
     * > Firefox does not support setting the audio encoding rate.
     */
    encoderConfig?: AudioEncoderConfiguration | AudioEncoderConfigurationPreset;
}
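/*
 * Usage sketch (illustrative, not part of the declarations): creating a
 * buffer-source audio track from an online file using
 * `BufferSourceAudioTrackInitConfig`. Assumes the SDK entry point `AgoraRTC`
 * from `agora-rtc-sdk-ng`; the URL is a placeholder.
 *
 *   const bufferTrack = await AgoraRTC.createBufferSourceAudioTrack({
 *       source: "https://example.com/audio.mp3", // HTTPS + CORS required
 *       cacheOnlineFile: true,
 *       encoderConfig: "high_quality_stereo",
 *   });
 *   bufferTrack.startProcessAudioBuffer({ loop: true });
 *   bufferTrack.play(); // local playback; publish via client.publish(bufferTrack)
 */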
export declare class CameraVideoTrack extends LocalVideoTrack implements ICameraVideoTrack {
    private _config;
    private _originalConstraints;
    private _constraints;
    _enabled: boolean;
    _deviceName: string;
    get __className__(): string;
    constructor(track: MediaStreamTrack, config: CameraVideoTrackInitConfig, constraints: MediaTrackConstraints, scalabilityConfig?: SVCConfiguration, optimizationMode?: OptimizationMode | "balanced", trackId?: string);
    setDevice(deviceId: string | RequiredOnlyOneOf<{
        facingMode: VideoFacingModeEnum;
        deviceId: string;
    }>): Promise<void>;
    private _setDeviceById;
    private _setDeviceByFacingModel;
    setEnabled(enabled: boolean, skipChangeState?: boolean): Promise<void>;
    setEncoderConfiguration(config: VideoEncoderConfiguration | VideoEncoderConfigurationPreset, doNotRenegoation?: boolean): Promise<void>;
    protected _getDefaultPlayerConfig(): Partial<PlayerConfig>;
    protected onTrackEnded(): void;
    renewMediaStreamTrack(newConstraints?: MediaTrackConstraints): Promise<void>;
    tryResumeVideoForIOS15_16WeChat: () => Promise<void>;
    close(): void;
    clone(config?: VideoEncoderConfiguration | VideoEncoderConfigurationPreset, cloneTrack?: boolean): CameraVideoTrack;
    bindProcessorContextEvents(): void;
}

/**
 * Configurations for the video track from the video captured by a camera. Set these configurations when calling [AgoraRTC.createCameraVideoTrack]{@link IAgoraRTC.createCameraVideoTrack}.
 */
export declare interface CameraVideoTrackInitConfig {
    /**
     * The video encoder configurations.
     *
     * You can set the video encoder configurations in either of the following ways:
     * - Pass the preset video encoder configurations by using [[VideoEncoderConfigurationPreset]].
     * - Pass your customized video encoder configurations by using [[VideoEncoderConfiguration]].
     * - Leave this property empty to use the SDK's default value, `"480p_1"` (resolution: 640 × 480, frame rate: 15 fps, bitrate: 500 Kbps).
     */
    encoderConfig?: VideoEncoderConfiguration | VideoEncoderConfigurationPreset;
    /**
     * Whether to use the front camera or the rear camera.
     *
     * You can use this parameter to choose between the front camera and the rear camera on a mobile device:
     * - `"user"`: The front camera.
     * - `"environment"`: The rear camera.
     */
    facingMode?: VideoFacingModeEnum;
    /**
     * Specifies the camera ID.
     *
     * You can get a list of the available cameras by calling [AgoraRTC.getCameras]{@link IAgoraRTC.getCameras}.
     */
    cameraId?: string;
    /**
     * @since 4.2.0
     *
     * The video transmission optimization mode:
     * - `"detail"`: Prioritizes video quality.
     *   - The SDK ensures high-quality images by automatically calculating a minimum bitrate based on the capturing resolution and frame rate. No matter how poor the network condition is, the sending bitrate will never be lower than the minimum value.
     *   - In most cases, the SDK does not reduce the sending resolution, but may reduce the frame rate.
     * - `"motion"`: Since 4.21.0, the SDK prioritizes video smoothness.
     *   - In poor network conditions, the SDK reduces the sending bitrate to minimize video freezes.
     *   - In most cases, the SDK does not reduce the frame rate, but may reduce the sending resolution.
     *
     * You can also change the optimization mode dynamically during a video call, a live streaming, or screen sharing. For example, during screen sharing, before you change the shared content from text to video, you can change the optimization mode from `"detail"` to `"motion"` to ensure smoothness in poor network conditions.
     *
     * > Note: This feature supports Chrome only.
     */
    optimizationMode?: OptimizationMode;
    /**
     * @ignore
     *
     * @since 4.18.0
     *
     * Configurations for Scalable Video Coding (SVC).
     *
     * You can set the configurations using one of the following options:
     * - Use the preset SVC configurations provided by the SDK through {@link SVCConfigurationPreset}.
     * - Use your custom SVC configurations through {@link SVCConfiguration}.
     */
    scalabiltyMode?: SVCConfiguration | SVCConfigurationPreset;
}
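/*
 * Usage sketch (illustrative, not part of the declarations): creating a
 * camera track with `CameraVideoTrackInitConfig`. Assumes the SDK entry
 * point `AgoraRTC` from `agora-rtc-sdk-ng`.
 *
 *   const cameraTrack = await AgoraRTC.createCameraVideoTrack({
 *       encoderConfig: "720p_1",    // preset; or a custom VideoEncoderConfiguration
 *       facingMode: "user",         // front camera on mobile devices
 *       optimizationMode: "motion", // prefer smoothness over sharpness
 *   });
 *   cameraTrack.play("local-player"); // the DOM element ID is a placeholder
 */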
export declare function checkMediaStreamTrackResolution(track: MediaStreamTrack): Promise<[number, number]>;

export declare function checkTrackState(): (target: any, propertyKey: any, descriptor: PropertyDescriptor) => TypedPropertyDescriptor<any> | undefined;

/**
 * The visibility of the `