Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion examples/mobile-client/fishjam-chat/app.json
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,9 @@
}
}
],
["../common/plugins/build/withLocalWebrtcPaths.js"]
[
"../common/plugins/build/withLocalWebrtcPaths.js"
]
],
"experiments": {
"typedRoutes": true
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { RTCView, usePeers } from '@fishjam-cloud/react-native-client';
import { RTCView, usePeers, useVAD } from '@fishjam-cloud/react-native-client';
import React, { useCallback, useMemo } from 'react';
import type { ListRenderItemInfo } from 'react-native';
import { FlatList, StyleSheet, Text, View } from 'react-native';
Expand All @@ -21,6 +21,9 @@ const GridTrackItem = ({
peer.track?.stream && !peer.track?.metadata?.paused
? peer.track.stream
: null;
const vadStatus = useVAD({ peerIds: [peer.peerId] });
const isPeerSpeaking =
vadStatus[peer.peerId] && peer.track?.metadata?.type === 'camera';

return (
<View style={styles.trackContainer}>
Expand All @@ -31,6 +34,10 @@ const GridTrackItem = ({
backgroundColor: peer.isLocal
? BrandColors.seaBlue60
: BrandColors.darkBlue60,
borderColor: isPeerSpeaking
? BrandColors.seaBlue80
: BrandColors.darkBlue100,
borderWidth: isPeerSpeaking ? 3 : 2,
},
]}>
{mediaStream ? (
Expand Down
4 changes: 2 additions & 2 deletions examples/mobile-client/fishjam-chat/utils/tracks.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import type { PeerWithTracks, Track } from '@fishjam-cloud/react-native-client';
import type { PeerId, PeerWithTracks, Track } from '@fishjam-cloud/react-native-client';

export type GridTrack = {
track: Track | null;
peerId: string;
peerId: PeerId;
isLocal: boolean;
isVadActive: boolean;
aspectRatio: number | null;
Expand Down
68 changes: 68 additions & 0 deletions packages/react-client/src/hooks/useLocalVAD.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
import { useContext, useEffect, useState } from "react";

import { FishjamClientContext } from "../contexts/fishjamClient";
import type { PeerId } from "../types/public";
import { usePeers } from "./usePeers";

// This is a dBov-to-linear conversion. -32 dBov number is taken from backend VAD threshold
// formula for dBov to linear conversion: linear = 10 ^ (dBov / 20)
// So -32 dBov = 10^(-32/20) ≈ 0.025. This is the minimum audio level considered "speech".
const THRESHOLD = 10 ** (-32 / 20);

// Number of consecutive "silence" ticks before we consider speech to have stopped. Helps with smoothing out brief pauses in speech.
const SILENCE_DEBOUNCE_TICKS = 2;

/**
 * Client-side voice activity detection for the local peer.
 *
 * Polls the local microphone's audio level every 100ms and derives a speech/silence
 * state from it. A level above ~0.025 (approximately -32 dBov, scaled to [0, 1])
 * is treated as speech. Silence is debounced over 2 consecutive ticks (~200ms)
 * to prevent rapid flapping.
 *
 * This is purely client-side — it does not signal other peers. Remote participants
 * receive the local peer's VAD status via backend `vadNotification` messages.
 *
 * @internal Used by `useVAD` when the local peer's id is included in `peerIds`.
 * @param options.disabled - When true, polling is suspended and an empty record is returned.
 * @returns A record mapping the local peer's id to its current speaking state,
 * or an empty object if `options.disabled` is true, the local peer is not available, or no microphone track is found.
 */
export const useLocalVAD = (options: { disabled: boolean }): Record<PeerId, boolean> => {
  const fishjamClient = useContext(FishjamClientContext);
  const [isSpeaking, setIsSpeaking] = useState(false);
  const { localPeer } = usePeers();
  const localPeerId = localPeer?.id;
  const microphoneTrackId = localPeer?.microphoneTrack?.trackId;

  useEffect(() => {
    if (options.disabled || !localPeerId || !microphoneTrackId) return;

    let silenceTicks = 0;
    // Guards against the poll that is mid-`await` when cleanup runs: in that case
    // `clearTimeout` has nothing pending to clear, and without this flag the
    // resolved poll would reschedule itself and keep polling (and calling
    // `setIsSpeaking`) forever after unmount or a deps change.
    let cancelled = false;
    let timeoutId: ReturnType<typeof setTimeout>;

    const poll = async () => {
      const trackAudio = await fishjamClient?.current?.getLocalTrackAudioLevel(microphoneTrackId);
      if (cancelled) return;

      if (trackAudio != null && trackAudio.level > THRESHOLD) {
        silenceTicks = 0;
        setIsSpeaking(true);
      } else {
        silenceTicks += 1;
        if (silenceTicks >= SILENCE_DEBOUNCE_TICKS) {
          setIsSpeaking(false);
        }
      }

      timeoutId = setTimeout(poll, 100);
    };

    timeoutId = setTimeout(poll, 0);

    return () => {
      cancelled = true;
      clearTimeout(timeoutId);
      // Reset so a stale "speaking" state does not leak across peers/tracks when deps change.
      setIsSpeaking(false);
    };
  }, [options.disabled, fishjamClient, localPeerId, microphoneTrackId]);

  if (!localPeerId || options.disabled || !microphoneTrackId) return {};
  return { [localPeerId]: isSpeaking };
};
67 changes: 41 additions & 26 deletions packages/react-client/src/hooks/useVAD.ts
Original file line number Diff line number Diff line change
@@ -1,90 +1,105 @@
import type { TrackContext, VadStatus } from "@fishjam-cloud/ts-client";
import type { FishjamTrackContext, VadStatus } from "@fishjam-cloud/ts-client";
import { useContext, useEffect, useMemo, useState } from "react";

import { FishjamClientStateContext } from "../contexts/fishjamState";
import type { PeerId, TrackId } from "../types/public";
import { useLocalVAD } from "./useLocalVAD";

/**
* Voice activity detection. Use this hook to check if voice is detected in audio track for given peer(s).
* Voice activity detection. Use this hook to check if voice is detected in the audio track for given peer(s).
*
* @param options - Options object containing `peerIds` - a list of ids of peers to subscribe to for voice activity detection notifications.
* Remote peer VAD is driven by `vadNotification` messages from the backend.
* If the local peer's id is included in `peerIds`, local VAD is determined client-side
* by polling the microphone's audio level (see `useLocalVAD`).
*
* @param options - Options object.
* @param options.peerIds - List of peer ids to subscribe to for VAD notifications.
* Include the local peer's id to also track whether the local user is speaking.
*
* Example usage:
* ```tsx
* import { useVAD, type PeerId } from "@fishjam-cloud/react-client";
*
* function WhoIsTalkingComponent({ peerIds }: { peerIds: PeerId[] }) {
* const peersInfo = useVAD({peerIds});
* const peersInfo = useVAD({ peerIds });
* const activePeers = (Object.keys(peersInfo) as PeerId[]).filter((peerId) => peersInfo[peerId]);
*
* return "Now talking: " + activePeers.join(", ");
* }
* ```
* @category Connection
* @group Hooks
* @returns Each key is a peerId and the boolean value indicates if voice activity is currently detected for that peer.
* @returns A record where each key is a peer id and the boolean value indicates
* whether voice activity is currently detected for that peer.
*/
export const useVAD = (options: { peerIds: ReadonlyArray<PeerId> }): Record<PeerId, boolean> => {
const { peerIds } = options;
const clientState = useContext(FishjamClientStateContext);
if (!clientState) throw Error("useVAD must be used within FishjamProvider");
const showLocalPeerVAD = useMemo(
() => (clientState.localPeer?.id ? peerIds.includes(clientState.localPeer?.id) : false),
[clientState.localPeer?.id, peerIds],
);

const micTracksWithSelectedPeerIds = useMemo(
() =>
Object.values(clientState.peers)
.filter((peer) => peerIds.includes(peer.id))
.map((peer) => ({
peerId: peer.id,
microphoneTracks: Array.from(peer.tracks.values()).filter(({ metadata }) => metadata?.type === "microphone"),
microphoneTrack: Array.from(peer.tracks.values()).find(({ metadata }) => metadata?.type === "microphone"),
})),
[clientState.peers, peerIds],
);

const getDefaultVadStatuses = () =>
micTracksWithSelectedPeerIds.reduce<Record<PeerId, Record<TrackId, VadStatus>>>(
(mappedTracks, peer) => ({
(mappedTracks, { peerId, microphoneTrack }) => ({
...mappedTracks,
[peer.peerId]: peer.microphoneTracks.reduce(
(vadStatuses, track) => ({ ...vadStatuses, [track.trackId]: track.vadStatus }),
{},
),
[peerId]: microphoneTrack ? { [microphoneTrack.trackId]: microphoneTrack.vadStatus } : {},
}),
{},
);

const [_vadStatuses, setVadStatuses] = useState<Record<PeerId, Record<TrackId, VadStatus>>>(getDefaultVadStatuses);

useEffect(() => {
const unsubs = micTracksWithSelectedPeerIds.map(({ peerId, microphoneTracks }) => {
const updateVadStatus = (track: TrackContext) => {
const unsubs = micTracksWithSelectedPeerIds.map(({ peerId, microphoneTrack }) => {
const updateVadStatus = (track: FishjamTrackContext) => {
setVadStatuses((prev) => ({
...prev,
[peerId]: { ...prev[peerId], [track.trackId]: track.vadStatus },
}));
};

microphoneTracks.forEach((track) => {
track.on("voiceActivityChanged", updateVadStatus);
});
if (microphoneTrack) {
microphoneTrack.on("voiceActivityChanged", updateVadStatus);
}

return () => {
microphoneTracks.forEach((track) => {
track.off("voiceActivityChanged", updateVadStatus);
});
if (microphoneTrack) {
microphoneTrack.off("voiceActivityChanged", updateVadStatus);
}
};
});

return () => unsubs.forEach((unsub) => unsub());
}, [micTracksWithSelectedPeerIds]);

const localVAD = useLocalVAD({ disabled: !showLocalPeerVAD });

const vadStatuses = useMemo(
() =>
Object.fromEntries(
Object.entries(_vadStatuses).map(([peerId, tracks]) => [
peerId,
Object.values(tracks).some((vad) => vad === "speech"),
]),
) satisfies Record<PeerId, boolean>,
[_vadStatuses],
({
...Object.fromEntries(
Object.entries(_vadStatuses).map(([peerId, tracks]) => [
peerId,
Object.values(tracks).some((vad) => vad === "speech"),
]),
),
...localVAD,
}) satisfies Record<PeerId, boolean>,
[_vadStatuses, localVAD],
);

return vadStatuses;
Expand Down
18 changes: 18 additions & 0 deletions packages/ts-client/src/FishjamClient.ts
Original file line number Diff line number Diff line change
Expand Up @@ -948,4 +948,22 @@ export class FishjamClient<PeerMetadata = GenericMetadata, ServerMetadata = Gene
public cleanup() {
this.reconnectManager.cleanup();
}

/**
* Returns the current audio level for a local track.
*
* The `level` represents a normalized audio level in the range 0.0–1.0,
* derived from WebRTC statistics for the given local audio track.
*
* This method returns `null` when the WebRTC layer is not initialized, when the track
* cannot be found among local tracks, or when audio statistics are not yet or no longer
* available for the track.
*
* @param trackId - The ID of the local track to query.
* @returns A promise resolving to an object containing the audio `level`, or `null`
* if the track is unknown or stats are not available.
*/
public getLocalTrackAudioLevel(trackId: string): Promise<{ level: number } | null> {
return this.webrtc?.getLocalTrackAudioLevel(trackId) ?? Promise.resolve(null);
}
}
4 changes: 4 additions & 0 deletions packages/webrtc-client/src/tracks/Local.ts
Original file line number Diff line number Diff line change
Expand Up @@ -346,4 +346,8 @@ export class Local {
localTrack.addTrackToConnection();
});
};

// Looks up the local track by id and delegates to its audio-level reader.
// Resolves to null when no local track with the given id exists, or when the
// track itself reports no audio level.
public getLocalTrackAudioLevel = async (trackId: TrackId): Promise<{ level: number } | null> => {
  const localTrack = this.localTracks[trackId];
  if (!localTrack) return null;
  return localTrack.getAudioLevel();
};
}
14 changes: 14 additions & 0 deletions packages/webrtc-client/src/tracks/LocalTrack.ts
Original file line number Diff line number Diff line change
Expand Up @@ -248,6 +248,20 @@ export class LocalTrack implements TrackCommon {
);
};

// Reads the current audio level for this track from WebRTC sender statistics.
// Scans the stats for a `media-source` report of kind `audio` carrying a numeric
// `audioLevel`. Resolves to null when there is no sender yet, when no such stats
// report exists, or when fetching stats throws.
public getAudioLevel = async (): Promise<{ level: number } | null> => {
  const sender = this.sender;
  if (!sender) return null;

  try {
    const stats = await sender.getStats();
    for (const report of stats.values()) {
      if (report.type === 'media-source' && report.kind === 'audio' && typeof report.audioLevel === 'number') {
        return { level: report.audioLevel };
      }
    }
    return null;
  } catch {
    // Stats may be unavailable transiently (e.g. connection tearing down); report "no level".
    return null;
  }
};

public createTrackVariantBitratesEvent = () => {
// TODO implement this when simulcast is supported
// return generateCustomEvent({
Expand Down
19 changes: 19 additions & 0 deletions packages/webrtc-client/src/webRTCEndpoint.ts
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,25 @@ export class WebRTCEndpoint extends (EventEmitter as new () => TypedEmitter<Requ
this.sendMediaEvent({ connect });
};

/**
* Returns the current audio level for a local audio track, if available.
*
* This method only works for local **audio** tracks that have been negotiated
* with the remote peer and for which an underlying `RTCRtpSender` and
* statistics are available.
*
* @param trackId - Identifier of the local track to query, as used when
* adding or managing local tracks on this endpoint.
* @returns A promise that resolves to `{ level: number }` when an audio
* level can be determined for the given track, or `null` if:
* - the track does not exist,
* - the track is not an audio track,
* - the track has not yet been negotiated / no sender exists
*/
public getLocalTrackAudioLevel(trackId: string): Promise<{ level: number } | null> {
return this.local.getLocalTrackAudioLevel(trackId);
}

/**
* Feeds media event received from RTC Engine to {@link WebRTCEndpoint}.
* This function should be called whenever some media event from RTC Engine
Expand Down
Loading
Loading