Mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-01-21 09:46:12 +01:00)
✨ feat: Implement WebRTC messaging and audio handling in the WebRTC service
parent cf4b73b5e3
commit 9a33292f88

8 changed files with 674 additions and 137 deletions
@@ -1,75 +1,44 @@
import { useRef, useCallback } from 'react';
import { WebRTCService } from '../services/WebRTC/WebRTCService';
import type { RTCMessage } from '~/common';
import useWebSocket from './useWebSocket';

// Input below this level (in dB) for longer than SILENCE_DURATION (ms) is
// treated as the end of the user's utterance.
const SILENCE_THRESHOLD = -50;
const SILENCE_DURATION = 1000;

const useWebRTC = () => {
  const { sendMessage } = useWebSocket();
  const localStreamRef = useRef<MediaStream | null>(null);
  const audioContextRef = useRef<AudioContext | null>(null);
  const analyserRef = useRef<AnalyserNode | null>(null);
  const silenceStartTime = useRef<number | null>(null);
  const isProcessingRef = useRef(false);
  const webrtcServiceRef = useRef<WebRTCService | null>(null);

  const log = (msg: string) => console.log(`[WebRTC ${new Date().toISOString()}] ${msg}`);

  // Routes messages arriving from the WebRTC service to the WebSocket layer.
  const handleRTCMessage = useCallback(
    (message: RTCMessage) => {
      switch (message.type) {
        case 'audio-chunk':
          sendMessage({ type: 'processing-start' });
          break;
        case 'transcription':
        case 'llm-response':
        case 'tts-chunk':
          // TODO: Handle streaming responses
          break;
      }
    },
    [sendMessage],
  );

  // Samples the analyser on every animation frame and asks the server for a
  // response once the input has stayed below the silence threshold long enough.
  const processAudioLevel = () => {
    if (!analyserRef.current || !isProcessingRef.current) {
      return;
    }

    const dataArray = new Float32Array(analyserRef.current.frequencyBinCount);
    analyserRef.current.getFloatFrequencyData(dataArray);
    const average = dataArray.reduce((a, b) => a + b) / dataArray.length;

    if (average < SILENCE_THRESHOLD) {
      if (!silenceStartTime.current) {
        silenceStartTime.current = Date.now();
        log(`Silence started: ${average}dB`);
      } else if (Date.now() - silenceStartTime.current > SILENCE_DURATION) {
        log('Silence threshold reached - requesting response');
        sendMessage({ type: 'request-response' });
        silenceStartTime.current = null;
      }
    } else {
      silenceStartTime.current = null;
    }

    requestAnimationFrame(processAudioLevel);
  };

  // Captures the microphone, starts silence detection, and initializes the call.
  const startLocalStream = async () => {
    try {
      log('Starting audio capture');
      localStreamRef.current = await navigator.mediaDevices.getUserMedia({ audio: true });

      audioContextRef.current = new AudioContext();
      const source = audioContextRef.current.createMediaStreamSource(localStreamRef.current);
      analyserRef.current = audioContextRef.current.createAnalyser();

      source.connect(analyserRef.current);
      isProcessingRef.current = true;
      processAudioLevel();

      log('Audio capture started');
      webrtcServiceRef.current = new WebRTCService(handleRTCMessage);
      await webrtcServiceRef.current.initializeCall();
      sendMessage({ type: 'call-start' });
    } catch (error) {
      log(`Error: ${error instanceof Error ? error.message : 'Unknown error'}`);
      console.error(error);
      throw error;
    }
  };

  // Stops capture, releases audio resources, and ends the WebRTC call.
  const stopLocalStream = useCallback(() => {
    log('Stopping audio capture');
    isProcessingRef.current = false;
    audioContextRef.current?.close();
    localStreamRef.current?.getTracks().forEach((track) => track.stop());

    localStreamRef.current = null;
    audioContextRef.current = null;
    analyserRef.current = null;
    silenceStartTime.current = null;

    webrtcServiceRef.current?.endCall();
    webrtcServiceRef.current = null;
    sendMessage({ type: 'call-ended' });
  }, [sendMessage]);

  return { startLocalStream, stopLocalStream };
};
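The hook only exercises the `type` field of `RTCMessage` and three members of `WebRTCService`; their full definitions live in other files of this commit and are not shown in this hunk. A minimal sketch of the shapes the hook assumes, not the repository's actual definitions:

// Hypothetical sketch only: the payload field is an assumption, since the hook
// above only switches on the `type` discriminant.
export type RTCMessage = {
  type: 'audio-chunk' | 'transcription' | 'llm-response' | 'tts-chunk';
  payload?: unknown;
};

// Assumed surface of WebRTCService, inferred from how useWebRTC calls it:
// constructed with a message handler, then initializeCall() / endCall().
export declare class WebRTCService {
  constructor(onMessage: (message: RTCMessage) => void);
  initializeCall(): Promise<void>;
  endCall(): void;
}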
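A minimal usage sketch, assuming the hook is default-exported from the hooks directory; the component name, import path, and UI below are illustrative and not part of this commit:

import { useState } from 'react';
import useWebRTC from '~/hooks/useWebRTC'; // path and default export are assumptions

// Illustrative component: starts audio capture and the call on demand,
// and tears everything down when the user ends the call.
export default function CallButton() {
  const { startLocalStream, stopLocalStream } = useWebRTC();
  const [inCall, setInCall] = useState(false);

  const toggleCall = async () => {
    if (inCall) {
      stopLocalStream();
      setInCall(false);
      return;
    }
    try {
      await startLocalStream();
      setInCall(true);
    } catch {
      // getUserMedia was denied or call setup failed; stay out of the call.
      setInCall(false);
    }
  };

  return <button onClick={toggleCall}>{inCall ? 'End call' : 'Start call'}</button>;
}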