mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-20 10:20:15 +01:00
🎯 fix: Prevent UI De-sync By Removing Redundant States (#5333)
* fix: remove local state from Dropdown causing de-sync
* refactor: clean up STT code; avoid redundant states to prevent de-sync and side effects
* fix: reset transcript after sending final text to prevent data loss
* fix: clear timeout on component unmount to prevent memory leaks
This commit is contained in:
parent
b55e695541
commit
e309c6abef
8 changed files with 149 additions and 145 deletions
|
|
@@ -1,25 +1,72 @@
|
|||
import { useEffect, useState } from 'react';
|
||||
import { useEffect, useRef, useMemo } from 'react';
|
||||
import { useRecoilState } from 'recoil';
|
||||
import { useToastContext } from '~/Providers';
|
||||
import store from '~/store';
|
||||
import SpeechRecognition, { useSpeechRecognition } from 'react-speech-recognition';
|
||||
import useGetAudioSettings from './useGetAudioSettings';
|
||||
import { useToastContext } from '~/Providers';
|
||||
import store from '~/store';
|
||||
|
||||
const useSpeechToTextBrowser = () => {
|
||||
const useSpeechToTextBrowser = (
|
||||
setText: (text: string) => void,
|
||||
onTranscriptionComplete: (text: string) => void,
|
||||
) => {
|
||||
const { showToast } = useToastContext();
|
||||
const [languageSTT] = useRecoilState<string>(store.languageSTT);
|
||||
const [autoTranscribeAudio] = useRecoilState<boolean>(store.autoTranscribeAudio);
|
||||
const { speechToTextEndpoint } = useGetAudioSettings();
|
||||
const isBrowserSTTEnabled = speechToTextEndpoint === 'browser';
|
||||
const [isListening, setIsListening] = useState(false);
|
||||
|
||||
const lastTranscript = useRef<string | null>(null);
|
||||
const lastInterim = useRef<string | null>(null);
|
||||
const timeoutRef = useRef<NodeJS.Timeout | null>();
|
||||
const [autoSendText] = useRecoilState(store.autoSendText);
|
||||
const [languageSTT] = useRecoilState<string>(store.languageSTT);
|
||||
const [autoTranscribeAudio] = useRecoilState<boolean>(store.autoTranscribeAudio);
|
||||
|
||||
const {
|
||||
interimTranscript,
|
||||
finalTranscript,
|
||||
listening,
|
||||
browserSupportsSpeechRecognition,
|
||||
finalTranscript,
|
||||
resetTranscript,
|
||||
interimTranscript,
|
||||
isMicrophoneAvailable,
|
||||
browserSupportsSpeechRecognition,
|
||||
} = useSpeechRecognition();
|
||||
const isListening = useMemo(() => listening, [listening]);
|
||||
|
||||
useEffect(() => {
|
||||
if (interimTranscript == null || interimTranscript === '') {
|
||||
return;
|
||||
}
|
||||
|
||||
if (lastInterim.current === interimTranscript) {
|
||||
return;
|
||||
}
|
||||
|
||||
setText(interimTranscript);
|
||||
lastInterim.current = interimTranscript;
|
||||
}, [setText, interimTranscript]);
|
||||
|
||||
useEffect(() => {
|
||||
if (finalTranscript == null || finalTranscript === '') {
|
||||
return;
|
||||
}
|
||||
|
||||
if (lastTranscript.current === finalTranscript) {
|
||||
return;
|
||||
}
|
||||
|
||||
setText(finalTranscript);
|
||||
lastTranscript.current = finalTranscript;
|
||||
if (autoSendText > -1 && finalTranscript.length > 0) {
|
||||
timeoutRef.current = setTimeout(() => {
|
||||
onTranscriptionComplete(finalTranscript);
|
||||
resetTranscript();
|
||||
}, autoSendText * 1000);
|
||||
}
|
||||
|
||||
return () => {
|
||||
if (timeoutRef.current) {
|
||||
clearTimeout(timeoutRef.current);
|
||||
}
|
||||
};
|
||||
}, [setText, onTranscriptionComplete, resetTranscript, finalTranscript, autoSendText]);
|
||||
|
||||
const toggleListening = () => {
|
||||
if (!browserSupportsSpeechRecognition) {
|
||||
|
|
@@ -38,11 +85,9 @@ const useSpeechToTextBrowser = () => {
|
|||
return;
|
||||
}
|
||||
|
||||
if (listening) {
|
||||
setIsListening(false);
|
||||
if (isListening === true) {
|
||||
SpeechRecognition.stopListening();
|
||||
} else {
|
||||
setIsListening(true);
|
||||
SpeechRecognition.startListening({
|
||||
language: languageSTT,
|
||||
continuous: autoTranscribeAudio,
|
||||
|
|
@@ -61,17 +106,9 @@ const useSpeechToTextBrowser = () => {
|
|||
return () => window.removeEventListener('keydown', handleKeyDown);
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
if (!listening) {
|
||||
setIsListening(false);
|
||||
}
|
||||
}, [listening]);
|
||||
|
||||
return {
|
||||
isListening,
|
||||
isLoading: false,
|
||||
interimTranscript,
|
||||
text: finalTranscript,
|
||||
startRecording: toggleListening,
|
||||
stopRecording: toggleListening,
|
||||
};
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue