mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-18 09:20:15 +01:00
🎛️ fix: Improve Frontend Practices for Audio Settings (#3624)
* refactor: do not call await inside useCallbacks, rely on updates for dropdown
* fix: remember last selected voice
* refactor: Update Speech component to use TypeScript in useCallback
* refactor: Update Dropdown component styles to match header theme
This commit is contained in:
parent
8cbb6ba166
commit
05696233a9
20 changed files with 436 additions and 367 deletions
|
|
@@ -1,13 +1,18 @@
|
|||
import { useRef } from 'react';
|
||||
import { useRecoilState } from 'recoil';
|
||||
import { useRef, useMemo, useEffect } from 'react';
|
||||
import { parseTextParts } from 'librechat-data-provider';
|
||||
import type { TMessage } from 'librechat-data-provider';
|
||||
import type { Option } from '~/common';
|
||||
import useTextToSpeechExternal from './useTextToSpeechExternal';
|
||||
import useTextToSpeechBrowser from './useTextToSpeechBrowser';
|
||||
import useGetAudioSettings from './useGetAudioSettings';
|
||||
import useTextToSpeechEdge from './useTextToSpeechEdge';
|
||||
import { usePauseGlobalAudio } from '../Audio';
|
||||
import { logger } from '~/utils';
|
||||
import store from '~/store';
|
||||
|
||||
const useTextToSpeech = (message?: TMessage, isLast = false, index = 0) => {
|
||||
const [voice, setVoice] = useRecoilState(store.voice);
|
||||
const { textToSpeechEndpoint } = useGetAudioSettings();
|
||||
const { pauseGlobalAudio } = usePauseGlobalAudio(index);
|
||||
const audioRef = useRef<HTMLAudioElement | null>(null);
|
||||
|
|
@@ -33,9 +38,47 @@ const useTextToSpeech = (message?: TMessage, isLast = false, index = 0) => {
|
|||
isLoading: isLoadingExternal,
|
||||
audioRef: audioRefExternal,
|
||||
voices: voicesExternal,
|
||||
} = useTextToSpeechExternal(message?.messageId || '', isLast, index);
|
||||
} = useTextToSpeechExternal(message?.messageId ?? '', isLast, index);
|
||||
|
||||
let generateSpeech, cancelSpeech, isSpeaking, isLoading, voices;
|
||||
let generateSpeech, cancelSpeech, isSpeaking, isLoading;
|
||||
|
||||
const voices: Option[] | string[] = useMemo(() => {
|
||||
const voiceMap = {
|
||||
external: voicesExternal,
|
||||
edge: voicesEdge,
|
||||
browser: voicesLocal,
|
||||
};
|
||||
|
||||
return voiceMap[textToSpeechEndpoint];
|
||||
}, [textToSpeechEndpoint, voicesEdge, voicesExternal, voicesLocal]);
|
||||
|
||||
useEffect(() => {
|
||||
const firstVoice = voices[0];
|
||||
if (voices.length && typeof firstVoice === 'object') {
|
||||
const lastSelectedVoice = voices.find((v) =>
|
||||
typeof v === 'object' ? v.value === voice : v === voice,
|
||||
);
|
||||
if (lastSelectedVoice != null) {
|
||||
const currentVoice =
|
||||
typeof lastSelectedVoice === 'object' ? lastSelectedVoice.value : lastSelectedVoice;
|
||||
logger.log('useTextToSpeech.ts - Effect:', { voices, voice: currentVoice });
|
||||
setVoice(currentVoice?.toString() ?? undefined);
|
||||
return;
|
||||
}
|
||||
|
||||
logger.log('useTextToSpeech.ts - Effect:', { voices, voice: firstVoice.value });
|
||||
setVoice(firstVoice.value?.toString() ?? undefined);
|
||||
} else if (voices.length) {
|
||||
const lastSelectedVoice = voices.find((v) => v === voice);
|
||||
if (lastSelectedVoice != null) {
|
||||
logger.log('useTextToSpeech.ts - Effect:', { voices, voice: lastSelectedVoice });
|
||||
setVoice(lastSelectedVoice.toString());
|
||||
return;
|
||||
}
|
||||
logger.log('useTextToSpeech.ts - Effect:', { voices, voice: firstVoice });
|
||||
setVoice(firstVoice.toString());
|
||||
}
|
||||
}, [setVoice, textToSpeechEndpoint, voice, voices]);
|
||||
|
||||
switch (textToSpeechEndpoint) {
|
||||
case 'external':
|
||||
|
|
@@ -43,17 +86,15 @@ const useTextToSpeech = (message?: TMessage, isLast = false, index = 0) => {
|
|||
cancelSpeech = cancelSpeechExternal;
|
||||
isSpeaking = isSpeakingExternal;
|
||||
isLoading = isLoadingExternal;
|
||||
if (audioRefExternal) {
|
||||
if (audioRefExternal.current) {
|
||||
audioRef.current = audioRefExternal.current;
|
||||
}
|
||||
voices = voicesExternal;
|
||||
break;
|
||||
case 'edge':
|
||||
generateSpeech = generateSpeechEdge;
|
||||
cancelSpeech = cancelSpeechEdge;
|
||||
isSpeaking = isSpeakingEdge;
|
||||
isLoading = false;
|
||||
voices = voicesEdge;
|
||||
break;
|
||||
case 'browser':
|
||||
default:
|
||||
|
|
@@ -61,7 +102,6 @@ const useTextToSpeech = (message?: TMessage, isLast = false, index = 0) => {
|
|||
cancelSpeech = cancelSpeechLocal;
|
||||
isSpeaking = isSpeakingLocal;
|
||||
isLoading = false;
|
||||
voices = voicesLocal;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@@ -82,7 +122,7 @@ const useTextToSpeech = (message?: TMessage, isLast = false, index = 0) => {
|
|||
|
||||
const handleMouseUp = () => {
|
||||
isMouseDownRef.current = false;
|
||||
if (timerRef.current) {
|
||||
if (timerRef.current != null) {
|
||||
window.clearTimeout(timerRef.current);
|
||||
}
|
||||
};
|
||||
|
|
@@ -105,8 +145,8 @@ const useTextToSpeech = (message?: TMessage, isLast = false, index = 0) => {
|
|||
toggleSpeech,
|
||||
isSpeaking,
|
||||
isLoading,
|
||||
voices,
|
||||
audioRef,
|
||||
voices,
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue