Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-19 18:00:15 +01:00)
Create SpeechRecognition.tsx
This commit is contained in:
parent 46c53d1395
commit 2ffb5bedd3
1 changed file with 67 additions and 0 deletions
client/src/components/Input/SpeechRecognition.tsx (new file, 67 lines added)
@@ -0,0 +1,67 @@
import { useState, useEffect } from 'react';

const useSpeechRecognition = (ask) => {
  const [isSpeechSupported, setIsSpeechSupported] = useState(false);
  const [isListening, setIsListening] = useState(false);
  const [text, setText] = useState('');

  useEffect(() => {
    if ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window) {
      setIsSpeechSupported(true);
    } else {
      console.log("Browser does not support SpeechRecognition");
      setIsSpeechSupported(false);
      return;
    }

    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    const recognition = new SpeechRecognition();

    recognition.onstart = () => {
      console.log("Speech recognition started");
    };

    recognition.interimResults = true;

    recognition.onresult = (event) => {
      let transcript = '';

      for (let i = 0; i < event.results.length; i++) {
        const result = event.results[i];
        transcript += result[0].transcript;

        if (result.isFinal) {
          setText(transcript);
          ask({ text: transcript });
        }
      }

      // Set the text with both interim and final results
      setText(transcript);
    };

    recognition.onend = () => {
      setIsListening(false);
      setText('');
    };

    if (isListening) {
      recognition.start();
    } else {
      recognition.stop();
    }

    return () => {
      recognition.stop();
    };
  }, [isListening, ask]);

  const toggleListening = (e) => {
    e.preventDefault();
    setIsListening((prevState) => !prevState);
  };

  return { isSpeechSupported, isListening, text, toggleListening };
};

export default useSpeechRecognition;
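
A note on the file extension: the hook is written as plain JavaScript inside a .tsx file, and window.SpeechRecognition / window.webkitSpeechRecognition are not part of TypeScript's standard DOM typings, so a strict build would flag those property accesses. A minimal sketch of an ambient declaration that would let this compile as-is (the global.d.ts filename and the use of any are assumptions, not part of this commit):

// Hypothetical ambient declaration, e.g. in a global.d.ts (assumed name):
// widens Window so the prefixed Web Speech constructors type-check.
export {}; // keeps this file a module, which `declare global` requires

declare global {
  interface Window {
    SpeechRecognition?: any;
    webkitSpeechRecognition?: any;
  }
}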
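For context, here is a minimal sketch of how a component might consume this hook. The MessageInput component and its ask handler are hypothetical illustrations, not part of this commit; the commit only adds the hook itself:

import React from 'react';
import useSpeechRecognition from './SpeechRecognition';

// Hypothetical consumer (not in this commit): wires the hook into an input.
// `ask` is invoked with the final transcript once a result is marked final.
const MessageInput = () => {
  const ask = ({ text }: { text: string }) => {
    console.log('submitting:', text);
  };

  const { isSpeechSupported, isListening, text, toggleListening } = useSpeechRecognition(ask);

  if (!isSpeechSupported) {
    return <input placeholder="Type a message" />;
  }

  return (
    <div>
      {/* `text` also carries interim results, so it doubles as a live preview */}
      <input value={text} readOnly placeholder="Speak or type" />
      <button onClick={toggleListening}>{isListening ? 'Stop' : 'Start'} listening</button>
    </div>
  );
};

export default MessageInput;

Because the effect re-runs whenever isListening flips, toggling the button creates a fresh recognition instance, starts or stops it, and the cleanup stops the previous instance, so at most one recognition session is active at a time.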