Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 00:40:14 +01:00)
🗣️ refactor: speech services; fix: OpenAI STT (#3431)
* fix: OpenAI STT
* refactor: STT and TTS services, with slight performance improvements
* fix(DecibelSelector): update default value
This commit is contained in:
parent 4ffdefc2a8
commit 51cd847606
8 changed files with 737 additions and 714 deletions
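For orientation before the diffs: the refactor replaces the standalone speechToText/textToSpeech modules with STTService and TTSService classes, each mapping the configured provider to a request-building strategy, while keeping the same Express-style (req, res) handler exports. A minimal sketch of how those handlers could be wired up; the router paths and multer middleware are illustrative assumptions, not part of this commit:

const express = require('express');
const multer = require('multer');
// Exports as defined in the new files below:
const speechToText = require('~/server/services/Files/Audio/STTService');
const { textToSpeech, streamAudio } = require('~/server/services/Files/Audio/TTSService');

const router = express.Router();
// speechToText reads the upload from req.file.buffer, so in-memory storage fits.
const upload = multer({ storage: multer.memoryStorage() });

router.post('/stt', upload.single('audio'), speechToText); // hypothetical paths
router.post('/tts', textToSpeech);
router.post('/tts/stream', streamAudio);

module.exports = router;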
248
api/server/services/Files/Audio/STTService.js
Normal file
@@ -0,0 +1,248 @@
const { Readable } = require('stream');
const axios = require('axios');
const { extractEnvVariable, STTProviders } = require('librechat-data-provider');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const { genAzureEndpoint } = require('~/utils');
const { logger } = require('~/config');

/**
 * Service class for handling Speech-to-Text (STT) operations.
 * @class
 */
class STTService {
  /**
   * Creates an instance of STTService.
   * @param {Object} customConfig - The custom configuration object.
   */
  constructor(customConfig) {
    this.customConfig = customConfig;
    this.providerStrategies = {
      [STTProviders.OPENAI]: this.openAIProvider,
      [STTProviders.AZURE_OPENAI]: this.azureOpenAIProvider,
    };
  }

  /**
   * Creates a singleton instance of STTService.
   * @static
   * @async
   * @returns {Promise<STTService>} The STTService instance.
   * @throws {Error} If the custom config is not found.
   */
  static async getInstance() {
    const customConfig = await getCustomConfig();
    if (!customConfig) {
      throw new Error('Custom config not found');
    }
    return new STTService(customConfig);
  }

  /**
   * Retrieves the configured STT provider and its schema.
   * @returns {Promise<[string, Object]>} A promise that resolves to an array containing the provider name and its schema.
   * @throws {Error} If no STT schema is set, multiple providers are set, or no provider is set.
   */
  async getProviderSchema() {
    const sttSchema = this.customConfig.speech.stt;

    if (!sttSchema) {
      throw new Error(
        'No STT schema is set. Did you configure STT in the custom config (librechat.yaml)?',
      );
    }

    const providers = Object.entries(sttSchema).filter(
      ([, value]) => Object.keys(value).length > 0,
    );

    if (providers.length !== 1) {
      throw new Error(
        providers.length > 1
          ? 'Multiple providers are set. Please set only one provider.'
          : 'No provider is set. Please set a provider.',
      );
    }

    const [provider, schema] = providers[0];
    return [provider, schema];
  }

  /**
   * Recursively removes undefined properties from an object.
   * @param {Object} obj - The object to clean.
   * @returns {void}
   */
  removeUndefined(obj) {
    Object.keys(obj).forEach((key) => {
      if (obj[key] && typeof obj[key] === 'object') {
        this.removeUndefined(obj[key]);
        if (Object.keys(obj[key]).length === 0) {
          delete obj[key];
        }
      } else if (obj[key] === undefined) {
        delete obj[key];
      }
    });
  }

  /**
   * Prepares the request for the OpenAI STT provider.
   * @param {Object} sttSchema - The STT schema for OpenAI.
   * @param {Stream} audioReadStream - The audio data to be transcribed.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   */
  openAIProvider(sttSchema, audioReadStream) {
    const url = sttSchema?.url || 'https://api.openai.com/v1/audio/transcriptions';
    const apiKey = extractEnvVariable(sttSchema.apiKey) || '';

    const data = {
      file: audioReadStream,
      model: sttSchema.model,
    };

    const headers = {
      'Content-Type': 'multipart/form-data',
      ...(apiKey && { Authorization: `Bearer ${apiKey}` }),
    };
    [headers].forEach(this.removeUndefined);

    return [url, data, headers];
  }

  /**
   * Prepares the request for the Azure OpenAI STT provider.
   * @param {Object} sttSchema - The STT schema for Azure OpenAI.
   * @param {Buffer} audioBuffer - The audio data to be transcribed.
   * @param {Object} audioFile - The audio file object containing originalname, mimetype, and size.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   * @throws {Error} If the audio file size exceeds 25MB or the audio file format is not accepted.
   */
  azureOpenAIProvider(sttSchema, audioBuffer, audioFile) {
    const url = `${genAzureEndpoint({
      azureOpenAIApiInstanceName: sttSchema?.instanceName,
      azureOpenAIApiDeploymentName: sttSchema?.deploymentName,
    })}/audio/transcriptions?api-version=${sttSchema?.apiVersion}`;

    const apiKey = sttSchema.apiKey ? extractEnvVariable(sttSchema.apiKey) : '';

    if (audioBuffer.byteLength > 25 * 1024 * 1024) {
      throw new Error('The audio file size exceeds the limit of 25MB');
    }

    const acceptedFormats = ['flac', 'mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'ogg', 'wav', 'webm'];
    const fileFormat = audioFile.mimetype.split('/')[1];
    if (!acceptedFormats.includes(fileFormat)) {
      throw new Error(`The audio file format ${fileFormat} is not accepted`);
    }

    const formData = new FormData();
    const audioBlob = new Blob([audioBuffer], { type: audioFile.mimetype });
    formData.append('file', audioBlob, audioFile.originalname);

    const headers = {
      'Content-Type': 'multipart/form-data',
      ...(apiKey && { 'api-key': apiKey }),
    };

    [headers].forEach(this.removeUndefined);

    return [url, formData, headers];
  }

  /**
   * Sends an STT request to the specified provider.
   * @async
   * @param {string} provider - The STT provider to use.
   * @param {Object} sttSchema - The STT schema for the provider.
   * @param {Object} requestData - The data required for the STT request.
   * @param {Buffer} requestData.audioBuffer - The audio data to be transcribed.
   * @param {Object} requestData.audioFile - The audio file object containing originalname, mimetype, and size.
   * @returns {Promise<string>} A promise that resolves to the transcribed text.
   * @throws {Error} If the provider is invalid, the response status is not 200, or the response data is missing.
   */
  async sttRequest(provider, sttSchema, { audioBuffer, audioFile }) {
    const strategy = this.providerStrategies[provider];
    if (!strategy) {
      throw new Error('Invalid provider');
    }

    const audioReadStream = Readable.from(audioBuffer);
    audioReadStream.path = 'audio.wav';

    const [url, data, headers] = strategy.call(this, sttSchema, audioReadStream, audioFile);

    if (!Readable.from && data instanceof FormData) {
      const audioBlob = new Blob([audioBuffer], { type: audioFile.mimetype });
      data.set('file', audioBlob, audioFile.originalname);
    }

    try {
      const response = await axios.post(url, data, { headers });

      if (response.status !== 200) {
        throw new Error('Invalid response from the STT API');
      }

      if (!response.data || !response.data.text) {
        throw new Error('Missing data in response from the STT API');
      }

      return response.data.text.trim();
    } catch (error) {
      logger.error(`STT request failed for provider ${provider}:`, error);
      throw error;
    }
  }

  /**
   * Processes a speech-to-text request.
   * @async
   * @param {Object} req - The request object.
   * @param {Object} res - The response object.
   * @returns {Promise<void>}
   */
  async processTextToSpeech(req, res) {
    if (!req.file || !req.file.buffer) {
      return res.status(400).json({ message: 'No audio file provided in the FormData' });
    }

    const audioBuffer = req.file.buffer;
    const audioFile = {
      originalname: req.file.originalname,
      mimetype: req.file.mimetype,
      size: req.file.size,
    };

    try {
      const [provider, sttSchema] = await this.getProviderSchema();
      const text = await this.sttRequest(provider, sttSchema, { audioBuffer, audioFile });
      res.json({ text });
    } catch (error) {
      logger.error('An error occurred while processing the audio:', error);
      res.sendStatus(500);
    }
  }
}

/**
 * Factory function to create an STTService instance.
 * @async
 * @returns {Promise<STTService>} A promise that resolves to an STTService instance.
 */
async function createSTTService() {
  return STTService.getInstance();
}

/**
 * Wrapper function for speech-to-text processing.
 * @async
 * @param {Object} req - The request object.
 * @param {Object} res - The response object.
 * @returns {Promise<void>}
 */
async function speechToText(req, res) {
  const sttService = await createSTTService();
  await sttService.processTextToSpeech(req, res);
}

module.exports = speechToText;
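To make the one-provider rule in getProviderSchema() concrete, here is a small illustration against a hand-written config. The values are examples shaped like librechat.yaml's speech.stt block, not taken from this commit, and since STTService.js exports only the speechToText handler, this assumes direct access to the class defined above:

// Run inside an async context.
const customConfig = {
  speech: {
    stt: {
      openai: { apiKey: '${STT_API_KEY}', model: 'whisper-1' }, // one non-empty provider
      azureOpenAI: {}, // empty entries are filtered out
    },
  },
};
const service = new STTService(customConfig);
const [provider, schema] = await service.getProviderSchema();
// provider === 'openai'; schema is the openai block above.
// With two non-empty entries, or none, getProviderSchema() throws instead.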
477
api/server/services/Files/Audio/TTSService.js
Normal file
@@ -0,0 +1,477 @@
const axios = require('axios');
const { extractEnvVariable, TTSProviders } = require('librechat-data-provider');
const { logger } = require('~/config');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const { genAzureEndpoint } = require('~/utils');
const { getRandomVoiceId, createChunkProcessor, splitTextIntoChunks } = require('./streamAudio');

/**
 * Service class for handling Text-to-Speech (TTS) operations.
 * @class
 */
class TTSService {
  /**
   * Creates an instance of TTSService.
   * @param {Object} customConfig - The custom configuration object.
   */
  constructor(customConfig) {
    this.customConfig = customConfig;
    this.providerStrategies = {
      [TTSProviders.OPENAI]: this.openAIProvider.bind(this),
      [TTSProviders.AZURE_OPENAI]: this.azureOpenAIProvider.bind(this),
      [TTSProviders.ELEVENLABS]: this.elevenLabsProvider.bind(this),
      [TTSProviders.LOCALAI]: this.localAIProvider.bind(this),
    };
  }

  /**
   * Creates a singleton instance of TTSService.
   * @static
   * @async
   * @returns {Promise<TTSService>} The TTSService instance.
   * @throws {Error} If the custom config is not found.
   */
  static async getInstance() {
    const customConfig = await getCustomConfig();
    if (!customConfig) {
      throw new Error('Custom config not found');
    }
    return new TTSService(customConfig);
  }

  /**
   * Retrieves the configured TTS provider.
   * @returns {string} The name of the configured provider.
   * @throws {Error} If no provider is set or multiple providers are set.
   */
  getProvider() {
    const ttsSchema = this.customConfig.speech.tts;
    if (!ttsSchema) {
      throw new Error(
        'No TTS schema is set. Did you configure TTS in the custom config (librechat.yaml)?',
      );
    }
    const providers = Object.entries(ttsSchema).filter(
      ([, value]) => Object.keys(value).length > 0,
    );

    if (providers.length !== 1) {
      throw new Error(
        providers.length > 1
          ? 'Multiple providers are set. Please set only one provider.'
          : 'No provider is set. Please set a provider.',
      );
    }
    return providers[0][0];
  }

  /**
   * Selects a voice for TTS based on provider schema and request.
   * @async
   * @param {Object} providerSchema - The schema for the selected provider.
   * @param {string} requestVoice - The requested voice.
   * @returns {Promise<string>} The selected voice.
   */
  async getVoice(providerSchema, requestVoice) {
    const voices = providerSchema.voices.filter((voice) => voice && voice.toUpperCase() !== 'ALL');
    let voice = requestVoice;
    if (!voice || !voices.includes(voice) || (voice.toUpperCase() === 'ALL' && voices.length > 1)) {
      voice = getRandomVoiceId(voices);
    }
    return voice;
  }

  /**
   * Recursively removes undefined properties from an object.
   * @param {Object} obj - The object to clean.
   */
  removeUndefined(obj) {
    Object.keys(obj).forEach((key) => {
      if (obj[key] && typeof obj[key] === 'object') {
        this.removeUndefined(obj[key]);
        if (Object.keys(obj[key]).length === 0) {
          delete obj[key];
        }
      } else if (obj[key] === undefined) {
        delete obj[key];
      }
    });
  }

  /**
   * Prepares the request for OpenAI TTS provider.
   * @param {Object} ttsSchema - The TTS schema for OpenAI.
   * @param {string} input - The input text.
   * @param {string} voice - The selected voice.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   * @throws {Error} If the selected voice is not available.
   */
  openAIProvider(ttsSchema, input, voice) {
    const url = ttsSchema?.url || 'https://api.openai.com/v1/audio/speech';

    if (
      ttsSchema?.voices &&
      ttsSchema.voices.length > 0 &&
      !ttsSchema.voices.includes(voice) &&
      !ttsSchema.voices.includes('ALL')
    ) {
      throw new Error(`Voice ${voice} is not available.`);
    }

    const data = {
      input,
      model: ttsSchema?.model,
      voice: ttsSchema?.voices && ttsSchema.voices.length > 0 ? voice : undefined,
      backend: ttsSchema?.backend,
    };

    const headers = {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${extractEnvVariable(ttsSchema?.apiKey)}`,
    };

    return [url, data, headers];
  }

  /**
   * Prepares the request for Azure OpenAI TTS provider.
   * @param {Object} ttsSchema - The TTS schema for Azure OpenAI.
   * @param {string} input - The input text.
   * @param {string} voice - The selected voice.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   * @throws {Error} If the selected voice is not available.
   */
  azureOpenAIProvider(ttsSchema, input, voice) {
    const url = `${genAzureEndpoint({
      azureOpenAIApiInstanceName: ttsSchema?.instanceName,
      azureOpenAIApiDeploymentName: ttsSchema?.deploymentName,
    })}/audio/speech?api-version=${ttsSchema?.apiVersion}`;

    if (
      ttsSchema?.voices &&
      ttsSchema.voices.length > 0 &&
      !ttsSchema.voices.includes(voice) &&
      !ttsSchema.voices.includes('ALL')
    ) {
      throw new Error(`Voice ${voice} is not available.`);
    }

    const data = {
      model: ttsSchema?.model,
      input,
      voice: ttsSchema?.voices && ttsSchema.voices.length > 0 ? voice : undefined,
    };

    const headers = {
      'Content-Type': 'application/json',
      'api-key': ttsSchema.apiKey ? extractEnvVariable(ttsSchema.apiKey) : '',
    };

    return [url, data, headers];
  }

  /**
   * Prepares the request for ElevenLabs TTS provider.
   * @param {Object} ttsSchema - The TTS schema for ElevenLabs.
   * @param {string} input - The input text.
   * @param {string} voice - The selected voice.
   * @param {boolean} stream - Whether to use streaming.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   * @throws {Error} If the selected voice is not available.
   */
  elevenLabsProvider(ttsSchema, input, voice, stream) {
    let url =
      ttsSchema?.url ||
      `https://api.elevenlabs.io/v1/text-to-speech/${voice}${stream ? '/stream' : ''}`;

    if (!ttsSchema?.voices.includes(voice) && !ttsSchema?.voices.includes('ALL')) {
      throw new Error(`Voice ${voice} is not available.`);
    }

    const data = {
      model_id: ttsSchema?.model,
      text: input,
      voice_settings: {
        similarity_boost: ttsSchema?.voice_settings?.similarity_boost,
        stability: ttsSchema?.voice_settings?.stability,
        style: ttsSchema?.voice_settings?.style,
        use_speaker_boost: ttsSchema?.voice_settings?.use_speaker_boost,
      },
      pronunciation_dictionary_locators: ttsSchema?.pronunciation_dictionary_locators,
    };

    const headers = {
      'Content-Type': 'application/json',
      'xi-api-key': extractEnvVariable(ttsSchema?.apiKey),
      Accept: 'audio/mpeg',
    };

    return [url, data, headers];
  }

  /**
   * Prepares the request for LocalAI TTS provider.
   * @param {Object} ttsSchema - The TTS schema for LocalAI.
   * @param {string} input - The input text.
   * @param {string} voice - The selected voice.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   * @throws {Error} If the selected voice is not available.
   */
  localAIProvider(ttsSchema, input, voice) {
    const url = ttsSchema?.url;

    if (
      ttsSchema?.voices &&
      ttsSchema.voices.length > 0 &&
      !ttsSchema.voices.includes(voice) &&
      !ttsSchema.voices.includes('ALL')
    ) {
      throw new Error(`Voice ${voice} is not available.`);
    }

    const data = {
      input,
      model: ttsSchema?.voices && ttsSchema.voices.length > 0 ? voice : undefined,
      backend: ttsSchema?.backend,
    };

    const headers = {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${extractEnvVariable(ttsSchema?.apiKey)}`,
    };

    if (extractEnvVariable(ttsSchema.apiKey) === '') {
      delete headers.Authorization;
    }

    return [url, data, headers];
  }

  /**
   * Sends a TTS request to the specified provider.
   * @async
   * @param {string} provider - The TTS provider to use.
   * @param {Object} ttsSchema - The TTS schema for the provider.
   * @param {Object} options - The options for the TTS request.
   * @param {string} options.input - The input text.
   * @param {string} options.voice - The voice to use.
   * @param {boolean} [options.stream=true] - Whether to use streaming.
   * @returns {Promise<Object>} The axios response object.
   * @throws {Error} If the provider is invalid or the request fails.
   */
  async ttsRequest(provider, ttsSchema, { input, voice, stream = true }) {
    const strategy = this.providerStrategies[provider];
    if (!strategy) {
      throw new Error('Invalid provider');
    }

    const [url, data, headers] = strategy.call(this, ttsSchema, input, voice, stream);

    [data, headers].forEach(this.removeUndefined.bind(this));

    const options = { headers, responseType: stream ? 'stream' : 'arraybuffer' };

    try {
      return await axios.post(url, data, options);
    } catch (error) {
      logger.error(`TTS request failed for provider ${provider}:`, error);
      throw error;
    }
  }

  /**
   * Processes a text-to-speech request.
   * @async
   * @param {Object} req - The request object.
   * @param {Object} res - The response object.
   * @returns {Promise<void>}
   */
  async processTextToSpeech(req, res) {
    const { input, voice: requestVoice } = req.body;

    if (!input) {
      return res.status(400).send('Missing text in request body');
    }

    try {
      res.setHeader('Content-Type', 'audio/mpeg');
      const provider = this.getProvider();
      const ttsSchema = this.customConfig.speech.tts[provider];
      const voice = await this.getVoice(ttsSchema, requestVoice);

      if (input.length < 4096) {
        const response = await this.ttsRequest(provider, ttsSchema, { input, voice });
        response.data.pipe(res);
        return;
      }

      const textChunks = splitTextIntoChunks(input, 1000);

      for (const chunk of textChunks) {
        try {
          const response = await this.ttsRequest(provider, ttsSchema, {
            voice,
            input: chunk.text,
            stream: true,
          });

          logger.debug(`[textToSpeech] user: ${req?.user?.id} | writing audio stream`);
          await new Promise((resolve) => {
            response.data.pipe(res, { end: chunk.isFinished });
            response.data.on('end', resolve);
          });

          if (chunk.isFinished) {
            break;
          }
        } catch (innerError) {
          logger.error('Error processing manual update:', chunk, innerError);
          if (!res.headersSent) {
            return res.status(500).end();
          }
          return;
        }
      }

      if (!res.headersSent) {
        res.end();
      }
    } catch (error) {
      logger.error('Error creating the audio stream:', error);
      if (!res.headersSent) {
        return res.status(500).send('An error occurred');
      }
    }
  }

  /**
   * Streams audio data from the TTS provider.
   * @async
   * @param {Object} req - The request object.
   * @param {Object} res - The response object.
   * @returns {Promise<void>}
   */
  async streamAudio(req, res) {
    res.setHeader('Content-Type', 'audio/mpeg');
    const provider = this.getProvider();
    const ttsSchema = this.customConfig.speech.tts[provider];
    const voice = await this.getVoice(ttsSchema, req.body.voice);

    let shouldContinue = true;

    req.on('close', () => {
      logger.warn('[streamAudio] Audio Stream Request closed by client');
      shouldContinue = false;
    });

    const processChunks = createChunkProcessor(req.body.messageId);

    try {
      while (shouldContinue) {
        const updates = await processChunks();
        if (typeof updates === 'string') {
          logger.error(`Error processing audio stream updates: ${updates}`);
          return res.status(500).end();
        }

        if (updates.length === 0) {
          await new Promise((resolve) => setTimeout(resolve, 1250));
          continue;
        }

        for (const update of updates) {
          try {
            const response = await this.ttsRequest(provider, ttsSchema, {
              voice,
              input: update.text,
              stream: true,
            });

            if (!shouldContinue) {
              break;
            }

            logger.debug(`[streamAudio] user: ${req?.user?.id} | writing audio stream`);
            await new Promise((resolve) => {
              response.data.pipe(res, { end: update.isFinished });
              response.data.on('end', resolve);
            });

            if (update.isFinished) {
              shouldContinue = false;
              break;
            }
          } catch (innerError) {
            logger.error('Error processing audio stream update:', update, innerError);
            if (!res.headersSent) {
              return res.status(500).end();
            }
            return;
          }
        }

        if (!shouldContinue) {
          break;
        }
      }

      if (!res.headersSent) {
        res.end();
      }
    } catch (error) {
      logger.error('Failed to fetch audio:', error);
      if (!res.headersSent) {
        res.status(500).end();
      }
    }
  }
}

/**
 * Factory function to create a TTSService instance.
 * @async
 * @returns {Promise<TTSService>} A promise that resolves to a TTSService instance.
 */
async function createTTSService() {
  return TTSService.getInstance();
}

/**
 * Wrapper function for text-to-speech processing.
 * @async
 * @param {Object} req - The request object.
 * @param {Object} res - The response object.
 * @returns {Promise<void>}
 */
async function textToSpeech(req, res) {
  const ttsService = await createTTSService();
  await ttsService.processTextToSpeech(req, res);
}

/**
 * Wrapper function for audio streaming.
 * @async
 * @param {Object} req - The request object.
 * @param {Object} res - The response object.
 * @returns {Promise<void>}
 */
async function streamAudio(req, res) {
  const ttsService = await createTTSService();
  await ttsService.streamAudio(req, res);
}

/**
 * Wrapper function to get the configured TTS provider.
 * @async
 * @returns {Promise<string>} A promise that resolves to the name of the configured provider.
 */
async function getProvider() {
  const ttsService = await createTTSService();
  return ttsService.getProvider();
}

module.exports = {
  textToSpeech,
  streamAudio,
  getProvider,
};
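The voice fallback in getVoice() above is easiest to read with concrete values. An illustration on an instance of the class (voice names follow OpenAI's catalog but are otherwise arbitrary; run inside an async context):

const schema = { voices: ['alloy', 'echo', 'ALL'] }; // 'ALL' acts as a wildcard, not a real voice
await service.getVoice(schema, 'echo');    // -> 'echo': a valid request is honored
await service.getVoice(schema, 'nova');    // -> random pick from ['alloy', 'echo']
await service.getVoice(schema, undefined); // -> random pick as well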
api/server/services/Files/Audio/getVoices.js
@@ -1,6 +1,6 @@
 const { TTSProviders } = require('librechat-data-provider');
 const getCustomConfig = require('~/server/services/Config/getCustomConfig');
-const { getProvider } = require('./textToSpeech');
+const { getProvider } = require('./TTSService');
 
 /**
  * This function retrieves the available voices for the current TTS provider
api/server/services/Files/Audio/index.js
@@ -1,11 +1,11 @@
 const getVoices = require('./getVoices');
 const getCustomConfigSpeech = require('./getCustomConfigSpeech');
-const textToSpeech = require('./textToSpeech');
-const speechToText = require('./speechToText');
+const TTSService = require('./TTSService');
+const STTService = require('./STTService');
 
 module.exports = {
   getVoices,
   getCustomConfigSpeech,
-  speechToText,
-  ...textToSpeech,
+  ...STTService,
+  ...TTSService,
 };
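One subtlety in the new export wiring: STTService.js ends with module.exports = speechToText, a bare function, and spreading a function into an object literal copies only its own enumerable properties, of which a plain function has none. TTSService.js exports an object, so it spreads as expected. The spread semantics, for reference:

const fn = () => {};
console.log({ ...fn });       // {} — the function itself is not carried over
console.log({ ...{ a: 1 } }); // { a: 1 }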
api/server/services/Files/Audio/speechToText.js
@@ -1,225 +0,0 @@
const { Readable } = require('stream');
const axios = require('axios');
const { extractEnvVariable, STTProviders } = require('librechat-data-provider');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const { genAzureEndpoint } = require('~/utils');
const { logger } = require('~/config');

/**
 * Handle the response from the STT API
 * @param {Object} response - The response from the STT API
 *
 * @returns {string} The text from the response data
 *
 * @throws Will throw an error if the response status is not 200 or the response data is missing
 */
async function handleResponse(response) {
  if (response.status !== 200) {
    throw new Error('Invalid response from the STT API');
  }

  if (!response.data || !response.data.text) {
    throw new Error('Missing data in response from the STT API');
  }

  return response.data.text.trim();
}

/**
 * getProviderSchema function
 * This function takes the customConfig object and returns the name of the provider and its schema
 * If more than one provider is set or no provider is set, it throws an error
 *
 * @param {Object} customConfig - The custom configuration containing the STT schema
 * @returns {Promise<[string, Object]>} The name of the provider and its schema
 * @throws {Error} Throws an error if multiple providers are set or no provider is set
 */
async function getProviderSchema(customConfig) {
  const sttSchema = customConfig.speech.stt;

  if (!sttSchema) {
    throw new Error(`No STT schema is set. Did you configure STT in the custom config (librechat.yaml)?

https://www.librechat.ai/docs/configuration/stt_tts#stt`);
  }

  const providers = Object.entries(sttSchema).filter(([, value]) => Object.keys(value).length > 0);

  if (providers.length > 1) {
    throw new Error('Multiple providers are set. Please set only one provider.');
  } else if (providers.length === 0) {
    throw new Error('No provider is set. Please set a provider.');
  } else {
    const provider = providers[0][0];
    return [provider, sttSchema[provider]];
  }
}

function removeUndefined(obj) {
  Object.keys(obj).forEach((key) => {
    if (obj[key] && typeof obj[key] === 'object') {
      removeUndefined(obj[key]);
      if (Object.keys(obj[key]).length === 0) {
        delete obj[key];
      }
    } else if (obj[key] === undefined) {
      delete obj[key];
    }
  });
}

/**
 * This function prepares the necessary data and headers for making a request to the OpenAI API
 * It uses the provided speech-to-text schema and audio stream to create the request
 *
 * @param {Object} sttSchema - The speech-to-text schema containing the OpenAI configuration
 * @param {Stream} audioReadStream - The audio data to be transcribed
 *
 * @returns {Array} An array containing the URL for the API request, the data to be sent, and the headers for the request
 * If an error occurs, it returns an array with three null values and logs the error with logger
 */
function openAIProvider(sttSchema, audioReadStream) {
  try {
    const url = sttSchema.openai?.url || 'https://api.openai.com/v1/audio/transcriptions';
    const apiKey = sttSchema.openai.apiKey ? extractEnvVariable(sttSchema.openai.apiKey) : '';

    let data = {
      file: audioReadStream,
      model: sttSchema.openai.model,
    };

    let headers = {
      'Content-Type': 'multipart/form-data',
    };

    [headers].forEach(removeUndefined);

    if (apiKey) {
      headers.Authorization = 'Bearer ' + apiKey;
    }

    return [url, data, headers];
  } catch (error) {
    logger.error('An error occurred while preparing the OpenAI API STT request: ', error);
    return [null, null, null];
  }
}

/**
 * Prepares the necessary data and headers for making a request to the Azure API.
 * It uses the provided Speech-to-Text (STT) schema and audio file to create the request.
 *
 * @param {Object} sttSchema - The STT schema object, which should contain instanceName, deploymentName, apiVersion, and apiKey.
 * @param {Buffer} audioBuffer - The audio data to be transcribed
 * @param {Object} audioFile - The audio file object, which should contain originalname, mimetype, and size.
 *
 * @returns {Array} An array containing the URL for the API request, the data to be sent, and the headers for the request.
 * If an error occurs, it logs the error with logger and returns an array with three null values.
 */
function azureOpenAIProvider(sttSchema, audioBuffer, audioFile) {
  try {
    const instanceName = sttSchema?.instanceName;
    const deploymentName = sttSchema?.deploymentName;
    const apiVersion = sttSchema?.apiVersion;

    const url =
      genAzureEndpoint({
        azureOpenAIApiInstanceName: instanceName,
        azureOpenAIApiDeploymentName: deploymentName,
      }) +
      '/audio/transcriptions?api-version=' +
      apiVersion;

    const apiKey = sttSchema.apiKey ? extractEnvVariable(sttSchema.apiKey) : '';

    if (audioBuffer.byteLength > 25 * 1024 * 1024) {
      throw new Error('The audio file size exceeds the limit of 25MB');
    }
    const acceptedFormats = ['flac', 'mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'ogg', 'wav', 'webm'];
    const fileFormat = audioFile.mimetype.split('/')[1];
    if (!acceptedFormats.includes(fileFormat)) {
      throw new Error(`The audio file format ${fileFormat} is not accepted`);
    }

    const formData = new FormData();

    const audioBlob = new Blob([audioBuffer], { type: audioFile.mimetype });

    formData.append('file', audioBlob, audioFile.originalname);

    let data = formData;

    let headers = {
      'Content-Type': 'multipart/form-data',
    };

    [headers].forEach(removeUndefined);

    if (apiKey) {
      headers['api-key'] = apiKey;
    }

    return [url, data, headers];
  } catch (error) {
    logger.error('An error occurred while preparing the Azure OpenAI API STT request: ', error);
    throw error;
  }
}

/**
 * Convert speech to text
 * @param {Object} req - The request object
 * @param {Object} res - The response object
 *
 * @returns {Object} The response object with the text from the STT API
 *
 * @throws Will throw an error if an error occurs while processing the audio
 */

async function speechToText(req, res) {
  const customConfig = await getCustomConfig();
  if (!customConfig) {
    return res.status(500).send('Custom config not found');
  }

  if (!req.file || !req.file.buffer) {
    return res.status(400).json({ message: 'No audio file provided in the FormData' });
  }

  const audioBuffer = req.file.buffer;
  const audioReadStream = Readable.from(audioBuffer);
  audioReadStream.path = 'audio.wav';

  const [provider, sttSchema] = await getProviderSchema(customConfig);

  let [url, data, headers] = [];

  switch (provider) {
    case STTProviders.OPENAI:
      [url, data, headers] = openAIProvider(sttSchema, audioReadStream);
      break;
    case STTProviders.AZURE_OPENAI:
      [url, data, headers] = azureOpenAIProvider(sttSchema, audioBuffer, req.file);
      break;
    default:
      throw new Error('Invalid provider');
  }

  if (!Readable.from) {
    const audioBlob = new Blob([audioBuffer], { type: req.file.mimetype });
    delete data['file'];
    data['file'] = audioBlob;
  }

  try {
    const response = await axios.post(url, data, { headers: headers });
    const text = await handleResponse(response);

    res.json({ text });
  } catch (error) {
    logger.error('An error occurred while processing the audio:', error);
    res.sendStatus(500);
  }
}

module.exports = speechToText;
api/server/services/Files/Audio/textToSpeech.js
@@ -1,473 +0,0 @@
const axios = require('axios');
const { extractEnvVariable, TTSProviders } = require('librechat-data-provider');
const { logger } = require('~/config');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const { genAzureEndpoint } = require('~/utils');
const { getRandomVoiceId, createChunkProcessor, splitTextIntoChunks } = require('./streamAudio');

/**
 * getProvider function
 * This function takes the ttsSchema object and returns the name of the provider
 * If more than one provider is set or no provider is set, it throws an error
 *
 * @param {Object} ttsSchema - The TTS schema containing the provider configuration
 * @returns {string} The name of the provider
 * @throws {Error} Throws an error if multiple providers are set or no provider is set
 */
function getProvider(ttsSchema) {
  if (!ttsSchema) {
    throw new Error(`No TTS schema is set. Did you configure TTS in the custom config (librechat.yaml)?

https://www.librechat.ai/docs/configuration/stt_tts#tts`);
  }
  const providers = Object.entries(ttsSchema).filter(([, value]) => Object.keys(value).length > 0);

  if (providers.length > 1) {
    throw new Error('Multiple providers are set. Please set only one provider.');
  } else if (providers.length === 0) {
    throw new Error('No provider is set. Please set a provider.');
  } else {
    return providers[0][0];
  }
}

/**
 * removeUndefined function
 * This function takes an object and removes all keys with undefined values
 * It also removes keys with empty objects as values
 *
 * @param {Object} obj - The object to be cleaned
 * @returns {void} This function does not return a value. It modifies the input object directly
 */
function removeUndefined(obj) {
  Object.keys(obj).forEach((key) => {
    if (obj[key] && typeof obj[key] === 'object') {
      removeUndefined(obj[key]);
      if (Object.keys(obj[key]).length === 0) {
        delete obj[key];
      }
    } else if (obj[key] === undefined) {
      delete obj[key];
    }
  });
}

/**
 * This function prepares the necessary data and headers for making a request to the OpenAI TTS
 * It uses the provided TTS schema, input text, and voice to create the request
 *
 * @param {TCustomConfig['tts']['openai']} ttsSchema - The TTS schema containing the OpenAI configuration
 * @param {string} input - The text to be converted to speech
 * @param {string} voice - The voice to be used for the speech
 *
 * @returns {Array} An array containing the URL for the API request, the data to be sent, and the headers for the request
 * If an error occurs, it throws an error with a message indicating that the selected voice is not available
 */
function openAIProvider(ttsSchema, input, voice) {
  const url = ttsSchema?.url || 'https://api.openai.com/v1/audio/speech';

  if (
    ttsSchema?.voices &&
    ttsSchema.voices.length > 0 &&
    !ttsSchema.voices.includes(voice) &&
    !ttsSchema.voices.includes('ALL')
  ) {
    throw new Error(`Voice ${voice} is not available.`);
  }

  let data = {
    input,
    model: ttsSchema?.model,
    voice: ttsSchema?.voices && ttsSchema.voices.length > 0 ? voice : undefined,
    backend: ttsSchema?.backend,
  };

  let headers = {
    'Content-Type': 'application/json',
    Authorization: 'Bearer ' + extractEnvVariable(ttsSchema?.apiKey),
  };

  [data, headers].forEach(removeUndefined);

  return [url, data, headers];
}

/**
 * Generates the necessary parameters for making a request to Azure's OpenAI Text-to-Speech API.
 *
 * @param {TCustomConfig['tts']['azureOpenAI']} ttsSchema - The TTS schema containing the AzureOpenAI configuration
 * @param {string} input - The text to be converted to speech
 * @param {string} voice - The voice to be used for the speech
 *
 * @returns {Array} An array containing the URL for the API request, the data to be sent, and the headers for the request
 * If an error occurs, it throws an error with a message indicating that the selected voice is not available
 */
function azureOpenAIProvider(ttsSchema, input, voice) {
  const instanceName = ttsSchema?.instanceName;
  const deploymentName = ttsSchema?.deploymentName;
  const apiVersion = ttsSchema?.apiVersion;

  const url =
    genAzureEndpoint({
      azureOpenAIApiInstanceName: instanceName,
      azureOpenAIApiDeploymentName: deploymentName,
    }) +
    '/audio/speech?api-version=' +
    apiVersion;

  const apiKey = ttsSchema.apiKey ? extractEnvVariable(ttsSchema.apiKey) : '';

  if (
    ttsSchema?.voices &&
    ttsSchema.voices.length > 0 &&
    !ttsSchema.voices.includes(voice) &&
    !ttsSchema.voices.includes('ALL')
  ) {
    throw new Error(`Voice ${voice} is not available.`);
  }

  let data = {
    model: ttsSchema?.model,
    input,
    voice: ttsSchema?.voices && ttsSchema.voices.length > 0 ? voice : undefined,
  };

  let headers = {
    'Content-Type': 'application/json',
  };

  [data, headers].forEach(removeUndefined);

  if (apiKey) {
    headers['api-key'] = apiKey;
  }

  return [url, data, headers];
}

/**
 * elevenLabsProvider function
 * This function prepares the necessary data and headers for making a request to the Eleven Labs TTS
 * It uses the provided TTS schema, input text, and voice to create the request
 *
 * @param {TCustomConfig['tts']['elevenLabs']} ttsSchema - The TTS schema containing the Eleven Labs configuration
 * @param {string} input - The text to be converted to speech
 * @param {string} voice - The voice to be used for the speech
 * @param {boolean} stream - Whether to stream the audio or not
 *
 * @returns {Array} An array containing the URL for the API request, the data to be sent, and the headers for the request
 * @throws {Error} Throws an error if the selected voice is not available
 */
function elevenLabsProvider(ttsSchema, input, voice, stream) {
  let url =
    ttsSchema?.url ||
    `https://api.elevenlabs.io/v1/text-to-speech/{voice_id}${stream ? '/stream' : ''}`;

  if (!ttsSchema?.voices.includes(voice) && !ttsSchema?.voices.includes('ALL')) {
    throw new Error(`Voice ${voice} is not available.`);
  }

  url = url.replace('{voice_id}', voice);

  let data = {
    model_id: ttsSchema?.model,
    text: input,
    // voice_id: voice,
    voice_settings: {
      similarity_boost: ttsSchema?.voice_settings?.similarity_boost,
      stability: ttsSchema?.voice_settings?.stability,
      style: ttsSchema?.voice_settings?.style,
      use_speaker_boost: ttsSchema?.voice_settings?.use_speaker_boost || undefined,
    },
    pronunciation_dictionary_locators: ttsSchema?.pronunciation_dictionary_locators,
  };

  let headers = {
    'Content-Type': 'application/json',
    'xi-api-key': extractEnvVariable(ttsSchema?.apiKey),
    Accept: 'audio/mpeg',
  };

  [data, headers].forEach(removeUndefined);

  return [url, data, headers];
}

/**
 * localAIProvider function
 * This function prepares the necessary data and headers for making a request to the LocalAI TTS
 * It uses the provided TTS schema, input text, and voice to create the request
 *
 * @param {TCustomConfig['tts']['localai']} ttsSchema - The TTS schema containing the LocalAI configuration
 * @param {string} input - The text to be converted to speech
 * @param {string} voice - The voice to be used for the speech
 *
 * @returns {Array} An array containing the URL for the API request, the data to be sent, and the headers for the request
 * @throws {Error} Throws an error if the selected voice is not available
 */
function localAIProvider(ttsSchema, input, voice) {
  let url = ttsSchema?.url;

  if (
    ttsSchema?.voices &&
    ttsSchema.voices.length > 0 &&
    !ttsSchema.voices.includes(voice) &&
    !ttsSchema.voices.includes('ALL')
  ) {
    throw new Error(`Voice ${voice} is not available.`);
  }

  let data = {
    input,
    model: ttsSchema?.voices && ttsSchema.voices.length > 0 ? voice : undefined,
    backend: ttsSchema?.backend,
  };

  let headers = {
    'Content-Type': 'application/json',
    Authorization: 'Bearer ' + extractEnvVariable(ttsSchema?.apiKey),
  };

  [data, headers].forEach(removeUndefined);

  if (extractEnvVariable(ttsSchema.apiKey) === '') {
    delete headers.Authorization;
  }

  return [url, data, headers];
}

/**
 *
 * Returns provider and its schema for use with TTS requests
 * @param {TCustomConfig} customConfig
 * @param {string} _voice
 * @returns {Promise<[string, TProviderSchema]>}
 */
async function getProviderSchema(customConfig) {
  const provider = getProvider(customConfig.speech.tts);
  return [provider, customConfig.speech.tts[provider]];
}

/**
 *
 * Returns a tuple of the TTS schema as well as the voice for the TTS request
 * @param {TProviderSchema} providerSchema
 * @param {string} requestVoice
 * @returns {Promise<string>}
 */
async function getVoice(providerSchema, requestVoice) {
  const voices = providerSchema.voices.filter((voice) => voice && voice.toUpperCase() !== 'ALL');
  let voice = requestVoice;
  if (!voice || !voices.includes(voice) || (voice.toUpperCase() === 'ALL' && voices.length > 1)) {
    voice = getRandomVoiceId(voices);
  }

  return voice;
}

/**
 *
 * @param {string} provider
 * @param {TProviderSchema} ttsSchema
 * @param {object} params
 * @param {string} params.voice
 * @param {string} params.input
 * @param {boolean} [params.stream]
 * @returns {Promise<ArrayBuffer>}
 */
async function ttsRequest(provider, ttsSchema, { input, voice, stream = true } = { stream: true }) {
  let [url, data, headers] = [];
  switch (provider) {
    case TTSProviders.OPENAI:
      [url, data, headers] = openAIProvider(ttsSchema, input, voice);
      break;
    case TTSProviders.AZURE_OPENAI:
      [url, data, headers] = azureOpenAIProvider(ttsSchema, input, voice);
      break;
    case TTSProviders.ELEVENLABS:
      [url, data, headers] = elevenLabsProvider(ttsSchema, input, voice, stream);
      break;
    case TTSProviders.LOCALAI:
      [url, data, headers] = localAIProvider(ttsSchema, input, voice);
      break;
    default:
      throw new Error('Invalid provider');
  }

  if (stream) {
    return await axios.post(url, data, { headers, responseType: 'stream' });
  }

  return await axios.post(url, data, { headers, responseType: 'arraybuffer' });
}

/**
 * Handles a text-to-speech request. Extracts input and voice from the request, retrieves the TTS configuration,
 * and sends a request to the appropriate provider. The resulting audio data is sent in the response
 *
 * @param {Object} req - The request object, which should contain the input text and voice in its body
 * @param {Object} res - The response object, used to send the audio data or an error message
 *
 * @returns {Promise<void>} This function does not return a value. It sends the audio data or an error message in the response
 *
 * @throws {Error} Throws an error if the provider is invalid
 */
async function textToSpeech(req, res) {
  const { input } = req.body;

  if (!input) {
    return res.status(400).send('Missing text in request body');
  }

  const customConfig = await getCustomConfig();
  if (!customConfig) {
    res.status(500).send('Custom config not found');
  }

  try {
    res.setHeader('Content-Type', 'audio/mpeg');
    const [provider, ttsSchema] = await getProviderSchema(customConfig);
    const voice = await getVoice(ttsSchema, req.body.voice);
    if (input.length < 4096) {
      const response = await ttsRequest(provider, ttsSchema, { input, voice });
      response.data.pipe(res);
      return;
    }

    const textChunks = splitTextIntoChunks(input, 1000);

    for (const chunk of textChunks) {
      try {
        const response = await ttsRequest(provider, ttsSchema, {
          voice,
          input: chunk.text,
          stream: true,
        });

        logger.debug(`[textToSpeech] user: ${req?.user?.id} | writing audio stream`);
        await new Promise((resolve) => {
          response.data.pipe(res, { end: chunk.isFinished });
          response.data.on('end', () => {
            resolve();
          });
        });

        if (chunk.isFinished) {
          break;
        }
      } catch (innerError) {
        logger.error('Error processing manual update:', chunk, innerError);
        if (!res.headersSent) {
          res.status(500).end();
        }
        return;
      }
    }

    if (!res.headersSent) {
      res.end();
    }
  } catch (error) {
    logger.error(
      'Error creating the audio stream. Suggestion: check your provider quota. Error:',
      error,
    );
    res.status(500).send('An error occurred');
  }
}

async function streamAudio(req, res) {
  res.setHeader('Content-Type', 'audio/mpeg');
  const customConfig = await getCustomConfig();
  if (!customConfig) {
    return res.status(500).send('Custom config not found');
  }

  const [provider, ttsSchema] = await getProviderSchema(customConfig);
  const voice = await getVoice(ttsSchema, req.body.voice);

  try {
    let shouldContinue = true;

    req.on('close', () => {
      logger.warn('[streamAudio] Audio Stream Request closed by client');
      shouldContinue = false;
    });

    const processChunks = createChunkProcessor(req.body.messageId);

    while (shouldContinue) {
      // example updates
      // const updates = [
      //   { text: 'This is a test.', isFinished: false },
      //   { text: 'This is only a test.', isFinished: false },
      //   { text: 'Your voice is like a combination of Fergie and Jesus!', isFinished: true },
      // ];

      const updates = await processChunks();
      if (typeof updates === 'string') {
        logger.error(`Error processing audio stream updates: ${JSON.stringify(updates)}`);
        res.status(500).end();
        return;
      }

      if (updates.length === 0) {
        await new Promise((resolve) => setTimeout(resolve, 1250));
        continue;
      }

      for (const update of updates) {
        try {
          const response = await ttsRequest(provider, ttsSchema, {
            voice,
            input: update.text,
            stream: true,
          });

          if (!shouldContinue) {
            break;
          }

          logger.debug(`[streamAudio] user: ${req?.user?.id} | writing audio stream`);
          await new Promise((resolve) => {
            response.data.pipe(res, { end: update.isFinished });
            response.data.on('end', () => {
              resolve();
            });
          });

          if (update.isFinished) {
            shouldContinue = false;
            break;
          }
        } catch (innerError) {
          logger.error('Error processing audio stream update:', update, innerError);
          if (!res.headersSent) {
            res.status(500).end();
          }
          return;
        }
      }

      if (!shouldContinue) {
        break;
      }
    }

    if (!res.headersSent) {
      res.end();
    }
  } catch (error) {
    logger.error('Failed to fetch audio:', error);
    if (!res.headersSent) {
      res.status(500).end();
    }
  }
}

module.exports = {
  textToSpeech,
  getProvider,
  streamAudio,
};
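Both the deleted module above and the new TTSService rely on the same chunk-piping pattern, which is worth unpacking: pipe(res, { end: chunk.isFinished }) keeps the client response open while successive provider streams are piped into it, and only the final chunk is allowed to close it. A stripped-down sketch of the idiom (names are illustrative, not from this commit):

// Pipe one provider stream into the shared response, closing it only on the last chunk.
async function pipeChunk(source, res, isLast) {
  await new Promise((resolve) => {
    source.pipe(res, { end: isLast }); // end: false leaves res writable for the next chunk
    source.on('end', resolve); // resolve once this chunk has fully flushed
  });
}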
@@ -15,7 +15,7 @@ export default function DecibelSelector() {
       <div className="flex items-center justify-between">
         <div>{localize('com_nav_db_sensitivity')}</div>
         <div className="w-2" />
-        <small className="opacity-40">({localize('com_endpoint_default_with_num', '0.45')})</small>
+        <small className="opacity-40">({localize('com_endpoint_default_with_num', '-45')})</small>
       </div>
       <div className="flex items-center justify-between">
         <Slider
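The corrected default reflects that the sensitivity threshold is a decibel level, not a 0-1 fraction: digital audio levels are reported in dBFS, where 0 is full scale and values grow more negative toward silence, so -45 sits in a plausible speech-gate range. A quick sanity check of the scale (the amplitude is an arbitrary example):

// 20 * log10(amplitude) converts a linear amplitude to decibels relative to full scale.
const amplitude = 0.005623;
console.log(20 * Math.log10(amplitude)); // ≈ -45 dB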
@@ -45,7 +45,9 @@ const useSpeechToTextExternal = (onTranscriptionComplete: (text: string) => void
 
   const cleanup = () => {
     if (mediaRecorderRef.current) {
-      mediaRecorderRef.current.removeEventListener('dataavailable', handleDataAvailable);
+      mediaRecorderRef.current.removeEventListener('dataavailable', (event: BlobEvent) => {
+        audioChunks.push(event.data);
+      });
       mediaRecorderRef.current.removeEventListener('stop', handleStop);
       mediaRecorderRef.current = null;
     }
@@ -68,14 +70,6 @@ const useSpeechToTextExternal = (onTranscriptionComplete: (text: string) => void
     }
   };
 
-  const handleDataAvailable = (event: BlobEvent) => {
-    if (event.data.size > 0) {
-      audioChunks.push(event.data);
-    } else {
-      showToast({ message: 'No audio data available', status: 'warning' });
-    }
-  };
-
   const handleStop = () => {
     if (audioChunks.length > 0) {
       const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
@@ -139,7 +133,9 @@ const useSpeechToTextExternal = (onTranscriptionComplete: (text: string) => void
     try {
      setAudioChunks([]);
       mediaRecorderRef.current = new MediaRecorder(audioStream.current);
-      mediaRecorderRef.current.addEventListener('dataavailable', handleDataAvailable);
+      mediaRecorderRef.current.addEventListener('dataavailable', (event: BlobEvent) => {
+        audioChunks.push(event.data);
+      });
       mediaRecorderRef.current.addEventListener('stop', handleStop);
       mediaRecorderRef.current.start(100);
       if (!audioContextRef.current && autoTranscribeAudio && speechToText) {
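A note on the listener refactor in these hunks: removeEventListener only detaches a listener when given the exact function reference that addEventListener registered, so the fresh inline arrow passed in cleanup() cannot match the one added during recording. The recorder is still released because mediaRecorderRef.current is set to null, but the removal call itself is a no-op. The identity rule, for reference (plain JS sketch):

const target = new EventTarget();
const onData = () => {};
target.addEventListener('dataavailable', onData);
target.removeEventListener('dataavailable', onData); // removed: same reference
target.addEventListener('dataavailable', () => {});
target.removeEventListener('dataavailable', () => {}); // no-op: different reference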