const axios = require('axios');
const { genAzureEndpoint } = require('@librechat/api');
const { extractEnvVariable, TTSProviders } = require('librechat-data-provider');
const { getRandomVoiceId, createChunkProcessor, splitTextIntoChunks } = require('./streamAudio');
const { getCustomConfig } = require('~/server/services/Config');
const { logger } = require('~/config');
/**
 * Service class for handling Text-to-Speech (TTS) operations.
 * @class
 */
class TTSService {
  /**
   * Creates an instance of TTSService.
   * @param {Object} customConfig - The custom configuration object.
   */
  constructor(customConfig) {
    this.customConfig = customConfig;
    this.providerStrategies = {
      [TTSProviders.OPENAI]: this.openAIProvider.bind(this),
      [TTSProviders.AZURE_OPENAI]: this.azureOpenAIProvider.bind(this),
      [TTSProviders.ELEVENLABS]: this.elevenLabsProvider.bind(this),
      [TTSProviders.LOCALAI]: this.localAIProvider.bind(this),
    };
  }

  /**
   * Creates a TTSService instance from the current custom configuration.
   * @static
   * @async
   * @returns {Promise<TTSService>} The TTSService instance.
   * @throws {Error} If the custom config is not found.
   */
  static async getInstance() {
    const customConfig = await getCustomConfig();
    if (!customConfig) {
      throw new Error('Custom config not found');
    }
    return new TTSService(customConfig);
  }

  /**
   * Retrieves the configured TTS provider.
   * @returns {string} The name of the configured provider.
   * @throws {Error} If no provider is set or multiple providers are set.
   */
  getProvider() {
    const ttsSchema = this.customConfig.speech.tts;
    if (!ttsSchema) {
      throw new Error(
        'No TTS schema is set. Did you configure TTS in the custom config (librechat.yaml)?',
      );
    }
    const providers = Object.entries(ttsSchema).filter(
      ([, value]) => Object.keys(value).length > 0,
    );

    if (providers.length !== 1) {
      throw new Error(
        providers.length > 1
          ? 'Multiple providers are set. Please set only one provider.'
          : 'No provider is set. Please set a provider.',
      );
    }
    return providers[0][0];
  }

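  /*
   * Illustrative sketch (assumption, not taken from the source): getProvider() expects exactly
   * one provider entry under customConfig.speech.tts to be populated. A minimal parsed config,
   * using placeholder values for the OpenAI provider, might look like:
   *
   *   {
   *     speech: {
   *       tts: {
   *         openai: { apiKey: '${TTS_API_KEY}', model: 'tts-1', voices: ['alloy', 'echo'] },
   *       },
   *     },
   *   }
   *
   * With a shape like this, getProvider() would return 'openai'.
   */
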
  /**
   * Selects a voice for TTS based on provider schema and request.
   * @async
   * @param {Object} providerSchema - The schema for the selected provider.
   * @param {string} requestVoice - The requested voice.
   * @returns {Promise<string>} The selected voice.
   */
  async getVoice(providerSchema, requestVoice) {
    const voices = providerSchema.voices.filter((voice) => voice && voice.toUpperCase() !== 'ALL');
    let voice = requestVoice;
    if (!voice || !voices.includes(voice) || (voice.toUpperCase() === 'ALL' && voices.length > 1)) {
      voice = getRandomVoiceId(voices);
    }
    return voice;
  }

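  /*
   * Illustrative note (placeholder voice names, not taken from the source): with
   * providerSchema.voices = ['alloy', 'echo', 'ALL'], getVoice(schema, 'echo') resolves to
   * 'echo', while getVoice(schema, 'nova') or getVoice(schema, undefined) falls back to a
   * random pick from ['alloy', 'echo'] via getRandomVoiceId, since 'ALL' entries are filtered
   * out of the candidate list.
   */
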
  /**
   * Recursively removes undefined properties from an object.
   * @param {Object} obj - The object to clean.
   */
  removeUndefined(obj) {
    Object.keys(obj).forEach((key) => {
      if (obj[key] && typeof obj[key] === 'object') {
        this.removeUndefined(obj[key]);
        if (Object.keys(obj[key]).length === 0) {
          delete obj[key];
        }
      } else if (obj[key] === undefined) {
        delete obj[key];
      }
    });
  }

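  /*
   * Example of the pruning behavior above (illustrative values):
   *   removeUndefined({ model: 'tts-1', voice: undefined, voice_settings: { style: undefined } })
   * mutates the object in place to { model: 'tts-1' }: undefined leaves are dropped, and nested
   * objects left empty after pruning are removed as well.
   */
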
  /**
   * Prepares the request for the OpenAI TTS provider.
   * @param {Object} ttsSchema - The TTS schema for OpenAI.
   * @param {string} input - The input text.
   * @param {string} voice - The selected voice.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   * @throws {Error} If the selected voice is not available.
   */
  openAIProvider(ttsSchema, input, voice) {
    const url = ttsSchema?.url || 'https://api.openai.com/v1/audio/speech';

    if (
      ttsSchema?.voices &&
      ttsSchema.voices.length > 0 &&
      !ttsSchema.voices.includes(voice) &&
      !ttsSchema.voices.includes('ALL')
    ) {
      throw new Error(`Voice ${voice} is not available.`);
    }

    const data = {
      input,
      model: ttsSchema?.model,
      voice: ttsSchema?.voices && ttsSchema.voices.length > 0 ? voice : undefined,
      backend: ttsSchema?.backend,
    };

    const headers = {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${extractEnvVariable(ttsSchema?.apiKey)}`,
    };

    return [url, data, headers];
  }

  /**
   * Prepares the request for the Azure OpenAI TTS provider.
   * @param {Object} ttsSchema - The TTS schema for Azure OpenAI.
   * @param {string} input - The input text.
   * @param {string} voice - The selected voice.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   * @throws {Error} If the selected voice is not available.
   */
  azureOpenAIProvider(ttsSchema, input, voice) {
    const url = `${genAzureEndpoint({
      azureOpenAIApiInstanceName: extractEnvVariable(ttsSchema?.instanceName),
      azureOpenAIApiDeploymentName: extractEnvVariable(ttsSchema?.deploymentName),
    })}/audio/speech?api-version=${extractEnvVariable(ttsSchema?.apiVersion)}`;

    if (
      ttsSchema?.voices &&
      ttsSchema.voices.length > 0 &&
      !ttsSchema.voices.includes(voice) &&
      !ttsSchema.voices.includes('ALL')
    ) {
      throw new Error(`Voice ${voice} is not available.`);
    }

    const data = {
      model: extractEnvVariable(ttsSchema?.model),
      input,
      voice: ttsSchema?.voices && ttsSchema.voices.length > 0 ? voice : undefined,
    };

    const headers = {
      'Content-Type': 'application/json',
      'api-key': ttsSchema.apiKey ? extractEnvVariable(ttsSchema.apiKey) : '',
    };

    return [url, data, headers];
  }

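  /*
   * Illustrative note (assumption about genAzureEndpoint, which lives in @librechat/api): it is
   * expected to build the standard Azure OpenAI base URL from the instance and deployment names,
   * so with instanceName 'my-instance' and deploymentName 'tts' the request URL above would be
   * roughly:
   *   https://my-instance.openai.azure.com/openai/deployments/tts/audio/speech?api-version=<apiVersion>
   */
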
  /**
   * Prepares the request for the ElevenLabs TTS provider.
   * @param {Object} ttsSchema - The TTS schema for ElevenLabs.
   * @param {string} input - The input text.
   * @param {string} voice - The selected voice.
   * @param {boolean} stream - Whether to use streaming.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   * @throws {Error} If the selected voice is not available.
   */
  elevenLabsProvider(ttsSchema, input, voice, stream) {
    const url =
      ttsSchema?.url ||
      `https://api.elevenlabs.io/v1/text-to-speech/${voice}${stream ? '/stream' : ''}`;

    if (!ttsSchema?.voices.includes(voice) && !ttsSchema?.voices.includes('ALL')) {
      throw new Error(`Voice ${voice} is not available.`);
    }

    const data = {
      model_id: ttsSchema?.model,
      text: input,
      voice_settings: {
        similarity_boost: ttsSchema?.voice_settings?.similarity_boost,
        stability: ttsSchema?.voice_settings?.stability,
        style: ttsSchema?.voice_settings?.style,
        use_speaker_boost: ttsSchema?.voice_settings?.use_speaker_boost,
      },
      pronunciation_dictionary_locators: ttsSchema?.pronunciation_dictionary_locators,
    };

    const headers = {
      'Content-Type': 'application/json',
      'xi-api-key': extractEnvVariable(ttsSchema?.apiKey),
      Accept: 'audio/mpeg',
    };

    return [url, data, headers];
  }

  /**
   * Prepares the request for the LocalAI TTS provider.
   * @param {Object} ttsSchema - The TTS schema for LocalAI.
   * @param {string} input - The input text.
   * @param {string} voice - The selected voice.
   * @returns {Array} An array containing the URL, data, and headers for the request.
   * @throws {Error} If the selected voice is not available.
   */
  localAIProvider(ttsSchema, input, voice) {
    const url = ttsSchema?.url;

    if (
      ttsSchema?.voices &&
      ttsSchema.voices.length > 0 &&
      !ttsSchema.voices.includes(voice) &&
      !ttsSchema.voices.includes('ALL')
    ) {
      throw new Error(`Voice ${voice} is not available.`);
    }

    const data = {
      input,
      model: ttsSchema?.voices && ttsSchema.voices.length > 0 ? voice : undefined,
      backend: ttsSchema?.backend,
    };

    const headers = {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${extractEnvVariable(ttsSchema?.apiKey)}`,
    };

    if (extractEnvVariable(ttsSchema.apiKey) === '') {
      delete headers.Authorization;
    }

    return [url, data, headers];
  }

  /**
   * Sends a TTS request to the specified provider.
   * @async
   * @param {string} provider - The TTS provider to use.
   * @param {Object} ttsSchema - The TTS schema for the provider.
   * @param {Object} options - The options for the TTS request.
   * @param {string} options.input - The input text.
   * @param {string} options.voice - The voice to use.
   * @param {boolean} [options.stream=true] - Whether to use streaming.
   * @returns {Promise<Object>} The axios response object.
   * @throws {Error} If the provider is invalid or the request fails.
   */
  async ttsRequest(provider, ttsSchema, { input, voice, stream = true }) {
    const strategy = this.providerStrategies[provider];
    if (!strategy) {
      throw new Error('Invalid provider');
    }

    const [url, data, headers] = strategy.call(this, ttsSchema, input, voice, stream);

    [data, headers].forEach(this.removeUndefined.bind(this));

    const options = { headers, responseType: stream ? 'stream' : 'arraybuffer' };

    try {
      return await axios.post(url, data, options);
    } catch (error) {
      logger.error(`TTS request failed for provider ${provider}:`, error);
      throw error;
    }
  }

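  /*
   * Illustrative usage sketch (the provider key, schema lookup, and 'alloy' voice are
   * placeholders, not taken from the source):
   *
   *   const ttsService = await TTSService.getInstance();
   *   const provider = ttsService.getProvider();
   *   const ttsSchema = ttsService.customConfig.speech.tts[provider];
   *   const response = await ttsService.ttsRequest(provider, ttsSchema, {
   *     input: 'Hello world',
   *     voice: 'alloy',
   *     stream: false, // buffered: response.data is a binary buffer instead of a stream
   *   });
   */
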
  /**
   * Processes a text-to-speech request.
   * @async
   * @param {Object} req - The request object.
   * @param {Object} res - The response object.
   * @returns {Promise<void>}
   */
  async processTextToSpeech(req, res) {
    const { input, voice: requestVoice } = req.body;

    if (!input) {
      return res.status(400).send('Missing text in request body');
    }

    try {
      res.setHeader('Content-Type', 'audio/mpeg');
      const provider = this.getProvider();
      const ttsSchema = this.customConfig.speech.tts[provider];
      const voice = await this.getVoice(ttsSchema, requestVoice);

      if (input.length < 4096) {
        const response = await this.ttsRequest(provider, ttsSchema, { input, voice });
        response.data.pipe(res);
        return;
      }

      const textChunks = splitTextIntoChunks(input, 1000);

      for (const chunk of textChunks) {
        try {
          const response = await this.ttsRequest(provider, ttsSchema, {
            voice,
            input: chunk.text,
            stream: true,
          });

          logger.debug(`[textToSpeech] user: ${req?.user?.id} | writing audio stream`);
          await new Promise((resolve) => {
            response.data.pipe(res, { end: chunk.isFinished });
            response.data.on('end', resolve);
          });

          if (chunk.isFinished) {
            break;
          }
        } catch (innerError) {
          logger.error('Error processing manual update:', chunk, innerError);
          if (!res.headersSent) {
            return res.status(500).end();
          }
          return;
        }
      }

      if (!res.headersSent) {
        res.end();
      }
    } catch (error) {
      logger.error('Error creating the audio stream:', error);
      if (!res.headersSent) {
        return res.status(500).send('An error occurred');
      }
    }
  }

  /**
   * Streams audio data from the TTS provider.
   * @async
   * @param {Object} req - The request object.
   * @param {Object} res - The response object.
   * @returns {Promise<void>}
   */
  async streamAudio(req, res) {
    res.setHeader('Content-Type', 'audio/mpeg');
    const provider = this.getProvider();
    const ttsSchema = this.customConfig.speech.tts[provider];
    const voice = await this.getVoice(ttsSchema, req.body.voice);

    let shouldContinue = true;

    req.on('close', () => {
      logger.warn('[streamAudio] Audio Stream Request closed by client');
      shouldContinue = false;
    });

    const processChunks = createChunkProcessor(req.user.id, req.body.messageId);

    try {
      while (shouldContinue) {
        const updates = await processChunks();
        if (typeof updates === 'string') {
          logger.error(`Error processing audio stream updates: ${updates}`);
          return res.status(500).end();
        }

        if (updates.length === 0) {
          await new Promise((resolve) => setTimeout(resolve, 1250));
          continue;
        }

        for (const update of updates) {
          try {
            const response = await this.ttsRequest(provider, ttsSchema, {
              voice,
              input: update.text,
              stream: true,
            });

            if (!shouldContinue) {
              break;
            }

            logger.debug(`[streamAudio] user: ${req?.user?.id} | writing audio stream`);
            await new Promise((resolve) => {
              response.data.pipe(res, { end: update.isFinished });
              response.data.on('end', resolve);
            });

            if (update.isFinished) {
              shouldContinue = false;
              break;
            }
          } catch (innerError) {
            logger.error('Error processing audio stream update:', update, innerError);
            if (!res.headersSent) {
              return res.status(500).end();
            }
            return;
          }
        }

        if (!shouldContinue) {
          break;
        }
      }

      if (!res.headersSent) {
        res.end();
      }
    } catch (error) {
      logger.error('Failed to fetch audio:', error);
      if (!res.headersSent) {
        res.status(500).end();
      }
    }
  }
}

/**
 * Factory function to create a TTSService instance.
 * @async
 * @returns {Promise<TTSService>} A promise that resolves to a TTSService instance.
 */
async function createTTSService() {
  return TTSService.getInstance();
}

/**
 * Wrapper function for text-to-speech processing.
 * @async
 * @param {Object} req - The request object.
 * @param {Object} res - The response object.
 * @returns {Promise<void>}
 */
async function textToSpeech(req, res) {
  const ttsService = await createTTSService();
  await ttsService.processTextToSpeech(req, res);
}

/**
 * Wrapper function for audio streaming.
 * @async
 * @param {Object} req - The request object.
 * @param {Object} res - The response object.
 * @returns {Promise<void>}
 */
async function streamAudio(req, res) {
  const ttsService = await createTTSService();
  await ttsService.streamAudio(req, res);
}

/**
 * Wrapper function to get the configured TTS provider.
 * @async
 * @returns {Promise<string>} A promise that resolves to the name of the configured provider.
 */
async function getProvider() {
  const ttsService = await createTTSService();
  return ttsService.getProvider();
}

module.exports = {
  textToSpeech,
  streamAudio,
  getProvider,
};
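
/*
 * Illustrative wiring sketch (not part of the source): the exported handlers are plain
 * (req, res) functions, so a hypothetical Express router could mount them directly.
 * The route paths and file name below are assumptions.
 *
 *   const express = require('express');
 *   const { textToSpeech, streamAudio } = require('./TTSService');
 *
 *   const router = express.Router();
 *   router.post('/text-to-speech', textToSpeech);
 *   router.post('/stream-audio', streamAudio);
 *
 *   module.exports = router;
 */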