Mirror of https://github.com/danny-avila/LibreChat.git
Synced 2025-12-17 00:40:14 +01:00
🪦 refactor: Remove Legacy Code (#10533)
* 🗑️ chore: Remove unused Legacy Provider clients and related helpers
  * Deleted OpenAIClient and GoogleClient files along with their associated tests.
  * Removed references to these clients in the clients index file.
  * Cleaned up typedefs by removing the OpenAISpecClient export.
  * Updated chat controllers to use the OpenAI SDK directly instead of the removed client classes.
* chore/remove-openapi-specs
* 🗑️ chore: Remove unused mergeSort and misc utility functions
  * Deleted mergeSort.js and misc.js, as they are no longer needed.
  * Removed references to cleanUpPrimaryKeyValue in messages.js and adjusted the related logic.
  * Updated mongoMeili.ts to eliminate local implementations of the removed functions.
* chore: remove legacy endpoints
* chore: remove all plugins-endpoint-related code
* chore: remove unused prompt-handling code and clean up imports
  * Deleted handleInputs.js and instructions.js, as they are no longer needed.
  * Removed references to these files in the prompts index.js.
  * Updated docker-compose.yml to simplify the reverse-proxy configuration.
* chore: remove unused LightningIcon import from Icons.tsx
* chore: clean up translation.json by removing deprecated and unused keys
* chore: update Jest configuration and remove unused mock file
  * Simplified the setupFiles array in jest.config.js by removing the fetchEventSource mock.
  * Deleted the fetchEventSource.js mock file, as it is no longer needed.
* fix: simplify endpoint type check in the Landing and ConversationStarters components
  * Updated the endpoint type check to use strict equality for clarity and performance.
  * Ensured consistent handling of the azureOpenAI endpoint across both components.
* chore: remove unused dependencies from package.json and package-lock.json
* chore: remove the legacy EditController, its associated routes and imports
* chore: update banResponse logic to refine request handling for banned users
* chore: remove the unused validateEndpoint middleware and its references
* chore: remove the unused 'res' parameter from initializeClient in multiple endpoint files
* chore: remove the unused 'isSmallScreen' prop from the BookmarkNav and NewChat components; clean up imports in ArchivedChatsTable and the useSetIndexOptions hook; enhance localization in PromptVersions
* chore: remove the unused Constants and TMessage imports from MobileNav, retaining only the necessary QueryKeys import
* chore: remove the unused TResPlugin type and related references; clean up imports in types and schemas
This commit is contained in:
parent b6dcefc53a
commit 656e1abaea
161 changed files with 256 additions and 10513 deletions
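
The central refactor replaces the deleted OpenAIClient/GoogleClient wrapper classes with direct calls to the official OpenAI SDK. As a rough sketch of that pattern (illustrative only; the model name and the env-var sourcing below are assumptions, not the project's exact code):

    const OpenAI = require('openai');

    async function completeChat(text) {
      // Direct SDK usage replaces the legacy wrapper's sendMessage()/titleConvo() helpers.
      const openai = new OpenAI({
        apiKey: process.env.OPENAI_API_KEY,
        baseURL: process.env.OPENAI_REVERSE_PROXY || undefined, // assumed env var for illustration
      });

      const completion = await openai.chat.completions.create({
        model: 'gpt-4o-mini', // assumed model name
        messages: [{ role: 'user', content: text }],
      });

      return completion.choices[0]?.message?.content ?? '';
    }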
api/server/controllers/EditController.js

@@ -1,247 +0,0 @@
-const { sendEvent } = require('@librechat/api');
-const { logger } = require('@librechat/data-schemas');
-const { getResponseSender } = require('librechat-data-provider');
-const {
-  handleAbortError,
-  createAbortController,
-  cleanupAbortController,
-} = require('~/server/middleware');
-const {
-  disposeClient,
-  processReqData,
-  clientRegistry,
-  requestDataMap,
-} = require('~/server/cleanup');
-const { createOnProgress } = require('~/server/utils');
-const { saveMessage } = require('~/models');
-
-const EditController = async (req, res, next, initializeClient) => {
-  let {
-    text,
-    generation,
-    endpointOption,
-    conversationId,
-    modelDisplayLabel,
-    responseMessageId,
-    isContinued = false,
-    parentMessageId = null,
-    overrideParentMessageId = null,
-  } = req.body;
-
-  let client = null;
-  let abortKey = null;
-  let cleanupHandlers = [];
-  let clientRef = null; // Declare clientRef here
-
-  logger.debug('[EditController]', {
-    text,
-    generation,
-    isContinued,
-    conversationId,
-    ...endpointOption,
-    modelsConfig: endpointOption.modelsConfig ? 'exists' : '',
-  });
-
-  let userMessage = null;
-  let userMessagePromise = null;
-  let promptTokens = null;
-  let getAbortData = null;
-
-  const sender = getResponseSender({
-    ...endpointOption,
-    model: endpointOption.modelOptions.model,
-    modelDisplayLabel,
-  });
-  const userMessageId = parentMessageId;
-  const userId = req.user.id;
-
-  let reqDataContext = { userMessage, userMessagePromise, responseMessageId, promptTokens };
-
-  const updateReqData = (data = {}) => {
-    reqDataContext = processReqData(data, reqDataContext);
-    abortKey = reqDataContext.abortKey;
-    userMessage = reqDataContext.userMessage;
-    userMessagePromise = reqDataContext.userMessagePromise;
-    responseMessageId = reqDataContext.responseMessageId;
-    promptTokens = reqDataContext.promptTokens;
-  };
-
-  let { onProgress: progressCallback, getPartialText } = createOnProgress({
-    generation,
-  });
-
-  const performCleanup = () => {
-    logger.debug('[EditController] Performing cleanup');
-    if (Array.isArray(cleanupHandlers)) {
-      for (const handler of cleanupHandlers) {
-        try {
-          if (typeof handler === 'function') {
-            handler();
-          }
-        } catch (e) {
-          // Ignore
-        }
-      }
-    }
-
-    if (abortKey) {
-      logger.debug('[EditController] Cleaning up abort controller');
-      cleanupAbortController(abortKey);
-      abortKey = null;
-    }
-
-    if (client) {
-      disposeClient(client);
-      client = null;
-    }
-
-    reqDataContext = null;
-    userMessage = null;
-    userMessagePromise = null;
-    promptTokens = null;
-    getAbortData = null;
-    progressCallback = null;
-    endpointOption = null;
-    cleanupHandlers = null;
-
-    if (requestDataMap.has(req)) {
-      requestDataMap.delete(req);
-    }
-    logger.debug('[EditController] Cleanup completed');
-  };
-
-  try {
-    ({ client } = await initializeClient({ req, res, endpointOption }));
-
-    if (clientRegistry && client) {
-      clientRegistry.register(client, { userId }, client);
-    }
-
-    if (client) {
-      requestDataMap.set(req, { client });
-    }
-
-    clientRef = new WeakRef(client);
-
-    getAbortData = () => {
-      const currentClient = clientRef?.deref();
-      const currentText =
-        currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText();
-
-      return {
-        sender,
-        conversationId,
-        messageId: reqDataContext.responseMessageId,
-        parentMessageId: overrideParentMessageId ?? userMessageId,
-        text: currentText,
-        userMessage: userMessage,
-        userMessagePromise: userMessagePromise,
-        promptTokens: reqDataContext.promptTokens,
-      };
-    };
-
-    const { onStart, abortController } = createAbortController(
-      req,
-      res,
-      getAbortData,
-      updateReqData,
-    );
-
-    const closeHandler = () => {
-      logger.debug('[EditController] Request closed');
-      if (!abortController || abortController.signal.aborted || abortController.requestCompleted) {
-        return;
-      }
-      abortController.abort();
-      logger.debug('[EditController] Request aborted on close');
-    };
-
-    res.on('close', closeHandler);
-    cleanupHandlers.push(() => {
-      try {
-        res.removeListener('close', closeHandler);
-      } catch (e) {
-        // Ignore
-      }
-    });
-
-    let response = await client.sendMessage(text, {
-      user: userId,
-      generation,
-      isContinued,
-      isEdited: true,
-      conversationId,
-      parentMessageId,
-      responseMessageId: reqDataContext.responseMessageId,
-      overrideParentMessageId,
-      getReqData: updateReqData,
-      onStart,
-      abortController,
-      progressCallback,
-      progressOptions: {
-        res,
-      },
-    });
-
-    const databasePromise = response.databasePromise;
-    delete response.databasePromise;
-
-    const { conversation: convoData = {} } = await databasePromise;
-    const conversation = { ...convoData };
-    conversation.title =
-      conversation && !conversation.title ? null : conversation?.title || 'New Chat';
-
-    if (client?.options?.attachments && endpointOption?.modelOptions?.model) {
-      conversation.model = endpointOption.modelOptions.model;
-    }
-
-    if (!abortController.signal.aborted) {
-      const finalUserMessage = reqDataContext.userMessage;
-      const finalResponseMessage = { ...response };
-
-      sendEvent(res, {
-        final: true,
-        conversation,
-        title: conversation.title,
-        requestMessage: finalUserMessage,
-        responseMessage: finalResponseMessage,
-      });
-      res.end();
-
-      await saveMessage(
-        req,
-        { ...finalResponseMessage, user: userId },
-        { context: 'api/server/controllers/EditController.js - response end' },
-      );
-    }
-
-    performCleanup();
-  } catch (error) {
-    logger.error('[EditController] Error handling request', error);
-    let partialText = '';
-    try {
-      const currentClient = clientRef?.deref();
-      partialText =
-        currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText();
-    } catch (getTextError) {
-      logger.error('[EditController] Error calling getText() during error handling', getTextError);
-    }
-
-    handleAbortError(res, req, error, {
-      sender,
-      partialText,
-      conversationId,
-      messageId: reqDataContext.responseMessageId,
-      parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId,
-      userMessageId,
-    })
-      .catch((err) => {
-        logger.error('[EditController] Error in `handleAbortError` during catch block', err);
-      })
-      .finally(() => {
-        performCleanup();
-      });
-  }
-};
-
-module.exports = EditController;
@@ -7,6 +7,7 @@ const {
   Constants,
   RunStatus,
   CacheKeys,
+  VisionModes,
   ContentTypes,
   EModelEndpoint,
   ViolationTypes,

@@ -25,6 +26,7 @@ const {
 const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
 const validateAuthor = require('~/server/middleware/assistants/validateAuthor');
 const { formatMessage, createVisionPrompt } = require('~/app/clients/prompts');
+const { encodeAndFormat } = require('~/server/services/Files/images/encode');
 const { createRun, StreamRunManager } = require('~/server/services/Runs');
 const { addTitle } = require('~/server/services/Endpoints/assistants');
 const { createRunBody } = require('~/server/services/createRunBody');

@@ -64,7 +66,7 @@ const chatV1 = async (req, res) => {
     clientTimestamp,
   } = req.body;

-  /** @type {OpenAIClient} */
+  /** @type {OpenAI} */
   let openai;
   /** @type {string|undefined} - the current thread id */
   let thread_id = _thread_id;

@@ -285,11 +287,10 @@ const chatV1 = async (req, res) => {
     });
   };

-  const { openai: _openai, client } = await getOpenAIClient({
+  const { openai: _openai } = await getOpenAIClient({
     req,
     res,
     endpointOption,
-    initAppClient: true,
   });

   openai = _openai;

@@ -364,7 +365,15 @@ const chatV1 = async (req, res) => {
       role: 'user',
       content: '',
     };
-    const files = await client.addImageURLs(visionMessage, attachments);
+    const { files, image_urls } = await encodeAndFormat(
+      req,
+      attachments,
+      {
+        endpoint: EModelEndpoint.assistants,
+      },
+      VisionModes.generative,
+    );
+    visionMessage.image_urls = image_urls.length ? image_urls : undefined;
     if (!visionMessage.image_urls?.length) {
       return;
     }

@@ -609,7 +618,6 @@ const chatV1 = async (req, res) => {
       text,
       responseText: response.text,
       conversationId,
-      client,
     });
   }

@@ -61,7 +61,7 @@ const chatV2 = async (req, res) => {
     clientTimestamp,
   } = req.body;

-  /** @type {OpenAIClient} */
+  /** @type {OpenAI} */
   let openai;
   /** @type {string|undefined} - the current thread id */
   let thread_id = _thread_id;

@@ -160,11 +160,10 @@ const chatV2 = async (req, res) => {
     });
   };

-  const { openai: _openai, client } = await getOpenAIClient({
+  const { openai: _openai } = await getOpenAIClient({
     req,
     res,
     endpointOption,
-    initAppClient: true,
   });

   openai = _openai;

@@ -453,7 +452,6 @@ const chatV2 = async (req, res) => {
       text,
       responseText: response.text,
       conversationId,
-      client,
     });
   }

@@ -63,7 +63,7 @@ const _listAssistants = async ({ req, res, version, query }) => {
  * @returns {Promise<Array<Assistant>>} A promise that resolves to the response from the `openai.beta.assistants.list` method call.
  */
 const listAllAssistants = async ({ req, res, version, query }) => {
-  /** @type {{ openai: OpenAIClient }} */
+  /** @type {{ openai: OpenAI }} */
   const { openai } = await getOpenAIClient({ req, res, version });
   const allAssistants = [];

@@ -181,7 +181,7 @@ const listAssistantsForAzure = async ({ req, res, version, azureConfig = {}, query }) => {
  * @param {TEndpointOption} params.endpointOption - The endpoint options.
  * @param {boolean} params.initAppClient - Whether to initialize the app client.
  * @param {string} params.overrideEndpoint - The endpoint to override.
- * @returns {Promise<{ openai: OpenAIClient, openAIApiKey: string; client: import('~/app/clients/OpenAIClient') }>} - The initialized OpenAI client.
+ * @returns {Promise<{ openai: OpenAI, openAIApiKey: string }>} - The initialized OpenAI SDK client.
 */
 async function getOpenAIClient({ req, res, endpointOption, initAppClient, overrideEndpoint }) {
   let endpoint = overrideEndpoint ?? req.body.endpoint ?? req.query.endpoint;

@@ -286,7 +286,6 @@ if (cluster.isMaster) {
   app.use('/api/keys', routes.keys);
   app.use('/api/user', routes.user);
   app.use('/api/search', routes.search);
-  app.use('/api/edit', routes.edit);
   app.use('/api/messages', routes.messages);
   app.use('/api/convos', routes.convos);
   app.use('/api/presets', routes.presets);

@@ -122,7 +122,6 @@ const startServer = async () => {
   app.use('/api/keys', routes.keys);
   app.use('/api/user', routes.user);
   app.use('/api/search', routes.search);
-  app.use('/api/edit', routes.edit);
   app.use('/api/messages', routes.messages);
   app.use('/api/convos', routes.convos);
   app.use('/api/presets', routes.presets);

@@ -131,7 +130,6 @@ const startServer = async () => {
   app.use('/api/endpoints', routes.endpoints);
   app.use('/api/balance', routes.balance);
   app.use('/api/models', routes.models);
-  app.use('/api/plugins', routes.plugins);
   app.use('/api/config', routes.config);
   app.use('/api/assistants', routes.assistants);
   app.use('/api/files', await routes.files.initialize());

@@ -19,14 +19,14 @@ const message = 'Your account has been temporarily banned due to violations of o
 * @param {Object} req - Express Request object.
 * @param {Object} res - Express Response object.
 *
- * @returns {Promise<Object>} - Returns a Promise which when resolved sends a response status of 403 with a specific message if request is not of api/ask or api/edit types. If it is, calls `denyRequest()` function.
+ * @returns {Promise<Object>} - Returns a Promise which when resolved sends a response status of 403 with a specific message if request is not of api/agents/chat. If it is, calls `denyRequest()` function.
 */
 const banResponse = async (req, res) => {
   const ua = uap(req.headers['user-agent']);
-  const { baseUrl } = req;
+  const { baseUrl, originalUrl } = req;
   if (!ua.browser.name) {
     return res.status(403).json({ message });
-  } else if (baseUrl === '/api/ask' || baseUrl === '/api/edit') {
+  } else if (baseUrl === '/api/agents' && originalUrl.startsWith('/api/agents/chat')) {
     return await denyRequest(req, res, { type: ViolationTypes.BAN });
   }

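For context on the refined ban check above: in Express, req.baseUrl is the mount path of the router that handles the request, while req.originalUrl is the full requested path. So for a request to /api/agents/chat/abc123 (an illustrative route shape, not necessarily the exact one the app uses):

    // req.baseUrl     === '/api/agents'
    // req.originalUrl === '/api/agents/chat/abc123'
    const isBannedTarget =
      req.baseUrl === '/api/agents' && req.originalUrl.startsWith('/api/agents/chat');

The old check matched the now-removed /api/ask and /api/edit routers; the new one denies banned users only on the agents chat sub-route.
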
@@ -4,7 +4,6 @@ const buildEndpointOption = require('./buildEndpointOption');
 const validateMessageReq = require('./validateMessageReq');
 const checkDomainAllowed = require('./checkDomainAllowed');
 const concurrentLimiter = require('./concurrentLimiter');
-const validateEndpoint = require('./validateEndpoint');
 const requireLocalAuth = require('./requireLocalAuth');
 const canDeleteAccount = require('./canDeleteAccount');
 const accessResources = require('./accessResources');

@@ -42,7 +41,6 @@ module.exports = {
   requireLdapAuth,
   requireLocalAuth,
   canDeleteAccount,
-  validateEndpoint,
   configMiddleware,
   concurrentLimiter,
   checkDomainAllowed,

@@ -1,20 +0,0 @@
-const { handleError } = require('@librechat/api');
-
-function validateEndpoint(req, res, next) {
-  const { endpoint: _endpoint, endpointType } = req.body;
-  const endpoint = endpointType ?? _endpoint;
-
-  if (!req.body.text || req.body.text.length === 0) {
-    return handleError(res, { text: 'Prompt empty or too short' });
-  }
-
-  const pathEndpoint = req.baseUrl.split('/')[3];
-
-  if (endpoint !== pathEndpoint) {
-    return handleError(res, { text: 'Illegal request: Endpoint mismatch' });
-  }
-
-  next();
-}
-
-module.exports = validateEndpoint;

@@ -5,7 +5,6 @@ const {
   setHeaders,
   handleAbort,
   validateModel,
-  // validateEndpoint,
   buildEndpointOption,
 } = require('~/server/middleware');
 const validateConvoAccess = require('~/server/middleware/validate/convoAccess');

@@ -5,7 +5,6 @@ const {
   setHeaders,
   handleAbort,
   validateModel,
-  // validateEndpoint,
   buildEndpointOption,
 } = require('~/server/middleware');
 const validateConvoAccess = require('~/server/middleware/validate/convoAccess');

@@ -1,24 +0,0 @@
-const express = require('express');
-const EditController = require('~/server/controllers/EditController');
-const { initializeClient } = require('~/server/services/Endpoints/anthropic');
-const {
-  setHeaders,
-  validateModel,
-  validateEndpoint,
-  buildEndpointOption,
-} = require('~/server/middleware');
-
-const router = express.Router();
-
-router.post(
-  '/',
-  validateEndpoint,
-  validateModel,
-  buildEndpointOption,
-  setHeaders,
-  async (req, res, next) => {
-    await EditController(req, res, next, initializeClient);
-  },
-);
-
-module.exports = router;

@@ -1,26 +0,0 @@
-const express = require('express');
-const EditController = require('~/server/controllers/EditController');
-const { initializeClient } = require('~/server/services/Endpoints/custom');
-const { addTitle } = require('~/server/services/Endpoints/openAI');
-const {
-  handleAbort,
-  setHeaders,
-  validateModel,
-  validateEndpoint,
-  buildEndpointOption,
-} = require('~/server/middleware');
-
-const router = express.Router();
-
-router.post(
-  '/',
-  validateEndpoint,
-  validateModel,
-  buildEndpointOption,
-  setHeaders,
-  async (req, res, next) => {
-    await EditController(req, res, next, initializeClient, addTitle);
-  },
-);
-
-module.exports = router;

@@ -1,24 +0,0 @@
-const express = require('express');
-const EditController = require('~/server/controllers/EditController');
-const { initializeClient } = require('~/server/services/Endpoints/google');
-const {
-  setHeaders,
-  validateModel,
-  validateEndpoint,
-  buildEndpointOption,
-} = require('~/server/middleware');
-
-const router = express.Router();
-
-router.post(
-  '/',
-  validateEndpoint,
-  validateModel,
-  buildEndpointOption,
-  setHeaders,
-  async (req, res, next) => {
-    await EditController(req, res, next, initializeClient);
-  },
-);
-
-module.exports = router;

@@ -1,45 +0,0 @@
-const { isEnabled } = require('@librechat/api');
-const { EModelEndpoint } = require('librechat-data-provider');
-const {
-  validateConvoAccess,
-  messageUserLimiter,
-  concurrentLimiter,
-  messageIpLimiter,
-  requireJwtAuth,
-  checkBan,
-  uaParser,
-} = require('~/server/middleware');
-const anthropic = require('./anthropic');
-const express = require('express');
-const openAI = require('./openAI');
-const custom = require('./custom');
-const google = require('./google');
-
-const { LIMIT_CONCURRENT_MESSAGES, LIMIT_MESSAGE_IP, LIMIT_MESSAGE_USER } = process.env ?? {};
-
-const router = express.Router();
-
-router.use(requireJwtAuth);
-router.use(checkBan);
-router.use(uaParser);
-
-if (isEnabled(LIMIT_CONCURRENT_MESSAGES)) {
-  router.use(concurrentLimiter);
-}
-
-if (isEnabled(LIMIT_MESSAGE_IP)) {
-  router.use(messageIpLimiter);
-}
-
-if (isEnabled(LIMIT_MESSAGE_USER)) {
-  router.use(messageUserLimiter);
-}
-
-router.use(validateConvoAccess);
-
-router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], openAI);
-router.use(`/${EModelEndpoint.anthropic}`, anthropic);
-router.use(`/${EModelEndpoint.google}`, google);
-router.use(`/${EModelEndpoint.custom}`, custom);
-
-module.exports = router;

@@ -1,26 +0,0 @@
-const express = require('express');
-const EditController = require('~/server/controllers/EditController');
-const { initializeClient } = require('~/server/services/Endpoints/openAI');
-const {
-  setHeaders,
-  validateModel,
-  validateEndpoint,
-  buildEndpointOption,
-  moderateText,
-} = require('~/server/middleware');
-
-const router = express.Router();
-router.use(moderateText);
-
-router.post(
-  '/',
-  validateEndpoint,
-  validateModel,
-  buildEndpointOption,
-  setHeaders,
-  async (req, res, next) => {
-    await EditController(req, res, next, initializeClient);
-  },
-);
-
-module.exports = router;

@@ -8,7 +8,6 @@ const memories = require('./memories');
 const presets = require('./presets');
 const prompts = require('./prompts');
 const balance = require('./balance');
-const plugins = require('./plugins');
 const actions = require('./actions');
 const banner = require('./banner');
 const search = require('./search');

@@ -22,14 +21,12 @@ const files = require('./files');
 const share = require('./share');
 const tags = require('./tags');
 const auth = require('./auth');
-const edit = require('./edit');
 const keys = require('./keys');
 const user = require('./user');
 const mcp = require('./mcp');

 module.exports = {
   mcp,
-  edit,
   auth,
   keys,
   user,

@@ -45,7 +42,6 @@ module.exports = {
   config,
   models,
   prompts,
-  plugins,
   actions,
   presets,
   balance,

@@ -12,7 +12,6 @@
 } = require('~/models');
 const { findAllArtifacts, replaceArtifactContent } = require('~/server/services/Artifacts/update');
 const { requireJwtAuth, validateMessageReq } = require('~/server/middleware');
-const { cleanUpPrimaryKeyValue } = require('~/lib/utils/misc');
 const { getConvosQueried } = require('~/models/Conversation');
 const { Message } = require('~/db/models');

@@ -68,9 +67,6 @@ router.get('/', async (req, res) => {
     const cleanedMessages = [];
     for (let i = 0; i < messages.length; i++) {
       let message = messages[i];
-      if (message.conversationId.includes('--')) {
-        message.conversationId = cleanUpPrimaryKeyValue(message.conversationId);
-      }
       if (result.convoMap[message.conversationId]) {
         messageIds.push(message.messageId);
         cleanedMessages.push(message);

@@ -1,9 +0,0 @@
-const express = require('express');
-const { getAvailablePluginsController } = require('~/server/controllers/PluginController');
-const { requireJwtAuth } = require('~/server/middleware');
-
-const router = express.Router();
-
-router.get('/', requireJwtAuth, getAvailablePluginsController);
-
-module.exports = router;

@@ -23,7 +23,7 @@ const { TextStream } = require('~/app/clients');
 * Sorts, processes, and flattens messages to a single string.
 *
 * @param {Object} params - Params for creating the onTextProgress function.
- * @param {OpenAIClient} params.openai - The OpenAI client instance.
+ * @param {OpenAI} params.openai - The OpenAI SDK client instance.
 * @param {string} params.conversationId - The current conversation ID.
 * @param {string} params.userMessageId - The user message ID; response's `parentMessageId`.
 * @param {string} params.messageId - The response message ID.

@@ -74,7 +74,7 @@ async function createOnTextProgress({
 * Retrieves the response from an OpenAI run.
 *
 * @param {Object} params - The parameters for getting the response.
- * @param {OpenAIClient} params.openai - The OpenAI client instance.
+ * @param {OpenAI} params.openai - The OpenAI SDK client instance.
 * @param {string} params.run_id - The ID of the run to get the response for.
 * @param {string} params.thread_id - The ID of the thread associated with the run.
 * @return {Promise<OpenAIAssistantFinish | OpenAIAssistantAction[] | ThreadMessage[] | RequiredActionFunctionToolCall[]>}

@@ -162,7 +162,7 @@ function hasToolCallChanged(previousCall, currentCall) {
 * Creates a handler function for steps in progress, specifically for
 * processing messages and managing seen completed messages.
 *
- * @param {OpenAIClient} openai - The OpenAI client instance.
+ * @param {OpenAI} openai - The OpenAI SDK client instance.
 * @param {string} thread_id - The ID of the thread the run is in.
 * @param {ThreadMessage[]} messages - The accumulated messages for the run.
 * @return {InProgressFunction} a function to handle steps in progress.

@@ -334,7 +334,7 @@ function createInProgressHandler(openai, thread_id, messages) {
 * Initializes a RunManager with handlers, then invokes waitForRun to monitor and manage an OpenAI run.
 *
 * @param {Object} params - The parameters for managing and monitoring the run.
- * @param {OpenAIClient} params.openai - The OpenAI client instance.
+ * @param {OpenAI} params.openai - The OpenAI SDK client instance.
 * @param {string} params.run_id - The ID of the run to manage and monitor.
 * @param {string} params.thread_id - The ID of the thread associated with the run.
 * @param {RunStep[]} params.accumulatedSteps - The accumulated steps for the run.

@@ -8,8 +8,6 @@ const {
   ASSISTANTS_API_KEY: assistantsApiKey,
   AZURE_API_KEY: azureOpenAIApiKey,
   ANTHROPIC_API_KEY: anthropicApiKey,
-  CHATGPT_TOKEN: chatGPTToken,
-  PLUGINS_USE_AZURE,
   GOOGLE_KEY: googleKey,
   OPENAI_REVERSE_PROXY,
   AZURE_OPENAI_BASEURL,

@@ -17,21 +15,15 @@ const {
   AZURE_ASSISTANTS_BASE_URL,
 } = process.env ?? {};

-const useAzurePlugins = !!PLUGINS_USE_AZURE;
-
-const userProvidedOpenAI = useAzurePlugins
-  ? isUserProvided(azureOpenAIApiKey)
-  : isUserProvided(openAIApiKey);
+const userProvidedOpenAI = isUserProvided(openAIApiKey);

 module.exports = {
   config: {
-    googleKey,
     openAIApiKey,
     azureOpenAIApiKey,
-    useAzurePlugins,
     userProvidedOpenAI,
+    googleKey,
     [EModelEndpoint.anthropic]: generateConfig(anthropicApiKey),
-    [EModelEndpoint.chatGPTBrowser]: generateConfig(chatGPTToken),
     [EModelEndpoint.openAI]: generateConfig(openAIApiKey, OPENAI_REVERSE_PROXY),
     [EModelEndpoint.azureOpenAI]: generateConfig(azureOpenAIApiKey, AZURE_OPENAI_BASEURL),
     [EModelEndpoint.assistants]: generateConfig(

@@ -1,17 +1,11 @@
 const path = require('path');
 const { logger } = require('@librechat/data-schemas');
-const { EModelEndpoint } = require('librechat-data-provider');
 const { loadServiceKey, isUserProvided } = require('@librechat/api');
 const { config } = require('./EndpointService');

-const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, googleKey } = config;
-
 /**
 * Load async endpoints and return a configuration object
- * @param {AppConfig} [appConfig] - The app configuration object
 */
-async function loadAsyncEndpoints(appConfig) {
+async function loadAsyncEndpoints() {
   let serviceKey, googleUserProvides;
+  const { googleKey } = config;

   /** Check if GOOGLE_KEY is provided at all(including 'user_provided') */
   const isGoogleKeyProvided = googleKey && googleKey.trim() !== '';

@@ -34,21 +28,7 @@ async function loadAsyncEndpoints(appConfig) {

   const google = serviceKey || isGoogleKeyProvided ? { userProvide: googleUserProvides } : false;

-  const useAzure = !!appConfig?.endpoints?.[EModelEndpoint.azureOpenAI]?.plugins;
-  const gptPlugins =
-    useAzure || openAIApiKey || azureOpenAIApiKey
-      ? {
-          availableAgents: ['classic', 'functions'],
-          userProvide: useAzure ? false : userProvidedOpenAI,
-          userProvideURL: useAzure
-            ? false
-            : config[EModelEndpoint.openAI]?.userProvideURL ||
-              config[EModelEndpoint.azureOpenAI]?.userProvideURL,
-          azure: useAzurePlugins || useAzure,
-        }
-      : false;
-
-  return { google, gptPlugins };
+  return { google };
 }

 module.exports = loadAsyncEndpoints;

@@ -25,10 +25,6 @@ async function loadConfigModels(req) {
     modelsConfig[EModelEndpoint.azureOpenAI] = modelNames;
   }

-  if (modelNames && azureConfig && azureConfig.plugins) {
-    modelsConfig[EModelEndpoint.gptPlugins] = modelNames;
-  }
-
   if (azureConfig?.assistants && azureConfig.assistantModels) {
     modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels;
   }

@@ -8,8 +8,8 @@ const { config } = require('./EndpointService');
 * @returns {Promise<Object.<string, EndpointWithOrder>>} An object whose keys are endpoint names and values are objects that contain the endpoint configuration and an order.
 */
 async function loadDefaultEndpointsConfig(appConfig) {
-  const { google, gptPlugins } = await loadAsyncEndpoints(appConfig);
-  const { assistants, azureAssistants, azureOpenAI, chatGPTBrowser } = config;
+  const { google } = await loadAsyncEndpoints(appConfig);
+  const { assistants, azureAssistants, azureOpenAI } = config;

   const enabledEndpoints = getEnabledEndpoints();

@@ -20,8 +20,6 @@ async function loadDefaultEndpointsConfig(appConfig) {
     [EModelEndpoint.azureAssistants]: azureAssistants,
     [EModelEndpoint.azureOpenAI]: azureOpenAI,
     [EModelEndpoint.google]: google,
-    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
-    [EModelEndpoint.gptPlugins]: gptPlugins,
     [EModelEndpoint.anthropic]: config[EModelEndpoint.anthropic],
     [EModelEndpoint.bedrock]: config[EModelEndpoint.bedrock],
   };

@@ -1,9 +1,8 @@
 const { getLLMConfig } = require('@librechat/api');
 const { EModelEndpoint } = require('librechat-data-provider');
 const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
-const AnthropicClient = require('~/app/clients/AnthropicClient');

-const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
+const initializeClient = async ({ req, endpointOption, overrideModel }) => {
   const appConfig = req.config;
   const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env;
   const expiresAt = req.body.key;

@@ -36,35 +35,19 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
     clientOptions._lc_stream_delay = allConfig.streamRate;
   }

-  if (optionsOnly) {
-    clientOptions = Object.assign(
-      {
-        proxy: PROXY ?? null,
-        reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
-        modelOptions: endpointOption?.model_parameters ?? {},
-      },
-      clientOptions,
-    );
-    if (overrideModel) {
-      clientOptions.modelOptions.model = overrideModel;
-    }
-    clientOptions.modelOptions.user = req.user.id;
-    return getLLMConfig(anthropicApiKey, clientOptions);
-  }
-
-  const client = new AnthropicClient(anthropicApiKey, {
-    req,
-    res,
-    reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
-    proxy: PROXY ?? null,
-    ...clientOptions,
-    ...endpointOption,
-  });
-
-  return {
-    client,
-    anthropicApiKey,
-  };
+  clientOptions = Object.assign(
+    {
+      proxy: PROXY ?? null,
+      reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
+      modelOptions: endpointOption?.model_parameters ?? {},
+    },
+    clientOptions,
+  );
+  if (overrideModel) {
+    clientOptions.modelOptions.model = overrideModel;
+  }
+  clientOptions.modelOptions.user = req.user.id;
+  return getLLMConfig(anthropicApiKey, clientOptions);
 };

 module.exports = initializeClient;

@@ -7,9 +7,8 @@
   getUserKeyExpiry,
   checkUserKeyExpiry,
 } = require('~/server/services/UserService');
-const OAIClient = require('~/app/clients/OpenAIClient');

-const initializeClient = async ({ req, res, endpointOption, version, initAppClient = false }) => {
+const initializeClient = async ({ req, res, version }) => {
   const { PROXY, OPENAI_ORGANIZATION, ASSISTANTS_API_KEY, ASSISTANTS_BASE_URL } = process.env;

   const userProvidesKey = isUserProvided(ASSISTANTS_API_KEY);

@@ -34,14 +33,6 @@ const initializeClient = async ({ req, res, endpointOption, version, initAppClient = false }) => {
     },
   };

-  const clientOptions = {
-    reverseProxyUrl: baseURL ?? null,
-    proxy: PROXY ?? null,
-    req,
-    res,
-    ...endpointOption,
-  };
-
   if (userProvidesKey & !apiKey) {
     throw new Error(
       JSON.stringify({

@@ -78,15 +69,6 @@ const initializeClient = async ({ req, res, endpointOption, version, initAppClient = false }) => {
   openai.req = req;
   openai.res = res;

-  if (endpointOption && initAppClient) {
-    const client = new OAIClient(apiKey, clientOptions);
-    return {
-      client,
-      openai,
-      openAIApiKey: apiKey,
-    };
-  }
-
   return {
     openai,
     openAIApiKey: apiKey,

@@ -1,113 +0,0 @@
-// const OpenAI = require('openai');
-const { ProxyAgent } = require('undici');
-const { ErrorTypes } = require('librechat-data-provider');
-const { getUserKey, getUserKeyExpiry, getUserKeyValues } = require('~/server/services/UserService');
-const initializeClient = require('./initalize');
-// const { OpenAIClient } = require('~/app');
-
-jest.mock('~/server/services/UserService', () => ({
-  getUserKey: jest.fn(),
-  getUserKeyExpiry: jest.fn(),
-  getUserKeyValues: jest.fn(),
-  checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry,
-}));
-
-const today = new Date();
-const tenDaysFromToday = new Date(today.setDate(today.getDate() + 10));
-const isoString = tenDaysFromToday.toISOString();
-
-describe('initializeClient', () => {
-  // Set up environment variables
-  const originalEnvironment = process.env;
-  const app = {
-    locals: {},
-  };
-
-  beforeEach(() => {
-    jest.resetModules(); // Clears the cache
-    process.env = { ...originalEnvironment }; // Make a copy
-  });
-
-  afterAll(() => {
-    process.env = originalEnvironment; // Restore original env vars
-  });
-
-  test('initializes OpenAI client with default API key and URL', async () => {
-    process.env.ASSISTANTS_API_KEY = 'default-api-key';
-    process.env.ASSISTANTS_BASE_URL = 'https://default.api.url';
-
-    // Assuming 'isUserProvided' to return false for this test case
-    jest.mock('~/server/utils', () => ({
-      isUserProvided: jest.fn().mockReturnValueOnce(false),
-    }));
-
-    const req = { user: { id: 'user123' }, app };
-    const res = {};
-
-    const { openai, openAIApiKey } = await initializeClient({ req, res });
-    expect(openai.apiKey).toBe('default-api-key');
-    expect(openAIApiKey).toBe('default-api-key');
-    expect(openai.baseURL).toBe('https://default.api.url');
-  });
-
-  test('initializes OpenAI client with user-provided API key and URL', async () => {
-    process.env.ASSISTANTS_API_KEY = 'user_provided';
-    process.env.ASSISTANTS_BASE_URL = 'user_provided';
-
-    getUserKeyValues.mockResolvedValue({ apiKey: 'user-api-key', baseURL: 'https://user.api.url' });
-    getUserKeyExpiry.mockResolvedValue(isoString);
-
-    const req = { user: { id: 'user123' }, app };
-    const res = {};
-
-    const { openai, openAIApiKey } = await initializeClient({ req, res });
-    expect(openAIApiKey).toBe('user-api-key');
-    expect(openai.apiKey).toBe('user-api-key');
-    expect(openai.baseURL).toBe('https://user.api.url');
-  });
-
-  test('throws error for invalid JSON in user-provided values', async () => {
-    process.env.ASSISTANTS_API_KEY = 'user_provided';
-    getUserKey.mockResolvedValue('invalid-json');
-    getUserKeyExpiry.mockResolvedValue(isoString);
-    getUserKeyValues.mockImplementation(() => {
-      let userValues = getUserKey();
-      try {
-        userValues = JSON.parse(userValues);
-      } catch (e) {
-        throw new Error(
-          JSON.stringify({
-            type: ErrorTypes.INVALID_USER_KEY,
-          }),
-        );
-      }
-      return userValues;
-    });
-
-    const req = { user: { id: 'user123' } };
-    const res = {};
-
-    await expect(initializeClient({ req, res })).rejects.toThrow(/invalid_user_key/);
-  });
-
-  test('throws error if API key is not provided', async () => {
-    delete process.env.ASSISTANTS_API_KEY; // Simulate missing API key
-
-    const req = { user: { id: 'user123' }, app };
-    const res = {};
-
-    await expect(initializeClient({ req, res })).rejects.toThrow(/Assistants API key not/);
-  });
-
-  test('initializes OpenAI client with proxy configuration', async () => {
-    process.env.ASSISTANTS_API_KEY = 'test-key';
-    process.env.PROXY = 'http://proxy.server';
-
-    const req = { user: { id: 'user123' }, app };
-    const res = {};
-
-    const { openai } = await initializeClient({ req, res });
-    expect(openai.fetchOptions).toBeDefined();
-    expect(openai.fetchOptions.dispatcher).toBeInstanceOf(ProxyAgent);
-  });
-});

api/server/services/Endpoints/assistants/addTitle.js

@@ -1,32 +1,84 @@
-const { isEnabled } = require('@librechat/api');
+const { isEnabled, sanitizeTitle } = require('@librechat/api');
+const { logger } = require('@librechat/data-schemas');
 const { CacheKeys } = require('librechat-data-provider');
 const { saveConvo } = require('~/models/Conversation');
 const getLogStores = require('~/cache/getLogStores');
+const initializeClient = require('./initalize');

-const addTitle = async (req, { text, responseText, conversationId, client }) => {
+/**
+ * Generates a conversation title using OpenAI SDK
+ * @param {Object} params
+ * @param {OpenAI} params.openai - The OpenAI SDK client instance
+ * @param {string} params.text - User's message text
+ * @param {string} params.responseText - Assistant's response text
+ * @returns {Promise<string>}
+ */
+const generateTitle = async ({ openai, text, responseText }) => {
+  const titlePrompt = `Please generate a concise title (max 40 characters) for a conversation that starts with:
+User: ${text}
+Assistant: ${responseText}
+
+Title:`;
+
+  const completion = await openai.chat.completions.create({
+    model: 'gpt-3.5-turbo',
+    messages: [
+      {
+        role: 'user',
+        content: titlePrompt,
+      },
+    ],
+    temperature: 0.7,
+    max_tokens: 20,
+  });
+
+  const title = completion.choices[0]?.message?.content?.trim() || 'New conversation';
+  return sanitizeTitle(title);
+};
+
+/**
+ * Adds a title to a conversation asynchronously
+ * @param {ServerRequest} req
+ * @param {Object} params
+ * @param {string} params.text - User's message text
+ * @param {string} params.responseText - Assistant's response text
+ * @param {string} params.conversationId - Conversation ID
+ */
+const addTitle = async (req, { text, responseText, conversationId }) => {
   const { TITLE_CONVO = 'true' } = process.env ?? {};
   if (!isEnabled(TITLE_CONVO)) {
     return;
   }

-  if (client.options.titleConvo === false) {
-    return;
-  }
-
   const titleCache = getLogStores(CacheKeys.GEN_TITLE);
   const key = `${req.user.id}-${conversationId}`;

-  const title = await client.titleConvo({ text, conversationId, responseText });
-  await titleCache.set(key, title, 120000);
+  try {
+    const { openai } = await initializeClient({ req });
+    const title = await generateTitle({ openai, text, responseText });
+    await titleCache.set(key, title, 120000);

-  await saveConvo(
-    req,
-    {
-      conversationId,
-      title,
-    },
-    { context: 'api/server/services/Endpoints/assistants/addTitle.js' },
-  );
+    await saveConvo(
+      req,
+      {
+        conversationId,
+        title,
+      },
+      { context: 'api/server/services/Endpoints/assistants/addTitle.js' },
+    );
+  } catch (error) {
+    logger.error('[addTitle] Error generating title:', error);
+    const fallbackTitle = text.length > 40 ? text.substring(0, 37) + '...' : text;
+    await titleCache.set(key, fallbackTitle, 120000);
+    await saveConvo(
+      req,
+      {
+        conversationId,
+        title: fallbackTitle,
+      },
+      { context: 'api/server/services/Endpoints/assistants/addTitle.js' },
+    );
+  }
 };

 module.exports = addTitle;

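With the client dependency gone, callers of addTitle (see the chatV1/chatV2 hunks earlier in this diff) now pass only the message data, along the lines of:

    await addTitle(req, {
      text,                        // the user's message
      responseText: response.text, // the assistant's reply
      conversationId,
    });
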
@@ -7,7 +7,6 @@
   getUserKeyValues,
   getUserKeyExpiry,
 } = require('~/server/services/UserService');
-const OAIClient = require('~/app/clients/OpenAIClient');

 class Files {
   constructor(client) {

@@ -184,15 +183,6 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClient }) => {
     openai.locals = { ...(openai.locals ?? {}), azureOptions };
   }

-  if (endpointOption && initAppClient) {
-    const client = new OAIClient(apiKey, clientOptions);
-    return {
-      client,
-      openai,
-      openAIApiKey: apiKey,
-    };
-  }
-
   return {
     openai,
     openAIApiKey: apiKey,

@@ -1,134 +0,0 @@
-// const OpenAI = require('openai');
-const { ProxyAgent } = require('undici');
-const { ErrorTypes, EModelEndpoint } = require('librechat-data-provider');
-const { getUserKey, getUserKeyExpiry, getUserKeyValues } = require('~/server/services/UserService');
-const initializeClient = require('./initialize');
-// const { OpenAIClient } = require('~/app');
-
-jest.mock('~/server/services/UserService', () => ({
-  getUserKey: jest.fn(),
-  getUserKeyExpiry: jest.fn(),
-  getUserKeyValues: jest.fn(),
-  checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry,
-}));
-
-// Config is now passed via req.config, not getAppConfig
-
-const today = new Date();
-const tenDaysFromToday = new Date(today.setDate(today.getDate() + 10));
-const isoString = tenDaysFromToday.toISOString();
-
-describe('initializeClient', () => {
-  // Set up environment variables
-  const originalEnvironment = process.env;
-  const app = {
-    locals: {},
-  };
-
-  beforeEach(() => {
-    jest.resetModules(); // Clears the cache
-    process.env = { ...originalEnvironment }; // Make a copy
-  });
-
-  afterAll(() => {
-    process.env = originalEnvironment; // Restore original env vars
-  });
-
-  test('initializes OpenAI client with default API key and URL', async () => {
-    process.env.AZURE_ASSISTANTS_API_KEY = 'default-api-key';
-    process.env.AZURE_ASSISTANTS_BASE_URL = 'https://default.api.url';
-
-    // Assuming 'isUserProvided' to return false for this test case
-    jest.mock('~/server/utils', () => ({
-      isUserProvided: jest.fn().mockReturnValueOnce(false),
-    }));
-
-    const req = {
-      user: { id: 'user123' },
-      app,
-      config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } },
-    };
-    const res = {};
-
-    const { openai, openAIApiKey } = await initializeClient({ req, res });
-    expect(openai.apiKey).toBe('default-api-key');
-    expect(openAIApiKey).toBe('default-api-key');
-    expect(openai.baseURL).toBe('https://default.api.url');
-  });
-
-  test('initializes OpenAI client with user-provided API key and URL', async () => {
-    process.env.AZURE_ASSISTANTS_API_KEY = 'user_provided';
-    process.env.AZURE_ASSISTANTS_BASE_URL = 'user_provided';
-
-    getUserKeyValues.mockResolvedValue({ apiKey: 'user-api-key', baseURL: 'https://user.api.url' });
-    getUserKeyExpiry.mockResolvedValue(isoString);
-
-    const req = {
-      user: { id: 'user123' },
-      app,
-      config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } },
-    };
-    const res = {};
-
-    const { openai, openAIApiKey } = await initializeClient({ req, res });
-    expect(openAIApiKey).toBe('user-api-key');
-    expect(openai.apiKey).toBe('user-api-key');
-    expect(openai.baseURL).toBe('https://user.api.url');
-  });
-
-  test('throws error for invalid JSON in user-provided values', async () => {
-    process.env.AZURE_ASSISTANTS_API_KEY = 'user_provided';
-    getUserKey.mockResolvedValue('invalid-json');
-    getUserKeyExpiry.mockResolvedValue(isoString);
-    getUserKeyValues.mockImplementation(() => {
-      let userValues = getUserKey();
-      try {
-        userValues = JSON.parse(userValues);
-      } catch {
-        throw new Error(
-          JSON.stringify({
-            type: ErrorTypes.INVALID_USER_KEY,
-          }),
-        );
-      }
-      return userValues;
-    });
-
-    const req = {
-      user: { id: 'user123' },
-      config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } },
-    };
-    const res = {};
-
-    await expect(initializeClient({ req, res })).rejects.toThrow(/invalid_user_key/);
-  });
-
-  test('throws error if API key is not provided', async () => {
-    delete process.env.AZURE_ASSISTANTS_API_KEY; // Simulate missing API key
-
-    const req = {
-      user: { id: 'user123' },
-      app,
-      config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } },
-    };
-    const res = {};
-
-    await expect(initializeClient({ req, res })).rejects.toThrow(/Assistants API key not/);
-  });
-
-  test('initializes OpenAI client with proxy configuration', async () => {
-    process.env.AZURE_ASSISTANTS_API_KEY = 'test-key';
-    process.env.PROXY = 'http://proxy.server';
-
-    const req = {
-      user: { id: 'user123' },
-      app,
-      config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } },
-    };
-    const res = {};
-
-    const { openai } = await initializeClient({ req, res });
-    expect(openai.fetchOptions).toBeDefined();
-    expect(openai.fetchOptions.dispatcher).toBeInstanceOf(ProxyAgent);
-  });
-});

@@ -8,12 +8,11 @@
 } = require('librechat-data-provider');
 const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
 const { fetchModels } = require('~/server/services/ModelService');
-const OpenAIClient = require('~/app/clients/OpenAIClient');
 const getLogStores = require('~/cache/getLogStores');

 const { PROXY } = process.env;

-const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrideEndpoint }) => {
+const initializeClient = async ({ req, endpointOption, overrideEndpoint }) => {
   const appConfig = req.config;
   const { key: expiresAt } = req.body;
   const endpoint = overrideEndpoint ?? req.body.endpoint;

@@ -120,38 +119,27 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrideEndpoint }) => {
   let clientOptions = {
     reverseProxyUrl: baseURL ?? null,
     proxy: PROXY ?? null,
-    req,
-    res,
     ...customOptions,
     ...endpointOption,
   };

-  if (optionsOnly) {
-    const modelOptions = endpointOption?.model_parameters ?? {};
-    clientOptions = Object.assign(
-      {
-        modelOptions,
-      },
-      clientOptions,
-    );
-    clientOptions.modelOptions.user = req.user.id;
-    const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
-    if (options != null) {
-      options.useLegacyContent = true;
-      options.endpointTokenConfig = endpointTokenConfig;
-    }
-    if (!clientOptions.streamRate) {
-      return options;
-    }
-    options.llmConfig._lc_stream_delay = clientOptions.streamRate;
-    return options;
-  }
-
-  const client = new OpenAIClient(apiKey, clientOptions);
-  return {
-    client,
-    openAIApiKey: apiKey,
-  };
+  const modelOptions = endpointOption?.model_parameters ?? {};
+  clientOptions = Object.assign(
+    {
+      modelOptions,
+    },
+    clientOptions,
+  );
+  clientOptions.modelOptions.user = req.user.id;
+  const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
+  if (options != null) {
+    options.useLegacyContent = true;
+    options.endpointTokenConfig = endpointTokenConfig;
+  }
+
+  if (clientOptions.streamRate) {
+    options.llmConfig._lc_stream_delay = clientOptions.streamRate;
+  }
+  return options;
 };

 module.exports = initializeClient;

@@ -1,106 +0,0 @@
-const initializeClient = require('./initialize');
-
-jest.mock('@librechat/api', () => ({
-  ...jest.requireActual('@librechat/api'),
-  resolveHeaders: jest.fn(),
-  getOpenAIConfig: jest.fn(),
-  getCustomEndpointConfig: jest.fn().mockReturnValue({
-    apiKey: 'test-key',
-    baseURL: 'https://test.com',
-    headers: { 'x-user': '{{LIBRECHAT_USER_ID}}', 'x-email': '{{LIBRECHAT_USER_EMAIL}}' },
-    models: { default: ['test-model'] },
-  }),
-}));
-
-jest.mock('~/server/services/UserService', () => ({
-  getUserKeyValues: jest.fn(),
-  checkUserKeyExpiry: jest.fn(),
-}));
-
-// Config is now passed via req.config, not getAppConfig
-
-jest.mock('~/server/services/ModelService', () => ({
-  fetchModels: jest.fn(),
-}));
-
-jest.mock('~/app/clients/OpenAIClient', () => {
-  return jest.fn().mockImplementation(() => ({
-    options: {},
-  }));
-});
-
-jest.mock('~/cache/getLogStores', () =>
-  jest.fn().mockReturnValue({
-    get: jest.fn(),
-  }),
-);
-
-describe('custom/initializeClient', () => {
-  const mockRequest = {
-    body: { endpoint: 'test-endpoint' },
-    user: { id: 'user-123', email: 'test@example.com', role: 'user' },
-    app: { locals: {} },
-    config: {
-      endpoints: {
-        all: {
-          streamRate: 25,
-        },
-      },
-    },
-  };
-  const mockResponse = {};
-
-  beforeEach(() => {
-    jest.clearAllMocks();
-    const { getCustomEndpointConfig, resolveHeaders, getOpenAIConfig } = require('@librechat/api');
-    getCustomEndpointConfig.mockReturnValue({
-      apiKey: 'test-key',
-      baseURL: 'https://test.com',
-      headers: { 'x-user': '{{LIBRECHAT_USER_ID}}', 'x-email': '{{LIBRECHAT_USER_EMAIL}}' },
-      models: { default: ['test-model'] },
-    });
-    resolveHeaders.mockReturnValue({ 'x-user': 'user-123', 'x-email': 'test@example.com' });
-    getOpenAIConfig.mockReturnValue({
-      useLegacyContent: true,
-      endpointTokenConfig: null,
-      llmConfig: {
-        callbacks: [],
-      },
-    });
-  });
-
-  it('stores original template headers for deferred resolution', async () => {
-    /**
-     * Note: Request-based Header Resolution is deferred until right before LLM request is made
-     * in the OpenAIClient or AgentClient, not during initialization.
-     * This test verifies that the initialize function completes successfully with optionsOnly flag,
-     * and that headers are passed through to be resolved later during the actual LLM request.
-     */
-    const result = await initializeClient({
-      req: mockRequest,
-      res: mockResponse,
-      optionsOnly: true,
-    });
-    // Verify that options are returned for later use
-    expect(result).toBeDefined();
-    expect(result).toHaveProperty('useLegacyContent', true);
-  });
-
-  it('throws if endpoint config is missing', async () => {
-    const { getCustomEndpointConfig } = require('@librechat/api');
-    getCustomEndpointConfig.mockReturnValueOnce(null);
-    await expect(
-      initializeClient({ req: mockRequest, res: mockResponse, optionsOnly: true }),
-    ).rejects.toThrow('Config not found for the test-endpoint custom endpoint.');
-  });
-
-  it('throws if user is missing', async () => {
-    await expect(
-      initializeClient({
-        req: { ...mockRequest, user: undefined },
-        res: mockResponse,
-        optionsOnly: true,
-      }),
-    ).rejects.toThrow("Cannot read properties of undefined (reading 'id')");
-  });
-});

@@ -2,9 +2,8 @@ const path = require('path');
const { EModelEndpoint, AuthKeys } = require('librechat-data-provider');
const { getGoogleConfig, isEnabled, loadServiceKey } = require('@librechat/api');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { GoogleClient } = require('~/app');

const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
const initializeClient = async ({ req, endpointOption, overrideModel }) => {
  const { GOOGLE_KEY, GOOGLE_REVERSE_PROXY, GOOGLE_AUTH_HEADER, PROXY } = process.env;
  const isUserProvided = GOOGLE_KEY === 'user_provided';
  const { key: expiresAt } = req.body;
@@ -62,8 +61,6 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
    }

  clientOptions = {
    req,
    res,
    reverseProxyUrl: GOOGLE_REVERSE_PROXY ?? null,
    authHeader: isEnabled(GOOGLE_AUTH_HEADER) ?? null,
    proxy: PROXY ?? null,
@@ -71,25 +68,16 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
    ...endpointOption,
  };

  if (optionsOnly) {
    clientOptions = Object.assign(
      {
        modelOptions: endpointOption?.model_parameters ?? {},
      },
      clientOptions,
    );
    if (overrideModel) {
      clientOptions.modelOptions.model = overrideModel;
    }
    return getGoogleConfig(credentials, clientOptions);
  clientOptions = Object.assign(
    {
      modelOptions: endpointOption?.model_parameters ?? {},
    },
    clientOptions,
  );
  if (overrideModel) {
    clientOptions.modelOptions.model = overrideModel;
  }

  const client = new GoogleClient(credentials, clientOptions);

  return {
    client,
    credentials,
  };
  return getGoogleConfig(credentials, clientOptions);
};

module.exports = initializeClient;
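
With the hunks above applied, google/initialize.js has a single return shape: it always resolves to the getGoogleConfig(...) result and never constructs a GoogleClient. A sketch of the resulting call site, assuming a consumer that previously destructured { client, credentials } (the consumer code itself is not part of this diff):

// Before: const { client, credentials } = await initializeClient({ req, res, endpointOption });
// After: the options object is consumed directly, e.g. by the agents layer.
const options = await initializeClient({ req, endpointOption, overrideModel });
// options carries the model configuration produced by getGoogleConfig;
// there is no client instance to manage or dispose of.
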
@@ -1,101 +0,0 @@
// file deepcode ignore HardcodedNonCryptoSecret: No hardcoded secrets
const { getUserKey } = require('~/server/services/UserService');
const initializeClient = require('./initialize');
const { GoogleClient } = require('~/app');

jest.mock('~/server/services/UserService', () => ({
  checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry,
  getUserKey: jest.fn().mockImplementation(() => ({})),
}));

// Config is now passed via req.config, not getAppConfig

const app = { locals: {} };

describe('google/initializeClient', () => {
  afterEach(() => {
    jest.clearAllMocks();
  });

  test('should initialize GoogleClient with user-provided credentials', async () => {
    process.env.GOOGLE_KEY = 'user_provided';
    process.env.GOOGLE_REVERSE_PROXY = 'http://reverse.proxy';
    process.env.PROXY = 'http://proxy';

    const expiresAt = new Date(Date.now() + 60000).toISOString();

    const req = {
      body: { key: expiresAt },
      user: { id: '123' },
      app,
      config: {
        endpoints: {
          all: {},
          google: {},
        },
      },
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client, credentials } = await initializeClient({ req, res, endpointOption });

    expect(getUserKey).toHaveBeenCalledWith({ userId: '123', name: 'google' });
    expect(client).toBeInstanceOf(GoogleClient);
    expect(client.options.reverseProxyUrl).toBe('http://reverse.proxy');
    expect(client.options.proxy).toBe('http://proxy');
    expect(credentials).toEqual({});
  });

  test('should initialize GoogleClient with service key credentials', async () => {
    process.env.GOOGLE_KEY = 'service_key';
    process.env.GOOGLE_REVERSE_PROXY = 'http://reverse.proxy';
    process.env.PROXY = 'http://proxy';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
      config: {
        endpoints: {
          all: {},
          google: {},
        },
      },
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client, credentials } = await initializeClient({ req, res, endpointOption });

    expect(client).toBeInstanceOf(GoogleClient);
    expect(client.options.reverseProxyUrl).toBe('http://reverse.proxy');
    expect(client.options.proxy).toBe('http://proxy');
    expect(credentials).toEqual({
      GOOGLE_SERVICE_KEY: {},
      GOOGLE_API_KEY: 'service_key',
    });
  });

  test('should handle expired user-provided key', async () => {
    process.env.GOOGLE_KEY = 'user_provided';

    const expiresAt = new Date(Date.now() - 10000).toISOString(); // Expired
    const req = {
      body: { key: expiresAt },
      user: { id: '123' },
      app,
      config: {
        endpoints: {
          all: {},
          google: {},
        },
      },
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };
    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /expired_user_key/,
    );
  });
});
@@ -7,16 +7,8 @@ const {
  getAzureCredentials,
} = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');

const initializeClient = async ({
  req,
  res,
  endpointOption,
  optionsOnly,
  overrideEndpoint,
  overrideModel,
}) => {
const initializeClient = async ({ req, endpointOption, overrideEndpoint, overrideModel }) => {
  const appConfig = req.config;
  const {
    PROXY,
@@ -137,28 +129,19 @@ const initializeClient = async ({
    throw new Error(`${endpoint} API Key not provided.`);
  }

  if (optionsOnly) {
    const modelOptions = endpointOption?.model_parameters ?? {};
    modelOptions.model = modelName;
    clientOptions = Object.assign({ modelOptions }, clientOptions);
    clientOptions.modelOptions.user = req.user.id;
    const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
    if (options != null && serverless === true) {
      options.useLegacyContent = true;
    }
    const streamRate = clientOptions.streamRate;
    if (!streamRate) {
      return options;
    }
    options.llmConfig._lc_stream_delay = streamRate;
    return options;
  const modelOptions = endpointOption?.model_parameters ?? {};
  modelOptions.model = modelName;
  clientOptions = Object.assign({ modelOptions }, clientOptions);
  clientOptions.modelOptions.user = req.user.id;
  const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
  if (options != null && serverless === true) {
    options.useLegacyContent = true;
  }

  const client = new OpenAIClient(apiKey, Object.assign({ req, res }, clientOptions));
  return {
    client,
    openAIApiKey: apiKey,
  };
  const streamRate = clientOptions.streamRate;
  if (streamRate) {
    options.llmConfig._lc_stream_delay = streamRate;
  }
  return options;
};

module.exports = initializeClient;
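
The simplified tail above also inverts the streamRate handling, replacing an early return with a guarded assignment; the behavior is equivalent. A condensed sketch of the surviving flow (variable names as in the hunk; getOpenAIConfig's return shape is assumed to match the { llmConfig } object mocked in the custom spec earlier):

// Condensed view of the post-refactor logic, not the verbatim source.
const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
if (options != null && serverless === true) {
  options.useLegacyContent = true; // serverless deployments keep legacy content blocks
}
if (clientOptions.streamRate) {
  // _lc_stream_delay throttles token emission downstream
  options.llmConfig._lc_stream_delay = clientOptions.streamRate;
}
return options;
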
@@ -1,431 +0,0 @@
jest.mock('~/cache/getLogStores', () => ({
  getLogStores: jest.fn().mockReturnValue({
    get: jest.fn().mockResolvedValue({
      openAI: { apiKey: 'test-key' },
    }),
    set: jest.fn(),
    delete: jest.fn(),
  }),
}));

const { EModelEndpoint, ErrorTypes, validateAzureGroups } = require('librechat-data-provider');
const { getUserKey, getUserKeyValues } = require('~/server/services/UserService');
const initializeClient = require('./initialize');
const { OpenAIClient } = require('~/app');

// Mock getUserKey since it's the only function we want to mock
jest.mock('~/server/services/UserService', () => ({
  getUserKey: jest.fn(),
  getUserKeyValues: jest.fn(),
  checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry,
}));

const mockAppConfig = {
  endpoints: {
    openAI: {
      apiKey: 'test-key',
    },
    azureOpenAI: {
      apiKey: 'test-azure-key',
      modelNames: ['gpt-4-vision-preview', 'gpt-3.5-turbo', 'gpt-4'],
      modelGroupMap: {
        'gpt-4-vision-preview': {
          group: 'librechat-westus',
          deploymentName: 'gpt-4-vision-preview',
          version: '2024-02-15-preview',
        },
      },
      groupMap: {
        'librechat-westus': {
          apiKey: 'WESTUS_API_KEY',
          instanceName: 'librechat-westus',
          version: '2023-12-01-preview',
          models: {
            'gpt-4-vision-preview': {
              deploymentName: 'gpt-4-vision-preview',
              version: '2024-02-15-preview',
            },
          },
        },
      },
    },
  },
};

describe('initializeClient', () => {
  // Set up environment variables
  const originalEnvironment = process.env;
  const app = {
    locals: {},
  };

  const validAzureConfigs = [
    {
      group: 'librechat-westus',
      apiKey: 'WESTUS_API_KEY',
      instanceName: 'librechat-westus',
      version: '2023-12-01-preview',
      models: {
        'gpt-4-vision-preview': {
          deploymentName: 'gpt-4-vision-preview',
          version: '2024-02-15-preview',
        },
        'gpt-3.5-turbo': {
          deploymentName: 'gpt-35-turbo',
        },
        'gpt-3.5-turbo-1106': {
          deploymentName: 'gpt-35-turbo-1106',
        },
        'gpt-4': {
          deploymentName: 'gpt-4',
        },
        'gpt-4-1106-preview': {
          deploymentName: 'gpt-4-1106-preview',
        },
      },
    },
    {
      group: 'librechat-eastus',
      apiKey: 'EASTUS_API_KEY',
      instanceName: 'librechat-eastus',
      deploymentName: 'gpt-4-turbo',
      version: '2024-02-15-preview',
      models: {
        'gpt-4-turbo': true,
      },
      baseURL: 'https://eastus.example.com',
      additionalHeaders: {
        'x-api-key': 'x-api-key-value',
      },
    },
    {
      group: 'mistral-inference',
      apiKey: 'AZURE_MISTRAL_API_KEY',
      baseURL:
        'https://Mistral-large-vnpet-serverless.region.inference.ai.azure.com/v1/chat/completions',
      serverless: true,
      models: {
        'mistral-large': true,
      },
    },
    {
      group: 'llama-70b-chat',
      apiKey: 'AZURE_LLAMA2_70B_API_KEY',
      baseURL:
        'https://Llama-2-70b-chat-qmvyb-serverless.region.inference.ai.azure.com/v1/chat/completions',
      serverless: true,
      models: {
        'llama-70b-chat': true,
      },
    },
  ];

  const { modelNames } = validateAzureGroups(validAzureConfigs);

  beforeEach(() => {
    jest.resetModules(); // Clears the cache
    process.env = { ...originalEnvironment }; // Make a copy
  });

  afterAll(() => {
    process.env = originalEnvironment; // Restore original env vars
  });

  test('should initialize client with OpenAI API key and default options', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.DEBUG_OPENAI = 'false';
    process.env.OPENAI_SUMMARIZE = 'false';

    const req = {
      body: { key: null, endpoint: EModelEndpoint.openAI },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    const result = await initializeClient({ req, res, endpointOption });

    expect(result.openAIApiKey).toBe('test-openai-api-key');
    expect(result.client).toBeInstanceOf(OpenAIClient);
  });

  test('should initialize client with Azure credentials when endpoint is azureOpenAI', async () => {
    process.env.AZURE_API_KEY = 'test-azure-api-key';
    (process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value'),
      (process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
      (process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
      (process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
      (process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
      (process.env.OPENAI_API_KEY = 'test-openai-api-key');
    process.env.DEBUG_OPENAI = 'false';
    process.env.OPENAI_SUMMARIZE = 'false';

    const req = {
      body: {
        key: null,
        endpoint: 'azureOpenAI',
        model: 'gpt-4-vision-preview',
      },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });

    expect(client.openAIApiKey).toBe('WESTUS_API_KEY');
    expect(client.client).toBeInstanceOf(OpenAIClient);
  });

  test('should use the debug option when DEBUG_OPENAI is enabled', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.DEBUG_OPENAI = 'true';

    const req = {
      body: { key: null, endpoint: EModelEndpoint.openAI },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });

    expect(client.client.options.debug).toBe(true);
  });

  test('should set contextStrategy to summarize when OPENAI_SUMMARIZE is enabled', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.OPENAI_SUMMARIZE = 'true';

    const req = {
      body: { key: null, endpoint: EModelEndpoint.openAI },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });

    expect(client.client.options.contextStrategy).toBe('summarize');
  });

  test('should set reverseProxyUrl and proxy when they are provided in the environment', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.OPENAI_REVERSE_PROXY = 'http://reverse.proxy';
    process.env.PROXY = 'http://proxy';

    const req = {
      body: { key: null, endpoint: EModelEndpoint.openAI },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });

    expect(client.client.options.reverseProxyUrl).toBe('http://reverse.proxy');
    expect(client.client.options.proxy).toBe('http://proxy');
  });

  test('should throw an error if the user-provided key has expired', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    process.env.AZURE_API_KEY = 'user_provided';
    process.env.DEBUG_OPENAI = 'false';
    process.env.OPENAI_SUMMARIZE = 'false';

    const expiresAt = new Date(Date.now() - 10000).toISOString(); // Expired
    const req = {
      body: { key: expiresAt, endpoint: EModelEndpoint.openAI },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /expired_user_key/,
    );
  });

  test('should throw an error if no API keys are provided in the environment', async () => {
    // Clear the environment variables for API keys
    delete process.env.OPENAI_API_KEY;
    delete process.env.AZURE_API_KEY;

    const req = {
      body: { key: null, endpoint: EModelEndpoint.openAI },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      `${EModelEndpoint.openAI} API Key not provided.`,
    );
  });

  it('should handle user-provided keys and check expiry', async () => {
    // Set up the req.body to simulate user-provided key scenario
    const req = {
      body: {
        key: new Date(Date.now() + 10000).toISOString(),
        endpoint: EModelEndpoint.openAI,
      },
      user: {
        id: '123',
      },
      app,
      config: mockAppConfig,
    };

    const res = {};
    const endpointOption = {};

    // Ensure the environment variable is set to 'user_provided' to match the isUserProvided condition
    process.env.OPENAI_API_KEY = 'user_provided';

    // Mock getUserKey to return the expected key
    getUserKeyValues.mockResolvedValue({ apiKey: 'test-user-provided-openai-api-key' });

    // Call the initializeClient function
    const result = await initializeClient({ req, res, endpointOption });

    // Assertions
    expect(result.openAIApiKey).toBe('test-user-provided-openai-api-key');
  });

  test('should throw an error if the user-provided key is invalid', async () => {
    const invalidKey = new Date(Date.now() - 100000).toISOString();
    const req = {
      body: { key: invalidKey, endpoint: EModelEndpoint.openAI },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    // Ensure the environment variable is set to 'user_provided' to match the isUserProvided condition
    process.env.OPENAI_API_KEY = 'user_provided';

    // Mock getUserKey to return an invalid key
    getUserKey.mockResolvedValue(invalidKey);

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /expired_user_key/,
    );
  });

  test('should throw an error when user-provided values are not valid JSON', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    const req = {
      body: { key: new Date(Date.now() + 10000).toISOString(), endpoint: EModelEndpoint.openAI },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    // Mock getUserKey to return a non-JSON string
    getUserKey.mockResolvedValue('not-a-json');
    getUserKeyValues.mockImplementation(() => {
      let userValues = getUserKey();
      try {
        userValues = JSON.parse(userValues);
      } catch {
        throw new Error(
          JSON.stringify({
            type: ErrorTypes.INVALID_USER_KEY,
          }),
        );
      }
      return userValues;
    });

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /invalid_user_key/,
    );
  });

  test('should initialize client correctly for Azure OpenAI with valid configuration', async () => {
    // Set up Azure environment variables
    process.env.WESTUS_API_KEY = 'test-westus-key';

    const req = {
      body: {
        key: null,
        endpoint: EModelEndpoint.azureOpenAI,
        model: modelNames[0],
      },
      user: { id: '123' },
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });
    expect(client.client.options.azure).toBeDefined();
  });

  test('should initialize client with default options when certain env vars are not set', async () => {
    delete process.env.DEBUG_OPENAI;
    delete process.env.OPENAI_SUMMARIZE;
    process.env.OPENAI_API_KEY = 'some-api-key';

    const req = {
      body: { key: null, endpoint: EModelEndpoint.openAI },
      user: { id: '123' },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });

    expect(client.client.options.debug).toBe(false);
    expect(client.client.options.contextStrategy).toBe(null);
  });

  test('should correctly use user-provided apiKey and baseURL when provided', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    process.env.OPENAI_REVERSE_PROXY = 'user_provided';
    const req = {
      body: {
        key: new Date(Date.now() + 10000).toISOString(),
        endpoint: EModelEndpoint.openAI,
      },
      user: {
        id: '123',
      },
      app,
      config: mockAppConfig,
    };
    const res = {};
    const endpointOption = {};

    getUserKeyValues.mockResolvedValue({
      apiKey: 'test',
      baseURL: 'https://user-provided-url.com',
    });

    const result = await initializeClient({ req, res, endpointOption });

    expect(result.openAIApiKey).toBe('test');
    expect(result.client.options.reverseProxyUrl).toBe('https://user-provided-url.com');
  });
});
@@ -9,6 +9,7 @@ const {
  EModelEndpoint,
} = require('librechat-data-provider');
const { OllamaClient } = require('~/app/clients/OllamaClient');
const { config } = require('./Config/EndpointService');
const getLogStores = require('~/cache/getLogStores');
const { extractBaseURL } = require('~/utils');

@@ -27,8 +28,6 @@ const splitAndTrim = (input) => {
    .filter(Boolean);
};

const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService').config;

/**
 * Fetches OpenAI models from the specified base API path or Azure, based on the provided configuration.
 *
@@ -138,11 +137,11 @@ const fetchModels = async ({
 * @param {string} opts.user - The user ID to send to the API.
 * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
 * @param {boolean} [opts.assistants=false] - Whether to fetch models from Azure.
 * @param {boolean} [opts.plugins=false] - Whether to fetch models from the plugins.
 * @param {string[]} [_models=[]] - The models to use as a fallback.
 */
const fetchOpenAIModels = async (opts, _models = []) => {
  let models = _models.slice() ?? [];
  const { openAIApiKey } = config;
  let apiKey = openAIApiKey;
  const openaiBaseURL = 'https://api.openai.com/v1';
  let baseURL = openaiBaseURL;
@@ -204,7 +203,6 @@ const fetchOpenAIModels = async (opts, _models = []) => {
 * @param {object} opts - The options for fetching the models.
 * @param {string} opts.user - The user ID to send to the API.
 * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
 * @param {boolean} [opts.plugins=false] - Whether to fetch models for the plugins endpoint.
 * @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants endpoint.
 */
const getOpenAIModels = async (opts) => {
@@ -216,24 +214,11 @@ const getOpenAIModels = async (opts) => {
    models = defaultModels[EModelEndpoint.azureAssistants];
  }

  if (opts.plugins) {
    models = models.filter(
      (model) =>
        !model.includes('text-davinci') &&
        !model.includes('instruct') &&
        !model.includes('0613') &&
        !model.includes('0314') &&
        !model.includes('0301'),
    );
  }

  let key;
  if (opts.assistants) {
    key = 'ASSISTANTS_MODELS';
  } else if (opts.azure) {
    key = 'AZURE_OPENAI_MODELS';
  } else if (opts.plugins) {
    key = 'PLUGIN_MODELS';
  } else {
    key = 'OPENAI_MODELS';
  }
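
After this hunk, getOpenAIModels selects its env override from three keys instead of four, and the plugin-specific model filter is gone. A condensed sketch of the surviving selection logic (the process.env lookup sits in unchanged lines between these hunks and is assumed here):

// Condensed, assuming the env lookup that follows in the unchanged lines.
const key = opts.assistants ? 'ASSISTANTS_MODELS' : opts.azure ? 'AZURE_OPENAI_MODELS' : 'OPENAI_MODELS';
if (process.env[key]) {
  return splitAndTrim(process.env[key]); // env-configured list wins before any fetch
}
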
@@ -243,22 +228,13 @@ const getOpenAIModels = async (opts) => {
    return models;
  }

  if (userProvidedOpenAI) {
  if (config.userProvidedOpenAI) {
    return models;
  }

  return await fetchOpenAIModels(opts, models);
};

const getChatGPTBrowserModels = () => {
  let models = ['text-davinci-002-render-sha', 'gpt-4'];
  if (process.env.CHATGPT_MODELS) {
    models = splitAndTrim(process.env.CHATGPT_MODELS);
  }

  return models;
};

/**
 * Fetches models from the Anthropic API.
 * @async
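
These ModelService hunks also replace the module-level destructured openAIApiKey/userProvidedOpenAI bindings with reads off the shared config object imported at the top of the file, so the values are read where they are used rather than bound once at module load. A sketch of the difference (illustrative only):

// Before: bound once at module load.
// const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService').config;
// After: read from the imported config object where needed.
const { config } = require('./Config/EndpointService');
const apiKey = config.openAIApiKey;
const skipFetch = config.userProvidedOpenAI;
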
@@ -348,8 +324,7 @@ module.exports = {
  fetchModels,
  splitAndTrim,
  getOpenAIModels,
  getBedrockModels,
  getChatGPTBrowserModels,
  getAnthropicModels,
  getGoogleModels,
  getBedrockModels,
  getAnthropicModels,
};
@@ -216,12 +216,6 @@ describe('getOpenAIModels', () => {
    expect(models).toEqual(expect.arrayContaining(['azure-model', 'azure-model-2']));
  });

  it('returns `PLUGIN_MODELS` with `plugins` flag (and fetch fails)', async () => {
    process.env.PLUGIN_MODELS = 'plugins-model,plugins-model-2';
    const models = await getOpenAIModels({ plugins: true });
    expect(models).toEqual(expect.arrayContaining(['plugins-model', 'plugins-model-2']));
  });

  it('returns `OPENAI_MODELS` with no flags (and fetch fails)', async () => {
    process.env.OPENAI_MODELS = 'openai-model,openai-model-2';
    const models = await getOpenAIModels({});
@@ -1,3 +1,4 @@
const partialRight = require('lodash/partialRight');
const {
  Capabilities,
  EModelEndpoint,
@@ -7,8 +8,7 @@ const {
  defaultAssistantsVersion,
  defaultAgentCapabilities,
} = require('librechat-data-provider');
const { sendEvent } = require('@librechat/api');
const partialRight = require('lodash/partialRight');
const { sendEvent, isUserProvided } = require('@librechat/api');

const addSpaceIfNeeded = (text) => (text.length > 0 && !text.endsWith(' ') ? text + ' ' : text);

@@ -117,14 +117,6 @@ function formatAction(action) {
  return formattedAction;
}

/**
 * Checks if the provided value is 'user_provided'.
 *
 * @param {string} value - The value to check.
 * @returns {boolean} - Returns true if the value is 'user_provided', otherwise false.
 */
const isUserProvided = (value) => value === 'user_provided';

/**
 * Generate the configuration for a given key and base URL.
 * @param {string} key
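
The local isUserProvided helper removed here is not lost: the earlier hunk in this file swaps the import to pull it from @librechat/api, so call sites keep working unchanged. For reference, its contract is a strict string comparison:

const { isUserProvided } = require('@librechat/api');
isUserProvided('user_provided'); // true
isUserProvided(process.env.OPENAI_API_KEY); // true only for the literal 'user_provided'
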
@@ -174,7 +166,6 @@ module.exports = {
  handleText,
  formatSteps,
  formatAction,
  isUserProvided,
  generateConfig,
  addSpaceIfNeeded,
  createOnProgress,