Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-17 08:50:15 +01:00
* chore: remove unused redis file
* chore: bump keyv dependencies, and update related imports
* refactor: Implement IoRedis client for rate limiting across middleware, as node-redis via keyv not compatible
* fix: Set max listeners to expected amount
* WIP: memory improvements
* refactor: Simplify getAbortData assignment in createAbortController
* refactor: Update getAbortData to use WeakRef for content management
* WIP: memory improvements in agent chat requests
* refactor: Enhance memory management with finalization registry and cleanup functions (see the WeakRef/FinalizationRegistry sketch after this list)
* refactor: Simplify domainParser calls by removing unnecessary request parameter
* refactor: Update parameter types for action tools and agent loading functions to use minimal configs
* refactor: Simplify domainParser tests by removing unnecessary request parameter
* refactor: Simplify domainParser call by removing unnecessary request parameter
* refactor: Enhance client disposal by nullifying additional properties to improve memory management
* refactor: Improve title generation by adding abort controller and timeout handling, consolidate request cleanup
* refactor: Update checkIdleConnections to skip current user when checking for idle connections if passed
* refactor: Update createMCPTool to derive userId from config and handle abort signals
* refactor: Introduce createTokenCounter function and update tokenCounter usage; enhance disposeClient to reset Graph values
* refactor: Update getMCPManager to accept userId parameter for improved idle connection handling
* refactor: Extract logToolError function for improved error handling in AgentClient
* refactor: Update disposeClient to clear handlerRegistry and graphRunnable references in client.run
* refactor: Extract createHandleNewToken function to streamline token handling in initializeClient
* chore: bump @librechat/agents
* refactor: Improve timeout handling in addTitle function for better error management
* refactor: Introduce createFetch instead of using class method
* refactor: Enhance client disposal and request data handling in AskController and EditController
* refactor: Update import statements for AnthropicClient and OpenAIClient to use specific paths
* refactor: Use WeakRef for response handling in SplitStreamHandler to prevent memory leaks
* refactor: Simplify client disposal and rename getReqData to processReqData in AskController and EditController
* refactor: Improve logging structure and parameter handling in OpenAIClient
* refactor: Remove unused GraphEvents and improve stream event handling in AnthropicClient and OpenAIClient
* refactor: Simplify client initialization in AskController and EditController
* refactor: Remove unused mock functions and implement in-memory store for KeyvMongo
* chore: Update dependencies in package-lock.json to latest versions
* refactor: Await token usage recording in OpenAIClient to ensure proper async handling
* refactor: Remove handleAbort route from multiple endpoints and enhance client disposal logic
* refactor: Enhance abort controller logic by managing abortKey more effectively
* refactor: Add newConversation handling in useEventHandlers for improved conversation management
* fix: dropparams
* refactor: Use optional chaining for safer access to request properties in BaseClient
* refactor: Move client disposal and request data processing logic to cleanup module for better organization
* refactor: Remove aborted request check from addTitle function for cleaner logic
* feat: Add Grok 3 model pricing and update tests for new models
* chore: Remove trace warnings and inspect flags from backend start script used for debugging
* refactor: Replace user identifier handling with userId for consistency across controllers, use UserId in clientRegistry
* refactor: Enhance client disposal logic to prevent memory leaks by clearing additional references
* chore: Update @librechat/agents to version 2.4.14 in package.json and package-lock.json
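
The memory-management items above revolve around WeakRef and FinalizationRegistry. A minimal, illustrative sketch of that pattern follows; registerClient, withClient, and the { userId } held value are hypothetical names chosen for the example, not LibreChat's actual implementation.

// Hypothetical sketch of the WeakRef + FinalizationRegistry pattern (Node.js >= 14.6)
const clientRegistry = new FinalizationRegistry((held) => {
  // Runs some time after a registered client has been garbage-collected
  console.log(`client for user ${held.userId} was collected`);
});

function registerClient(client, userId) {
  // The held value must not strongly reference the client itself,
  // or the client could never become unreachable
  clientRegistry.register(client, { userId });
  // Hand out only a weak reference so callers do not keep the client alive
  return new WeakRef(client);
}

function withClient(clientRef, fn) {
  // deref() returns undefined once the client has been collected
  const client = clientRef.deref();
  if (client) {
    fn(client);
  }
}

The point of the pattern is that long-lived registries and handlers hold only weak references, so a finished request's client can be reclaimed by the garbage collector instead of being pinned in memory.
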
176 lines
5.1 KiB
JavaScript
const {
  CacheKeys,
  ErrorTypes,
  envVarRegex,
  FetchTokenConfig,
  extractEnvVariable,
} = require('librechat-data-provider');
const { Providers } = require('@librechat/agents');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { fetchModels } = require('~/server/services/ModelService');
const { isUserProvided, sleep } = require('~/server/utils');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const getLogStores = require('~/cache/getLogStores');

const { PROXY } = process.env;
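
/**
 * Initializes an OpenAI-compatible client for a user-defined custom endpoint,
 * resolving the API key, base URL, and headers from the endpoint config or
 * from user-stored values. With `optionsOnly`, returns an LLM config instead
 * of a constructed client.
 */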
const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrideEndpoint }) => {
  const { key: expiresAt } = req.body;
  const endpoint = overrideEndpoint ?? req.body.endpoint;

  const endpointConfig = await getCustomEndpointConfig(endpoint);
  if (!endpointConfig) {
    throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
  }
  const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey);
  const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL);

  const resolvedHeaders = {};
  if (endpointConfig.headers && typeof endpointConfig.headers === 'object') {
    Object.keys(endpointConfig.headers).forEach((key) => {
      resolvedHeaders[key] = extractEnvVariable(endpointConfig.headers[key]);
    });
  }

  // A value still matching the env var pattern means the variable was never set
  if (CUSTOM_API_KEY.match(envVarRegex)) {
    throw new Error(`Missing API Key for ${endpoint}.`);
  }

  if (CUSTOM_BASE_URL.match(envVarRegex)) {
    throw new Error(`Missing Base URL for ${endpoint}.`);
  }
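
  // Keys/URLs marked as user-provided are read from the user's stored values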
  const userProvidesKey = isUserProvided(CUSTOM_API_KEY);
  const userProvidesURL = isUserProvided(CUSTOM_BASE_URL);

  let userValues = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }
  let apiKey = userProvidesKey ? userValues?.apiKey : CUSTOM_API_KEY;
  let baseURL = userProvidesURL ? userValues?.baseURL : CUSTOM_BASE_URL;

  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }

  if (userProvidesURL && !baseURL) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_BASE_URL,
      }),
    );
  }

  if (!apiKey) {
    throw new Error(`${endpoint} API key not provided.`);
  }

  if (!baseURL) {
    throw new Error(`${endpoint} Base URL not provided.`);
  }
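
  // Model token data is cached per endpoint, or per user when credentials are user-provided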
  const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
  const tokenKey =
    !endpointConfig.tokenConfig && (userProvidesKey || userProvidesURL)
      ? `${endpoint}:${req.user.id}`
      : endpoint;

  let endpointTokenConfig =
    !endpointConfig.tokenConfig &&
    FetchTokenConfig[endpoint.toLowerCase()] &&
    (await cache.get(tokenKey));

  if (
    FetchTokenConfig[endpoint.toLowerCase()] &&
    endpointConfig &&
    endpointConfig.models.fetch &&
    !endpointTokenConfig
  ) {
    await fetchModels({ apiKey, baseURL, name: endpoint, user: req.user.id, tokenKey });
    endpointTokenConfig = await cache.get(tokenKey);
  }
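
  // Endpoint-level options covering headers, params, titling, summarization, and stream rate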
  const customOptions = {
    headers: resolvedHeaders,
    addParams: endpointConfig.addParams,
    dropParams: endpointConfig.dropParams,
    titleConvo: endpointConfig.titleConvo,
    titleModel: endpointConfig.titleModel,
    forcePrompt: endpointConfig.forcePrompt,
    summaryModel: endpointConfig.summaryModel,
    modelDisplayLabel: endpointConfig.modelDisplayLabel,
    titleMethod: endpointConfig.titleMethod ?? 'completion',
    contextStrategy: endpointConfig.summarize ? 'summarize' : null,
    directEndpoint: endpointConfig.directEndpoint,
    titleMessageRole: endpointConfig.titleMessageRole,
    streamRate: endpointConfig.streamRate,
    endpointTokenConfig,
  };

  // A global `all` endpoint config, when present, overrides the per-endpoint stream rate
  /** @type {undefined | TBaseEndpoint} */
  const allConfig = req.app.locals.all;
  if (allConfig) {
    customOptions.streamRate = allConfig.streamRate;
  }
|
|
let clientOptions = {
|
|
reverseProxyUrl: baseURL ?? null,
|
|
proxy: PROXY ?? null,
|
|
req,
|
|
res,
|
|
...customOptions,
|
|
...endpointOption,
|
|
};
|
|
|
|
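
  // `optionsOnly` callers want an LLM config rather than a full client instance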
  if (optionsOnly) {
    const modelOptions = endpointOption.model_parameters;
    if (endpoint !== Providers.OLLAMA) {
      clientOptions = Object.assign(
        {
          modelOptions,
        },
        clientOptions,
      );
      clientOptions.modelOptions.user = req.user.id;
      const options = getLLMConfig(apiKey, clientOptions, endpoint);
      if (!customOptions.streamRate) {
        return options;
      }
      // Throttle streaming by sleeping between generated tokens
      options.llmConfig.callbacks = [
        {
          handleLLMNewToken: async () => {
            await sleep(customOptions.streamRate);
          },
        },
      ];
      return options;
    }

    // Ollama takes its base URL (without the `/v1` suffix) via model options
    if (clientOptions.reverseProxyUrl) {
      modelOptions.baseUrl = clientOptions.reverseProxyUrl.split('/v1')[0];
      delete clientOptions.reverseProxyUrl;
    }

    return {
      llmConfig: modelOptions,
    };
  }
  const client = new OpenAIClient(apiKey, clientOptions);
  return {
    client,
    openAIApiKey: apiKey,
  };
};

module.exports = initializeClient;