Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-16 16:30:15 +01:00)
* 🗑️ chore: Remove unused Legacy Provider clients and related helpers
  * Deleted OpenAIClient and GoogleClient files along with their associated tests.
  * Removed references to these clients in the clients index file.
  * Cleaned up typedefs by removing the OpenAISpecClient export.
  * Updated chat controllers to use the OpenAI SDK directly instead of the removed client classes.
* chore/remove-openapi-specs
* 🗑️ chore: Remove unused mergeSort and misc utility functions
  * Deleted mergeSort.js and misc.js files as they are no longer needed.
  * Removed references to cleanUpPrimaryKeyValue in messages.js and adjusted related logic.
  * Updated mongoMeili.ts to eliminate local implementations of removed functions.
* chore: remove legacy endpoints
* chore: remove all plugins endpoint related code
* chore: remove unused prompt handling code and clean up imports
  * Deleted handleInputs.js and instructions.js files as they are no longer needed.
  * Removed references to these files in the prompts index.js.
  * Updated docker-compose.yml to simplify reverse proxy configuration.
* chore: remove unused LightningIcon import from Icons.tsx
* chore: clean up translation.json by removing deprecated and unused keys
* chore: update Jest configuration and remove unused mock file
  * Simplified the setupFiles array in jest.config.js by removing the fetchEventSource mock.
  * Deleted the fetchEventSource.js mock file as it is no longer needed.
* fix: simplify endpoint type check in Landing and ConversationStarters components
  * Updated the endpoint type check to use strict equality for better clarity and performance.
  * Ensured consistency in the handling of the azureOpenAI endpoint across both components.
* chore: remove unused dependencies from package.json and package-lock.json
* chore: remove legacy EditController, associated routes and imports
* chore: update banResponse logic to refine request handling for banned users
* chore: remove unused validateEndpoint middleware and its references
* chore: remove unused 'res' parameter from initializeClient in multiple endpoint files
* chore: remove unused 'isSmallScreen' prop from BookmarkNav and NewChat components; clean up imports in ArchivedChatsTable and useSetIndexOptions hooks; enhance localization in PromptVersions
* chore: remove unused import of Constants and TMessage from MobileNav; retain only necessary QueryKeys import
* chore: remove unused TResPlugin type and related references; clean up imports in types and schemas
145 lines
4.2 KiB
JavaScript
const { isUserProvided, getOpenAIConfig, getCustomEndpointConfig } = require('@librechat/api');
const {
  CacheKeys,
  ErrorTypes,
  envVarRegex,
  FetchTokenConfig,
  extractEnvVariable,
} = require('librechat-data-provider');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { fetchModels } = require('~/server/services/ModelService');
const getLogStores = require('~/cache/getLogStores');

const { PROXY } = process.env;

const initializeClient = async ({ req, endpointOption, overrideEndpoint }) => {
  const appConfig = req.config;
  const { key: expiresAt } = req.body;
  const endpoint = overrideEndpoint ?? req.body.endpoint;

  const endpointConfig = getCustomEndpointConfig({
    endpoint,
    appConfig,
  });
  if (!endpointConfig) {
    throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
  }

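  // Resolve the endpoint's API key and base URL; each may come from an environment variable or be user-provided.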
  const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey);
  const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL);

  if (CUSTOM_API_KEY.match(envVarRegex)) {
    throw new Error(`Missing API Key for ${endpoint}.`);
  }

  if (CUSTOM_BASE_URL.match(envVarRegex)) {
    throw new Error(`Missing Base URL for ${endpoint}.`);
  }

  const userProvidesKey = isUserProvided(CUSTOM_API_KEY);
  const userProvidesURL = isUserProvided(CUSTOM_BASE_URL);

  let userValues = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }

  let apiKey = userProvidesKey ? userValues?.apiKey : CUSTOM_API_KEY;
  let baseURL = userProvidesURL ? userValues?.baseURL : CUSTOM_BASE_URL;

  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }

  if (userProvidesURL && !baseURL) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_BASE_URL,
      }),
    );
  }

  if (!apiKey) {
    throw new Error(`${endpoint} API key not provided.`);
  }

  if (!baseURL) {
    throw new Error(`${endpoint} Base URL not provided.`);
  }

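  // Token/pricing config is cached; the cache key is per user when credentials are user-provided and no static tokenConfig exists.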
  const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
  const tokenKey =
    !endpointConfig.tokenConfig && (userProvidesKey || userProvidesURL)
      ? `${endpoint}:${req.user.id}`
      : endpoint;

  let endpointTokenConfig =
    !endpointConfig.tokenConfig &&
    FetchTokenConfig[endpoint.toLowerCase()] &&
    (await cache.get(tokenKey));

  if (
    FetchTokenConfig[endpoint.toLowerCase()] &&
    endpointConfig &&
    endpointConfig.models.fetch &&
    !endpointTokenConfig
  ) {
    await fetchModels({ apiKey, baseURL, name: endpoint, user: req.user.id, tokenKey });
    endpointTokenConfig = await cache.get(tokenKey);
  }

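  // Options drawn from the custom endpoint's configuration.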
  const customOptions = {
    headers: endpointConfig.headers,
    addParams: endpointConfig.addParams,
    dropParams: endpointConfig.dropParams,
    customParams: endpointConfig.customParams,
    titleConvo: endpointConfig.titleConvo,
    titleModel: endpointConfig.titleModel,
    forcePrompt: endpointConfig.forcePrompt,
    summaryModel: endpointConfig.summaryModel,
    modelDisplayLabel: endpointConfig.modelDisplayLabel,
    titleMethod: endpointConfig.titleMethod ?? 'completion',
    contextStrategy: endpointConfig.summarize ? 'summarize' : null,
    directEndpoint: endpointConfig.directEndpoint,
    titleMessageRole: endpointConfig.titleMessageRole,
    streamRate: endpointConfig.streamRate,
    endpointTokenConfig,
  };

  const allConfig = appConfig.endpoints?.all;
  if (allConfig) {
    customOptions.streamRate = allConfig.streamRate;
  }

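  // Merge endpoint options into the client options and derive the OpenAI-compatible config.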
  let clientOptions = {
    reverseProxyUrl: baseURL ?? null,
    proxy: PROXY ?? null,
    ...customOptions,
    ...endpointOption,
  };

  const modelOptions = endpointOption?.model_parameters ?? {};
  clientOptions = Object.assign(
    {
      modelOptions,
    },
    clientOptions,
  );
  clientOptions.modelOptions.user = req.user.id;
  const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
  if (options != null) {
    options.useLegacyContent = true;
    options.endpointTokenConfig = endpointTokenConfig;
  }
  if (clientOptions.streamRate) {
    options.llmConfig._lc_stream_delay = clientOptions.streamRate;
  }
  return options;
};

module.exports = initializeClient;
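
Illustrative usage sketch (not part of the file): assuming an Express route where req.config, req.body.endpoint, req.body.key, and req.user are already populated, and endpointOption carries the parsed model_parameters, the initializer is awaited inside the handler. The require path below is hypothetical.

const initialize = require('~/server/services/Endpoints/custom/initialize'); // hypothetical path

// Inside an async request handler:
const options = await initialize({ req, endpointOption });
// `options` holds the OpenAI-compatible configuration (llmConfig, endpointTokenConfig, etc.)
// used to issue requests against the custom endpoint.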