mirror of https://github.com/danny-avila/LibreChat.git
synced 2025-12-19 09:50:15 +01:00
* chore: update @librechat/agents to version 2.1.9
* feat: xAI standalone provider for agents
* chore: bump librechat-data-provider version to 0.7.6997
* fix: reorder import statements and enhance user listing output
* fix: Update Docker Compose commands to support v2 syntax with fallback
* 🔧 fix: drop `reasoning_effort` for o1-preview/mini models
* chore: requireLocalAuth logging
* fix: edge case artifact message editing logic to handle `new` conversation IDs
* fix: remove `temperature` from model options in OpenAIClient if o1-mini/preview
* fix: update type annotation for fetchPromisesMap to use Promise<string[]> instead of string[]
* feat: anthropic model fetching
* fix: update model name to use EModelEndpoint.openAI in fetchModels and fetchOpenAIModels
* fix: add error handling to modelController for loadModels
* fix: add error handling and logging for model fetching in loadDefaultModels
* ci: update getAnthropicModels tests to be asynchronous
* feat: add user ID to model options in OpenAI and custom endpoint initialization
---------
Co-authored-by: Andrei Berceanu <andreicberceanu@gmail.com>
Co-authored-by: KiGamji <maloyh44@gmail.com>
166 lines
4.9 KiB
JavaScript
const {
  ErrorTypes,
  EModelEndpoint,
  resolveHeaders,
  mapModelToAzureConfig,
} = require('librechat-data-provider');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
const { isEnabled, isUserProvided, sleep } = require('~/server/utils');
const { getAzureCredentials } = require('~/utils');
const { OpenAIClient } = require('~/app');
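
/**
 * Initializes an OpenAI or Azure OpenAI client for the current request,
 * or returns only the LLM configuration when `optionsOnly` is set.
 */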
const initializeClient = async ({
  req,
  res,
  endpointOption,
  optionsOnly,
  overrideEndpoint,
  overrideModel,
}) => {
  const {
    PROXY,
    OPENAI_API_KEY,
    AZURE_API_KEY,
    OPENAI_REVERSE_PROXY,
    AZURE_OPENAI_BASEURL,
    OPENAI_SUMMARIZE,
    DEBUG_OPENAI,
  } = process.env;
  const { key: expiresAt } = req.body;
  const modelName = overrideModel ?? req.body.model;
  const endpoint = overrideEndpoint ?? req.body.endpoint;
  const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
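
  // Map each endpoint to its environment credential and base URL; `isUserProvided`
  // marks values configured to defer to the user's own stored key.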
  const credentials = {
    [EModelEndpoint.openAI]: OPENAI_API_KEY,
    [EModelEndpoint.azureOpenAI]: AZURE_API_KEY,
  };

  const baseURLOptions = {
    [EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
    [EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL,
  };

  const userProvidesKey = isUserProvided(credentials[endpoint]);
  const userProvidesURL = isUserProvided(baseURLOptions[endpoint]);
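
  // When the endpoint expects user-supplied values, verify the stored key has
  // not expired and load the user's API key / base URL.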
  let userValues = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }

  let apiKey = userProvidesKey ? userValues?.apiKey : credentials[endpoint];
  let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint];
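
  // Base client options; `endpointOption` is spread last so request-level
  // settings take precedence.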
  let clientOptions = {
    contextStrategy,
    proxy: PROXY ?? null,
    debug: isEnabled(DEBUG_OPENAI),
    reverseProxyUrl: baseURL ? baseURL : null,
    ...endpointOption,
  };
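
  // Azure OpenAI: resolve the model's group configuration via `mapModelToAzureConfig`
  // and apply its base URL, headers, extra params, and stream rate.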
  const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
  /** @type {false | TAzureConfig} */
  const azureConfig = isAzureOpenAI && req.app.locals[EModelEndpoint.azureOpenAI];

  if (isAzureOpenAI && azureConfig) {
    const { modelGroupMap, groupMap } = azureConfig;
    const {
      azureOptions,
      baseURL,
      headers = {},
      serverless,
    } = mapModelToAzureConfig({
      modelName,
      modelGroupMap,
      groupMap,
    });

    clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
    clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });

    clientOptions.titleConvo = azureConfig.titleConvo;
    clientOptions.titleModel = azureConfig.titleModel;

    const azureRate = modelName.includes('gpt-4') ? 30 : 17;
    clientOptions.streamRate = azureConfig.streamRate ?? azureRate;

    clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';

    const groupName = modelGroupMap[modelName].group;
    clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
    clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
    clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;

    apiKey = azureOptions.azureOpenAIApiKey;
    clientOptions.azure = !serverless && azureOptions;
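    // Serverless deployments authenticate with an `api-key` header and pass the
    // API version as a query parameter instead of using the `azure` options.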
    if (serverless === true) {
      clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
        ? { 'api-version': azureOptions.azureOpenAIApiVersion }
        : undefined;
      clientOptions.headers['api-key'] = apiKey;
    }
  } else if (isAzureOpenAI) {
    clientOptions.azure = userProvidesKey ? JSON.parse(userValues.apiKey) : getAzureCredentials();
    apiKey = clientOptions.azure.azureOpenAIApiKey;
  }

  /** @type {undefined | TBaseEndpoint} */
  const openAIConfig = req.app.locals[EModelEndpoint.openAI];

  if (!isAzureOpenAI && openAIConfig) {
    clientOptions.streamRate = openAIConfig.streamRate;
    clientOptions.titleModel = openAIConfig.titleModel;
  }

  /** @type {undefined | TBaseEndpoint} */
  const allConfig = req.app.locals.all;
  if (allConfig) {
    clientOptions.streamRate = allConfig.streamRate;
  }
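
  // A user-provided key that resolved to nothing is reported as NO_USER_KEY;
  // any other missing key is a server configuration error.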
  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }

  if (!apiKey) {
    throw new Error(`${endpoint} API Key not provided.`);
  }
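
  // `optionsOnly`: return just the LLM configuration (e.g., for agents) instead
  // of a full client; if a stream rate is set, throttle tokens via a callback.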
  if (optionsOnly) {
    clientOptions = Object.assign(
      {
        modelOptions: endpointOption.model_parameters,
      },
      clientOptions,
    );
    clientOptions.modelOptions.user = req.user.id;
    const options = getLLMConfig(apiKey, clientOptions);
    if (!clientOptions.streamRate) {
      return options;
    }
    options.llmConfig.callbacks = [
      {
        handleLLMNewToken: async () => {
          await sleep(clientOptions.streamRate);
        },
      },
    ];
    return options;
  }
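
  // Default path: construct a full OpenAIClient bound to this request/response.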
  const client = new OpenAIClient(apiKey, Object.assign({ req, res }, clientOptions));
  return {
    client,
    openAIApiKey: apiKey,
  };
};

module.exports = initializeClient;
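
// Example usage (illustrative sketch, not part of the file): assumes Express
// `req`/`res` objects and an `endpointOption` shaped like the route middleware
// builds; the require path is hypothetical.
//
//   const initializeClient = require('~/server/services/Endpoints/openAI/initialize');
//
//   // Full client for a chat request:
//   const { client, openAIApiKey } = await initializeClient({ req, res, endpointOption });
//
//   // Config-only, e.g. for an agent's LLM setup:
//   const options = await initializeClient({
//     req,
//     res,
//     endpointOption: { model_parameters: { model: 'gpt-4o' } },
//     optionsOnly: true,
//   });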