Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-17 00:40:14 +01:00
* feat: Add support for agent handoffs with edges in agent forms and schemas
* chore: Mark `agent_ids` field as deprecated in favor of edges across various schemas and types
* chore: Update dependencies for @langchain/core and @librechat/agents to latest versions
* chore: Update peer dependency for @librechat/agents to version 3.0.0-rc2 in package.json
* chore: Update @librechat/agents dependency to version 3.0.0-rc3 in package.json and package-lock.json
* feat: first pass, multi-agent handoffs
* fix: update output type to ToolMessage in memory handling functions
* fix: improve type checking for graphConfig in createRun function
* refactor: remove unused content filtering logic in AgentClient
* chore: update @librechat/agents dependency to version 3.0.0-rc4 in package.json and package-lock.json
* fix: update @langchain/core peer dependency version to ^0.3.72 in package.json and package-lock.json
* fix: update @librechat/agents dependency to version 3.0.0-rc6 in package.json and package-lock.json; refactor stream rate handling in various endpoints
* feat: Agent handoff UI
* chore: update @librechat/agents dependency to version 3.0.0-rc8 in package.json and package-lock.json
* fix: improve hasInfo condition and adjust UI element classes in AgentHandoff component
* refactor: remove current fixed agent display from AgentHandoffs component due to redundancy
* feat: enhance AgentHandoffs UI with localized beta label and improved layout
* chore: update @librechat/agents dependency to version 3.0.0-rc10 in package.json and package-lock.json
* feat: add `createSequentialChainEdges` function to add back agent chaining via multi-agents
* feat: update `createSequentialChainEdges` call to only provide conversation context between agents
* feat: deprecate Agent Chain functionality and update related methods for improved clarity
* chore: update @librechat/agents dependency to version 3.0.0-rc11 in package.json and package-lock.json
* refactor: remove unused addCacheControl function and related imports and import from @librechat/agents
* chore: remove unused i18n keys
* refactor: remove unused format export from index.ts
* chore: update @librechat/agents to v3.0.0-rc13
* chore: remove BEDROCK_LEGACY provider from Providers enum
* chore: update @librechat/agents to version 3.0.2 in package.json
164 lines
5 KiB
JavaScript
const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
const {
  isEnabled,
  resolveHeaders,
  isUserProvided,
  getOpenAIConfig,
  getAzureCredentials,
} = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');

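/**
 * Initializes an OpenAI or Azure OpenAI client for the requesting user (or, with
 * `optionsOnly`, just the resolved LLM config), resolving the API key, base URL,
 * and model/endpoint overrides from the environment, app config, and any
 * user-provided values.
 *
 * @param {object} params
 * @param {object} params.req - Express request; supplies `req.config`, `req.body`, and `req.user`.
 * @param {object} params.res - Express response, forwarded to the client instance.
 * @param {object} params.endpointOption - Per-request endpoint options merged into the client options.
 * @param {boolean} [params.optionsOnly] - When true, return the LLM config instead of a client.
 * @param {string} [params.overrideEndpoint] - Takes precedence over `req.body.endpoint`.
 * @param {string} [params.overrideModel] - Takes precedence over `req.body.model`.
 * @returns {Promise<object>} `{ client, openAIApiKey }`, or the `getOpenAIConfig` result when `optionsOnly` is set.
 *
 * @example
 * // Minimal usage sketch, assuming req/res come from an Express route handler:
 * // const { client } = await initializeClient({ req, res, endpointOption });
 */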
const initializeClient = async ({
  req,
  res,
  endpointOption,
  optionsOnly,
  overrideEndpoint,
  overrideModel,
}) => {
  const appConfig = req.config;
  const {
    PROXY,
    OPENAI_API_KEY,
    AZURE_API_KEY,
    OPENAI_REVERSE_PROXY,
    AZURE_OPENAI_BASEURL,
    OPENAI_SUMMARIZE,
    DEBUG_OPENAI,
  } = process.env;
  const { key: expiresAt } = req.body;
  const modelName = overrideModel ?? req.body.model;
  const endpoint = overrideEndpoint ?? req.body.endpoint;
  const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;

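  // Map each supported endpoint to its environment-configured credential and base
  // URL; `isUserProvided` flags values that must come from the user's stored settings.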
  const credentials = {
    [EModelEndpoint.openAI]: OPENAI_API_KEY,
    [EModelEndpoint.azureOpenAI]: AZURE_API_KEY,
  };

  const baseURLOptions = {
    [EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
    [EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL,
  };

  const userProvidesKey = isUserProvided(credentials[endpoint]);
  const userProvidesURL = isUserProvided(baseURLOptions[endpoint]);

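  // When the user supplies their own key or base URL, verify the stored key has
  // not expired before loading their saved values.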
  let userValues = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }

  let apiKey = userProvidesKey ? userValues?.apiKey : credentials[endpoint];
  let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint];

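  // `endpointOption` is spread last so per-request options take precedence over
  // the environment-derived defaults above.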
  let clientOptions = {
    contextStrategy,
    proxy: PROXY ?? null,
    debug: isEnabled(DEBUG_OPENAI),
    reverseProxyUrl: baseURL ? baseURL : null,
    ...endpointOption,
  };

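  // Azure OpenAI: resolve the model's deployment group from the app config to
  // derive the base URL, headers, credentials, and serverless flag.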
  const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
  /** @type {false | TAzureConfig} */
  const azureConfig = isAzureOpenAI && appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
  let serverless = false;
  if (isAzureOpenAI && azureConfig) {
    const { modelGroupMap, groupMap } = azureConfig;
    const {
      azureOptions,
      baseURL,
      headers = {},
      serverless: _serverless,
    } = mapModelToAzureConfig({
      modelName,
      modelGroupMap,
      groupMap,
    });
    serverless = _serverless;

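    // Group-level settings override the generic defaults: base URL, headers
    // (resolved per user), title behavior, stream rate, and model parameters.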
    clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
    clientOptions.headers = resolveHeaders({
      headers: { ...headers, ...(clientOptions.headers ?? {}) },
      user: req.user,
    });

    clientOptions.titleConvo = azureConfig.titleConvo;
    clientOptions.titleModel = azureConfig.titleModel;

    const azureRate = modelName.includes('gpt-4') ? 30 : 17;
    clientOptions.streamRate = azureConfig.streamRate ?? azureRate;

    clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';

    const groupName = modelGroupMap[modelName].group;
    clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
    clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
    clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;

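    // Serverless Azure deployments authenticate with an `api-key` header and,
    // when configured, an `api-version` query parameter, instead of the
    // standard `azure` options object.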
    apiKey = azureOptions.azureOpenAIApiKey;
    clientOptions.azure = !serverless && azureOptions;
    if (serverless === true) {
      clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
        ? { 'api-version': azureOptions.azureOpenAIApiVersion }
        : undefined;
      clientOptions.headers['api-key'] = apiKey;
    }
  } else if (isAzureOpenAI) {
    clientOptions.azure = userProvidesKey ? JSON.parse(userValues.apiKey) : getAzureCredentials();
    apiKey = clientOptions.azure.azureOpenAIApiKey;
  }

  /** @type {undefined | TBaseEndpoint} */
  const openAIConfig = appConfig.endpoints?.[EModelEndpoint.openAI];

  if (!isAzureOpenAI && openAIConfig) {
    clientOptions.streamRate = openAIConfig.streamRate;
    clientOptions.titleModel = openAIConfig.titleModel;
  }

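  // Settings under the `all` endpoint config are applied last, so they take
  // precedence over endpoint-specific values.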
  const allConfig = appConfig.endpoints?.all;
  if (allConfig) {
    clientOptions.streamRate = allConfig.streamRate;
  }

  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }

  if (!apiKey) {
    throw new Error(`${endpoint} API Key not provided.`);
  }

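  // With `optionsOnly`, return the resolved LLM config instead of constructing
  // a full OpenAIClient instance.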
  if (optionsOnly) {
    const modelOptions = endpointOption?.model_parameters ?? {};
    modelOptions.model = modelName;
    clientOptions = Object.assign({ modelOptions }, clientOptions);
    clientOptions.modelOptions.user = req.user.id;
    const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
    if (options != null && serverless === true) {
      options.useLegacyContent = true;
    }
    const streamRate = clientOptions.streamRate;
    if (!streamRate) {
      return options;
    }
    options.llmConfig._lc_stream_delay = streamRate;
    return options;
  }

  const client = new OpenAIClient(apiKey, Object.assign({ req, res }, clientOptions));
  return {
    client,
    openAIApiKey: apiKey,
  };
};

module.exports = initializeClient;