mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-16 16:30:15 +01:00)
* chore: update peer dependency for @librechat/agents to version 2.4.41
* 🔧 chore: proxy handling in OpenAI endpoint to use undici
* 🔧 chore: update @anthropic-ai/sdk to version 0.52.0 and refactor proxy handling to use undici
* 🔧 chore: update globIgnores in vite.config.ts to exclude index.html from caching
* 🔧 ci: update proxy handling in getLLMConfig to use fetchOptions and ProxyAgent
* 🔧 chore: refactor proxy handling in Anthropic and OpenAI clients to use fetchOptions
* refactor: agent initialization to streamline model parameters and resendFiles handling
* chore: update @google/generative-ai to version 0.24.0
196 lines
6.2 KiB
JavaScript
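/**
 * Initializes an agent for a run: merges model parameters, primes file
 * resources, loads tools, and resolves provider-specific client options.
 */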
const { Providers } = require('@librechat/agents');
const { primeResources, optionalChainWithEmptyCheck } = require('@librechat/api');
const {
  ErrorTypes,
  EModelEndpoint,
  EToolResources,
  replaceSpecialVars,
  providerEndpointMap,
} = require('librechat-data-provider');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initialize');
const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const initCustom = require('~/server/services/Endpoints/custom/initialize');
const initGoogle = require('~/server/services/Endpoints/google/initialize');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { processFiles } = require('~/server/services/Files/process');
const { getConvoFiles } = require('~/models/Conversation');
const { getToolFilesByIds } = require('~/models/File');
const { getModelMaxTokens } = require('~/utils');
const { getFiles } = require('~/models/File');

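/** Maps known agent providers to their endpoint initializers; providers
 *  absent from this map fall back to lowercase matching or custom-endpoint
 *  resolution inside `initializeAgent`. */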
const providerConfigMap = {
  [Providers.XAI]: initCustom,
  [Providers.OLLAMA]: initCustom,
  [Providers.DEEPSEEK]: initCustom,
  [Providers.OPENROUTER]: initCustom,
  [EModelEndpoint.openAI]: initOpenAI,
  [EModelEndpoint.google]: initGoogle,
  [EModelEndpoint.azureOpenAI]: initOpenAI,
  [EModelEndpoint.anthropic]: initAnthropic,
  [EModelEndpoint.bedrock]: getBedrockOptions,
};

/**
 * @param {object} params
 * @param {ServerRequest} params.req
 * @param {ServerResponse} params.res
 * @param {Agent} params.agent
 * @param {string | null} [params.conversationId]
 * @param {Array<IMongoFile>} [params.requestFiles]
 * @param {typeof import('~/server/services/ToolService').loadAgentTools | undefined} [params.loadTools]
 * @param {TEndpointOption} [params.endpointOption]
 * @param {Set<string>} [params.allowedProviders]
 * @param {boolean} [params.isInitialAgent]
 * @returns {Promise<Agent & { tools: StructuredTool[], attachments: Array<MongoFile>, toolContextMap: Record<string, unknown>, resendFiles: boolean, maxContextTokens: number }>}
 */
const initializeAgent = async ({
  req,
  res,
  agent,
  loadTools,
  requestFiles,
  conversationId,
  endpointOption,
  allowedProviders,
  isInitialAgent = false,
}) => {
  // `allowedProviders` is optional per the JSDoc, so guard with optional chaining.
  if (allowedProviders?.size > 0 && !allowedProviders.has(agent.provider)) {
    throw new Error(
      `{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
    );
  }
  let currentFiles;

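  // Merge model parameters: agent defaults first, then (for the initial agent
  // only) per-request overrides from the endpoint option.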
  const _modelOptions = structuredClone(
    Object.assign(
      { model: agent.model },
      agent.model_parameters ?? { model: agent.model },
      isInitialAgent === true ? endpointOption?.model_parameters : {},
    ),
  );

  const { resendFiles = true, ...modelOptions } = _modelOptions;

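  // For the initial agent, gather the files to resend: conversation files
  // filtered to the agent's tool resources, plus any files on this request.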
  if (isInitialAgent && conversationId != null && resendFiles) {
    const fileIds = (await getConvoFiles(conversationId)) ?? [];
    /** @type {Set<EToolResources>} */
    const toolResourceSet = new Set();
    for (const tool of agent.tools) {
      if (EToolResources[tool]) {
        toolResourceSet.add(EToolResources[tool]);
      }
    }
    const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet);
    // `requestFiles` is optional per the JSDoc, so guard before concatenating.
    if (requestFiles?.length || toolFiles.length) {
      currentFiles = await processFiles((requestFiles ?? []).concat(toolFiles));
    }
  } else if (isInitialAgent && requestFiles?.length) {
    currentFiles = await processFiles(requestFiles);
  }

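  // Prime file resources: resolve the attachments and tool_resources the
  // agent will run with.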
  const { attachments, tool_resources } = await primeResources({
    req,
    getFiles,
    attachments: currentFiles,
    tool_resources: agent.tool_resources,
    requestFileSet: new Set(requestFiles?.map((file) => file.file_id)),
  });

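  // Load the agent's tools, if a loader was provided; tools may contribute
  // additional prompt context via `toolContextMap`.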
  const provider = agent.provider;
  const { tools, toolContextMap } =
    (await loadTools?.({
      req,
      res,
      provider,
      agentId: agent.id,
      tools: agent.tools,
      model: agent.model,
      tool_resources,
    })) ?? {};

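  // Resolve the provider's endpoint initializer: try an exact match, then a
  // lowercase match; otherwise treat it as a custom (OpenAI-compatible) endpoint.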
  agent.endpoint = provider;
  let getOptions = providerConfigMap[provider];
  if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
    agent.provider = provider.toLowerCase();
    getOptions = providerConfigMap[agent.provider];
  } else if (!getOptions) {
    const customEndpointConfig = await getCustomEndpointConfig(provider);
    if (!customEndpointConfig) {
      throw new Error(`Provider ${provider} not supported`);
    }
    getOptions = initCustom;
    agent.provider = Providers.OPENAI;
  }

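  // Only the initial agent inherits the request's endpoint option; other
  // agents receive just the merged model parameters.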
  const _endpointOption =
    isInitialAgent === true
      ? Object.assign({}, endpointOption, { model_parameters: modelOptions })
      : { model_parameters: modelOptions };

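  // `optionsOnly` requests just the client configuration for the resolved
  // endpoint and model, rather than a fully initialized client.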
  const options = await getOptions({
    req,
    res,
    optionsOnly: true,
    overrideEndpoint: provider,
    overrideModel: agent.model,
    endpointOption: _endpointOption,
  });

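  // Determine output and context token limits, preferring explicit model
  // options and falling back to known per-model maximums (then 4096).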
  const tokensModel =
    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : modelOptions.model;
  const maxTokens = optionalChainWithEmptyCheck(
    modelOptions.maxOutputTokens,
    modelOptions.maxTokens,
    0,
  );
  const maxContextTokens = optionalChainWithEmptyCheck(
    modelOptions.maxContextTokens,
    modelOptions.max_context_tokens,
    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
    4096,
  );

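  // If an Azure config lacks an instance name, fall back to the plain OpenAI
  // provider (likely an OpenAI-compatible deployment).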
  if (
    agent.endpoint === EModelEndpoint.azureOpenAI &&
    options.llmConfig?.azureOpenAIApiInstanceName == null
  ) {
    agent.provider = Providers.OPENAI;
  }

  if (options.provider != null) {
    agent.provider = options.provider;
  }

  /** @type {import('@librechat/agents').ClientOptions} */
  agent.model_parameters = { ...options.llmConfig };
  if (options.configOptions) {
    agent.model_parameters.configuration = options.configOptions;
  }

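  // Substitute special variables (e.g. user placeholders) in the agent's
  // instructions before the run.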
  if (agent.instructions && agent.instructions !== '') {
    agent.instructions = replaceSpecialVars({
      text: agent.instructions,
      user: req.user,
    });
  }

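  // If the agent has an artifacts mode set, append the generated artifacts
  // prompt as additional instructions.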
  if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
    agent.additional_instructions = generateArtifactsPrompt({
      endpoint: agent.provider,
      artifacts: agent.artifacts,
    });
  }

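  // Reserve the output budget and keep a 10% safety margin on the remaining
  // context window.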
  return {
    ...agent,
    tools,
    attachments,
    resendFiles,
    toolContextMap,
    maxContextTokens: (maxContextTokens - maxTokens) * 0.9,
  };
};

module.exports = { initializeAgent };