mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-09-22 06:00:56 +02:00

* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412) * ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413) * refactor: move tokens.js over to packages/api and update imports * refactor: port tokens.js to typescript * refactor: move helpers.js over to packages/api and update imports * refactor: port helpers.js to typescript * refactor: move anthropic/llm.js over to packages/api and update imports * refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js * refactor: move llm.spec.js over to packages/api and update import * refactor: port llm.spec.js over to typescript * 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414) feat: add anthropic llm config support for openai-like (custom) endpoints * fix: missed compiler / type issues from addition of getAnthropicLLMConfig * refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values * WIP: first pass, decouple `llmConfig` from `configOptions` * chore: update import path for OpenAI configuration from 'llm' to 'config' * refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions * refactor: cleanup type, introduce openai transform from alt provider * chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports * chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json * refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams'] * refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction * refactor: conform userId field for anthropic/openai, cleanup anthropic typing * ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations * ci: replace userId with user in clientOptions for getLLMConfig * test: add Azure OpenAI endpoint tests for 
various configurations in getOpenAIConfig * refactor: defaultHeaders retrieval for prompt caching for anthropic-based custom endpoint (litellm) * test: add unit tests for getOpenAIConfig with various Anthropic model configurations * test: enhance Anthropic compatibility tests with addParams and dropParams handling * chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json * chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json --------- Co-authored-by: Danny Avila <danny@librechat.ai>
210 lines
6 KiB
JavaScript
const { Providers } = require('@librechat/agents');
|
|
const {
|
|
primeResources,
|
|
getModelMaxTokens,
|
|
extractLibreChatParams,
|
|
optionalChainWithEmptyCheck,
|
|
} = require('@librechat/api');
|
|
const {
|
|
ErrorTypes,
|
|
EModelEndpoint,
|
|
EToolResources,
|
|
isAgentsEndpoint,
|
|
replaceSpecialVars,
|
|
providerEndpointMap,
|
|
} = require('librechat-data-provider');
|
|
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
|
|
const { getProviderConfig } = require('~/server/services/Endpoints');
|
|
const { processFiles } = require('~/server/services/Files/process');
|
|
const { getFiles, getToolFilesByIds } = require('~/models/File');
|
|
const { getConvoFiles } = require('~/models/Conversation');
|
|
|
|
/**
 * Resolves everything an agent run needs: model options, attached files,
 * loaded tools, provider/LLM config, and the effective context-token budget.
 *
 * @param {object} params
 * @param {ServerRequest} params.req
 * @param {ServerResponse} params.res
 * @param {Agent} params.agent
 * @param {string | null} [params.conversationId]
 * @param {Array<IMongoFile>} [params.requestFiles]
 * @param {typeof import('~/server/services/ToolService').loadAgentTools | undefined} [params.loadTools]
 * @param {TEndpointOption} [params.endpointOption]
 * @param {Set<string>} [params.allowedProviders]
 * @param {boolean} [params.isInitialAgent]
 * @returns {Promise<Agent & {
 * tools: StructuredTool[],
 * attachments: Array<MongoFile>,
 * toolContextMap: Record<string, unknown>,
 * maxContextTokens: number,
 * userMCPAuthMap?: Record<string, Record<string, string>>
 * }>}
 * @throws {Error} JSON-string error with `ErrorTypes.INVALID_AGENT_PROVIDER`
 *   when the agent's provider is not in `allowedProviders`, or with
 *   `ErrorTypes.GOOGLE_TOOL_CONFLICT` when Google/VertexAI receives both
 *   endpoint-provided and structured tools.
 */
const initializeAgent = async ({
  req,
  res,
  agent,
  loadTools,
  requestFiles,
  conversationId,
  endpointOption,
  allowedProviders,
  isInitialAgent = false,
}) => {
  const appConfig = req.config;
  // `allowedProviders` is optional (see JSDoc); guard with `?.` so an omitted
  // set means "no restriction" instead of a TypeError.
  if (
    isAgentsEndpoint(endpointOption?.endpoint) &&
    (allowedProviders?.size ?? 0) > 0 &&
    !allowedProviders.has(agent.provider)
  ) {
    throw new Error(
      `{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
    );
  }
  let currentFiles;

  // Deep-copy so later mutation of model options can never leak back into
  // the stored agent or the incoming endpointOption.
  const _modelOptions = structuredClone(
    Object.assign(
      { model: agent.model },
      agent.model_parameters ?? { model: agent.model },
      isInitialAgent === true ? endpointOption?.model_parameters : {},
    ),
  );

  // Splits LibreChat-specific params (resendFiles, maxContextTokens) from the
  // provider-facing model options.
  const { resendFiles, maxContextTokens, modelOptions } = extractLibreChatParams(_modelOptions);

  if (isInitialAgent && conversationId != null && resendFiles) {
    // Re-attach files from earlier turns of this conversation.
    const fileIds = (await getConvoFiles(conversationId)) ?? [];
    /** @type {Set<EToolResources>} */
    const toolResourceSet = new Set();
    // `agent.tools` may be absent; an agent without tools simply has no
    // tool-resource files to fetch.
    for (const tool of agent.tools ?? []) {
      if (EToolResources[tool]) {
        toolResourceSet.add(EToolResources[tool]);
      }
    }
    const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet);
    // `requestFiles` is optional — use `?.`/`?? []` (consistent with the
    // `requestFiles?.map` below) instead of dereferencing unconditionally.
    if (requestFiles?.length || toolFiles.length) {
      currentFiles = await processFiles((requestFiles ?? []).concat(toolFiles));
    }
  } else if (isInitialAgent && requestFiles?.length) {
    currentFiles = await processFiles(requestFiles);
  }

  const { attachments, tool_resources } = await primeResources({
    req,
    getFiles,
    appConfig,
    agentId: agent.id,
    attachments: currentFiles,
    tool_resources: agent.tool_resources,
    requestFileSet: new Set(requestFiles?.map((file) => file.file_id)),
  });

  const provider = agent.provider;
  // `loadTools` is optional; when absent, destructure from `{}` so the tool
  // fields are simply undefined.
  const {
    tools: structuredTools,
    toolContextMap,
    userMCPAuthMap,
  } = (await loadTools?.({
    req,
    res,
    provider,
    agentId: agent.id,
    tools: agent.tools,
    model: agent.model,
    tool_resources,
  })) ?? {};

  // Remember the original provider as the endpoint; the provider itself may
  // be overridden below (config override, azure fallback, options.provider).
  agent.endpoint = provider;
  const { getOptions, overrideProvider } = getProviderConfig({ provider, appConfig });
  if (overrideProvider !== agent.provider) {
    agent.provider = overrideProvider;
  }

  const _endpointOption =
    isInitialAgent === true
      ? Object.assign({}, endpointOption, { model_parameters: modelOptions })
      : { model_parameters: modelOptions };

  const options = await getOptions({
    req,
    res,
    optionsOnly: true,
    overrideEndpoint: provider,
    overrideModel: agent.model,
    endpointOption: _endpointOption,
  });

  // Azure token limits are keyed by the agent's own model name; other
  // providers use the (possibly overridden) model from modelOptions.
  const tokensModel =
    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : modelOptions.model;
  const maxTokens = optionalChainWithEmptyCheck(
    modelOptions.maxOutputTokens,
    modelOptions.maxTokens,
    0,
  );
  const agentMaxContextTokens = optionalChainWithEmptyCheck(
    maxContextTokens,
    getModelMaxTokens(tokensModel, providerEndpointMap[provider], options.endpointTokenConfig),
    4096,
  );

  // Azure endpoint without an instance name means the config resolved to a
  // plain OpenAI-compatible setup.
  if (
    agent.endpoint === EModelEndpoint.azureOpenAI &&
    options.llmConfig?.azureOpenAIApiInstanceName == null
  ) {
    agent.provider = Providers.OPENAI;
  }

  if (options.provider != null) {
    agent.provider = options.provider;
  }

  /** @type {import('@librechat/agents').GenericTool[]} */
  let tools = options.tools?.length ? options.tools : structuredTools;
  if (
    (agent.provider === Providers.GOOGLE || agent.provider === Providers.VERTEXAI) &&
    options.tools?.length &&
    structuredTools?.length
  ) {
    // Google/VertexAI cannot mix endpoint-provided tools with structured tools.
    throw new Error(`{ "type": "${ErrorTypes.GOOGLE_TOOL_CONFLICT}"}`);
  } else if (
    (agent.provider === Providers.OPENAI ||
      agent.provider === Providers.AZURE ||
      agent.provider === Providers.ANTHROPIC) &&
    options.tools?.length &&
    structuredTools?.length
  ) {
    // These providers accept both sets; merge them.
    tools = structuredTools.concat(options.tools);
  }

  /** @type {import('@librechat/agents').ClientOptions} */
  agent.model_parameters = { ...options.llmConfig };
  if (options.configOptions) {
    agent.model_parameters.configuration = options.configOptions;
  }

  // Expand special variables (e.g. user placeholders) in the instructions.
  if (agent.instructions && agent.instructions !== '') {
    agent.instructions = replaceSpecialVars({
      text: agent.instructions,
      user: req.user,
    });
  }

  if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
    agent.additional_instructions = generateArtifactsPrompt({
      endpoint: agent.provider,
      artifacts: agent.artifacts,
    });
  }

  return {
    ...agent,
    tools,
    attachments,
    resendFiles,
    userMCPAuthMap,
    toolContextMap,
    // Reserve the max output tokens, then keep a 10% safety margin.
    maxContextTokens: Math.round((agentMaxContextTokens - maxTokens) * 0.9),
  };
};
// Single named export; consumed by the agents endpoint initialization flow.
module.exports = { initializeAgent };