mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 17:00:15 +01:00
* 🔧 fix: enhance client options handling in AgentClient and set default recursion limit
- Updated the recursion limit to default to 25 if not specified in agentsEConfig.
- Enhanced client options in AgentClient to include model parameters such as apiKey and anthropicApiUrl from agentModelParams.
- Updated requestOptions in the anthropic endpoint to use reverseProxyUrl as anthropicApiUrl.
* Enhance LLM configuration tests with edge case handling
* chore: add return type annotation for getCustomEndpointConfig function
* fix: update modelOptions handling to use optional chaining and default to empty object in multiple endpoint initializations
* chore: update @librechat/agents to version 2.4.42
* refactor: streamline agent endpoint configuration and enhance client options handling for title generations
- Introduced a new `getProviderConfig` function to centralize provider configuration logic.
- Updated `AgentClient` to utilize the new provider configuration, improving clarity and maintainability.
- Removed redundant code related to endpoint initialization and model parameter handling.
- Enhanced error logging for missing endpoint configurations.
* fix: add abort handling for image generation and editing in OpenAIImageTools
* ci: enhance getLLMConfig tests to verify fetchOptions and dispatcher properties
* fix: use optional chaining for endpointOption properties in getOptions
* fix: increase title generation timeout from 25s to 45s, pass `endpointOption` to `getOptions`
* fix: update file filtering logic in getToolFilesByIds to ensure text field is properly checked
* fix: add error handling for empty OCR results in uploadMistralOCR and uploadAzureMistralOCR
* fix: enhance error handling in file upload to include 'No OCR result' message
* chore: update error messages in uploadMistralOCR and uploadAzureMistralOCR
* fix: enhance filtering logic in getToolFilesByIds to include context checks for OCR resources to only include files directly attached to agent
---------
Co-authored-by: Matt Burnett <matt.burnett@shopify.com>
173 lines
5.1 KiB
JavaScript
173 lines
5.1 KiB
JavaScript
const { Providers } = require('@librechat/agents');
const {
  primeResources,
  extractLibreChatParams,
  optionalChainWithEmptyCheck,
} = require('@librechat/api');
const {
  ErrorTypes,
  EModelEndpoint,
  EToolResources,
  replaceSpecialVars,
  providerEndpointMap,
} = require('librechat-data-provider');
const { getProviderConfig } = require('~/server/services/Endpoints');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { processFiles } = require('~/server/services/Files/process');
const { getFiles, getToolFilesByIds } = require('~/models/File');
const { getConvoFiles } = require('~/models/Conversation');
const { getModelMaxTokens } = require('~/utils');

/**
 * Resolves an agent's model options, attached files, tools, and provider
 * configuration into a fully initialized agent payload ready for a run.
 *
 * @param {object} params
 * @param {ServerRequest} params.req
 * @param {ServerResponse} params.res
 * @param {Agent} params.agent
 * @param {string | null} [params.conversationId]
 * @param {Array<IMongoFile>} [params.requestFiles] - Files attached to the current request; defaults to an empty array.
 * @param {typeof import('~/server/services/ToolService').loadAgentTools | undefined} [params.loadTools]
 * @param {TEndpointOption} [params.endpointOption]
 * @param {Set<string>} [params.allowedProviders] - When non-empty, restricts which providers may be used.
 * @param {boolean} [params.isInitialAgent]
 * @returns {Promise<Agent & { tools: StructuredTool[], attachments: Array<MongoFile>, toolContextMap: Record<string, unknown>, maxContextTokens: number }>}
 * @throws {Error} JSON-encoded `INVALID_AGENT_PROVIDER` payload when the agent's provider is not in `allowedProviders`.
 */
const initializeAgent = async ({
  req,
  res,
  agent,
  loadTools,
  requestFiles = [],
  conversationId,
  endpointOption,
  allowedProviders,
  isInitialAgent = false,
}) => {
  // `allowedProviders` is optional; `?.` keeps the check safe when it is omitted
  // (`undefined > 0` is false, so an absent set imposes no restriction).
  if (allowedProviders?.size > 0 && !allowedProviders.has(agent.provider)) {
    throw new Error(
      `{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
    );
  }
  let currentFiles;

  // Merge precedence (lowest → highest): agent.model → saved model_parameters →
  // request-level overrides (initial agent only). Cloned so later mutation of
  // `modelOptions` cannot leak back into `agent.model_parameters`.
  const _modelOptions = structuredClone(
    Object.assign(
      { model: agent.model },
      agent.model_parameters ?? {},
      isInitialAgent === true ? endpointOption?.model_parameters : {},
    ),
  );

  const { resendFiles, maxContextTokens, modelOptions } = extractLibreChatParams(_modelOptions);

  if (isInitialAgent && conversationId != null && resendFiles) {
    // Re-attach files from earlier turns, limited to the tool resources this agent can use.
    const fileIds = (await getConvoFiles(conversationId)) ?? [];
    /** @type {Set<EToolResources>} */
    const toolResourceSet = new Set();
    for (const tool of agent.tools ?? []) {
      if (EToolResources[tool]) {
        toolResourceSet.add(EToolResources[tool]);
      }
    }
    const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet);
    if (requestFiles.length || toolFiles.length) {
      currentFiles = await processFiles(requestFiles.concat(toolFiles));
    }
  } else if (isInitialAgent && requestFiles.length) {
    currentFiles = await processFiles(requestFiles);
  }

  const { attachments, tool_resources } = await primeResources({
    req,
    getFiles,
    attachments: currentFiles,
    tool_resources: agent.tool_resources,
    requestFileSet: new Set(requestFiles.map((file) => file.file_id)),
  });

  const provider = agent.provider;
  const { tools, toolContextMap } =
    (await loadTools?.({
      req,
      res,
      provider,
      agentId: agent.id,
      tools: agent.tools,
      model: agent.model,
      tool_resources,
    })) ?? {};

  agent.endpoint = provider;
  const { getOptions, overrideProvider } = await getProviderConfig(provider);
  if (overrideProvider) {
    agent.provider = overrideProvider;
  }

  // Only the initial agent inherits the request's endpoint options; downstream
  // agents get a minimal option set carrying just the merged model parameters.
  const _endpointOption =
    isInitialAgent === true
      ? Object.assign({}, endpointOption, { model_parameters: modelOptions })
      : { model_parameters: modelOptions };

  const options = await getOptions({
    req,
    res,
    optionsOnly: true,
    overrideEndpoint: provider,
    overrideModel: agent.model,
    endpointOption: _endpointOption,
  });

  // Azure deployments may rename the model, so token limits are looked up by deployment name.
  const tokensModel =
    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : modelOptions.model;
  const maxTokens = optionalChainWithEmptyCheck(
    modelOptions.maxOutputTokens,
    modelOptions.maxTokens,
    0,
  );
  const agentMaxContextTokens = optionalChainWithEmptyCheck(
    maxContextTokens,
    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
    4096,
  );

  // Incomplete Azure config (no instance name) falls back to the plain OpenAI provider.
  if (
    agent.endpoint === EModelEndpoint.azureOpenAI &&
    options.llmConfig?.azureOpenAIApiInstanceName == null
  ) {
    agent.provider = Providers.OPENAI;
  }

  // An explicit provider from the resolved options always wins.
  if (options.provider != null) {
    agent.provider = options.provider;
  }

  /** @type {import('@librechat/agents').ClientOptions} */
  agent.model_parameters = { ...options.llmConfig };
  if (options.configOptions) {
    agent.model_parameters.configuration = options.configOptions;
  }

  if (agent.instructions && agent.instructions !== '') {
    agent.instructions = replaceSpecialVars({
      text: agent.instructions,
      user: req.user,
    });
  }

  if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
    agent.additional_instructions = generateArtifactsPrompt({
      endpoint: agent.provider,
      artifacts: agent.artifacts,
    });
  }

  return {
    ...agent,
    tools,
    attachments,
    resendFiles,
    toolContextMap,
    // Reserve the output-token budget, then keep a 10% safety margin on the remainder.
    maxContextTokens: (agentMaxContextTokens - maxTokens) * 0.9,
  };
};

module.exports = { initializeAgent };
|