📦 chore: Bump Agents Packages (#7992)

* chore: update peer dependency for @librechat/agents to version 2.4.41

* 🔧 chore: update proxy handling in OpenAI endpoint to use undici

* 🔧 chore: update @anthropic-ai/sdk to version 0.52.0 and refactor proxy handling to use undici

* 🔧 chore: update globIgnores in vite.config.ts to exclude index.html from caching

* 🔧 ci: update proxy handling in getLLMConfig to use fetchOptions and ProxyAgent

* 🔧 chore: refactor proxy handling in Anthropic and OpenAI clients to use fetchOptions (see the sketch after this list)

* refactor: agent initialization to streamline model parameters and resendFiles handling

* chore: update @google/generative-ai to version 0.24.0
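
The proxy bullets above all share one pattern, sketched below with an illustrative URL: instead of attaching an `httpAgent` from `https-proxy-agent`, each client now hands its SDK a `fetchOptions.dispatcher` backed by undici's `ProxyAgent`, which tunnels the SDK's fetch-based requests through the proxy.

const { ProxyAgent } = require('undici');

// Illustrative value; LibreChat reads the proxy URL from its own config/env.
const proxyUrl = 'http://proxy:8080';

// An undici dispatcher intercepts fetch() at the connection layer, so any
// SDK that uses fetch internally routes its traffic through the proxy.
const clientOptions = {
  fetchOptions: { dispatcher: new ProxyAgent(proxyUrl) },
};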
Danny Avila, 2025-06-20 15:49:24 -04:00 (committed by GitHub)
parent 97085073d2
commit fa54c9ae90
12 changed files with 598 additions and 262 deletions


@@ -63,11 +63,17 @@ const initializeAgent = async ({
   }
 
   let currentFiles;
-  if (
-    isInitialAgent &&
-    conversationId != null &&
-    (agent.model_parameters?.resendFiles ?? true) === true
-  ) {
+
+  const _modelOptions = structuredClone(
+    Object.assign(
+      { model: agent.model },
+      agent.model_parameters ?? { model: agent.model },
+      isInitialAgent === true ? endpointOption?.model_parameters : {},
+    ),
+  );
+  const { resendFiles = true, ...modelOptions } = _modelOptions;
+
+  if (isInitialAgent && conversationId != null && resendFiles) {
     const fileIds = (await getConvoFiles(conversationId)) ?? [];
     /** @type {Set<EToolResources>} */
     const toolResourceSet = new Set();
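
The destructuring in the new block is the heart of this refactor: `resendFiles` is a LibreChat conversation flag rather than a provider parameter, so it is peeled off before the remaining options are forwarded to the model. A minimal sketch with made-up values:

const merged = { model: 'gpt-4o', temperature: 0.7, resendFiles: false };
// structuredClone keeps later mutations from leaking back into shared agent state.
const { resendFiles = true, ...modelOptions } = structuredClone(merged);
// resendFiles -> false
// modelOptions -> { model: 'gpt-4o', temperature: 0.7 }

The `= true` default fires only when the flag is absent, which effectively preserves the old `agent.model_parameters?.resendFiles ?? true` behavior for agents that never set it.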
@@ -117,15 +123,11 @@ const initializeAgent = async ({
     getOptions = initCustom;
     agent.provider = Providers.OPENAI;
   }
-  const model_parameters = Object.assign(
-    {},
-    agent.model_parameters ?? { model: agent.model },
-    isInitialAgent === true ? endpointOption?.model_parameters : {},
-  );
+
   const _endpointOption =
     isInitialAgent === true
-      ? Object.assign({}, endpointOption, { model_parameters })
-      : { model_parameters };
+      ? Object.assign({}, endpointOption, { model_parameters: modelOptions })
+      : { model_parameters: modelOptions };
 
   const options = await getOptions({
     req,
@@ -136,6 +138,20 @@ const initializeAgent = async ({
     endpointOption: _endpointOption,
   });
 
+  const tokensModel =
+    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : modelOptions.model;
+  const maxTokens = optionalChainWithEmptyCheck(
+    modelOptions.maxOutputTokens,
+    modelOptions.maxTokens,
+    0,
+  );
+  const maxContextTokens = optionalChainWithEmptyCheck(
+    modelOptions.maxContextTokens,
+    modelOptions.max_context_tokens,
+    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
+    4096,
+  );
+
   if (
     agent.endpoint === EModelEndpoint.azureOpenAI &&
     options.llmConfig?.azureOpenAIApiInstanceName == null
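
`optionalChainWithEmptyCheck` is a helper local to this module; judging from how it is called here, it returns the first candidate value that is meaningfully set. A sketch of that assumed behavior, not the actual implementation:

// Assumed semantics: return the first argument that is not undefined, null, or ''.
function optionalChainWithEmptyCheck(...values) {
  for (const value of values) {
    if (value !== undefined && value !== null && value !== '') {
      return value;
    }
  }
  return values[values.length - 1];
}

// e.g. optionalChainWithEmptyCheck(undefined, '', 4096) === 4096

This also explains the trailing literals in the calls above: `0` and `4096` act as last-resort defaults.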
@@ -148,15 +164,11 @@ const initializeAgent = async ({
   }
 
   /** @type {import('@librechat/agents').ClientOptions} */
-  agent.model_parameters = Object.assign(model_parameters, options.llmConfig);
+  agent.model_parameters = { ...options.llmConfig };
   if (options.configOptions) {
     agent.model_parameters.configuration = options.configOptions;
   }
-
-  if (!agent.model_parameters.model) {
-    agent.model_parameters.model = agent.model;
-  }
 
   if (agent.instructions && agent.instructions !== '') {
     agent.instructions = replaceSpecialVars({
       text: agent.instructions,
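
A subtlety in the replaced line: `Object.assign(model_parameters, options.llmConfig)` mutated `model_parameters` in place, whereas the spread builds a fresh object from `llmConfig` alone; the merged model options now reach `getOptions` through `_endpointOption`, so re-merging them here is no longer needed. The mutation difference in isolation:

// Object.assign(target, src) mutates and returns `target`:
const target = { temperature: 0.7 };
Object.assign(target, { model: 'claude-3-5-sonnet' }); // target now has both keys

// The spread form copies into a new object and leaves its sources untouched:
const copy = { ...target };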
@@ -171,23 +183,11 @@ const initializeAgent = async ({
     });
   }
 
-  const tokensModel =
-    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : agent.model_parameters.model;
-  const maxTokens = optionalChainWithEmptyCheck(
-    agent.model_parameters.maxOutputTokens,
-    agent.model_parameters.maxTokens,
-    0,
-  );
-  const maxContextTokens = optionalChainWithEmptyCheck(
-    agent.model_parameters.maxContextTokens,
-    agent.max_context_tokens,
-    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
-    4096,
-  );
   return {
     ...agent,
     tools,
     attachments,
+    resendFiles,
     toolContextMap,
     maxContextTokens: (maxContextTokens - maxTokens) * 0.9,
   };
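
The returned `maxContextTokens` reserves the completion budget up front and keeps a 10% safety margin. With illustrative numbers:

// Suppose the model window resolves to 8192 and maxTokens to 1024:
const budget = (8192 - 1024) * 0.9; // 6451.2 tokens left for conversation context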


@@ -130,8 +130,8 @@ const initializeClient = async ({ req, res, endpointOption }) => {
     iconURL: endpointOption.iconURL,
     attachments: primaryConfig.attachments,
     endpointType: endpointOption.endpointType,
-    resendFiles: primaryConfig.resendFiles ?? true,
     maxContextTokens: primaryConfig.maxContextTokens,
+    resendFiles: primaryConfig.model_parameters?.resendFiles ?? true,
     endpoint:
       primaryConfig.id === Constants.EPHEMERAL_AGENT_ID
         ? primaryConfig.endpoint


@@ -1,4 +1,4 @@
-const { HttpsProxyAgent } = require('https-proxy-agent');
+const { ProxyAgent } = require('undici');
 const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
 const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');
@@ -67,7 +67,10 @@ function getLLMConfig(apiKey, options = {}) {
   }
 
   if (options.proxy) {
-    requestOptions.clientOptions.httpAgent = new HttpsProxyAgent(options.proxy);
+    const proxyAgent = new ProxyAgent(options.proxy);
+    requestOptions.clientOptions.fetchOptions = {
+      dispatcher: proxyAgent,
+    };
   }
 
   if (options.reverseProxyUrl) {
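
Recent versions of @anthropic-ai/sdk accept `fetchOptions` directly at client construction, which is presumably how this config is consumed downstream; a simplified sketch (the surrounding wiring is LibreChat's, condensed here, and the proxy URL is illustrative):

const Anthropic = require('@anthropic-ai/sdk');
const { ProxyAgent } = require('undici');

const client = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
  // Same shape as requestOptions.clientOptions.fetchOptions above.
  fetchOptions: { dispatcher: new ProxyAgent('http://proxy:8080') },
});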


@@ -21,8 +21,12 @@ describe('getLLMConfig', () => {
       proxy: 'http://proxy:8080',
     });
 
-    expect(result.llmConfig.clientOptions).toHaveProperty('httpAgent');
-    expect(result.llmConfig.clientOptions.httpAgent).toHaveProperty('proxy', 'http://proxy:8080');
+    expect(result.llmConfig.clientOptions).toHaveProperty('fetchOptions');
+    expect(result.llmConfig.clientOptions.fetchOptions).toHaveProperty('dispatcher');
+    expect(result.llmConfig.clientOptions.fetchOptions.dispatcher).toBeDefined();
+    expect(result.llmConfig.clientOptions.fetchOptions.dispatcher.constructor.name).toBe(
+      'ProxyAgent',
+    );
   });
 
   it('should include reverse proxy URL when provided', () => {
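
Asserting on `constructor.name` keeps the spec from depending on which undici instance the code under test loaded; if both resolve the same package, an `instanceof` assertion would be equivalent:

const { ProxyAgent } = require('undici');
expect(result.llmConfig.clientOptions.fetchOptions.dispatcher).toBeInstanceOf(ProxyAgent);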