📦 chore: Bump Agents Packages (#7992)

* chore: update peer dependency for @librechat/agents to version 2.4.41

* 🔧 chore: update proxy handling in OpenAI endpoint to use undici

* 🔧 chore: update @anthropic-ai/sdk to version 0.52.0 and refactor proxy handling to use undici

* 🔧 chore: update globIgnores in vite.config.ts to exclude index.html from caching

* 🔧 ci: update proxy handling in getLLMConfig to use fetchOptions and ProxyAgent

* 🔧 chore: refactor proxy handling in Anthropic and OpenAI clients to use fetchOptions

* refactor: agent initialization to streamline model parameters and resendFiles handling

* chore: update @google/generative-ai to version 0.24.0
Danny Avila · committed 2025-06-20 15:49:24 -04:00 (via GitHub)
Parent 97085073d2 · Commit fa54c9ae90
12 changed files with 598 additions and 262 deletions


@@ -190,10 +190,11 @@ class AnthropicClient extends BaseClient {
         reverseProxyUrl: this.options.reverseProxyUrl,
       }),
       apiKey: this.apiKey,
+      fetchOptions: {},
     };
     if (this.options.proxy) {
-      options.httpAgent = new HttpsProxyAgent(this.options.proxy);
+      options.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy);
     }
     if (this.options.reverseProxyUrl) {

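For context, a minimal hedged sketch of the pattern this hunk adopts: the client options carry an empty `fetchOptions` bag, and a proxy agent is attached only when a proxy is configured. This assumes `@anthropic-ai/sdk` forwards `fetchOptions` into its underlying `fetch()` calls, consistent with the constructor usage above; the proxy URL source is illustrative.

```js
const Anthropic = require('@anthropic-ai/sdk');
const { HttpsProxyAgent } = require('https-proxy-agent');

const proxyUrl = process.env.PROXY_URL; // illustrative
const options = { apiKey: process.env.ANTHROPIC_API_KEY, fetchOptions: {} };
if (proxyUrl) {
  // node-fetch-style `agent` option; an undici-based fetch uses `dispatcher`
  // instead, as the getLLMConfig hunks further down do.
  options.fetchOptions.agent = new HttpsProxyAgent(proxyUrl);
}
const client = new Anthropic(options);
```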

@@ -1159,6 +1159,7 @@ ${convo}
     logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions });
     const opts = {
       baseURL,
+      fetchOptions: {},
     };
     if (this.useOpenRouter) {
@@ -1177,7 +1178,7 @@ ${convo}
     }
     if (this.options.proxy) {
-      opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
+      opts.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy);
     }
     /** @type {TAzureConfig | undefined} */
@@ -1395,7 +1396,7 @@ ${convo}
       ...modelOptions,
       stream: true,
     };
-    const stream = await openai.beta.chat.completions
+    const stream = await openai.chat.completions
       .stream(params)
       .on('abort', () => {
         /* Do nothing here */

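The last hunk tracks an openai-node API move: the streaming helper that used to live under the `beta` namespace is now called directly as `openai.chat.completions.stream()`. A hedged sketch of the call shape — the model name and handler body are illustrative:

```js
const OpenAI = require('openai');

async function demo() {
  const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
  const stream = openai.chat.completions
    .stream({
      model: 'gpt-4o-mini', // illustrative
      messages: [{ role: 'user', content: 'Say hi' }],
    })
    .on('abort', () => {
      /* Do nothing here */
    });
  const completion = await stream.finalChatCompletion();
  console.log(completion.choices[0].message.content);
}

demo().catch(console.error);
```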

@@ -309,7 +309,7 @@ describe('AnthropicClient', () => {
       };
       client.setOptions({ modelOptions, promptCache: true });
       const anthropicClient = client.getClient(modelOptions);
-      expect(anthropicClient.defaultHeaders).not.toHaveProperty('anthropic-beta');
+      expect(anthropicClient._options.defaultHeaders).toBeUndefined();
     });
     it('should not add beta header for other models', () => {
@@ -320,7 +320,7 @@
         },
       });
       const anthropicClient = client.getClient();
-      expect(anthropicClient.defaultHeaders).not.toHaveProperty('anthropic-beta');
+      expect(anthropicClient._options.defaultHeaders).toBeUndefined();
     });
   });


@@ -34,21 +34,21 @@
   },
   "homepage": "https://librechat.ai",
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.37.0",
+    "@anthropic-ai/sdk": "^0.52.0",
     "@aws-sdk/client-s3": "^3.758.0",
     "@aws-sdk/s3-request-presigner": "^3.758.0",
     "@azure/identity": "^4.7.0",
     "@azure/search-documents": "^12.0.0",
     "@azure/storage-blob": "^12.27.0",
-    "@google/generative-ai": "^0.23.0",
+    "@google/generative-ai": "^0.24.0",
     "@googleapis/youtube": "^20.0.0",
     "@keyv/redis": "^4.3.3",
-    "@langchain/community": "^0.3.44",
-    "@langchain/core": "^0.3.57",
-    "@langchain/google-genai": "^0.2.9",
-    "@langchain/google-vertexai": "^0.2.9",
+    "@langchain/community": "^0.3.47",
+    "@langchain/core": "^0.3.60",
+    "@langchain/google-genai": "^0.2.13",
+    "@langchain/google-vertexai": "^0.2.13",
     "@langchain/textsplitters": "^0.1.0",
-    "@librechat/agents": "^2.4.38",
+    "@librechat/agents": "^2.4.41",
     "@librechat/api": "*",
     "@librechat/data-schemas": "*",
     "@node-saml/passport-saml": "^5.0.0",


@@ -63,11 +63,17 @@ const initializeAgent = async ({
   }
   let currentFiles;
-  if (
-    isInitialAgent &&
-    conversationId != null &&
-    (agent.model_parameters?.resendFiles ?? true) === true
-  ) {
+  const _modelOptions = structuredClone(
+    Object.assign(
+      { model: agent.model },
+      agent.model_parameters ?? { model: agent.model },
+      isInitialAgent === true ? endpointOption?.model_parameters : {},
+    ),
+  );
+  const { resendFiles = true, ...modelOptions } = _modelOptions;
+  if (isInitialAgent && conversationId != null && resendFiles) {
     const fileIds = (await getConvoFiles(conversationId)) ?? [];
     /** @type {Set<EToolResources>} */
     const toolResourceSet = new Set();
@@ -117,15 +123,11 @@ const initializeAgent = async ({
     getOptions = initCustom;
     agent.provider = Providers.OPENAI;
   }
-  const model_parameters = Object.assign(
-    {},
-    agent.model_parameters ?? { model: agent.model },
-    isInitialAgent === true ? endpointOption?.model_parameters : {},
-  );
   const _endpointOption =
     isInitialAgent === true
-      ? Object.assign({}, endpointOption, { model_parameters })
-      : { model_parameters };
+      ? Object.assign({}, endpointOption, { model_parameters: modelOptions })
+      : { model_parameters: modelOptions };
   const options = await getOptions({
     req,
@@ -136,6 +138,20 @@ const initializeAgent = async ({
     endpointOption: _endpointOption,
   });
+  const tokensModel =
+    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : modelOptions.model;
+  const maxTokens = optionalChainWithEmptyCheck(
+    modelOptions.maxOutputTokens,
+    modelOptions.maxTokens,
+    0,
+  );
+  const maxContextTokens = optionalChainWithEmptyCheck(
+    modelOptions.maxContextTokens,
+    modelOptions.max_context_tokens,
+    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
+    4096,
+  );
   if (
     agent.endpoint === EModelEndpoint.azureOpenAI &&
     options.llmConfig?.azureOpenAIApiInstanceName == null
@@ -148,15 +164,11 @@ const initializeAgent = async ({
   }
   /** @type {import('@librechat/agents').ClientOptions} */
-  agent.model_parameters = Object.assign(model_parameters, options.llmConfig);
+  agent.model_parameters = { ...options.llmConfig };
   if (options.configOptions) {
     agent.model_parameters.configuration = options.configOptions;
   }
-  if (!agent.model_parameters.model) {
-    agent.model_parameters.model = agent.model;
-  }
   if (agent.instructions && agent.instructions !== '') {
     agent.instructions = replaceSpecialVars({
       text: agent.instructions,
@@ -171,23 +183,11 @@ const initializeAgent = async ({
     });
   }
-  const tokensModel =
-    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : agent.model_parameters.model;
-  const maxTokens = optionalChainWithEmptyCheck(
-    agent.model_parameters.maxOutputTokens,
-    agent.model_parameters.maxTokens,
-    0,
-  );
-  const maxContextTokens = optionalChainWithEmptyCheck(
-    agent.model_parameters.maxContextTokens,
-    agent.max_context_tokens,
-    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
-    4096,
-  );
   return {
     ...agent,
     tools,
     attachments,
+    resendFiles,
     toolContextMap,
     maxContextTokens: (maxContextTokens - maxTokens) * 0.9,
   };

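Taken together, these hunks merge the model parameters once up front, peel the chat-level `resendFiles` flag off before the remainder is passed along as `model_parameters`, and derive the token budget from the cleaned `modelOptions`. A self-contained sketch of that flow — the function name and sample values are illustrative, while the merge shape and the 10% margin mirror the diff:

```js
// structuredClone requires Node 17+.
function splitModelOptions(agent, endpointModelParams = {}, isInitialAgent = true) {
  const merged = structuredClone(
    Object.assign(
      { model: agent.model },
      agent.model_parameters ?? {},
      isInitialAgent ? endpointModelParams : {},
    ),
  );
  // resendFiles is a chat-level toggle, not an LLM parameter:
  const { resendFiles = true, ...modelOptions } = merged;
  return { resendFiles, modelOptions };
}

const { resendFiles, modelOptions } = splitModelOptions({
  model: 'gpt-4o-mini', // illustrative
  model_parameters: { temperature: 0.7, resendFiles: false },
});
console.log(resendFiles); // false
console.log(modelOptions); // { model: 'gpt-4o-mini', temperature: 0.7 }

// Token budget as computed in the return value: reserve the output
// allowance, then keep a 10% safety margin.
const maxContextTokens = 8192; // e.g. from getModelMaxTokens()
const maxTokens = 1024; // reserved for the model's output
console.log((maxContextTokens - maxTokens) * 0.9); // 6451.2
```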

@@ -130,8 +130,8 @@ const initializeClient = async ({ req, res, endpointOption }) => {
     iconURL: endpointOption.iconURL,
     attachments: primaryConfig.attachments,
     endpointType: endpointOption.endpointType,
-    resendFiles: primaryConfig.resendFiles ?? true,
     maxContextTokens: primaryConfig.maxContextTokens,
+    resendFiles: primaryConfig.model_parameters?.resendFiles ?? true,
     endpoint:
       primaryConfig.id === Constants.EPHEMERAL_AGENT_ID
         ? primaryConfig.endpoint


@@ -1,4 +1,4 @@
-const { HttpsProxyAgent } = require('https-proxy-agent');
+const { ProxyAgent } = require('undici');
 const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
 const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');
@@ -67,7 +67,10 @@ function getLLMConfig(apiKey, options = {}) {
   }
   if (options.proxy) {
-    requestOptions.clientOptions.httpAgent = new HttpsProxyAgent(options.proxy);
+    const proxyAgent = new ProxyAgent(options.proxy);
+    requestOptions.clientOptions.fetchOptions = {
+      dispatcher: proxyAgent,
+    };
   }
   if (options.reverseProxyUrl) {

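The core of the undici switch: `ProxyAgent` is a `Dispatcher`, and an undici-based `fetch` (including Node's built-in one) routes a request through it via the `dispatcher` option — this is what `clientOptions.fetchOptions` now carries instead of an `httpAgent`. A minimal standalone sketch, with proxy and target URLs illustrative:

```js
const { ProxyAgent, fetch } = require('undici');

const dispatcher = new ProxyAgent('http://proxy:8080'); // illustrative

async function main() {
  // The request is tunneled through the proxy by the dispatcher.
  const res = await fetch('https://example.com', { dispatcher });
  console.log(res.status);
}

main().catch(console.error);
```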

@@ -21,8 +21,12 @@ describe('getLLMConfig', () => {
       proxy: 'http://proxy:8080',
     });
-    expect(result.llmConfig.clientOptions).toHaveProperty('httpAgent');
-    expect(result.llmConfig.clientOptions.httpAgent).toHaveProperty('proxy', 'http://proxy:8080');
+    expect(result.llmConfig.clientOptions).toHaveProperty('fetchOptions');
+    expect(result.llmConfig.clientOptions.fetchOptions).toHaveProperty('dispatcher');
+    expect(result.llmConfig.clientOptions.fetchOptions.dispatcher).toBeDefined();
+    expect(result.llmConfig.clientOptions.fetchOptions.dispatcher.constructor.name).toBe(
+      'ProxyAgent',
+    );
   });
   it('should include reverse proxy URL when provided', () => {


@@ -46,7 +46,7 @@ export default defineConfig(({ command }) => ({
         'assets/maskable-icon.png',
         'manifest.webmanifest',
       ],
-      globIgnores: ['images/**/*', '**/*.map'],
+      globIgnores: ['images/**/*', '**/*.map', 'index.html'],
       maximumFileSizeToCacheInBytes: 4 * 1024 * 1024,
       navigateFallbackDenylist: [/^\/oauth/, /^\/api/],
     },

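Excluding `index.html` from the precache manifest means the service worker always fetches the app shell from the network, so a new deploy's hashed bundles are picked up rather than a stale cached shell. A trimmed sketch of the surrounding config, assuming it uses `vite-plugin-pwa` (consistent with the Workbox options shown in the diff):

```js
import { defineConfig } from 'vite';
import { VitePWA } from 'vite-plugin-pwa';

export default defineConfig({
  plugins: [
    VitePWA({
      workbox: {
        // index.html is no longer precached:
        globIgnores: ['images/**/*', '**/*.map', 'index.html'],
        maximumFileSizeToCacheInBytes: 4 * 1024 * 1024,
        navigateFallbackDenylist: [/^\/oauth/, /^\/api/],
      },
    }),
  ],
});
```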
package-lock.json (generated, 744 changes) — diff suppressed because it is too large


@@ -69,7 +69,7 @@
     "registry": "https://registry.npmjs.org/"
   },
   "peerDependencies": {
-    "@librechat/agents": "^2.4.37",
+    "@librechat/agents": "^2.4.41",
     "@librechat/data-schemas": "*",
     "@modelcontextprotocol/sdk": "^1.12.3",
     "axios": "^1.8.2",
@@ -80,6 +80,7 @@
     "librechat-data-provider": "*",
     "node-fetch": "2.7.0",
     "tiktoken": "^1.0.15",
+    "undici": "^7.10.0",
     "zod": "^3.22.4"
   }
 }


@@ -1,4 +1,4 @@
-import { HttpsProxyAgent } from 'https-proxy-agent';
+import { ProxyAgent } from 'undici';
 import { KnownEndpoints } from 'librechat-data-provider';
 import type * as t from '~/types';
 import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
@@ -102,8 +102,10 @@ export function getOpenAIConfig(
   }
   if (proxy) {
-    const proxyAgent = new HttpsProxyAgent(proxy);
-    configOptions.httpAgent = proxyAgent;
+    const proxyAgent = new ProxyAgent(proxy);
+    configOptions.fetchOptions = {
+      dispatcher: proxyAgent,
+    };
   }
   if (azure) {