Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-17 17:00:15 +01:00
📦 chore: Bump Agents Packages (#7992)
* chore: update peer dependency for @librechat/agents to version 2.4.41
* 🔧 chore: proxy handling in OpenAI endpoint to use undici
* 🔧 chore: update @anthropic-ai/sdk to version 0.52.0 and refactor proxy handling to use undici
* 🔧 chore: update globIgnores in vite.config.ts to exclude index.html from caching
* 🔧 ci: update proxy handling in getLLMConfig to use fetchOptions and ProxyAgent
* 🔧 chore: refactor proxy handling in Anthropic and OpenAI clients to use fetchOptions
* refactor: agent initialization to streamline model parameters and resendFiles handling
* chore: update @google/generative-ai to version 0.24.0
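Several of the proxy entries above name the same pattern: an undici ProxyAgent handed to each SDK through fetchOptions. A minimal sketch of that pattern, not the commit's exact getLLMConfig code (the PROXY env var name and the client config shape here are illustrative assumptions):

```js
const { ProxyAgent } = require('undici');

// Illustrative helper (assumption, not LibreChat's actual code): when a
// proxy URL is configured, attach an undici ProxyAgent as the dispatcher
// used by fetch-based SDK transports.
function withProxy(proxyUrl) {
  const fetchOptions = {};
  if (proxyUrl) {
    fetchOptions.dispatcher = new ProxyAgent(proxyUrl);
  }
  return fetchOptions;
}

// Example shape of use with an OpenAI-style client config:
const clientConfig = {
  apiKey: process.env.OPENAI_API_KEY,
  fetchOptions: withProxy(process.env.PROXY), // PROXY is an assumed env name
};
```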
Parent: 97085073d2
Commit: fa54c9ae90

12 changed files with 598 additions and 262 deletions. The hunks below cover the initializeAgent changes from the agent-initialization refactor.
```diff
@@ -63,11 +63,17 @@ const initializeAgent = async ({
   }
   let currentFiles;
 
-  if (
-    isInitialAgent &&
-    conversationId != null &&
-    (agent.model_parameters?.resendFiles ?? true) === true
-  ) {
+  const _modelOptions = structuredClone(
+    Object.assign(
+      { model: agent.model },
+      agent.model_parameters ?? { model: agent.model },
+      isInitialAgent === true ? endpointOption?.model_parameters : {},
+    ),
+  );
+
+  const { resendFiles = true, ...modelOptions } = _modelOptions;
+
+  if (isInitialAgent && conversationId != null && resendFiles) {
     const fileIds = (await getConvoFiles(conversationId)) ?? [];
     /** @type {Set<EToolResources>} */
     const toolResourceSet = new Set();
```
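The key move in this hunk is peeling the resendFiles flag off the cloned parameters with rest destructuring, so the remaining modelOptions can be passed to providers without the UI-level flag. A standalone illustration with made-up values:

```js
// Illustrative only; mirrors the destructuring pattern in the hunk above.
const _modelOptions = structuredClone({
  model: 'gpt-4.1',   // hypothetical model name
  temperature: 0.7,
  resendFiles: false, // UI-level flag, not a provider parameter
});

// resendFiles defaults to true when absent; the rest goes to the provider.
const { resendFiles = true, ...modelOptions } = _modelOptions;

console.log(resendFiles);  // false
console.log(modelOptions); // { model: 'gpt-4.1', temperature: 0.7 }
```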
```diff
@@ -117,15 +123,11 @@ const initializeAgent = async ({
     getOptions = initCustom;
     agent.provider = Providers.OPENAI;
   }
-  const model_parameters = Object.assign(
-    {},
-    agent.model_parameters ?? { model: agent.model },
-    isInitialAgent === true ? endpointOption?.model_parameters : {},
-  );
 
   const _endpointOption =
     isInitialAgent === true
-      ? Object.assign({}, endpointOption, { model_parameters })
-      : { model_parameters };
+      ? Object.assign({}, endpointOption, { model_parameters: modelOptions })
+      : { model_parameters: modelOptions };
 
   const options = await getOptions({
     req,
```
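Since modelOptions already carries the merged parameters built in the first hunk, the local model_parameters merge becomes redundant and _endpointOption just overrides the endpoint option's model_parameters wholesale. A sketch of that shallow-merge behavior (the endpoint field and values are invented for illustration):

```js
const endpointOption = {
  endpoint: 'openAI', // invented example field
  model_parameters: { model: 'gpt-4o-mini', temperature: 1 },
};
const modelOptions = { model: 'gpt-4o', temperature: 0.2 };

// Later sources win: model_parameters is replaced as a whole, while the
// other endpointOption fields carry over unchanged.
const _endpointOption = Object.assign({}, endpointOption, {
  model_parameters: modelOptions,
});
// => { endpoint: 'openAI',
//      model_parameters: { model: 'gpt-4o', temperature: 0.2 } }
```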
```diff
@@ -136,6 +138,20 @@ const initializeAgent = async ({
     endpointOption: _endpointOption,
   });
 
+  const tokensModel =
+    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : modelOptions.model;
+  const maxTokens = optionalChainWithEmptyCheck(
+    modelOptions.maxOutputTokens,
+    modelOptions.maxTokens,
+    0,
+  );
+  const maxContextTokens = optionalChainWithEmptyCheck(
+    modelOptions.maxContextTokens,
+    modelOptions.max_context_tokens,
+    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
+    4096,
+  );
+
   if (
     agent.endpoint === EModelEndpoint.azureOpenAI &&
     options.llmConfig?.azureOpenAIApiInstanceName == null
```
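The token limits above funnel through optionalChainWithEmptyCheck, whose source is not part of this diff. A plausible sketch of its semantics, assuming it returns the first argument that is neither undefined, null, nor an empty string:

```js
// Sketch only; the real helper in LibreChat may differ.
function optionalChainWithEmptyCheck(...values) {
  for (const value of values) {
    if (value !== undefined && value !== null && value !== '') {
      return value;
    }
  }
  return values[values.length - 1];
}

// Under that reading, maxContextTokens falls back through the explicit
// options, then the model lookup, and finally to 4096:
// optionalChainWithEmptyCheck(undefined, undefined, undefined, 4096) === 4096
```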
```diff
@@ -148,15 +164,11 @@ const initializeAgent = async ({
   }
 
   /** @type {import('@librechat/agents').ClientOptions} */
-  agent.model_parameters = Object.assign(model_parameters, options.llmConfig);
+  agent.model_parameters = { ...options.llmConfig };
   if (options.configOptions) {
     agent.model_parameters.configuration = options.configOptions;
   }
 
   if (!agent.model_parameters.model) {
     agent.model_parameters.model = agent.model;
   }
 
   if (agent.instructions && agent.instructions !== '') {
     agent.instructions = replaceSpecialVars({
       text: agent.instructions,
```
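The one-line change in this hunk swaps a mutating merge for a fresh copy: Object.assign writes the llmConfig keys into the pre-built model_parameters object, while the spread form starts from the provider config alone. A minimal illustration with invented values:

```js
const llmConfig = { model: 'gpt-4o', temperature: 0.5 }; // hypothetical values

// Old pattern: mutates the pre-built parameters object in place.
const model_parameters = { temperature: 0.7 };
Object.assign(model_parameters, llmConfig);
// model_parameters is now { temperature: 0.5, model: 'gpt-4o' }

// New pattern: a fresh shallow copy of llmConfig only; nothing previously
// referenced is mutated.
const fresh_parameters = { ...llmConfig };
// fresh_parameters is { model: 'gpt-4o', temperature: 0.5 }
```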
```diff
@@ -171,23 +183,11 @@ const initializeAgent = async ({
     });
   }
 
-  const tokensModel =
-    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : agent.model_parameters.model;
-  const maxTokens = optionalChainWithEmptyCheck(
-    agent.model_parameters.maxOutputTokens,
-    agent.model_parameters.maxTokens,
-    0,
-  );
-  const maxContextTokens = optionalChainWithEmptyCheck(
-    agent.model_parameters.maxContextTokens,
-    agent.max_context_tokens,
-    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
-    4096,
-  );
   return {
     ...agent,
     tools,
     attachments,
+    resendFiles,
     toolContextMap,
     maxContextTokens: (maxContextTokens - maxTokens) * 0.9,
   };
```
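The returned budget, maxContextTokens: (maxContextTokens - maxTokens) * 0.9, reserves the completion allowance and then keeps a 10% safety margin. For example, with the 4096-token fallback and maxTokens resolving to 1024, the agent gets (4096 - 1024) * 0.9 = 2764.8 tokens of context.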