Mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-03-05 15:50:19 +01:00)
🔃 fix: Draft Clearing, Claude Titles, Remove Default Vision Max Tokens (#6501)
* refactor: remove legacy max_tokens setting for vision models in OpenAIClient (intended for gpt-4-vision-preview)
* refactor: streamline capability checks in the loadAgentTools function; still allow actions if tools are disabled
* fix: enhance error handling for token limits in AnthropicClient and update the error message in translations
* feat: append a timestamp to cloned agent names for better identification
* chore: update @librechat/agents dependency to version 2.3.94
* refactor: remove the clearDraft helper from useSubmitMessage and centralize draft-clearing logic in SSE handling; this helps prevent user-message loss if a logout occurs
* refactor: increase the debounce time for the clearDraft function to improve auto-save performance
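The last two bullets centralize and slow down draft clearing. Below is a minimal sketch of the debounced draft-clearing pattern they describe, assuming a localStorage-backed draft store; the names here (`debounce`, `clearDraft`, `DRAFT_KEY_PREFIX`, the 1000 ms delay) are illustrative assumptions, not LibreChat's actual implementation.

```js
// Illustrative sketch only; LibreChat's real clearDraft is invoked from its
// SSE handling and uses the project's own storage and debounce utilities.
const DRAFT_KEY_PREFIX = 'draft-'; // hypothetical storage key prefix

function debounce(fn, delayMs) {
  let timer;
  return (...args) => {
    clearTimeout(timer);
    timer = setTimeout(() => fn(...args), delayMs);
  };
}

// The draft is removed only after the user has been idle for `delayMs`, so
// bursts of keystrokes don't thrash storage. Clearing from the SSE success
// path (rather than on submit) means the draft survives if the request fails
// or the user is logged out mid-send.
const clearDraft = debounce((conversationId) => {
  localStorage.removeItem(`${DRAFT_KEY_PREFIX}${conversationId}`);
}, 1000); // the commit raises this delay; 1000 ms is an assumed value
```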
This commit is contained in:
parent
20f353630e
commit
4b85fe9206
11 changed files with 634 additions and 590 deletions
@@ -2,6 +2,7 @@ const Anthropic = require('@anthropic-ai/sdk');
 const { HttpsProxyAgent } = require('https-proxy-agent');
 const {
   Constants,
+  ErrorTypes,
   EModelEndpoint,
   anthropicSettings,
   getResponseSender,
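The newly imported `ErrorTypes` supplies the `INPUT_LENGTH` tag used in the structured error payloads below. A hedged sketch of the shape such an enum takes; the actual definition in LibreChat's data-provider package may carry more members and different values:

```js
// Assumed shape for illustration; not the package's verbatim definition.
const ErrorTypes = {
  INPUT_LENGTH: 'INPUT_LENGTH',
};
```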
@@ -147,12 +148,17 @@ class AnthropicClient extends BaseClient {
     this.maxPromptTokens =
       this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;

-    if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
-      throw new Error(
-        `maxPromptTokens + maxOutputTokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
-          this.maxPromptTokens + this.maxResponseTokens
-        }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
-      );
+    const reservedTokens = this.maxPromptTokens + this.maxResponseTokens;
+    if (reservedTokens > this.maxContextTokens) {
+      const info = `Total Possible Tokens + Max Output Tokens must be less than or equal to Max Context Tokens: ${this.maxPromptTokens} (total possible output) + ${this.maxResponseTokens} (max output) = ${reservedTokens}/${this.maxContextTokens} (max context)`;
+      const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
+      logger.warn(info);
+      throw new Error(errorMessage);
+    } else if (this.maxResponseTokens === this.maxContextTokens) {
+      const info = `Max Output Tokens must be less than Max Context Tokens: ${this.maxResponseTokens} (max output) = ${this.maxContextTokens} (max context)`;
+      const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
+      logger.warn(info);
+      throw new Error(errorMessage);
     }

     this.sender =
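Instead of throwing a bare prose message, the rewritten check logs the human-readable `info` and throws an `Error` whose message is a JSON payload tagged with `ErrorTypes.INPUT_LENGTH`, so downstream handlers can branch on the error type. A hedged sketch of how a consumer might unpack it, assuming the message is exactly the JSON string built above:

```js
// Illustrative consumer; LibreChat's actual error handling may differ.
function parseTokenLimitError(err) {
  try {
    const { type, info } = JSON.parse(err.message);
    return { type, info };
  } catch {
    return null; // not a structured token-limit error
  }
}
```

A handler can then surface a targeted "input too long" message when `type === 'INPUT_LENGTH'` instead of a generic failure.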
@@ -1185,10 +1185,6 @@ ${convo}
       opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
     }

-    if (this.isVisionModel) {
-      modelOptions.max_tokens = 4000;
-    }
-
     /** @type {TAzureConfig | undefined} */
    const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];

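This removal drops the hardcoded 4000-token output cap that was originally aimed at the vision preview model. A minimal sketch of the resulting behavior, with hypothetical names (`buildCompletionPayload` is not OpenAIClient code); the point is that `max_tokens` is now sent only when explicitly configured, letting the provider's own default apply:

```js
// Hypothetical illustration of the change in effect, not OpenAIClient code.
function buildCompletionPayload(modelOptions) {
  const payload = { model: modelOptions.model, messages: modelOptions.messages };
  // Previously: vision models were always forced to max_tokens = 4000.
  // Now: include max_tokens only if the user/endpoint actually set one.
  if (modelOptions.max_tokens != null) {
    payload.max_tokens = modelOptions.max_tokens;
  }
  return payload;
}
```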