🚀 feat: Agent Cache Tokens & Anthropic Reasoning Support (#6098)

* fix: handling of top_k and top_p parameters for Claude-3.7 models (allowed without reasoning)

* feat: bump @librechat/agents for Anthropic Reasoning support

* fix: update reasoning handling for OpenRouter integration

* fix: enhance agent token spending logic to include cache creation and read details

* fix: update logic for thinking status in ContentParts component

* refactor: improve agent title handling

* chore: bump @librechat/agents to version 2.1.7 for parallel tool calling for Google models
This commit is contained in:
Danny Avila 2025-02-27 12:59:51 -05:00 committed by GitHub
parent 34f967eff8
commit 9802629848
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 187 additions and 40 deletions

View file

@@ -746,15 +746,6 @@ class AnthropicClient extends BaseClient {
metadata,
};
if (!/claude-3[-.]7/.test(model)) {
if (top_p !== undefined) {
requestOptions.top_p = top_p;
}
if (top_k !== undefined) {
requestOptions.top_k = top_k;
}
}
if (this.useMessages) {
requestOptions.messages = payload;
requestOptions.max_tokens =
@@ -769,6 +760,14 @@ class AnthropicClient extends BaseClient {
thinkingBudget: this.options.thinkingBudget,
});
if (!/claude-3[-.]7/.test(model)) {
requestOptions.top_p = top_p;
requestOptions.top_k = top_k;
} else if (requestOptions.thinking == null) {
requestOptions.topP = top_p;
requestOptions.topK = top_k;
}
if (this.systemMessage && this.supportsCacheControl === true) {
requestOptions.system = [
{