🚀 feat: Agent Cache Tokens & Anthropic Reasoning Support (#6098)

* fix: handling of top_k and top_p parameters for Claude-3.7 models (allowed without reasoning)

* feat: bump @librechat/agents for Anthropic Reasoning support

* fix: update reasoning handling for OpenRouter integration

* fix: enhance agent token spending logic to include cache creation and read details

* fix: update logic for thinking status in ContentParts component

* refactor: improve agent title handling

* chore: bump @librechat/agents to version 2.1.7 for parallel tool calling for Google models
This commit is contained in:
Danny Avila 2025-02-27 12:59:51 -05:00 committed by GitHub
parent 34f967eff8
commit 9802629848
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 187 additions and 40 deletions

View file

@@ -29,7 +29,6 @@ function getLLMConfig(apiKey, options = {}) {
const {
modelOptions = {},
reverseProxyUrl,
useOpenRouter,
defaultQuery,
headers,
proxy,
@@ -56,9 +55,11 @@ function getLLMConfig(apiKey, options = {}) {
});
}
let useOpenRouter;
/** @type {OpenAIClientOptions['configuration']} */
const configOptions = {};
if (useOpenRouter || (reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter))) {
if (reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) {
useOpenRouter = true;
llmConfig.include_reasoning = true;
configOptions.baseURL = reverseProxyUrl;
configOptions.defaultHeaders = Object.assign(
@@ -118,6 +119,13 @@ function getLLMConfig(apiKey, options = {}) {
llmConfig.organization = process.env.OPENAI_ORGANIZATION;
}
if (useOpenRouter && llmConfig.reasoning_effort != null) {
llmConfig.reasoning = {
effort: llmConfig.reasoning_effort,
};
delete llmConfig.reasoning_effort;
}
return {
/** @type {OpenAIClientOptions} */
llmConfig,