🧠 feat: Bedrock Anthropic Reasoning & Update Endpoint Handling (#6163)

* feat: Add thinking and thinkingBudget parameters for Bedrock Anthropic models

* chore: Update @librechat/agents to version 2.1.8

* refactor: change region order in params

* refactor: Add maxTokens parameter to conversation preset schema

* refactor: Update agent client to use bedrockInputSchema and improve error handling for model parameters

* refactor: streamline/optimize llmConfig initialization and saving for bedrock

* fix: ensure config titleModel is used for all endpoints

* refactor: enhance OpenAIClient and agent initialization to support endpoint checks for OpenRouter

* chore: bump @google/generative-ai
Danny Avila 2025-03-03 19:09:22 -05:00 committed by GitHub
parent 3accf91094
commit ceb0da874b
17 changed files with 2224 additions and 667 deletions
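The headline change wires Anthropic's extended reasoning into the Bedrock endpoint. As a rough sketch, a request body carrying the new parameters might look like the following (the parameter names come from the commit message; the model ID and values are illustrative, not taken from this diff):

const exampleRequestBody = {
  endpoint: 'bedrock',
  model: 'anthropic.claude-3-7-sonnet-20250219-v1:0', // assumed model ID
  thinking: true, // enable Anthropic reasoning output
  thinkingBudget: 2000, // assumed: token budget the model may spend on reasoning
};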

View file

@@ -101,6 +101,7 @@ const initializeAgentOptions = async ({
   });
   const provider = agent.provider;
+  agent.endpoint = provider;
   let getOptions = providerConfigMap[provider];
   if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
     agent.provider = provider.toLowerCase();
@@ -112,9 +113,7 @@
     }
     getOptions = initCustom;
     agent.provider = Providers.OPENAI;
+    agent.endpoint = provider.toLowerCase();
   }
   const model_parameters = Object.assign(
     {},
     agent.model_parameters ?? { model: agent.model },
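Taken together, the two hunks above record the user-facing endpoint name on the agent while the provider is normalized for client selection. A self-contained sketch of that flow for a custom endpoint (enum value and map contents are assumed stand-ins):

// Stand-ins so the sketch runs on its own.
const Providers = { OPENAI: 'openAI' }; // assumed enum value
const providerConfigMap = { bedrock: () => ({}) };
const initCustom = () => ({});

const agent = { provider: 'OpenRouter' };
const provider = agent.provider;
agent.endpoint = provider; // new: preserve the original endpoint name
let getOptions = providerConfigMap[provider];
if (!getOptions && providerConfigMap[provider.toLowerCase()] == null) {
  // Neither the exact nor the lowercased provider is known: treat as a custom endpoint.
  getOptions = initCustom;
  agent.provider = Providers.OPENAI;
  agent.endpoint = provider.toLowerCase();
}
console.log(agent); // { provider: 'openAI', endpoint: 'openrouter' }

The lowercased endpoint name is what later enables name-based checks such as the OpenRouter detection in getLLMConfig (last file below).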

View file

@@ -27,6 +27,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
   if (anthropicConfig) {
     clientOptions.streamRate = anthropicConfig.streamRate;
+    clientOptions.titleModel = anthropicConfig.titleModel;
   }
   /** @type {undefined | TBaseEndpoint} */

View file

@@ -1,6 +1,5 @@
-const { removeNullishValues, bedrockInputParser } = require('librechat-data-provider');
+const { removeNullishValues } = require('librechat-data-provider');
 const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
-const { logger } = require('~/config');
 const buildOptions = (endpoint, parsedBody) => {
   const {
@@ -15,12 +14,6 @@ const buildOptions = (endpoint, parsedBody) => {
     artifacts,
     ...model_parameters
   } = parsedBody;
-  let parsedParams = model_parameters;
-  try {
-    parsedParams = bedrockInputParser.parse(model_parameters);
-  } catch (error) {
-    logger.warn('Failed to parse bedrock input', error);
-  }
   const endpointOption = removeNullishValues({
     endpoint,
     name,
@@ -31,7 +24,7 @@
     spec,
     promptPrefix,
     maxContextTokens,
-    model_parameters: parsedParams,
+    model_parameters,
   });
   if (typeof artifacts === 'string') {
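buildOptions no longer pre-parses Bedrock parameters; model_parameters now pass through untouched and are validated once in the Bedrock options service (two files below). For context, removeNullishValues is assumed here to behave roughly like this hypothetical re-implementation (illustration only, not the library's source):

// Hypothetical re-implementation of the assumed contract: drop null/undefined entries.
function removeNullishValues(obj) {
  return Object.fromEntries(Object.entries(obj).filter(([, value]) => value != null));
}

console.log(removeNullishValues({ endpoint: 'bedrock', name: undefined, spec: null, maxContextTokens: 8192 }));
// -> { endpoint: 'bedrock', maxContextTokens: 8192 }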

View file

@@ -1,14 +1,16 @@
 const { HttpsProxyAgent } = require('https-proxy-agent');
 const {
-  EModelEndpoint,
-  Constants,
   AuthType,
+  Constants,
+  EModelEndpoint,
+  bedrockInputParser,
+  bedrockOutputParser,
   removeNullishValues,
 } = require('librechat-data-provider');
 const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
 const { sleep } = require('~/server/utils');
-const getOptions = async ({ req, endpointOption }) => {
+const getOptions = async ({ req, overrideModel, endpointOption }) => {
   const {
     BEDROCK_AWS_SECRET_ACCESS_KEY,
     BEDROCK_AWS_ACCESS_KEY_ID,
@@ -62,39 +64,44 @@
   /** @type {BedrockClientOptions} */
   const requestOptions = {
-    model: endpointOption.model,
+    model: overrideModel ?? endpointOption.model,
     region: BEDROCK_AWS_DEFAULT_REGION,
     streaming: true,
     streamUsage: true,
-    callbacks: [
-      {
-        handleLLMNewToken: async () => {
-          if (!streamRate) {
-            return;
-          }
-          await sleep(streamRate);
-        },
-      },
-    ],
   };
-  if (credentials) {
-    requestOptions.credentials = credentials;
-  }
-  if (BEDROCK_REVERSE_PROXY) {
-    requestOptions.endpointHost = BEDROCK_REVERSE_PROXY;
-  }
   const configOptions = {};
   if (PROXY) {
     /** NOTE: NOT SUPPORTED BY BEDROCK */
     configOptions.httpAgent = new HttpsProxyAgent(PROXY);
   }
+  const llmConfig = bedrockOutputParser(
+    bedrockInputParser.parse(
+      removeNullishValues(Object.assign(requestOptions, endpointOption.model_parameters)),
+    ),
+  );
+  if (credentials) {
+    llmConfig.credentials = credentials;
+  }
+  if (BEDROCK_REVERSE_PROXY) {
+    llmConfig.endpointHost = BEDROCK_REVERSE_PROXY;
+  }
+  llmConfig.callbacks = [
+    {
+      handleLLMNewToken: async () => {
+        if (!streamRate) {
+          return;
+        }
+        await sleep(streamRate);
+      },
+    },
+  ];
   return {
-    /** @type {BedrockClientOptions} */
-    llmConfig: removeNullishValues(Object.assign(requestOptions, endpointOption.model_parameters)),
+    llmConfig,
     configOptions,
   };
 };
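This is the core of the streamlining: defaults and user model_parameters are merged, stripped of nullish values, validated by bedrockInputParser, and transformed by bedrockOutputParser before credentials, the reverse-proxy host, and stream callbacks are attached. A standalone sketch of the pipeline order with stand-in parsers (the real ones are schema validators in librechat-data-provider and do far more):

// Stand-ins so the sketch runs on its own; the order of operations matches the diff.
const bedrockInputParser = { parse: (v) => v };
const bedrockOutputParser = (v) => v;
const removeNullishValues = (obj) =>
  Object.fromEntries(Object.entries(obj).filter(([, value]) => value != null));

const requestOptions = {
  model: 'anthropic.claude-3-7-sonnet-20250219-v1:0', // assumed model ID
  region: 'us-east-1', // assumed region
  streaming: true,
  streamUsage: true,
};
const model_parameters = { thinking: true, thinkingBudget: 2000, maxTokens: null };

const llmConfig = bedrockOutputParser(
  bedrockInputParser.parse(
    removeNullishValues(Object.assign(requestOptions, model_parameters)),
  ),
);
console.log(llmConfig); // merged options with maxTokens (null) stripped

One practical effect: because parsing now happens after the merge, a user-supplied thinking/thinkingBudget pair is validated against the same schema as the defaults, rather than being parsed earlier in buildOptions.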

View file

@@ -141,7 +141,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
     },
     clientOptions,
   );
-  const options = getLLMConfig(apiKey, clientOptions);
+  const options = getLLMConfig(apiKey, clientOptions, endpoint);
   if (!customOptions.streamRate) {
     return options;
   }

View file

@@ -5,12 +5,7 @@ const { isEnabled } = require('~/server/utils');
 const { GoogleClient } = require('~/app');
 const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
-  const {
-    GOOGLE_KEY,
-    GOOGLE_REVERSE_PROXY,
-    GOOGLE_AUTH_HEADER,
-    PROXY,
-  } = process.env;
+  const { GOOGLE_KEY, GOOGLE_REVERSE_PROXY, GOOGLE_AUTH_HEADER, PROXY } = process.env;
   const isUserProvided = GOOGLE_KEY === 'user_provided';
   const { key: expiresAt } = req.body;
@@ -43,6 +38,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
   if (googleConfig) {
     clientOptions.streamRate = googleConfig.streamRate;
+    clientOptions.titleModel = googleConfig.titleModel;
   }
   if (allConfig) {

View file

@@ -113,6 +113,7 @@ const initializeClient = async ({
   if (!isAzureOpenAI && openAIConfig) {
     clientOptions.streamRate = openAIConfig.streamRate;
+    clientOptions.titleModel = openAIConfig.titleModel;
   }
   /** @type {undefined | TBaseEndpoint} */
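This is the third occurrence of the same one-line fix (see the Anthropic and Google initializers above): a titleModel defined in an endpoint's custom config was previously dropped and is now forwarded to the client. The shared pattern, as a small sketch with assumed config values:

// Assumed config values for illustration; the real ones come from the custom config (librechat.yaml).
const endpointConfig = { streamRate: 35, titleModel: 'gpt-4o-mini' };
const clientOptions = {};
if (endpointConfig) {
  clientOptions.streamRate = endpointConfig.streamRate;
  clientOptions.titleModel = endpointConfig.titleModel; // previously omitted
}
console.log(clientOptions.titleModel); // 'gpt-4o-mini'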

View file

@@ -23,9 +23,10 @@ const { isEnabled } = require('~/server/utils');
  * @param {boolean} [options.streaming] - Whether to use streaming mode.
  * @param {Object} [options.addParams] - Additional parameters to add to the model options.
  * @param {string[]} [options.dropParams] - Parameters to remove from the model options.
+ * @param {string|null} [endpoint=null] - The endpoint name
  * @returns {Object} Configuration options for creating an LLM instance.
  */
-function getLLMConfig(apiKey, options = {}) {
+function getLLMConfig(apiKey, options = {}, endpoint = null) {
   const {
     modelOptions = {},
     reverseProxyUrl,
@@ -58,7 +59,10 @@ function getLLMConfig(apiKey, options = {}) {
   let useOpenRouter;
   /** @type {OpenAIClientOptions['configuration']} */
   const configOptions = {};
-  if (reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) {
+  if (
+    (reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) ||
+    (endpoint && endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
+  ) {
     useOpenRouter = true;
     llmConfig.include_reasoning = true;
     configOptions.baseURL = reverseProxyUrl;
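With the endpoint name threaded through, OpenRouter can be detected even when no matching reverse-proxy URL is configured. A minimal sketch of the new check, assuming KnownEndpoints.openrouter === 'openrouter':

// Assumed enum value; the real constant lives in librechat-data-provider.
const KnownEndpoints = { openrouter: 'openrouter' };

function isOpenRouter(reverseProxyUrl, endpoint) {
  return Boolean(
    (reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) ||
      (endpoint && endpoint.toLowerCase().includes(KnownEndpoints.openrouter)),
  );
}

console.log(isOpenRouter('https://openrouter.ai/api/v1', null)); // true (URL match, as before)
console.log(isOpenRouter(null, 'OpenRouter')); // true (new: endpoint-name match)
console.log(isOpenRouter(null, 'groq')); // false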