fix: update @librechat/agents dependency to version 3.0.0-rc6 in package.json and package-lock.json; refactor stream rate handling in various endpoints

Danny Avila 2025-09-04 02:00:39 -04:00
parent 6d91fa1fe5
commit 6e0e47d5dd
No known key found for this signature in database
GPG key ID: BF31EEB2C5CA0956
10 changed files with 21 additions and 54 deletions
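
The refactor replaces the per-token callback throttle with a delay option set directly on the LLM config. A minimal sketch of the before/after pattern applied across the endpoint initializers below (identifiers taken from the diff; the LlmConfig shape is simplified for illustration, and it is an assumption that the updated @librechat/agents reads _lc_stream_delay to pace the stream):

// Simplified sketch, not part of the diff: how stream rate handling changes.
type LlmConfig = {
  callbacks?: Array<{ handleLLMNewToken: () => Promise<void> }>;
  _lc_stream_delay?: number;
};

function applyStreamRate(llmConfig: LlmConfig, streamRate?: number): void {
  if (!streamRate) {
    return;
  }
  // Before: register a callback that slept between tokens (removed in this commit).
  // llmConfig.callbacks = [{ handleLLMNewToken: createHandleLLMNewToken(streamRate) }];
  // After: hand the delay to the client options so the agent runtime can throttle the stream.
  llmConfig._lc_stream_delay = streamRate;
}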

@@ -49,7 +49,7 @@
"@langchain/google-vertexai": "^0.2.13",
"@langchain/openai": "^0.5.18",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^3.0.0-rc4",
"@librechat/agents": "^3.0.0-rc6",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",

@@ -27,13 +27,13 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
const anthropicConfig = appConfig.endpoints?.[EModelEndpoint.anthropic];
if (anthropicConfig) {
clientOptions.streamRate = anthropicConfig.streamRate;
clientOptions._lc_stream_delay = anthropicConfig.streamRate;
clientOptions.titleModel = anthropicConfig.titleModel;
}
const allConfig = appConfig.endpoints?.all;
if (allConfig) {
clientOptions.streamRate = allConfig.streamRate;
clientOptions._lc_stream_delay = allConfig.streamRate;
}
if (optionsOnly) {

@@ -1,8 +1,6 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { createHandleLLMNewToken } = require('@librechat/api');
const {
AuthType,
Constants,
EModelEndpoint,
bedrockInputParser,
bedrockOutputParser,
@@ -11,7 +9,6 @@ const {
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const getOptions = async ({ req, overrideModel, endpointOption }) => {
const appConfig = req.config;
const {
BEDROCK_AWS_SECRET_ACCESS_KEY,
BEDROCK_AWS_ACCESS_KEY_ID,
@@ -47,10 +44,12 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
checkUserKeyExpiry(expiresAt, EModelEndpoint.bedrock);
}
/** @type {number} */
/*
Callback for stream rate no longer awaits and may end the stream prematurely
/** @type {number}
let streamRate = Constants.DEFAULT_STREAM_RATE;
/** @type {undefined | TBaseEndpoint} */
/** @type {undefined | TBaseEndpoint}
const bedrockConfig = appConfig.endpoints?.[EModelEndpoint.bedrock];
if (bedrockConfig && bedrockConfig.streamRate) {
@@ -61,6 +60,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
if (allConfig && allConfig.streamRate) {
streamRate = allConfig.streamRate;
}
*/
/** @type {BedrockClientOptions} */
const requestOptions = {
@@ -88,12 +88,6 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
llmConfig.endpointHost = BEDROCK_REVERSE_PROXY;
}
llmConfig.callbacks = [
{
handleLLMNewToken: createHandleLLMNewToken(streamRate),
},
];
return {
/** @type {BedrockClientOptions} */
llmConfig,
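
For context on the comment above ("Callback for stream rate no longer awaits and may end the stream prematurely"): the removed helper roughly amounted to a per-token sleep, which only throttles output if the streaming loop awaits the callback. A hedged approximation of the removed createHandleLLMNewToken (the real implementation lives in @librechat/api and may differ):

// Approximate shape of the removed helper (assumption, for illustration only).
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

function createHandleLLMNewToken(streamRate?: number) {
  return async function handleLLMNewToken(): Promise<void> {
    if (streamRate) {
      // The delay only takes effect if the caller awaits this callback per token.
      await sleep(streamRate);
    }
  };
}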

@@ -4,7 +4,6 @@ const {
isUserProvided,
getOpenAIConfig,
getCustomEndpointConfig,
createHandleLLMNewToken,
} = require('@librechat/api');
const {
CacheKeys,
@@ -159,11 +158,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
if (!clientOptions.streamRate) {
return options;
}
options.llmConfig.callbacks = [
{
handleLLMNewToken: createHandleLLMNewToken(clientOptions.streamRate),
},
];
options.llmConfig._lc_stream_delay = clientOptions.streamRate;
return options;
}

@@ -4,7 +4,6 @@ jest.mock('@librechat/api', () => ({
...jest.requireActual('@librechat/api'),
resolveHeaders: jest.fn(),
getOpenAIConfig: jest.fn(),
createHandleLLMNewToken: jest.fn(),
getCustomEndpointConfig: jest.fn().mockReturnValue({
apiKey: 'test-key',
baseURL: 'https://test.com',

@@ -5,7 +5,6 @@ const {
isUserProvided,
getOpenAIConfig,
getAzureCredentials,
createHandleLLMNewToken,
} = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
@@ -151,11 +150,7 @@ const initializeClient = async ({
if (!streamRate) {
return options;
}
options.llmConfig.callbacks = [
{
handleLLMNewToken: createHandleLLMNewToken(streamRate),
},
];
options.llmConfig._lc_stream_delay = streamRate;
return options;
}

package-lock.json (generated)

@@ -64,7 +64,7 @@
"@langchain/google-vertexai": "^0.2.13",
"@langchain/openai": "^0.5.18",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^3.0.0-rc4",
"@librechat/agents": "^3.0.0-rc6",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
@@ -21909,9 +21909,9 @@
}
},
"node_modules/@librechat/agents": {
"version": "3.0.0-rc4",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.0.0-rc4.tgz",
"integrity": "sha512-UNVL22a4ahaPLcUSvmC26vQSW+agZozYdBzTCXY7BOqofR513qLbnixLF4Ta1G6PEDmmhgQXV+PnL5CxDN8YnQ==",
"version": "3.0.0-rc6",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.0.0-rc6.tgz",
"integrity": "sha512-MAE+HdoRw/XKWIzhoYOUiJrPjN6xicOiLRlDarYAZe4JewLKV2MuBGhRJW9TCn0kwyvGJsMQkTX8xQIXZw7OuA==",
"license": "MIT",
"dependencies": {
"@langchain/anthropic": "^0.3.26",
@@ -51984,7 +51984,7 @@
},
"peerDependencies": {
"@langchain/core": "^0.3.72",
"@librechat/agents": "^3.0.0-rc4",
"@librechat/agents": "^3.0.0-rc6",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.17.1",
"axios": "^1.8.2",

@@ -74,7 +74,7 @@
},
"peerDependencies": {
"@langchain/core": "^0.3.72",
"@librechat/agents": "^3.0.0-rc4",
"@librechat/agents": "^3.0.0-rc6",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.17.1",
"axios": "^1.8.2",

@@ -1,11 +1,10 @@
import { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } from 'librechat-data-provider';
import type {
InitializeOpenAIOptionsParams,
OpenAIOptionsResult,
OpenAIConfigOptions,
LLMConfigResult,
UserKeyValues,
} from '~/types';
import { createHandleLLMNewToken } from '~/utils/generators';
import { getAzureCredentials } from '~/utils/azure';
import { isUserProvided } from '~/utils/common';
import { resolveHeaders } from '~/utils/env';
@@ -27,7 +26,7 @@ export const initializeOpenAI = async ({
overrideEndpoint,
getUserKeyValues,
checkUserKeyExpiry,
}: InitializeOpenAIOptionsParams): Promise<OpenAIOptionsResult> => {
}: InitializeOpenAIOptionsParams): Promise<LLMConfigResult> => {
const { PROXY, OPENAI_API_KEY, AZURE_API_KEY, OPENAI_REVERSE_PROXY, AZURE_OPENAI_BASEURL } =
process.env;
@@ -160,17 +159,8 @@
}
if (streamRate) {
options.llmConfig.callbacks = [
{
handleLLMNewToken: createHandleLLMNewToken(streamRate),
},
];
options.llmConfig._lc_stream_delay = streamRate;
}
const result: OpenAIOptionsResult = {
...options,
streamRate,
};
return result;
return options;
};
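
With the OpenAIOptionsResult wrapper removed (see the types diff below), the delay is no longer surfaced as a separate streamRate field on the result; a hypothetical consumer would read it off the returned config instead:

// Hypothetical consumer, for illustration only; the actual call site is not shown in this diff.
declare const result: { llmConfig: { _lc_stream_delay?: number } };
const delayMs = result.llmConfig._lc_stream_delay ?? 0; // previously: result.streamRate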

@@ -28,6 +28,7 @@ export type OpenAIConfiguration = OpenAIClientOptions['configuration'];
export type ClientOptions = OpenAIClientOptions & {
include_reasoning?: boolean;
_lc_stream_delay?: number;
};
/**
@@ -94,10 +95,3 @@ export interface InitializeOpenAIOptionsParams {
getUserKeyValues: GetUserKeyValuesFunction;
checkUserKeyExpiry: CheckUserKeyExpiryFunction;
}
/**
* Extended LLM config result with stream rate handling
*/
export interface OpenAIOptionsResult extends LLMConfigResult {
streamRate?: number;
}