diff --git a/api/package.json b/api/package.json
index e77b9fc48a..45d8e6b5fb 100644
--- a/api/package.json
+++ b/api/package.json
@@ -48,7 +48,7 @@
     "@langchain/google-genai": "^0.2.13",
     "@langchain/google-vertexai": "^0.2.13",
     "@langchain/textsplitters": "^0.1.0",
-    "@librechat/agents": "^2.4.89",
+    "@librechat/agents": "^2.4.90",
     "@librechat/api": "*",
     "@librechat/data-schemas": "*",
     "@microsoft/microsoft-graph-client": "^3.0.7",
diff --git a/api/server/services/Endpoints/openAI/initialize.js b/api/server/services/Endpoints/openAI/initialize.js
index 391b194abe..ab2e80640a 100644
--- a/api/server/services/Endpoints/openAI/initialize.js
+++ b/api/server/services/Endpoints/openAI/initialize.js
@@ -143,7 +143,7 @@ const initializeClient = async ({
   modelOptions.model = modelName;
   clientOptions = Object.assign({ modelOptions }, clientOptions);
   clientOptions.modelOptions.user = req.user.id;
-  const options = getOpenAIConfig(apiKey, clientOptions);
+  const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
   if (options != null && serverless === true) {
     options.useLegacyContent = true;
   }
diff --git a/package-lock.json b/package-lock.json
index 5d58cb1a64..b49fdfc2eb 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -64,7 +64,7 @@
         "@langchain/google-genai": "^0.2.13",
         "@langchain/google-vertexai": "^0.2.13",
         "@langchain/textsplitters": "^0.1.0",
-        "@librechat/agents": "^2.4.89",
+        "@librechat/agents": "^2.4.90",
         "@librechat/api": "*",
         "@librechat/data-schemas": "*",
         "@microsoft/microsoft-graph-client": "^3.0.7",
@@ -21690,9 +21690,9 @@
       }
     },
     "node_modules/@librechat/agents": {
-      "version": "2.4.89",
-      "resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.89.tgz",
-      "integrity": "sha512-QMqaNkkfcDHI8mpaqpgdb1Zz3KT3uVLaaU4NCeXafNH6JvGdG4ueORQH2dM8xtVm3+5DEXwauTdSAi5gHV5tJQ==",
+      "version": "2.4.90",
+      "resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.90.tgz",
+      "integrity": "sha512-CZI0K0NjIO1mvw4f4tAAMN8x4fFXFyZyrP+NLTCHDvdrIpyTY2k9qRwQHneCGnMgltuUQ+53eXbS3s9psjsAOA==",
       "license": "MIT",
       "dependencies": {
         "@langchain/anthropic": "^0.3.26",
@@ -51984,7 +51984,7 @@
     },
     "packages/api": {
       "name": "@librechat/api",
-      "version": "1.4.1",
+      "version": "1.5.0",
       "license": "ISC",
       "devDependencies": {
         "@babel/preset-env": "^7.21.5",
@@ -52023,7 +52023,7 @@
         "@azure/storage-blob": "^12.27.0",
         "@keyv/redis": "^4.3.3",
         "@langchain/core": "^0.3.62",
-        "@librechat/agents": "^2.4.89",
+        "@librechat/agents": "^2.4.90",
         "@librechat/data-schemas": "*",
         "@modelcontextprotocol/sdk": "^1.17.1",
         "axios": "^1.12.1",
diff --git a/packages/api/package.json b/packages/api/package.json
index 530310e746..7db4a54a21 100644
--- a/packages/api/package.json
+++ b/packages/api/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@librechat/api",
-  "version": "1.4.1",
+  "version": "1.5.0",
   "type": "commonjs",
   "description": "MCP services for LibreChat",
   "main": "dist/index.js",
@@ -80,7 +80,7 @@
     "@azure/storage-blob": "^12.27.0",
     "@keyv/redis": "^4.3.3",
     "@langchain/core": "^0.3.62",
-    "@librechat/agents": "^2.4.89",
+    "@librechat/agents": "^2.4.90",
     "@librechat/data-schemas": "*",
     "@modelcontextprotocol/sdk": "^1.17.1",
     "axios": "^1.12.1",
diff --git a/packages/api/src/endpoints/openai/config.spec.ts b/packages/api/src/endpoints/openai/config.spec.ts
index fa718f1043..8a8e3be07b 100644
--- a/packages/api/src/endpoints/openai/config.spec.ts
+++ b/packages/api/src/endpoints/openai/config.spec.ts
@@ -1,4 +1,9 @@
-import { Verbosity, ReasoningEffort, ReasoningSummary } from 'librechat-data-provider';
+import {
+  Verbosity,
+  EModelEndpoint,
+  ReasoningEffort,
+  ReasoningSummary,
+} from 'librechat-data-provider';
 import type { RequestInit } from 'undici';
 import type { OpenAIParameters, AzureOptions } from '~/types';
 import { getOpenAIConfig } from './config';
@@ -103,12 +108,89 @@ describe('getOpenAIConfig', () => {
     const result = getOpenAIConfig(mockApiKey, { modelOptions });

+    /** When no endpoint is specified, it's treated as non-openAI/azureOpenAI, so uses reasoning object */
+    expect(result.llmConfig.reasoning).toEqual({
+      effort: ReasoningEffort.high,
+      summary: ReasoningSummary.detailed,
+    });
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBeUndefined();
+  });
+
+  it('should use reasoning_effort for openAI endpoint without useResponsesApi', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, EModelEndpoint.openAI);
+
     expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBe(
       ReasoningEffort.high,
     );
     expect(result.llmConfig.reasoning).toBeUndefined();
   });

+  it('should use reasoning_effort for azureOpenAI endpoint without useResponsesApi', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, EModelEndpoint.azureOpenAI);
+
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBe(
+      ReasoningEffort.high,
+    );
+    expect(result.llmConfig.reasoning).toBeUndefined();
+  });
+
+  it('should use reasoning object for openAI endpoint with useResponsesApi=true', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+      useResponsesApi: true,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, EModelEndpoint.openAI);
+
+    expect(result.llmConfig.reasoning).toEqual({
+      effort: ReasoningEffort.high,
+      summary: ReasoningSummary.detailed,
+    });
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBeUndefined();
+  });
+
+  it('should use reasoning object for azureOpenAI endpoint with useResponsesApi=true', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+      useResponsesApi: true,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, EModelEndpoint.azureOpenAI);
+
+    expect(result.llmConfig.reasoning).toEqual({
+      effort: ReasoningEffort.high,
+      summary: ReasoningSummary.detailed,
+    });
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBeUndefined();
+  });
+
+  it('should use reasoning object for non-openAI/azureOpenAI endpoints', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, 'custom-endpoint');
+
+    expect(result.llmConfig.reasoning).toEqual({
+      effort: ReasoningEffort.high,
+      summary: ReasoningSummary.detailed,
+    });
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBeUndefined();
+  });
+
   it('should handle OpenRouter configuration', () => {
     const reverseProxyUrl = 'https://openrouter.ai/api/v1';
diff --git a/packages/api/src/endpoints/openai/config.ts b/packages/api/src/endpoints/openai/config.ts
index 52c54ebaa6..d137735552 100644
--- a/packages/api/src/endpoints/openai/config.ts
+++ b/packages/api/src/endpoints/openai/config.ts
@@ -68,6 +68,7 @@ export function getOpenAIConfig(
     azure,
     apiKey,
     baseURL,
+    endpoint,
     streaming,
     addParams,
     dropParams,
diff --git a/packages/api/src/endpoints/openai/llm.ts b/packages/api/src/endpoints/openai/llm.ts
index bcc082c47d..4ce896e291 100644
--- a/packages/api/src/endpoints/openai/llm.ts
+++ b/packages/api/src/endpoints/openai/llm.ts
@@ -1,4 +1,4 @@
-import { removeNullishValues } from 'librechat-data-provider';
+import { EModelEndpoint, removeNullishValues } from 'librechat-data-provider';
 import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
 import type { AzureOpenAIInput } from '@langchain/openai';
 import type { OpenAI } from 'openai';
@@ -79,6 +79,7 @@ export function getOpenAILLMConfig({
   azure,
   apiKey,
   baseURL,
+  endpoint,
   streaming,
   addParams,
   dropParams,
@@ -88,6 +89,7 @@
   apiKey: string;
   streaming: boolean;
   baseURL?: string | null;
+  endpoint?: EModelEndpoint | string | null;
   modelOptions: Partial<OpenAIParameters>;
   addParams?: Record<string, unknown>;
   dropParams?: string[];
@@ -155,7 +157,8 @@
   if (
     hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
-    (llmConfig.useResponsesApi === true || useOpenRouter)
+    (llmConfig.useResponsesApi === true ||
+      (endpoint !== EModelEndpoint.openAI && endpoint !== EModelEndpoint.azureOpenAI))
   ) {
     llmConfig.reasoning = removeNullishValues(
       {
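
Summary of the behavioral change, as a minimal TypeScript sketch rather than LibreChat's actual code: the `endpoint` argument is now threaded from `initialize.js` through `getOpenAIConfig` into `getOpenAILLMConfig`, and reasoning params are nested under a `reasoning` object whenever the Responses API is enabled or the endpoint is anything other than openAI/azureOpenAI; otherwise the flat Chat Completions `reasoning_effort` field is kept. The names `applyReasoningConfig` and `LLMConfig`, and the bare string comparisons standing in for `EModelEndpoint` members, `hasReasoningParams`, and `removeNullishValues`, are illustrative simplifications of the `llm.ts` hunk above.

// Illustrative stand-ins; the real types live in librechat-data-provider / @librechat/api.
interface LLMConfig {
  useResponsesApi?: boolean;
  reasoning_effort?: string;
  reasoning?: { effort?: string; summary?: string };
}

function applyReasoningConfig(
  llmConfig: LLMConfig,
  endpoint: string | null | undefined,
  reasoning_effort?: string,
  reasoning_summary?: string,
): LLMConfig {
  // Stands in for hasReasoningParams: at least one reasoning param is set.
  const hasReasoningParams = reasoning_effort != null || reasoning_summary != null;
  // Only the official openAI/azureOpenAI endpoints keep the flat parameter.
  const isOfficialOpenAI = endpoint === 'openAI' || endpoint === 'azureOpenAI';

  if (hasReasoningParams && (llmConfig.useResponsesApi === true || !isOfficialOpenAI)) {
    // Responses API, or a custom endpoint: nest the params under `reasoning`
    // (removeNullishValues in the real code drops unset fields the same way).
    llmConfig.reasoning = {
      ...(reasoning_effort != null && { effort: reasoning_effort }),
      ...(reasoning_summary != null && { summary: reasoning_summary }),
    };
  } else if (reasoning_effort != null) {
    // Chat Completions on openAI/azureOpenAI: keep the flat field.
    llmConfig.reasoning_effort = reasoning_effort;
  }
  return llmConfig;
}

// Matching the new config.spec.ts cases:
//   applyReasoningConfig({}, 'openAI', 'high', 'detailed')
//     -> { reasoning_effort: 'high' }
//   applyReasoningConfig({ useResponsesApi: true }, 'openAI', 'high', 'detailed')
//     -> { useResponsesApi: true, reasoning: { effort: 'high', summary: 'detailed' } }
//   applyReasoningConfig({}, 'custom-endpoint', 'high', 'detailed')
//     -> { reasoning: { effort: 'high', summary: 'detailed' } }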