🧠 fix: Agent Title Config & Resource Handling (#8028)

* 🔧 fix: enhance client options handling in AgentClient and set default recursion limit

- Updated the recursion limit to default to 25 if not specified in `agentsEConfig`.
- Enhanced client options in `AgentClient` to include model parameters such as `apiKey` and `anthropicApiUrl` from `agentModelParams`.
- Updated `requestOptions` in the anthropic endpoint to use `reverseProxyUrl` as `anthropicApiUrl`.
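
A minimal sketch of the combined effect; the config objects here are hypothetical stand-ins for what LibreChat resolves at runtime:

```js
// Illustrative only: `agentsEConfig` and `agentModelParams` mirror the
// names used in this PR, with stand-in values.
const agentsEConfig = {}; // recursionLimit left unset here
const agentModelParams = {
  apiKey: 'test-key',
  reverseProxyUrl: 'https://anthropic-proxy.example.com',
};

const recursionLimit = agentsEConfig?.recursionLimit ?? 25; // now defaults to 25

// The reverse proxy URL is forwarded as `anthropicApiUrl`:
const clientOptions = {
  apiKey: agentModelParams.apiKey,
  anthropicApiUrl: agentModelParams.reverseProxyUrl,
};
console.log(recursionLimit, clientOptions);
```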

* Enhance LLM configuration tests with edge case handling

* chore: add return type annotation for the `getCustomEndpointConfig` function

* fix: update modelOptions handling to use optional chaining and default to empty object in multiple endpoint initializations
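
The pattern in isolation:

```js
// Before: endpointOption.model_parameters — throws when endpointOption is undefined.
// After: optional chaining with an empty-object fallback.
const endpointOption = undefined; // e.g. a title-only initialization may omit it
const modelOptions = endpointOption?.model_parameters ?? {};
console.log(modelOptions); // {}
```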

* chore: update @librechat/agents to version 2.4.42

* refactor: streamline agent endpoint configuration and enhance client options handling for title generation

- Introduced a new `getProviderConfig` function to centralize provider configuration logic.
- Updated `AgentClient` to utilize the new provider configuration, improving clarity and maintainability.
- Removed redundant code related to endpoint initialization and model parameter handling.
- Enhanced error logging for missing endpoint configurations.
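
A usage sketch of the new function (the `resolveProvider` wrapper is illustrative, not part of the PR; the module itself is added in the diffs below):

```js
const { getProviderConfig } = require('~/server/services/Endpoints');

async function resolveProvider(endpoint) {
  // Known providers map to their initializer; unknown ones fall back to
  // the custom-endpoint path and override the provider to OpenAI.
  const { getOptions, overrideProvider, customEndpointConfig } =
    await getProviderConfig(endpoint);
  return {
    getOptions,
    provider: overrideProvider ?? endpoint,
    customEndpointConfig,
  };
}
```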

* fix: add abort handling for image generation and editing in OpenAIImageTools
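
The listener lifecycle, condensed into a hypothetical helper (`withAbortLogging` is illustrative; the PR inlines this in each tool call):

```js
function createAbortHandler() {
  return function () {
    console.debug('[ImageGenOAI] Image generation aborted');
  };
}

async function withAbortLogging(runnableConfig, work) {
  let derivedSignal = null;
  let abortHandler = null;
  try {
    if (runnableConfig?.signal) {
      // Derive a signal (Node 20.3+), log on abort, fire at most once.
      derivedSignal = AbortSignal.any([runnableConfig.signal]);
      abortHandler = createAbortHandler();
      derivedSignal.addEventListener('abort', abortHandler, { once: true });
    }
    return await work(derivedSignal);
  } finally {
    // Always detach the listener so completed runs do not leak handlers.
    if (abortHandler && derivedSignal) {
      derivedSignal.removeEventListener('abort', abortHandler);
    }
  }
}
```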

* ci: enhance getLLMConfig tests to verify fetchOptions and dispatcher properties

* fix: use optional chaining for endpointOption properties in getOptions

* fix: increase title generation timeout from 25s to 45s, pass `endpointOption` to `getOptions`
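
The timeout promise, sketched from the diff below; presumably raced against the title call and cleared once it settles:

```js
let timeoutId;
const timeoutPromise = new Promise((_, reject) => {
  timeoutId = setTimeout(() => reject(new Error('Title generation timeout')), 45000);
}).catch((error) => {
  console.error('Title error:', error);
});
// e.g. await Promise.race([titleCall, timeoutPromise]); clearTimeout(timeoutId);
```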

* fix: update file filtering logic in `getToolFilesByIds` to ensure the `text` field is properly checked

* fix: add error handling for empty OCR results in uploadMistralOCR and uploadAzureMistralOCR

* fix: enhance error handling in file upload to include 'No OCR result' message

* chore: update error messages in uploadMistralOCR and uploadAzureMistralOCR
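
The guard, simplified into a hypothetical shared helper (`assertOcrResult` is an illustrative name; the PR inlines the check in both upload functions):

```js
function assertOcrResult(ocrResult) {
  if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
    throw new Error(
      'No OCR result returned from service, may be down or the file is not supported.',
    );
  }
}
// The upload route then surfaces this directly:
// error.message?.includes('No OCR result') → message = error.message
```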

* fix: enhance filtering logic in `getToolFilesByIds` with context checks so OCR resources only include files directly attached to the agent (sketched below)
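
The resulting filter logic, sketched as a standalone function (`buildToolFileFilter` is an illustrative extraction of the inline logic shown in the diff below):

```js
const { EToolResources, FileContext } = require('librechat-data-provider');

function buildToolFileFilter(fileIds, toolResourceSet) {
  if (!fileIds || !fileIds.length || !toolResourceSet?.size) {
    return null; // getToolFilesByIds returns [] in this case
  }
  const filter = { file_id: { $in: fileIds }, $or: [] };
  if (toolResourceSet.has(EToolResources.ocr)) {
    // OCR files must have text AND be directly attached to the agent
    filter.$or.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
  }
  if (toolResourceSet.has(EToolResources.file_search)) {
    filter.$or.push({ embedded: true });
  }
  return filter;
}
```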

---------

Co-authored-by: Matt Burnett <matt.burnett@shopify.com>
Danny Avila authored 2025-06-23 19:44:24 -04:00, committed by GitHub
parent 1b7e044bf5, commit d39b99971f
18 changed files with 415 additions and 91 deletions

View file

@ -107,6 +107,12 @@ const getImageEditPromptDescription = () => {
return process.env.IMAGE_EDIT_OAI_PROMPT_DESCRIPTION || DEFAULT_IMAGE_EDIT_PROMPT_DESCRIPTION;
};
function createAbortHandler() {
return function () {
logger.debug('[ImageGenOAI] Image generation aborted');
};
}
/**
* Creates OpenAI Image tools (generation and editing)
* @param {Object} fields - Configuration fields
@ -201,10 +207,18 @@ function createOpenAIImageTools(fields = {}) {
}
let resp;
/** @type {AbortSignal} */
let derivedSignal = null;
/** @type {() => void} */
let abortHandler = null;
try {
const derivedSignal = runnableConfig?.signal
? AbortSignal.any([runnableConfig.signal])
: undefined;
if (runnableConfig?.signal) {
derivedSignal = AbortSignal.any([runnableConfig.signal]);
abortHandler = createAbortHandler();
derivedSignal.addEventListener('abort', abortHandler, { once: true });
}
resp = await openai.images.generate(
{
model: 'gpt-image-1',
@ -228,6 +242,10 @@ function createOpenAIImageTools(fields = {}) {
logAxiosError({ error, message });
return returnValue(`Something went wrong when trying to generate the image. The OpenAI API may be unavailable:
Error Message: ${error.message}`);
} finally {
if (abortHandler && derivedSignal) {
derivedSignal.removeEventListener('abort', abortHandler);
}
}
if (!resp) {
@ -409,10 +427,17 @@ Error Message: ${error.message}`);
headers['Authorization'] = `Bearer ${apiKey}`;
}
/** @type {AbortSignal} */
let derivedSignal = null;
/** @type {() => void} */
let abortHandler = null;
try {
const derivedSignal = runnableConfig?.signal
? AbortSignal.any([runnableConfig.signal])
: undefined;
if (runnableConfig?.signal) {
derivedSignal = AbortSignal.any([runnableConfig.signal]);
abortHandler = createAbortHandler();
derivedSignal.addEventListener('abort', abortHandler, { once: true });
}
/** @type {import('axios').AxiosRequestConfig} */
const axiosConfig = {
@ -467,6 +492,10 @@ Error Message: ${error.message}`);
logAxiosError({ error, message });
return returnValue(`Something went wrong when trying to edit the image. The OpenAI API may be unavailable:
Error Message: ${error.message || 'Unknown error'}`);
} finally {
if (abortHandler && derivedSignal) {
derivedSignal.removeEventListener('abort', abortHandler);
}
}
},
{

View file

@ -1,5 +1,5 @@
const { logger } = require('@librechat/data-schemas');
const { EToolResources } = require('librechat-data-provider');
const { EToolResources, FileContext } = require('librechat-data-provider');
const { File } = require('~/db/models');
/**
@ -32,19 +32,19 @@ const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }) => {
* @returns {Promise<Array<MongoFile>>} Files that match the criteria
*/
const getToolFilesByIds = async (fileIds, toolResourceSet) => {
if (!fileIds || !fileIds.length) {
if (!fileIds || !fileIds.length || !toolResourceSet?.size) {
return [];
}
try {
const filter = {
file_id: { $in: fileIds },
$or: [],
};
if (toolResourceSet.size) {
filter.$or = [];
if (toolResourceSet.has(EToolResources.ocr)) {
filter.$or.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
}
if (toolResourceSet.has(EToolResources.file_search)) {
filter.$or.push({ embedded: true });
}

View file

@ -48,7 +48,7 @@
"@langchain/google-genai": "^0.2.13",
"@langchain/google-vertexai": "^0.2.13",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.41",
"@librechat/agents": "^2.4.42",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@node-saml/passport-saml": "^5.0.0",

View file

@ -9,6 +9,7 @@ const {
} = require('@librechat/api');
const {
Callback,
Providers,
GraphEvents,
formatMessage,
formatAgentMessages,
@ -31,17 +32,13 @@ const {
} = require('librechat-data-provider');
const { DynamicStructuredTool } = require('@langchain/core/tools');
const { getBufferString, HumanMessage } = require('@langchain/core/messages');
const {
getCustomEndpointConfig,
createGetMCPAuthMap,
checkCapability,
} = require('~/server/services/Config');
const { createGetMCPAuthMap, checkCapability } = require('~/server/services/Config');
const { addCacheControl, createContextHandlers } = require('~/app/clients/prompts');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { getFormattedMemories, deleteMemory, setMemory } = require('~/models');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const { getProviderConfig } = require('~/server/services/Endpoints');
const { checkAccess } = require('~/server/middleware/roles/access');
const BaseClient = require('~/app/clients/BaseClient');
const { loadAgent } = require('~/models/Agent');
@ -677,7 +674,7 @@ class AgentClient extends BaseClient {
hide_sequential_outputs: this.options.agent.hide_sequential_outputs,
user: this.options.req.user,
},
recursionLimit: agentsEConfig?.recursionLimit,
recursionLimit: agentsEConfig?.recursionLimit ?? 25,
signal: abortController.signal,
streamMode: 'values',
version: 'v2',
@ -983,23 +980,26 @@ class AgentClient extends BaseClient {
throw new Error('Run not initialized');
}
const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator();
const endpoint = this.options.agent.endpoint;
const { req, res } = this.options;
const { req, res, agent } = this.options;
const endpoint = agent.endpoint;
/** @type {import('@librechat/agents').ClientOptions} */
let clientOptions = {
maxTokens: 75,
model: agent.model_parameters.model,
};
let endpointConfig = req.app.locals[endpoint];
const { getOptions, overrideProvider, customEndpointConfig } =
await getProviderConfig(endpoint);
/** @type {TEndpoint | undefined} */
const endpointConfig = req.app.locals[endpoint] ?? customEndpointConfig;
if (!endpointConfig) {
try {
endpointConfig = await getCustomEndpointConfig(endpoint);
} catch (err) {
logger.error(
'[api/server/controllers/agents/client.js #titleConvo] Error getting custom endpoint config',
err,
);
}
logger.warn(
'[api/server/controllers/agents/client.js #titleConvo] Error getting endpoint config',
);
}
if (
endpointConfig &&
endpointConfig.titleModel &&
@ -1007,30 +1007,40 @@ class AgentClient extends BaseClient {
) {
clientOptions.model = endpointConfig.titleModel;
}
const options = await getOptions({
req,
res,
optionsOnly: true,
overrideEndpoint: endpoint,
overrideModel: clientOptions.model,
endpointOption: { model_parameters: clientOptions },
});
let provider = options.provider ?? overrideProvider ?? agent.provider;
if (
endpoint === EModelEndpoint.azureOpenAI &&
clientOptions.model &&
this.options.agent.model_parameters.model !== clientOptions.model
options.llmConfig?.azureOpenAIApiInstanceName == null
) {
clientOptions =
(
await initOpenAI({
req,
res,
optionsOnly: true,
overrideModel: clientOptions.model,
overrideEndpoint: endpoint,
endpointOption: {
model_parameters: clientOptions,
},
})
)?.llmConfig ?? clientOptions;
provider = Providers.OPENAI;
}
if (/\b(o\d)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
/** @type {import('@librechat/agents').ClientOptions} */
clientOptions = { ...options.llmConfig };
if (options.configOptions) {
clientOptions.configuration = options.configOptions;
}
// Ensure maxTokens is set for non-o1 models
if (!/\b(o\d)\b/i.test(clientOptions.model) && !clientOptions.maxTokens) {
clientOptions.maxTokens = 75;
} else if (/\b(o\d)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
delete clientOptions.maxTokens;
}
try {
const titleResult = await this.run.generateTitle({
provider,
inputText: text,
contentParts: this.contentParts,
clientOptions,
@ -1048,8 +1058,10 @@ class AgentClient extends BaseClient {
let input_tokens, output_tokens;
if (item.usage) {
input_tokens = item.usage.input_tokens || item.usage.inputTokens;
output_tokens = item.usage.output_tokens || item.usage.outputTokens;
input_tokens =
item.usage.prompt_tokens || item.usage.input_tokens || item.usage.inputTokens;
output_tokens =
item.usage.completion_tokens || item.usage.output_tokens || item.usage.outputTokens;
} else if (item.tokenUsage) {
input_tokens = item.tokenUsage.promptTokens;
output_tokens = item.tokenUsage.completionTokens;

View file

@ -283,7 +283,10 @@ router.post('/', async (req, res) => {
message += ': ' + error.message;
}
if (error.message?.includes('Invalid file format')) {
if (
error.message?.includes('Invalid file format') ||
error.message?.includes('No OCR result')
) {
message = error.message;
}

View file

@ -40,6 +40,7 @@ async function getBalanceConfig() {
/**
*
* @param {string | EModelEndpoint} endpoint
* @returns {Promise<TEndpoint | undefined>}
*/
const getCustomEndpointConfig = async (endpoint) => {
const customConfig = await getCustomConfig();

View file

@ -11,30 +11,13 @@ const {
replaceSpecialVars,
providerEndpointMap,
} = require('librechat-data-provider');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initialize');
const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const initCustom = require('~/server/services/Endpoints/custom/initialize');
const initGoogle = require('~/server/services/Endpoints/google/initialize');
const { getProviderConfig } = require('~/server/services/Endpoints');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { processFiles } = require('~/server/services/Files/process');
const { getFiles, getToolFilesByIds } = require('~/models/File');
const { getConvoFiles } = require('~/models/Conversation');
const { getModelMaxTokens } = require('~/utils');
const providerConfigMap = {
[Providers.XAI]: initCustom,
[Providers.OLLAMA]: initCustom,
[Providers.DEEPSEEK]: initCustom,
[Providers.OPENROUTER]: initCustom,
[EModelEndpoint.openAI]: initOpenAI,
[EModelEndpoint.google]: initGoogle,
[EModelEndpoint.azureOpenAI]: initOpenAI,
[EModelEndpoint.anthropic]: initAnthropic,
[EModelEndpoint.bedrock]: getBedrockOptions,
};
/**
* @param {object} params
* @param {ServerRequest} params.req
@ -114,17 +97,9 @@ const initializeAgent = async ({
})) ?? {};
agent.endpoint = provider;
let getOptions = providerConfigMap[provider];
if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
agent.provider = provider.toLowerCase();
getOptions = providerConfigMap[agent.provider];
} else if (!getOptions) {
const customEndpointConfig = await getCustomEndpointConfig(provider);
if (!customEndpointConfig) {
throw new Error(`Provider ${provider} not supported`);
}
getOptions = initCustom;
agent.provider = Providers.OPENAI;
const { getOptions, overrideProvider } = await getProviderConfig(provider);
if (overrideProvider) {
agent.provider = overrideProvider;
}
const _endpointOption =

View file

@ -23,7 +23,7 @@ const addTitle = async (req, { text, response, client }) => {
let timeoutId;
try {
const timeoutPromise = new Promise((_, reject) => {
timeoutId = setTimeout(() => reject(new Error('Title generation timeout')), 25000);
timeoutId = setTimeout(() => reject(new Error('Title generation timeout')), 45000);
}).catch((error) => {
logger.error('Title error:', error);
});

View file

@ -41,7 +41,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
{
reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
proxy: PROXY ?? null,
modelOptions: endpointOption.model_parameters,
modelOptions: endpointOption?.model_parameters ?? {},
},
clientOptions,
);

View file

@ -75,6 +75,7 @@ function getLLMConfig(apiKey, options = {}) {
if (options.reverseProxyUrl) {
requestOptions.clientOptions.baseURL = options.reverseProxyUrl;
requestOptions.anthropicApiUrl = options.reverseProxyUrl;
}
return {

View file

@ -1,11 +1,45 @@
const { anthropicSettings } = require('librechat-data-provider');
const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');
const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');
jest.mock('https-proxy-agent', () => ({
HttpsProxyAgent: jest.fn().mockImplementation((proxy) => ({ proxy })),
}));
jest.mock('./helpers', () => ({
checkPromptCacheSupport: jest.fn(),
getClaudeHeaders: jest.fn(),
configureReasoning: jest.fn((requestOptions) => requestOptions),
}));
jest.mock('librechat-data-provider', () => ({
anthropicSettings: {
model: { default: 'claude-3-opus-20240229' },
maxOutputTokens: { default: 4096, reset: jest.fn(() => 4096) },
thinking: { default: false },
promptCache: { default: false },
thinkingBudget: { default: null },
},
removeNullishValues: jest.fn((obj) => {
const result = {};
for (const key in obj) {
if (obj[key] !== null && obj[key] !== undefined) {
result[key] = obj[key];
}
}
return result;
}),
}));
describe('getLLMConfig', () => {
beforeEach(() => {
jest.clearAllMocks();
checkPromptCacheSupport.mockReturnValue(false);
getClaudeHeaders.mockReturnValue(undefined);
configureReasoning.mockImplementation((requestOptions) => requestOptions);
anthropicSettings.maxOutputTokens.reset.mockReturnValue(4096);
});
it('should create a basic configuration with default values', () => {
const result = getLLMConfig('test-api-key', { modelOptions: {} });
@ -36,6 +70,7 @@ describe('getLLMConfig', () => {
});
expect(result.llmConfig.clientOptions).toHaveProperty('baseURL', 'http://reverse-proxy');
expect(result.llmConfig).toHaveProperty('anthropicApiUrl', 'http://reverse-proxy');
});
it('should include topK and topP for non-Claude-3.7 models', () => {
@ -65,6 +100,11 @@ describe('getLLMConfig', () => {
});
it('should NOT include topK and topP for Claude-3-7 models (hyphen notation)', () => {
configureReasoning.mockImplementation((requestOptions) => {
requestOptions.thinking = { type: 'enabled' };
return requestOptions;
});
const result = getLLMConfig('test-api-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
@ -78,6 +118,11 @@ describe('getLLMConfig', () => {
});
it('should NOT include topK and topP for Claude-3.7 models (decimal notation)', () => {
configureReasoning.mockImplementation((requestOptions) => {
requestOptions.thinking = { type: 'enabled' };
return requestOptions;
});
const result = getLLMConfig('test-api-key', {
modelOptions: {
model: 'claude-3.7-sonnet',
@ -154,4 +199,160 @@ describe('getLLMConfig', () => {
expect(result3.llmConfig).toHaveProperty('topK', 10);
expect(result3.llmConfig).toHaveProperty('topP', 0.9);
});
describe('Edge cases', () => {
it('should handle missing apiKey', () => {
const result = getLLMConfig(undefined, { modelOptions: {} });
expect(result.llmConfig).not.toHaveProperty('apiKey');
});
it('should handle empty modelOptions', () => {
expect(() => {
getLLMConfig('test-api-key', {});
}).toThrow("Cannot read properties of undefined (reading 'thinking')");
});
it('should handle no options parameter', () => {
expect(() => {
getLLMConfig('test-api-key');
}).toThrow("Cannot read properties of undefined (reading 'thinking')");
});
it('should handle temperature, stop sequences, and stream settings', () => {
const result = getLLMConfig('test-api-key', {
modelOptions: {
temperature: 0.7,
stop: ['\n\n', 'END'],
stream: false,
},
});
expect(result.llmConfig).toHaveProperty('temperature', 0.7);
expect(result.llmConfig).toHaveProperty('stopSequences', ['\n\n', 'END']);
expect(result.llmConfig).toHaveProperty('stream', false);
});
it('should handle maxOutputTokens when explicitly set to falsy value', () => {
anthropicSettings.maxOutputTokens.reset.mockReturnValue(8192);
const result = getLLMConfig('test-api-key', {
modelOptions: {
model: 'claude-3-opus',
maxOutputTokens: null,
},
});
expect(anthropicSettings.maxOutputTokens.reset).toHaveBeenCalledWith('claude-3-opus');
expect(result.llmConfig).toHaveProperty('maxTokens', 8192);
});
it('should handle both proxy and reverseProxyUrl', () => {
const result = getLLMConfig('test-api-key', {
modelOptions: {},
proxy: 'http://proxy:8080',
reverseProxyUrl: 'https://reverse-proxy.com',
});
expect(result.llmConfig.clientOptions).toHaveProperty('fetchOptions');
expect(result.llmConfig.clientOptions.fetchOptions).toHaveProperty('dispatcher');
expect(result.llmConfig.clientOptions.fetchOptions.dispatcher).toBeDefined();
expect(result.llmConfig.clientOptions.fetchOptions.dispatcher.constructor.name).toBe(
'ProxyAgent',
);
expect(result.llmConfig.clientOptions).toHaveProperty('baseURL', 'https://reverse-proxy.com');
expect(result.llmConfig).toHaveProperty('anthropicApiUrl', 'https://reverse-proxy.com');
});
it('should handle prompt cache with supported model', () => {
checkPromptCacheSupport.mockReturnValue(true);
getClaudeHeaders.mockReturnValue({ 'anthropic-beta': 'prompt-caching-2024-07-31' });
const result = getLLMConfig('test-api-key', {
modelOptions: {
model: 'claude-3-5-sonnet',
promptCache: true,
},
});
expect(checkPromptCacheSupport).toHaveBeenCalledWith('claude-3-5-sonnet');
expect(getClaudeHeaders).toHaveBeenCalledWith('claude-3-5-sonnet', true);
expect(result.llmConfig.clientOptions.defaultHeaders).toEqual({
'anthropic-beta': 'prompt-caching-2024-07-31',
});
});
it('should handle thinking and thinkingBudget options', () => {
configureReasoning.mockImplementation((requestOptions, systemOptions) => {
if (systemOptions.thinking) {
requestOptions.thinking = { type: 'enabled' };
}
if (systemOptions.thinkingBudget) {
requestOptions.thinking = {
...requestOptions.thinking,
budget_tokens: systemOptions.thinkingBudget,
};
}
return requestOptions;
});
getLLMConfig('test-api-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
thinking: true,
thinkingBudget: 5000,
},
});
expect(configureReasoning).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({
thinking: true,
promptCache: false,
thinkingBudget: 5000,
}),
);
});
it('should remove system options from modelOptions', () => {
const modelOptions = {
model: 'claude-3-opus',
thinking: true,
promptCache: true,
thinkingBudget: 1000,
temperature: 0.5,
};
getLLMConfig('test-api-key', { modelOptions });
expect(modelOptions).not.toHaveProperty('thinking');
expect(modelOptions).not.toHaveProperty('promptCache');
expect(modelOptions).not.toHaveProperty('thinkingBudget');
expect(modelOptions).toHaveProperty('temperature', 0.5);
});
it('should handle all nullish values removal', () => {
removeNullishValues.mockImplementation((obj) => {
const cleaned = {};
Object.entries(obj).forEach(([key, value]) => {
if (value !== null && value !== undefined) {
cleaned[key] = value;
}
});
return cleaned;
});
const result = getLLMConfig('test-api-key', {
modelOptions: {
temperature: null,
topP: undefined,
topK: 0,
stop: [],
},
});
expect(result.llmConfig).not.toHaveProperty('temperature');
expect(result.llmConfig).not.toHaveProperty('topP');
expect(result.llmConfig).toHaveProperty('topK', 0);
expect(result.llmConfig).toHaveProperty('stopSequences', []);
});
});
});

View file

@ -64,7 +64,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
/** @type {BedrockClientOptions} */
const requestOptions = {
model: overrideModel ?? endpointOption.model,
model: overrideModel ?? endpointOption?.model,
region: BEDROCK_AWS_DEFAULT_REGION,
};
@ -76,7 +76,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
const llmConfig = bedrockOutputParser(
bedrockInputParser.parse(
removeNullishValues(Object.assign(requestOptions, endpointOption.model_parameters)),
removeNullishValues(Object.assign(requestOptions, endpointOption?.model_parameters ?? {})),
),
);

View file

@ -134,7 +134,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
};
if (optionsOnly) {
const modelOptions = endpointOption.model_parameters;
const modelOptions = endpointOption?.model_parameters ?? {};
if (endpoint !== Providers.OLLAMA) {
clientOptions = Object.assign(
{

View file

@ -18,7 +18,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
let serviceKey = {};
try {
serviceKey = require('~/data/auth.json');
} catch (e) {
} catch (_e) {
// Do nothing
}
@ -58,7 +58,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
if (optionsOnly) {
clientOptions = Object.assign(
{
modelOptions: endpointOption.model_parameters,
modelOptions: endpointOption?.model_parameters ?? {},
},
clientOptions,
);

View file

@ -0,0 +1,58 @@
const { Providers } = require('@librechat/agents');
const { EModelEndpoint } = require('librechat-data-provider');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initialize');
const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const initCustom = require('~/server/services/Endpoints/custom/initialize');
const initGoogle = require('~/server/services/Endpoints/google/initialize');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const providerConfigMap = {
[Providers.XAI]: initCustom,
[Providers.OLLAMA]: initCustom,
[Providers.DEEPSEEK]: initCustom,
[Providers.OPENROUTER]: initCustom,
[EModelEndpoint.openAI]: initOpenAI,
[EModelEndpoint.google]: initGoogle,
[EModelEndpoint.azureOpenAI]: initOpenAI,
[EModelEndpoint.anthropic]: initAnthropic,
[EModelEndpoint.bedrock]: getBedrockOptions,
};
/**
* Get the provider configuration and override endpoint based on the provider string
* @param {string} provider - The provider string
* @returns {Promise<{
* getOptions: Function,
* overrideProvider?: string,
* customEndpointConfig?: TEndpoint
* }>}
*/
async function getProviderConfig(provider) {
let getOptions = providerConfigMap[provider];
let overrideProvider;
/** @type {TEndpoint | undefined} */
let customEndpointConfig;
if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
overrideProvider = provider.toLowerCase();
getOptions = providerConfigMap[overrideProvider];
} else if (!getOptions) {
customEndpointConfig = await getCustomEndpointConfig(provider);
if (!customEndpointConfig) {
throw new Error(`Provider ${provider} not supported`);
}
getOptions = initCustom;
overrideProvider = Providers.OPENAI;
}
return {
getOptions,
overrideProvider,
customEndpointConfig,
};
}
module.exports = {
getProviderConfig,
};

View file

@ -138,7 +138,7 @@ const initializeClient = async ({
}
if (optionsOnly) {
const modelOptions = endpointOption.model_parameters;
const modelOptions = endpointOption?.model_parameters ?? {};
modelOptions.model = modelName;
clientOptions = Object.assign({ modelOptions }, clientOptions);
clientOptions.modelOptions.user = req.user.id;

package-lock.json (generated)
View file

@ -64,7 +64,7 @@
"@langchain/google-genai": "^0.2.13",
"@langchain/google-vertexai": "^0.2.13",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.41",
"@librechat/agents": "^2.4.42",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@node-saml/passport-saml": "^5.0.0",
@ -1351,6 +1351,33 @@
}
}
},
"api/node_modules/@librechat/agents": {
"version": "2.4.42",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.42.tgz",
"integrity": "sha512-52ux2PeEAV79yr6/h6GN3omlpqX6H0FYl6qwjJ6gT04MMko/imnLd3bQrX0gm3i0KL5ygHbRjQeonONKjJayHw==",
"license": "MIT",
"dependencies": {
"@langchain/anthropic": "^0.3.23",
"@langchain/aws": "^0.1.11",
"@langchain/community": "^0.3.47",
"@langchain/core": "^0.3.60",
"@langchain/deepseek": "^0.0.2",
"@langchain/google-genai": "^0.2.13",
"@langchain/google-vertexai": "^0.2.13",
"@langchain/langgraph": "^0.3.4",
"@langchain/mistralai": "^0.2.1",
"@langchain/ollama": "^0.2.3",
"@langchain/openai": "^0.5.14",
"@langchain/xai": "^0.0.3",
"cheerio": "^1.0.0",
"dotenv": "^16.4.7",
"https-proxy-agent": "^7.0.6",
"nanoid": "^3.3.7"
},
"engines": {
"node": ">=14.0.0"
}
},
"api/node_modules/@smithy/abort-controller": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/@smithy/abort-controller/-/abort-controller-4.0.2.tgz",
@ -19440,6 +19467,7 @@
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.41.tgz",
"integrity": "sha512-kYmdk5WVRp0qZxTx6BuGCs4l0Ir9iBLLx4ZY4/1wxr80al5/vq3P8wbgGdKMeO2qTu4ZaT4RyWRQYWBg5HDkUQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"@langchain/anthropic": "^0.3.23",
"@langchain/aws": "^0.1.11",
@ -19467,6 +19495,7 @@
"resolved": "https://registry.npmjs.org/@langchain/community/-/community-0.3.47.tgz",
"integrity": "sha512-Vo42kAfkXpTFSevhEkeqqE55az8NyQgDktCbitXYuhipNbFYx08XVvqEDkFkB20MM/Z7u+cvLb+DxCqnKuH0CQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"@langchain/openai": ">=0.2.0 <0.6.0",
"@langchain/weaviate": "^0.2.0",
@ -19992,6 +20021,7 @@
"resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.5.14.tgz",
"integrity": "sha512-0GEj5K/qi1MRuZ4nE7NvyI4jTG+RSewLZqsExUwRukWdeqmkPNHGrogTa5ZDt7eaJxAaY7EgLC5ZnvCM3L1oug==",
"license": "MIT",
"peer": true,
"dependencies": {
"js-tiktoken": "^1.0.12",
"openai": "^5.3.0",
@ -20009,6 +20039,7 @@
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz",
"integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">= 14"
}
@ -20018,6 +20049,7 @@
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
"integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
"license": "MIT",
"peer": true,
"dependencies": {
"agent-base": "^7.1.2",
"debug": "4"
@ -20031,6 +20063,7 @@
"resolved": "https://registry.npmjs.org/openai/-/openai-5.5.1.tgz",
"integrity": "sha512-5i19097mGotHA1eFsM6Tjd/tJ8uo9sa5Ysv4Q6bKJ2vtN6rc0MzMrUefXnLXYAJcmMQrC1Efhj0AvfIkXrQamw==",
"license": "Apache-2.0",
"peer": true,
"bin": {
"openai": "bin/cli"
},
@ -20056,6 +20089,7 @@
"https://github.com/sponsors/ctavan"
],
"license": "MIT",
"peer": true,
"bin": {
"uuid": "dist/bin/uuid"
}

View file

@ -353,7 +353,11 @@ export const uploadMistralOCR = async (context: OCRContext): Promise<MistralOCRU
documentType,
});
// Process result
if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
throw new Error(
'No OCR result returned from service, may be down or the file is not supported.',
);
}
const { text, images } = processOCRResult(ocrResult);
return {
@ -364,7 +368,7 @@ export const uploadMistralOCR = async (context: OCRContext): Promise<MistralOCRU
images,
};
} catch (error) {
throw createOCRError(error, 'Error uploading document to Mistral OCR API');
throw createOCRError(error, 'Error uploading document to Mistral OCR API:');
}
};
@ -401,6 +405,12 @@ export const uploadAzureMistralOCR = async (
documentType,
});
if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
throw new Error(
'No OCR result returned from service, may be down or the file is not supported.',
);
}
const { text, images } = processOCRResult(ocrResult);
return {
@ -411,6 +421,6 @@ export const uploadAzureMistralOCR = async (
images,
};
} catch (error) {
throw createOCRError(error, 'Error uploading document to Azure Mistral OCR API');
throw createOCRError(error, 'Error uploading document to Azure Mistral OCR API:');
}
};