LibreChat/api/server/services/ModelService.js
Danny Avila 5f2d1c5dc9
👁️ feat: Azure Mistral OCR Strategy (#7888)
* 👁️ feat: Add Azure Mistral OCR strategy and endpoint integration

This commit introduces a new OCR strategy named 'azure_mistral_ocr', allowing the use of a Mistral OCR endpoint deployed on Azure. The configuration, schemas, and file upload strategies have been updated to support this integration, enabling seamless OCR processing via Azure-hosted Mistral services.
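
For illustration only, a minimal sketch of how an upload handler might be chosen from the configured OCR strategy; `uploadMistralOCR` and `uploadAzureMistralOCR` are the functions referenced later in this PR, but the dispatch helper, its name, and the exact export location are assumptions, not the actual implementation:

```js
// Hypothetical dispatch on the configured OCR strategy (sketch only).
const { uploadMistralOCR, uploadAzureMistralOCR } = require('@librechat/api');

function getOCRUploadHandler(strategy) {
  switch (strategy) {
    case 'mistral_ocr':
      return uploadMistralOCR;
    case 'azure_mistral_ocr':
      return uploadAzureMistralOCR;
    default:
      throw new Error(`Unsupported OCR strategy: ${strategy}`);
  }
}
```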

* 🗑️ chore: Clean up .gitignore by removing commented-out uncommon directory name

* chore: remove unused vars

* refactor: Move createAxiosInstance to packages/api/utils and update imports

- Removed the createAxiosInstance function from the config module and relocated it to a new utils module for better organization.
- Updated import paths in relevant files to reflect the new location of createAxiosInstance.
- Added tests for createAxiosInstance to ensure proper functionality and proxy configuration handling.
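
A minimal sketch of what an axios factory with proxy handling could look like, assuming the proxy URL comes from a `PROXY` environment variable as elsewhere in this service; the real `createAxiosInstance` in `packages/api/utils` may differ:

```js
const axios = require('axios');
const { HttpsProxyAgent } = require('https-proxy-agent');

// Sketch only: create an axios instance and attach a proxy agent when a
// proxy is configured; disable axios's built-in proxy handling in that case.
function createAxiosInstance() {
  const instance = axios.create();
  if (process.env.PROXY) {
    instance.defaults.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
    instance.defaults.proxy = false;
  }
  return instance;
}
```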

* chore: move axios helpers to packages/api

- Added logAxiosError function to @librechat/api for centralized error logging.
- Updated imports across various files to use the new logAxiosError function.
- Removed the old axios.js utility file as it is no longer needed.

* chore: Update Jest moduleNameMapper for improved path resolution

- Added a new mapping for '~/' to resolve module paths in Jest configuration, enhancing import handling for the project.
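
As a sketch, the added mapping presumably looks something like the following in the Jest config (the exact pattern and rootDir target are assumptions):

```js
// jest.config.js (excerpt) — illustrative only.
module.exports = {
  moduleNameMapper: {
    '^~/(.*)$': '<rootDir>/$1',
  },
};
```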

* feat: Implement Mistral OCR API integration in TS

* chore: Update MistralOCR tests based on new imports

* fix: Enhance MistralOCR configuration handling and tests

- Introduced helper functions for resolving configuration values from environment variables or hardcoded settings.
- Updated the uploadMistralOCR and uploadAzureMistralOCR functions to utilize the new configuration resolution logic.
- Improved test cases to ensure correct behavior when mixing environment variables and hardcoded values.
- Mocked file upload and signed URL responses in tests to validate functionality without external dependencies.
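
A hedged sketch of the kind of resolver described above, assuming values may be given either literally or as `${ENV_VAR}` references; the helper name and placeholder syntax are assumptions for illustration:

```js
// Resolve a configured value: "${SOME_VAR}" reads from process.env, anything
// else is treated as a hardcoded value; fall back to a default when empty.
function resolveConfigValue(value, defaultValue) {
  if (typeof value !== 'string' || value.length === 0) {
    return defaultValue;
  }
  const match = value.match(/^\$\{(\w+)\}$/);
  if (match) {
    return process.env[match[1]] ?? defaultValue;
  }
  return value;
}
```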

* feat: Enhance MistralOCR functionality with improved configuration and error handling

- Introduced helper functions for loading authentication configuration and resolving values from environment variables.
- Updated uploadMistralOCR and uploadAzureMistralOCR functions to utilize the new configuration logic.
- Added utility functions for processing OCR results and creating error messages.
- Improved document type determination and result aggregation for better OCR processing.
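
For example, result aggregation could be as simple as concatenating per-page markdown, assuming Mistral's OCR response shape with a `pages[].markdown` field (sketch only, not the shipped helper):

```js
// Join the markdown of every OCR page into a single text blob.
function aggregateOCRText(result) {
  return (result.pages ?? [])
    .map((page) => page.markdown ?? '')
    .filter(Boolean)
    .join('\n\n');
}
```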

* refactor: Reorganize OCR type imports in Mistral CRUD file

- Moved OCRResult, OCRResultPage, and OCRImage imports to a more logical grouping for better readability and maintainability.

* feat: Add file exports to API and create files index

* chore: Update OCR types for enhanced structure and clarity

- Redesigned OCRImage interface to include mandatory fields and improved naming conventions.
- Added PageDimensions interface for better representation of page metrics.
- Updated OCRResultPage to include dimensions and mandatory images array.
- Refined OCRResult to include document annotation and usage information.
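
Sketched as JSDoc typedefs; fields beyond those listed above follow Mistral's OCR response format and are assumptions, not the actual TypeScript definitions:

```js
/** @typedef {{ dpi: number, width: number, height: number }} PageDimensions */
/** @typedef {{ id: string, top_left_x: number, top_left_y: number, bottom_right_x: number, bottom_right_y: number, image_base64?: string }} OCRImage */
/** @typedef {{ index: number, markdown: string, images: OCRImage[], dimensions: PageDimensions }} OCRResultPage */
/** @typedef {{ pages: OCRResultPage[], document_annotation?: string, usage_info?: { pages_processed: number, doc_size_bytes?: number } }} OCRResult */
```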

* refactor: use TS counterpart of uploadOCR methods

* ci: Update MistralOCR tests to reflect new OCR result structure

* chore: Bump version of @librechat/api to 1.2.3 in package.json and package-lock.json

* chore: Update CONFIG_VERSION to 1.2.8

* chore: remove unused sendEvent function from config module (now imported from '@librechat/api')

* chore: remove MistralOCR service files and tests (now in '@librechat/api')

* ci: update logger import in ModelService tests to use @librechat/data-schemas

---------

Co-authored-by: arthurolivierfortin <arthurolivier.fortin@gmail.com>
2025-06-13 15:14:57 -04:00


const axios = require('axios');
const { Providers } = require('@librechat/agents');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
const { inputSchema, extractBaseURL, processModelData } = require('~/utils');
const { OllamaClient } = require('~/app/clients/OllamaClient');
const { isUserProvided } = require('~/server/utils');
const getLogStores = require('~/cache/getLogStores');

/**
 * Splits a string by commas and trims each resulting value.
 * @param {string} input - The input string to split.
 * @returns {string[]} An array of trimmed values.
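 * @example
 * splitAndTrim(' a, b ,, c ') // => ['a', 'b', 'c']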
 */
const splitAndTrim = (input) => {
  if (!input || typeof input !== 'string') {
    return [];
  }
  return input
    .split(',')
    .map((item) => item.trim())
    .filter(Boolean);
};

const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService').config;

/**
 * Fetches models from the specified base API path or Azure, based on the provided configuration.
 *
 * @param {Object} params - The parameters for fetching the models.
 * @param {string} params.user - The user ID to send to the API.
 * @param {string} params.apiKey - The API key for authentication with the API.
 * @param {string} params.baseURL - The base path URL for the API.
 * @param {string} [params.name='OpenAI'] - The name of the API; defaults to 'OpenAI'.
 * @param {boolean} [params.azure=false] - Whether to fetch models from Azure.
 * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
 * @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response.
 * @param {string} [params.tokenKey] - The cache key to save the token configuration. Uses `name` if omitted.
 * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
 * @async
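 * @example
 * // Illustrative call (values are placeholders):
 * const models = await fetchModels({
 *   apiKey: process.env.OPENAI_API_KEY,
 *   baseURL: 'https://api.openai.com/v1',
 *   name: EModelEndpoint.openAI,
 * });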
 */
const fetchModels = async ({
  user,
  apiKey,
  baseURL,
  name = EModelEndpoint.openAI,
  azure = false,
  userIdQuery = false,
  createTokenConfig = true,
  tokenKey,
}) => {
  let models = [];

  if (!baseURL && !azure) {
    return models;
  }

  if (!apiKey) {
    return models;
  }

  if (name && name.toLowerCase().startsWith(Providers.OLLAMA)) {
    return await OllamaClient.fetchModels(baseURL);
  }

  try {
    const options = {
      headers: {},
      timeout: 5000,
    };

    if (name === EModelEndpoint.anthropic) {
      options.headers = {
        'x-api-key': apiKey,
        'anthropic-version': process.env.ANTHROPIC_VERSION || '2023-06-01',
      };
    } else {
      options.headers.Authorization = `Bearer ${apiKey}`;
    }

    if (process.env.PROXY) {
      options.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
    }

    if (process.env.OPENAI_ORGANIZATION && baseURL.includes('openai')) {
      options.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
    }

    const url = new URL(`${baseURL}${azure ? '' : '/models'}`);
    if (user && userIdQuery) {
      url.searchParams.append('user', user);
    }

    const res = await axios.get(url.toString(), options);
    /** @type {z.infer<typeof inputSchema>} */
    const input = res.data;

    const validationResult = inputSchema.safeParse(input);
    if (validationResult.success && createTokenConfig) {
      const endpointTokenConfig = processModelData(input);
      const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
      await cache.set(tokenKey ?? name, endpointTokenConfig);
    }
    models = input.data.map((item) => item.id);
  } catch (error) {
    const logMessage = `Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`;
    logAxiosError({ message: logMessage, error });
  }

  return models;
};

/**
 * Fetches models from the specified API path or Azure, based on the provided options.
 * @async
 * @function
 * @param {object} opts - The options for fetching the models.
 * @param {string} opts.user - The user ID to send to the API.
 * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
 * @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants endpoint.
 * @param {boolean} [opts.plugins=false] - Whether to fetch models for the plugins endpoint.
 * @param {string[]} [_models=[]] - The models to use as a fallback.
 */
const fetchOpenAIModels = async (opts, _models = []) => {
  let models = _models.slice() ?? [];
  let apiKey = openAIApiKey;
  const openaiBaseURL = 'https://api.openai.com/v1';
  let baseURL = openaiBaseURL;
  let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY;

  if (opts.assistants && process.env.ASSISTANTS_BASE_URL) {
    reverseProxyUrl = process.env.ASSISTANTS_BASE_URL;
  } else if (opts.azure) {
    return models;
    // const azure = getAzureCredentials();
    // baseURL = (genAzureChatCompletion(azure))
    //   .split('/deployments')[0]
    //   .concat(`/models?api-version=${azure.azureOpenAIApiVersion}`);
    // apiKey = azureOpenAIApiKey;
  }

  if (reverseProxyUrl) {
    baseURL = extractBaseURL(reverseProxyUrl);
  }

  const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);
  const cachedModels = await modelsCache.get(baseURL);
  if (cachedModels) {
    return cachedModels;
  }

  if (baseURL || opts.azure) {
    models = await fetchModels({
      apiKey,
      baseURL,
      azure: opts.azure,
      user: opts.user,
      name: EModelEndpoint.openAI,
    });
  }

  if (models.length === 0) {
    return _models;
  }

  if (baseURL === openaiBaseURL) {
    const regex = /(text-davinci-003|gpt-|o\d+)/;
    const excludeRegex = /audio|realtime/;
    models = models.filter((model) => regex.test(model) && !excludeRegex.test(model));
    const instructModels = models.filter((model) => model.includes('instruct'));
    const otherModels = models.filter((model) => !model.includes('instruct'));
    models = otherModels.concat(instructModels);
  }

  await modelsCache.set(baseURL, models);
  return models;
};

/**
 * Loads the default models for the application.
 * @async
 * @function
 * @param {object} opts - The options for fetching the models.
 * @param {string} opts.user - The user ID to send to the API.
 * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
 * @param {boolean} [opts.plugins=false] - Whether to fetch models for the plugins endpoint.
 * @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants endpoint.
 */
const getOpenAIModels = async (opts) => {
  let models = defaultModels[EModelEndpoint.openAI];

  if (opts.assistants) {
    models = defaultModels[EModelEndpoint.assistants];
  } else if (opts.azure) {
    models = defaultModels[EModelEndpoint.azureAssistants];
  }

  if (opts.plugins) {
    models = models.filter(
      (model) =>
        !model.includes('text-davinci') &&
        !model.includes('instruct') &&
        !model.includes('0613') &&
        !model.includes('0314') &&
        !model.includes('0301'),
    );
  }

  let key;
  if (opts.assistants) {
    key = 'ASSISTANTS_MODELS';
  } else if (opts.azure) {
    key = 'AZURE_OPENAI_MODELS';
  } else if (opts.plugins) {
    key = 'PLUGIN_MODELS';
  } else {
    key = 'OPENAI_MODELS';
  }

  if (process.env[key]) {
    models = splitAndTrim(process.env[key]);
    return models;
  }

  if (userProvidedOpenAI) {
    return models;
  }

  return await fetchOpenAIModels(opts, models);
};
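
/**
 * Returns the models for the ChatGPT browser endpoint, using the
 * CHATGPT_MODELS environment variable when it is set.
 * @returns {string[]} The list of model identifiers.
 */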
const getChatGPTBrowserModels = () => {
  let models = ['text-davinci-002-render-sha', 'gpt-4'];
  if (process.env.CHATGPT_MODELS) {
    models = splitAndTrim(process.env.CHATGPT_MODELS);
  }

  return models;
};

/**
 * Fetches models from the Anthropic API.
 * @async
 * @function
 * @param {object} opts - The options for fetching the models.
 * @param {string} opts.user - The user ID to send to the API.
 * @param {string[]} [_models=[]] - The models to use as a fallback.
 */
const fetchAnthropicModels = async (opts, _models = []) => {
  let models = _models.slice() ?? [];
  let apiKey = process.env.ANTHROPIC_API_KEY;
  const anthropicBaseURL = 'https://api.anthropic.com/v1';
  let baseURL = anthropicBaseURL;
  let reverseProxyUrl = process.env.ANTHROPIC_REVERSE_PROXY;

  if (reverseProxyUrl) {
    baseURL = extractBaseURL(reverseProxyUrl);
  }

  if (!apiKey) {
    return models;
  }

  const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);
  const cachedModels = await modelsCache.get(baseURL);
  if (cachedModels) {
    return cachedModels;
  }

  if (baseURL) {
    models = await fetchModels({
      apiKey,
      baseURL,
      user: opts.user,
      name: EModelEndpoint.anthropic,
      tokenKey: EModelEndpoint.anthropic,
    });
  }

  if (models.length === 0) {
    return _models;
  }

  await modelsCache.set(baseURL, models);
  return models;
};
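
/**
 * Loads the Anthropic models, preferring the ANTHROPIC_MODELS environment
 * variable, then falling back to the live API unless the key is user-provided.
 * @async
 * @param {object} [opts={}] - The options for fetching the models.
 * @param {string} [opts.user] - The user ID to send to the API.
 * @returns {Promise<string[]>} The list of model identifiers.
 */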
const getAnthropicModels = async (opts = {}) => {
  let models = defaultModels[EModelEndpoint.anthropic];
  if (process.env.ANTHROPIC_MODELS) {
    models = splitAndTrim(process.env.ANTHROPIC_MODELS);
    return models;
  }

  if (isUserProvided(process.env.ANTHROPIC_API_KEY)) {
    return models;
  }

  try {
    return await fetchAnthropicModels(opts, models);
  } catch (error) {
    logger.error('Error fetching Anthropic models:', error);
    return models;
  }
};
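
/**
 * Returns the Google models, using the GOOGLE_MODELS environment variable
 * when it is set.
 * @returns {string[]} The list of model identifiers.
 */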
const getGoogleModels = () => {
  let models = defaultModels[EModelEndpoint.google];
  if (process.env.GOOGLE_MODELS) {
    models = splitAndTrim(process.env.GOOGLE_MODELS);
  }

  return models;
};
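
/**
 * Returns the AWS Bedrock models, using the BEDROCK_AWS_MODELS environment
 * variable when it is set.
 * @returns {string[]} The list of model identifiers.
 */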
const getBedrockModels = () => {
  let models = defaultModels[EModelEndpoint.bedrock];
  if (process.env.BEDROCK_AWS_MODELS) {
    models = splitAndTrim(process.env.BEDROCK_AWS_MODELS);
  }

  return models;
};

module.exports = {
  fetchModels,
  splitAndTrim,
  getOpenAIModels,
  getBedrockModels,
  getChatGPTBrowserModels,
  getAnthropicModels,
  getGoogleModels,
};