LibreChat/packages/api/src/endpoints/models.ts
Danny Avila b478560c81
🧵 refactor: Migrate Endpoint Initialization to TypeScript (#10794)
* refactor: move endpoint initialization methods to typescript

* refactor: move agent init to packages/api

- Introduced `initialize.ts` for agent initialization, including file processing and tool loading.
- Updated `resources.ts` to allow optional appConfig parameter.
- Enhanced endpoint configuration handling in various initialization files to support model parameters.
- Added new artifacts and prompts for React component generation.
- Refactored existing code to improve type safety and maintainability.

* refactor: streamline endpoint initialization and enhance type safety

- Updated initialization functions across various endpoints to use a consistent request structure, replacing `unknown` types with `ServerResponse`.
- Simplified request handling by directly extracting keys from the request body.
- Improved type safety by ensuring user IDs are safely accessed with optional chaining (see the sketch after this list).
- Removed unnecessary parameters and streamlined model options handling for better clarity and maintainability.
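
A minimal sketch of the pattern these changes describe; `InitRequest` and `initializeClientExample` are placeholder names for illustration, not the actual LibreChat signatures:

```ts
import type { ServerResponse } from 'http';

/** Illustrative request shape; the real request types live in the endpoint packages. */
interface InitRequest {
  body: { key?: string };
  user?: { id: string };
}

async function initializeClientExample(req: InitRequest, res: ServerResponse) {
  // Keys are extracted directly from the request body.
  const { key: expiresAt } = req.body;
  // Optional chaining keeps an unauthenticated request from throwing.
  const userId = req.user?.id;
  return { userId, expiresAt, res };
}
```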

* refactor: moved ModelService and extractBaseURL to packages/api

- Added comprehensive tests for the models fetching functionality, covering scenarios for OpenAI, Anthropic, Google, and Ollama models.
- Updated existing endpoint index to include the new models module.
- Enhanced utility functions for URL extraction and model data processing.
- Improved type safety and error handling across the models fetching logic.

* refactor: consolidate utility functions and remove unused files

- Merged `deriveBaseURL` and `extractBaseURL` into the `@librechat/api` module for better organization.
- Removed redundant utility files and their associated tests to streamline the codebase.
- Updated imports across various client files to utilize the new consolidated functions.
- Enhanced overall maintainability by reducing the number of utility modules.

* refactor: replace ModelService references with direct imports from @librechat/api and remove ModelService file

* refactor: move encrypt/decrypt methods and key db methods to data-schemas, use `getProviderConfig` from `@librechat/api`

* chore: remove unused 'res' from options in AgentClient

* refactor: file model imports and methods

- Updated imports in various controllers and services to use the unified file model from '~/models' instead of '~/models/File'.
- Consolidated file-related methods into a new file methods module in the data-schemas package.
- Added comprehensive tests for file methods including creation, retrieval, updating, and deletion.
- Enhanced the initializeAgent function to accept dependency injection for file-related methods (see the sketch after this list).
- Improved error handling and logging in file methods.
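
A rough sketch of the dependency-injection idea; the `FileMethods` shape and `initializeAgentExample` name are invented for illustration and are not the real `initializeAgent` signature:

```ts
/** Hypothetical subset of the injected, DB-backed file methods. */
interface FileMethods {
  getFiles: (fileIds: string[]) => Promise<Array<{ file_id: string; filename: string }>>;
}

interface AgentInitDeps {
  fileMethods: FileMethods;
}

/** Injecting the methods keeps the initializer easy to unit test with mocks. */
async function initializeAgentExample(fileIds: string[], { fileMethods }: AgentInitDeps) {
  const files = await fileMethods.getFiles(fileIds);
  return files.map((file) => file.filename);
}
```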

* refactor: streamline database method references in agent initialization

* refactor: enhance file method tests and update type references to IMongoFile

* refactor: consolidate database method imports in agent client and initialization

* chore: remove redundant import of initializeAgent from @librechat/api

* refactor: move checkUserKeyExpiry utility to @librechat/api and update references across endpoints

* refactor: move updateUserPlugins logic to user.ts and simplify UserController

* refactor: update imports for user key management and remove UserService

* refactor: remove unused Anthropic and Bedrock endpoint files and clean up imports

* refactor: consolidate and update encryption imports across various files to use @librechat/data-schemas

* chore: update file model mock to use unified import from '~/models'

* chore: import order

* refactor: remove migrated to TS agent.js file and its associated logic from the endpoints

* chore: add reusable function to extract imports from source code in unused-packages workflow

* chore: enhance unused-packages workflow to include @librechat/api dependencies and improve dependency extraction

* chore: improve dependency extraction in unused-packages workflow with enhanced error handling and debugging output

* chore: add detailed debugging output to unused-packages workflow for better visibility into unused dependencies and exclusion lists

* chore: refine subpath handling in unused-packages workflow to correctly process scoped and non-scoped package imports

* chore: clean up unused debug output in unused-packages workflow and reorganize type imports in initialize.ts
2025-12-04 14:34:54 -05:00


import axios from 'axios';
import { logger } from '@librechat/data-schemas';
import { HttpsProxyAgent } from 'https-proxy-agent';
import { CacheKeys, KnownEndpoints, EModelEndpoint, defaultModels } from 'librechat-data-provider';
import type { IUser } from '@librechat/data-schemas';
import {
  processModelData,
  extractBaseURL,
  isUserProvided,
  resolveHeaders,
  deriveBaseURL,
  logAxiosError,
  inputSchema,
} from '~/utils';
import { standardCache } from '~/cache';

export interface FetchModelsParams {
  /** User ID for API requests */
  user?: string;
  /** API key for authentication */
  apiKey: string;
  /** Base URL for the API */
  baseURL?: string;
  /** Endpoint name (defaults to 'openAI') */
  name?: string;
  /** Whether directEndpoint was configured */
  direct?: boolean;
  /** Whether to fetch from Azure */
  azure?: boolean;
  /** Whether to send user ID as query parameter */
  userIdQuery?: boolean;
  /** Whether to create token configuration from API response */
  createTokenConfig?: boolean;
  /** Cache key for token configuration (uses name if omitted) */
  tokenKey?: string;
  /** Optional headers for the request */
  headers?: Record<string, string> | null;
  /** Optional user object for header resolution */
  userObject?: Partial<IUser>;
}

/**
 * Fetches Ollama models from the specified base API path.
 * @param baseURL - The Ollama server URL
 * @param options - Optional configuration
 * @returns Promise resolving to array of model names
 */
async function fetchOllamaModels(
  baseURL: string,
  options: { headers?: Record<string, string> | null; user?: Partial<IUser> } = {},
): Promise<string[]> {
  if (!baseURL) {
    return [];
  }
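  // Derive the server's base origin from the configured URL, then query Ollama's native /api/tags listing.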
  const ollamaEndpoint = deriveBaseURL(baseURL);
  const resolvedHeaders = resolveHeaders({
    headers: options.headers ?? undefined,
    user: options.user,
  });
  const response = await axios.get<{ models: Array<{ name: string }> }>(
    `${ollamaEndpoint}/api/tags`,
    {
      headers: resolvedHeaders,
      timeout: 5000,
    },
  );
  return response.data.models.map((tag) => tag.name);
}

/**
 * Splits a string by commas and trims each resulting value.
 * @param input - The input string to split.
 * @returns An array of trimmed values.
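 * @example
 * // Illustrative input/output:
 * // splitAndTrim('gpt-4o, gpt-4o-mini, ') => ['gpt-4o', 'gpt-4o-mini']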
 */
export function splitAndTrim(input: string | null | undefined): string[] {
  if (!input || typeof input !== 'string') {
    return [];
  }
  return input
    .split(',')
    .map((item) => item.trim())
    .filter(Boolean);
}

/**
 * Fetches models from the specified base API path or Azure, based on the provided configuration.
 *
 * @param params - The parameters for fetching the models.
 * @returns A promise that resolves to an array of model identifiers.
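 * @example
 * // Placeholder values; any OpenAI-compatible endpoint works here.
 * const models = await fetchModels({
 *   apiKey: process.env.OPENAI_API_KEY ?? '',
 *   baseURL: 'https://api.openai.com/v1',
 *   name: EModelEndpoint.openAI,
 * });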
 */
export async function fetchModels({
  user,
  apiKey,
  baseURL: _baseURL,
  name = EModelEndpoint.openAI,
  direct = false,
  azure = false,
  userIdQuery = false,
  createTokenConfig = true,
  tokenKey,
  headers,
  userObject,
}: FetchModelsParams): Promise<string[]> {
  let models: string[] = [];
  const baseURL = direct ? extractBaseURL(_baseURL ?? '') : _baseURL;
  if (!baseURL && !azure) {
    return models;
  }
  if (!apiKey) {
    return models;
  }
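  // Ollama endpoints are listed via the native /api/tags route first; on failure we
  // fall through and retry via the OpenAI-compatible /models route below.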
  if (name && name.toLowerCase().startsWith(KnownEndpoints.ollama)) {
    try {
      return await fetchOllamaModels(baseURL ?? '', { headers, user: userObject });
    } catch (ollamaError) {
      const logMessage =
        'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.';
      logAxiosError({ message: logMessage, error: ollamaError as Error });
    }
  }
  try {
    const options: {
      headers: Record<string, string>;
      timeout: number;
      httpsAgent?: HttpsProxyAgent;
    } = {
      headers: {
        ...(headers ?? {}),
      },
      timeout: 5000,
    };
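    // Anthropic authenticates with `x-api-key` and an `anthropic-version` header
    // rather than the Bearer token used by OpenAI-compatible APIs.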
    if (name === EModelEndpoint.anthropic) {
      options.headers = {
        'x-api-key': apiKey,
        'anthropic-version': process.env.ANTHROPIC_VERSION || '2023-06-01',
      };
    } else {
      options.headers.Authorization = `Bearer ${apiKey}`;
    }
    if (process.env.PROXY) {
      options.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
    }
    if (process.env.OPENAI_ORGANIZATION && baseURL?.includes('openai')) {
      options.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
    }
    const url = new URL(`${(baseURL ?? '').replace(/\/+$/, '')}${azure ? '' : '/models'}`);
    if (user && userIdQuery) {
      url.searchParams.append('user', user);
    }
    const res = await axios.get(url.toString(), options);
    const input = res.data;
    const validationResult = inputSchema.safeParse(input);
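    // When the response matches the expected schema, cache the derived per-model
    // token configuration under the endpoint's token key.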
    if (validationResult.success && createTokenConfig) {
      const endpointTokenConfig = processModelData(input);
      const cache = standardCache(CacheKeys.TOKEN_CONFIG);
      await cache.set(tokenKey ?? name, endpointTokenConfig);
    }
    models = input.data.map((item: { id: string }) => item.id);
  } catch (error) {
    const logMessage = `Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`;
    logAxiosError({ message: logMessage, error: error as Error });
  }
  return models;
}

/** Options for fetching OpenAI models */
export interface GetOpenAIModelsOptions {
  /** User ID for API requests */
  user?: string;
  /** Whether to fetch from Azure */
  azure?: boolean;
  /** Whether to fetch models for the Assistants endpoint */
  assistants?: boolean;
  /** OpenAI API key (if not using environment variable) */
  openAIApiKey?: string;
  /** Whether user provides their own API key */
  userProvidedOpenAI?: boolean;
}

/**
 * Fetches models from OpenAI or Azure based on the provided options.
 * @param opts - Options for fetching models
 * @param _models - Fallback models array
 * @returns Promise resolving to array of model IDs
 */
export async function fetchOpenAIModels(
  opts: GetOpenAIModelsOptions,
  _models: string[] = [],
): Promise<string[]> {
  let models = _models.slice() ?? [];
  const apiKey = opts.openAIApiKey ?? process.env.OPENAI_API_KEY;
  const openaiBaseURL = 'https://api.openai.com/v1';
  let baseURL = openaiBaseURL;
  let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY;
  if (opts.assistants && process.env.ASSISTANTS_BASE_URL) {
    reverseProxyUrl = process.env.ASSISTANTS_BASE_URL;
  } else if (opts.azure) {
    return models;
  }
  if (reverseProxyUrl) {
    baseURL = extractBaseURL(reverseProxyUrl) ?? openaiBaseURL;
  }
  const modelsCache = standardCache(CacheKeys.MODEL_QUERIES);
  const cachedModels = await modelsCache.get(baseURL);
  if (cachedModels) {
    return cachedModels as string[];
  }
  if (baseURL || opts.azure) {
    models = await fetchModels({
      apiKey: apiKey ?? '',
      baseURL,
      azure: opts.azure,
      user: opts.user,
      name: EModelEndpoint.openAI,
    });
  }
  if (models.length === 0) {
    return _models;
  }
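  // The filtering below only applies to the official OpenAI API: keep GPT/o-series
  // (and text-davinci-003) models, drop audio/realtime variants, and list instruct models last.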
  if (baseURL === openaiBaseURL) {
    const regex = /(text-davinci-003|gpt-|o\d+)/;
    const excludeRegex = /audio|realtime/;
    models = models.filter((model) => regex.test(model) && !excludeRegex.test(model));
    const instructModels = models.filter((model) => model.includes('instruct'));
    const otherModels = models.filter((model) => !model.includes('instruct'));
    models = otherModels.concat(instructModels);
  }
  await modelsCache.set(baseURL, models);
  return models;
}

/**
 * Loads the default models for OpenAI or Azure.
 * @param opts - Options for getting models
 * @returns Promise resolving to array of model IDs
 */
export async function getOpenAIModels(opts: GetOpenAIModelsOptions = {}): Promise<string[]> {
  let models = defaultModels[EModelEndpoint.openAI];
  if (opts.assistants) {
    models = defaultModels[EModelEndpoint.assistants];
  } else if (opts.azure) {
    models = defaultModels[EModelEndpoint.azureAssistants];
  }
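  // Environment variable overrides take precedence over both defaults and fetched models.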
  let key: string;
  if (opts.assistants) {
    key = 'ASSISTANTS_MODELS';
  } else if (opts.azure) {
    key = 'AZURE_OPENAI_MODELS';
  } else {
    key = 'OPENAI_MODELS';
  }
  if (process.env[key]) {
    return splitAndTrim(process.env[key]);
  }
  if (opts.userProvidedOpenAI) {
    return models;
  }
  return await fetchOpenAIModels(opts, models);
}

/**
 * Fetches models from the Anthropic API.
 * @param opts - Options for fetching models
 * @param _models - Fallback models array
 * @returns Promise resolving to array of model IDs
 */
export async function fetchAnthropicModels(
  opts: { user?: string } = {},
  _models: string[] = [],
): Promise<string[]> {
  let models = _models.slice() ?? [];
  const apiKey = process.env.ANTHROPIC_API_KEY;
  const anthropicBaseURL = 'https://api.anthropic.com/v1';
  let baseURL = anthropicBaseURL;
  const reverseProxyUrl = process.env.ANTHROPIC_REVERSE_PROXY;
  if (reverseProxyUrl) {
    baseURL = extractBaseURL(reverseProxyUrl) ?? anthropicBaseURL;
  }
  if (!apiKey) {
    return models;
  }
  const modelsCache = standardCache(CacheKeys.MODEL_QUERIES);
  const cachedModels = await modelsCache.get(baseURL);
  if (cachedModels) {
    return cachedModels as string[];
  }
  if (baseURL) {
    models = await fetchModels({
      apiKey,
      baseURL,
      user: opts.user,
      name: EModelEndpoint.anthropic,
      tokenKey: EModelEndpoint.anthropic,
    });
  }
  if (models.length === 0) {
    return _models;
  }
  await modelsCache.set(baseURL, models);
  return models;
}

/**
 * Gets Anthropic models from environment or API.
 * @param opts - Options for fetching models
 * @returns Promise resolving to array of model IDs
 */
export async function getAnthropicModels(opts: { user?: string } = {}): Promise<string[]> {
  const models = defaultModels[EModelEndpoint.anthropic];
  if (process.env.ANTHROPIC_MODELS) {
    return splitAndTrim(process.env.ANTHROPIC_MODELS);
  }
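  // A user-provided key means there is no server-side key to fetch with, so return the defaults.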
  if (isUserProvided(process.env.ANTHROPIC_API_KEY)) {
    return models;
  }
  try {
    return await fetchAnthropicModels(opts, models);
  } catch (error) {
    logger.error('Error fetching Anthropic models:', error);
    return models;
  }
}

/**
 * Gets Google models from environment or defaults.
 * @returns Array of model IDs
 */
export function getGoogleModels(): string[] {
  let models = defaultModels[EModelEndpoint.google];
  if (process.env.GOOGLE_MODELS) {
    models = splitAndTrim(process.env.GOOGLE_MODELS);
  }
  return models;
}

/**
 * Gets Bedrock models from environment or defaults.
 * @returns Array of model IDs
 */
export function getBedrockModels(): string[] {
  let models = defaultModels[EModelEndpoint.bedrock];
  if (process.env.BEDROCK_AWS_MODELS) {
    models = splitAndTrim(process.env.BEDROCK_AWS_MODELS);
  }
  return models;
}