LibreChat/packages/api/src/endpoints/custom/initialize.ts

import {
  CacheKeys,
  ErrorTypes,
  envVarRegex,
  FetchTokenConfig,
  extractEnvVariable,
} from 'librechat-data-provider';
import type { TEndpoint } from 'librechat-data-provider';
import type { AppConfig } from '@librechat/data-schemas';
import type { BaseInitializeParams, InitializeResultBase, EndpointTokenConfig } from '~/types';
import { getOpenAIConfig } from '~/endpoints/openai/config';
import { getCustomEndpointConfig } from '~/app/config';
import { fetchModels } from '~/endpoints/models';
import { isUserProvided, checkUserKeyExpiry } from '~/utils';
import { standardCache } from '~/cache';

const { PROXY } = process.env;
/**
 * Builds custom options from endpoint configuration.
 * @param endpointConfig - Custom endpoint configuration from librechat.yaml
 * @param appConfig - Optional application config; `endpoints.all.streamRate` overrides the per-endpoint value
 * @param endpointTokenConfig - Optional token configuration for the endpoint's models
 */
function buildCustomOptions(
  endpointConfig: Partial<TEndpoint>,
  appConfig?: AppConfig,
  endpointTokenConfig?: Record<string, unknown>,
) {
  const customOptions: Record<string, unknown> = {
    headers: endpointConfig.headers,
    addParams: endpointConfig.addParams,
    dropParams: endpointConfig.dropParams,
    customParams: endpointConfig.customParams,
    titleConvo: endpointConfig.titleConvo,
    titleModel: endpointConfig.titleModel,
    forcePrompt: endpointConfig.forcePrompt,
    summaryModel: endpointConfig.summaryModel,
    modelDisplayLabel: endpointConfig.modelDisplayLabel,
    titleMethod: endpointConfig.titleMethod ?? 'completion',
    contextStrategy: endpointConfig.summarize ? 'summarize' : null,
    directEndpoint: endpointConfig.directEndpoint,
    titleMessageRole: endpointConfig.titleMessageRole,
    streamRate: endpointConfig.streamRate,
    endpointTokenConfig,
  };

  const allConfig = appConfig?.endpoints?.all;
  if (allConfig) {
    customOptions.streamRate = allConfig.streamRate;
  }

  return customOptions;
}
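
/*
 * Illustration (an assumption for readability, not part of the original source):
 * given a YAML-derived endpoint config, `buildCustomOptions` copies the listed
 * fields through and applies the defaults shown, e.g.
 *
 *   buildCustomOptions({ titleConvo: true, summarize: false }, undefined, undefined);
 *   // -> { titleConvo: true, titleMethod: 'completion', contextStrategy: null,
 *   //      endpointTokenConfig: undefined, ...remaining fields undefined }
 *
 * When the app config defines a top-level `endpoints.all.streamRate`, it
 * overrides any per-endpoint `streamRate`.
 */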

/**
 * Initializes a custom endpoint client configuration.
 * This function handles custom endpoints defined in librechat.yaml, including
 * user-provided API keys and URLs.
 *
 * @param params - Configuration parameters
 * @returns Promise resolving to endpoint configuration options
 * @throws Error if config is missing, API key is not provided, or base URL is missing
 */
export async function initializeCustom({
  req,
  endpoint,
  model_parameters,
  db,
}: BaseInitializeParams): Promise<InitializeResultBase> {
  const appConfig = req.config;
  const { key: expiresAt } = req.body;

  const endpointConfig = getCustomEndpointConfig({
    endpoint,
    appConfig,
  });
  if (!endpointConfig) {
    throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
  }
  const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey ?? '');
  const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL ?? '');

  /** If the value still matches the env variable pattern, the variable was not resolved */
  if (CUSTOM_API_KEY.match(envVarRegex)) {
    throw new Error(`Missing API Key for ${endpoint}.`);
  }
  if (CUSTOM_BASE_URL.match(envVarRegex)) {
    throw new Error(`Missing Base URL for ${endpoint}.`);
  }

  const userProvidesKey = isUserProvided(CUSTOM_API_KEY);
  const userProvidesURL = isUserProvided(CUSTOM_BASE_URL);

  /** Load the user's stored key/URL when the endpoint expects user-provided values */
  let userValues = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await db.getUserKeyValues({ userId: req.user?.id ?? '', name: endpoint });
  }

  const apiKey = userProvidesKey ? userValues?.apiKey : CUSTOM_API_KEY;
  const baseURL = userProvidesURL ? userValues?.baseURL : CUSTOM_BASE_URL;

  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }

  if (userProvidesURL && !baseURL) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_BASE_URL,
      }),
    );
  }

  if (!apiKey) {
    throw new Error(`${endpoint} API key not provided.`);
  }

  if (!baseURL) {
    throw new Error(`${endpoint} Base URL not provided.`);
  }
  let endpointTokenConfig: EndpointTokenConfig | undefined;
  const userId = req.user?.id ?? '';
  const cache = standardCache(CacheKeys.TOKEN_CONFIG);

  /** tokenConfig is an optional extended property on custom endpoints */
  const hasTokenConfig = (endpointConfig as Record<string, unknown>).tokenConfig != null;
  /** Cache per-user when credentials are user-provided and no static tokenConfig exists */
  const tokenKey =
    !hasTokenConfig && (userProvidesKey || userProvidesURL) ? `${endpoint}:${userId}` : endpoint;

  const cachedConfig =
    !hasTokenConfig &&
    FetchTokenConfig[endpoint.toLowerCase() as keyof typeof FetchTokenConfig] &&
    (await cache.get(tokenKey));
  endpointTokenConfig = (cachedConfig as EndpointTokenConfig) || undefined;

  /** Fetch models to populate the token config cache when the endpoint supports it */
  if (
    FetchTokenConfig[endpoint.toLowerCase() as keyof typeof FetchTokenConfig] &&
    endpointConfig &&
    endpointConfig.models?.fetch &&
    !endpointTokenConfig
  ) {
    await fetchModels({ apiKey, baseURL, name: endpoint, user: userId, tokenKey });
    endpointTokenConfig = (await cache.get(tokenKey)) as EndpointTokenConfig | undefined;
  }
  const customOptions = buildCustomOptions(endpointConfig, appConfig, endpointTokenConfig);

  const clientOptions: Record<string, unknown> = {
    reverseProxyUrl: baseURL ?? null,
    proxy: PROXY ?? null,
    ...customOptions,
  };

  const modelOptions = { ...(model_parameters ?? {}), user: userId };
  const finalClientOptions = {
    modelOptions,
    ...clientOptions,
  };

  const options = getOpenAIConfig(apiKey, finalClientOptions, endpoint);
  if (options != null) {
    (options as InitializeResultBase).useLegacyContent = true;
    (options as InitializeResultBase).endpointTokenConfig = endpointTokenConfig;
  }

  const streamRate = clientOptions.streamRate as number | undefined;
  if (streamRate) {
    (options.llmConfig as Record<string, unknown>)._lc_stream_delay = streamRate;
  }

  return options;
}
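
/*
 * Usage sketch (illustrative only, not part of this module): how a caller might
 * invoke `initializeCustom`. The endpoint name and model parameters below are
 * hypothetical examples; the `req` shape and `db` adapter are assumptions based
 * on how this function reads them (`req.config`, `req.body.key`, `req.user`,
 * and `db.getUserKeyValues`), and are wired up elsewhere in LibreChat.
 *
 *   const options = await initializeCustom({
 *     req,                                                        // carries `config`, `body.key`, and `user`
 *     endpoint: 'groq',                                           // example custom endpoint from librechat.yaml
 *     model_parameters: { model: 'example-model', temperature: 0.7 },
 *     db,                                                         // injected DB methods; only `getUserKeyValues` is used here
 *   });
 *   // `options` is the OpenAI-compatible config returned by `getOpenAIConfig`,
 *   // with `useLegacyContent` and `endpointTokenConfig` attached.
 */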