🧵 refactor: Migrate Endpoint Initialization to TypeScript (#10794)

* refactor: move endpoint initialization methods to TypeScript

* refactor: move agent init to packages/api

- Introduced `initialize.ts` for agent initialization, including file processing and tool loading.
- Updated `resources.ts` to allow optional appConfig parameter.
- Enhanced endpoint configuration handling in various initialization files to support model parameters.
- Added new artifacts and prompts for React component generation.
- Refactored existing code to improve type safety and maintainability.

* refactor: streamline endpoint initialization and enhance type safety

- Updated initialization functions across various endpoints to use a consistent request structure, replacing `unknown` types with `ServerResponse` (sketched after this list).
- Simplified request handling by directly extracting keys from the request body.
- Improved type safety by ensuring user IDs are safely accessed with optional chaining.
- Removed unnecessary parameters and streamlined model options handling for better clarity and maintainability.
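
A minimal sketch of the shape these initializers converge on (the parameter bag, the request-body fields, and the `user` typing are illustrative assumptions; `ServerResponse` here stands in for whichever response type the refactor uses):

```ts
import type { Request } from 'express';
import type { ServerResponse } from 'node:http';

/** Hypothetical parameter bag for a per-endpoint initializer. */
interface InitializeClientParams {
  req: Request;
  res: ServerResponse;
  endpointOption?: Record<string, unknown>;
}

/** Keys are read directly from the request body; the user id via optional chaining. */
function initializeClientExample({ req, res, endpointOption }: InitializeClientParams) {
  const { key: expiresAt, endpoint } = req.body as { key?: string; endpoint?: string };
  const userId = (req as Request & { user?: { id?: string } }).user?.id;
  return { expiresAt, endpoint, userId, endpointOption, res };
}
```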

* refactor: move ModelService and extractBaseURL to packages/api

- Added comprehensive tests for the models fetching functionality, covering scenarios for OpenAI, Anthropic, Google, and Ollama models.
- Updated existing endpoint index to include the new models module.
- Enhanced utility functions for URL extraction and model data processing.
- Improved type safety and error handling across the models fetching logic.

* refactor: consolidate utility functions and remove unused files

- Merged `deriveBaseURL` and `extractBaseURL` into the `@librechat/api` module for better organization.
- Removed redundant utility files and their associated tests to streamline the codebase.
- Updated imports across various client files to utilize the new consolidated functions.
- Enhanced overall maintainability by reducing the number of utility modules.

* refactor: replace ModelService references with direct imports from @librechat/api and remove ModelService file

* refactor: move encrypt/decrypt methods and key db methods to data-schemas, use `getProviderConfig` from `@librechat/api`

* chore: remove unused 'res' from options in AgentClient

* refactor: file model imports and methods

- Updated imports in various controllers and services to use the unified file model from '~/models' instead of '~/models/File'.
- Consolidated file-related methods into a new file methods module in the data-schemas package.
- Added comprehensive tests for file methods including creation, retrieval, updating, and deletion.
- Enhanced the initializeAgent function to accept dependency injection for file-related methods (see the sketch after this list).
- Improved error handling and logging in file methods.
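
A rough sketch of the dependency-injection shape (the method names, parameter types, and the assumption that `IMongoFile` is exported from `@librechat/data-schemas` are illustrative, not the actual `@librechat/api` signature):

```ts
import type { IMongoFile } from '@librechat/data-schemas';

/** Hypothetical bundle of file-related DB methods supplied by the caller. */
interface FileMethods {
  getFiles: (filter: { file_id: { $in: string[] } }) => Promise<IMongoFile[]>;
}

/** Illustrative: file DB access goes through the injected methods rather than direct model imports. */
async function initializeAgentExample(
  params: { agentId: string; fileIds?: string[] },
  { getFiles }: FileMethods,
) {
  const fileIds = params.fileIds ?? [];
  const files = fileIds.length ? await getFiles({ file_id: { $in: fileIds } }) : [];
  return { agentId: params.agentId, files };
}
```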

* refactor: streamline database method references in agent initialization

* refactor: enhance file method tests and update type references to IMongoFile

* refactor: consolidate database method imports in agent client and initialization

* chore: remove redundant import of initializeAgent from @librechat/api

* refactor: move checkUserKeyExpiry utility to @librechat/api and update references across endpoints

* refactor: move updateUserPlugins logic to user.ts and simplify UserController

* refactor: update imports for user key management and remove UserService

* refactor: remove unused Anthropic and Bedrock endpoint files and clean up imports

* refactor: consolidate and update encryption imports across various files to use @librechat/data-schemas

* chore: update file model mock to use unified import from '~/models'

* chore: import order

* refactor: remove the agent.js file (now migrated to TS) and its associated logic from the endpoints

* chore: add reusable function to extract imports from source code in unused-packages workflow
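
A minimal sketch of what such an import-extraction helper could look like (the regex and the scoped/non-scoped normalization are illustrative; the actual workflow script may differ):

```ts
/** Collects bare package names referenced by import/require statements in a source file. */
function extractImports(source: string): Set<string> {
  const packages = new Set<string>();
  // Matches ES `from '...'` clauses and CommonJS `require('...')` calls.
  const pattern = /(?:from|require\()\s*['"]([^'"]+)['"]/g;
  for (const match of source.matchAll(pattern)) {
    const specifier = match[1];
    if (specifier.startsWith('.') || specifier.startsWith('~')) {
      continue; // relative or path-alias import, not a package dependency
    }
    const parts = specifier.split('/');
    // Scoped packages keep two segments (@scope/pkg); others keep only the first.
    packages.add(specifier.startsWith('@') ? parts.slice(0, 2).join('/') : parts[0]);
  }
  return packages;
}
```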

* chore: enhance unused-packages workflow to include @librechat/api dependencies and improve dependency extraction

* chore: improve dependency extraction in unused-packages workflow with enhanced error handling and debugging output

* chore: add detailed debugging output to unused-packages workflow for better visibility into unused dependencies and exclusion lists

* chore: refine subpath handling in unused-packages workflow to correctly process scoped and non-scoped package imports

* chore: clean up unused debug output in unused-packages workflow and reorganize type imports in initialize.ts
Danny Avila 2025-12-03 17:21:41 -05:00
parent 1a11b64266
commit 04a4a2aa44
103 changed files with 4135 additions and 2647 deletions

View file

@@ -21,4 +21,5 @@ export { default as Tokenizer, countTokens } from './tokenizer';
export * from './yaml';
export * from './http';
export * from './tokens';
export * from './url';
export * from './message';

View file

@@ -1,5 +1,6 @@
import path from 'path';
import axios from 'axios';
import { ErrorTypes } from 'librechat-data-provider';
import { logger } from '@librechat/data-schemas';
import { readFileAsString } from './files';
@@ -114,3 +115,25 @@ export async function loadServiceKey(keyPath: string): Promise<GoogleServiceKey
return key;
}
/**
* Checks if a user key has expired based on the provided expiration date and endpoint.
* If the key has expired, it throws an Error with details including the type of error,
* the expiration date, and the endpoint.
*
* @param expiresAt - The expiration date of the user key in a format that can be parsed by the Date constructor
* @param endpoint - The endpoint associated with the user key to be checked
* @throws Error if the user key has expired. The error message is a stringified JSON object
* containing the type of error (`ErrorTypes.EXPIRED_USER_KEY`), the expiration date in the local string format, and the endpoint.
*/
export function checkUserKeyExpiry(expiresAt: string, endpoint: string): void {
const expiresAtDate = new Date(expiresAt);
if (expiresAtDate < new Date()) {
const errorMessage = JSON.stringify({
type: ErrorTypes.EXPIRED_USER_KEY,
expiredAt: expiresAtDate.toLocaleString(),
endpoint,
});
throw new Error(errorMessage);
}
}
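
/**
 * Illustrative usage only (not part of this diff): an endpoint initializer can
 * validate a user-supplied key's expiry before loading stored credentials.
 * The request shape below is an assumption for illustration.
 */
function exampleExpiryCheck(req: { body: { key?: string } }, endpoint: string): void {
  const expiresAt = req.body.key;
  if (expiresAt) {
    checkUserKeyExpiry(expiresAt, endpoint);
  }
}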

View file

@@ -1,23 +1,6 @@
import z from 'zod';
import { EModelEndpoint } from 'librechat-data-provider';
/** Configuration object mapping model keys to their respective prompt, completion rates, and context limit
*
* Note: the [key: string]: unknown is not in the original JSDoc typedef in /api/typedefs.js, but I've included it since
* getModelMaxOutputTokens calls getModelTokenValue with a key of 'output', which was not in the original JSDoc typedef,
* but would be referenced in a TokenConfig in the if(matchedPattern) portion of getModelTokenValue.
* So in order to preserve functionality for that case and any others which might reference an additional key I'm unaware of,
* I've included it here until the interface can be typed more tightly.
*/
export interface TokenConfig {
prompt: number;
completion: number;
context: number;
[key: string]: unknown;
}
/** An endpoint's config object mapping model keys to their respective prompt, completion rates, and context limit */
export type EndpointTokenConfig = Record<string, TokenConfig>;
import type { EndpointTokenConfig, TokenConfig } from '~/types';
const openAIModels = {
'o4-mini': 200000,

View file

@@ -0,0 +1,156 @@
import { extractBaseURL, deriveBaseURL } from './url';
describe('extractBaseURL', () => {
test('should extract base URL up to /v1 for standard endpoints', () => {
const url = 'https://localhost:8080/v1/chat/completions';
expect(extractBaseURL(url)).toBe('https://localhost:8080/v1');
});
test('should include /openai in the extracted URL when present', () => {
const url = 'https://localhost:8080/v1/openai';
expect(extractBaseURL(url)).toBe('https://localhost:8080/v1/openai');
});
test('should stop at /openai and not include any additional paths', () => {
const url = 'https://fake.open.ai/v1/openai/you-are-cool';
expect(extractBaseURL(url)).toBe('https://fake.open.ai/v1/openai');
});
test('should return the correct base URL for official openai endpoints', () => {
const url = 'https://api.openai.com/v1/chat/completions';
expect(extractBaseURL(url)).toBe('https://api.openai.com/v1');
});
test('should handle URLs with reverse proxy pattern correctly', () => {
const url = 'https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/GATEWAY/openai/completions';
expect(extractBaseURL(url)).toBe(
'https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/GATEWAY/openai',
);
});
test('should return input if the URL does not match the expected pattern', () => {
const url = 'https://someotherdomain.com/notv1';
expect(extractBaseURL(url)).toBe(url);
});
test('should extract base URL up to /v1 for open.ai standard endpoint', () => {
const url = 'https://open.ai/v1/chat';
expect(extractBaseURL(url)).toBe('https://open.ai/v1');
});
test('should extract base URL up to /v1 for open.ai standard endpoint with additional path', () => {
const url = 'https://open.ai/v1/chat/completions';
expect(extractBaseURL(url)).toBe('https://open.ai/v1');
});
test('should handle URLs with ACCOUNT/GATEWAY pattern followed by /openai', () => {
const url = 'https://open.ai/v1/ACCOUNT/GATEWAY/openai/completions';
expect(extractBaseURL(url)).toBe('https://open.ai/v1/ACCOUNT/GATEWAY/openai');
});
test('should include /openai in the extracted URL with additional segments', () => {
const url = 'https://open.ai/v1/hi/openai';
expect(extractBaseURL(url)).toBe('https://open.ai/v1/hi/openai');
});
test('should handle Azure OpenAI Cloudflare endpoint correctly', () => {
const url = 'https://gateway.ai.cloudflare.com/v1/account/gateway/azure-openai/completions';
expect(extractBaseURL(url)).toBe(
'https://gateway.ai.cloudflare.com/v1/account/gateway/azure-openai',
);
});
test('should include various suffixes in the extracted URL when present', () => {
const urls = [
'https://api.example.com/v1/azure-openai/something',
'https://api.example.com/v1/replicate/anotherthing',
'https://api.example.com/v1/huggingface/yetanotherthing',
'https://api.example.com/v1/workers-ai/differentthing',
'https://api.example.com/v1/aws-bedrock/somethingelse',
];
const expected = [
/* Note: exception for azure-openai to allow credential injection */
'https://api.example.com/v1/azure-openai/something',
'https://api.example.com/v1/replicate',
'https://api.example.com/v1/huggingface',
'https://api.example.com/v1/workers-ai',
'https://api.example.com/v1/aws-bedrock',
];
urls.forEach((url, index) => {
expect(extractBaseURL(url)).toBe(expected[index]);
});
});
test('should handle URLs with suffixes not immediately after /v1', () => {
const url = 'https://api.example.com/v1/some/path/azure-openai';
expect(extractBaseURL(url)).toBe('https://api.example.com/v1/some/path/azure-openai');
});
test('should handle URLs with complex paths after the suffix', () => {
const url = 'https://api.example.com/v1/replicate/deep/path/segment';
expect(extractBaseURL(url)).toBe('https://api.example.com/v1/replicate');
});
test('should leave a regular Azure OpenAI baseURL as is', () => {
const url = 'https://instance-name.openai.azure.com/openai/deployments/deployment-name';
expect(extractBaseURL(url)).toBe(url);
});
test('should leave a regular Azure OpenAI baseURL with placeholders as is', () => {
const url = 'https://${INSTANCE_NAME}.openai.azure.com/openai/deployments/${DEPLOYMENT_NAME}';
expect(extractBaseURL(url)).toBe(url);
});
test('should leave an alternate Azure OpenAI baseURL with placeholders as is', () => {
const url = 'https://${INSTANCE_NAME}.com/resources/deployments/${DEPLOYMENT_NAME}';
expect(extractBaseURL(url)).toBe(url);
});
test('should return undefined for null or empty input', () => {
expect(extractBaseURL('')).toBe(undefined);
// @ts-expect-error testing invalid input
expect(extractBaseURL(null)).toBe(undefined);
// @ts-expect-error testing invalid input
expect(extractBaseURL(undefined)).toBe(undefined);
});
});
describe('deriveBaseURL', () => {
test('should extract protocol, hostname and port from a URL', () => {
const fullURL = 'https://api.example.com:8080/v1/models';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('https://api.example.com:8080');
});
test('should handle URLs without port', () => {
const fullURL = 'https://api.example.com/v1/models';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('https://api.example.com');
});
test('should handle HTTP protocol', () => {
const fullURL = 'http://localhost:11434/api/tags';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('http://localhost:11434');
});
test('should handle URLs with paths', () => {
const fullURL = 'https://api.ollama.com/v1/chat/completions';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('https://api.ollama.com');
});
test('should return the original URL if parsing fails', () => {
const invalidURL = 'not-a-valid-url';
const result = deriveBaseURL(invalidURL);
expect(result).toBe(invalidURL);
});
test('should handle localhost URLs', () => {
const fullURL = 'http://localhost:11434';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('http://localhost:11434');
});
});

View file

@@ -0,0 +1,102 @@
import { logger } from '@librechat/data-schemas';
import { CohereConstants } from 'librechat-data-provider';
/**
* Extracts a valid OpenAI baseURL from a given string, matching "url/v1," followed by an optional suffix.
* The suffix can be one of several predefined values (e.g., 'openai', 'azure-openai', etc.),
* accommodating different proxy patterns like Cloudflare, LiteLLM, etc.
* Returns the original URL if no valid pattern is found.
*
* Examples:
* - `https://open.ai/v1/chat` -> `https://open.ai/v1`
* - `https://open.ai/v1/chat/completions` -> `https://open.ai/v1`
* - `https://gateway.ai.cloudflare.com/v1/account/gateway/azure-openai/completions` -> `https://gateway.ai.cloudflare.com/v1/account/gateway/azure-openai`
* - `https://open.ai/v1/hi/openai` -> `https://open.ai/v1/hi/openai`
* - `https://api.example.com/v1/replicate` -> `https://api.example.com/v1/replicate`
*
* @param url - The URL to be processed.
* @returns The matched pattern or input if no match is found.
*/
export function extractBaseURL(url: string): string | null | undefined {
if (!url || typeof url !== 'string') {
return undefined;
}
if (url.startsWith(CohereConstants.API_URL)) {
return null;
}
if (!url.includes('/v1')) {
return url;
}
const v1Index = url.indexOf('/v1');
let baseUrl = url.substring(0, v1Index + 3);
const openai = 'openai';
const suffixes = [
'azure-openai',
openai,
'aws-bedrock',
'anthropic',
'cohere',
'deepseek',
'google-ai-studio',
'google-vertex-ai',
'grok',
'groq',
'mistral',
'openrouter',
'perplexity-ai',
'replicate',
'huggingface',
'workers-ai',
];
const suffixUsed = suffixes.find((suffix) => url.includes(`/${suffix}`));
if (suffixUsed === 'azure-openai') {
return url.split(/\/(chat|completion)/)[0];
}
const openaiIndex = url.indexOf(`/${openai}`, v1Index + 3);
const suffixIndex =
suffixUsed === openai ? openaiIndex : url.indexOf(`/${suffixUsed}`, v1Index + 3);
if (openaiIndex === v1Index + 3) {
const nextSlashIndex = url.indexOf('/', openaiIndex + 7);
if (nextSlashIndex === -1) {
baseUrl = url.substring(0, openaiIndex + 7);
} else {
baseUrl = url.substring(0, nextSlashIndex);
}
} else if (suffixIndex > 0) {
baseUrl = url.substring(0, suffixIndex + (suffixUsed?.length ?? 0) + 1);
}
return baseUrl;
}
/**
* Extracts the base URL (protocol + hostname + port) from the provided URL.
* Used primarily for Ollama endpoints to derive the host.
* @param fullURL - The full URL.
* @returns The base URL (protocol://hostname:port).
*/
export function deriveBaseURL(fullURL: string): string {
try {
const parsedUrl = new URL(fullURL);
const protocol = parsedUrl.protocol;
const hostname = parsedUrl.hostname;
const port = parsedUrl.port;
if (!protocol || !hostname) {
return fullURL;
}
return `${protocol}//${hostname}${port ? `:${port}` : ''}`;
} catch (error) {
logger.error('Failed to derive base URL', error);
return fullURL;
}
}
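
/**
 * Illustrative usage only (not part of this diff): the Ollama model-fetching path
 * can derive the host from a configured baseURL and query its tags endpoint.
 * The axios call and the response shape are assumptions for illustration.
 */
import axios from 'axios';

async function fetchOllamaModelNamesExample(baseURL: string): Promise<string[]> {
  const host = deriveBaseURL(baseURL); // e.g. 'http://localhost:11434/v1' -> 'http://localhost:11434'
  const { data } = await axios.get<{ models?: Array<{ name: string }> }>(`${host}/api/tags`);
  return (data.models ?? []).map((model) => model.name);
}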