import axios from 'axios';
import { EModelEndpoint, defaultModels } from 'librechat-data-provider';
import {
  fetchModels,
  splitAndTrim,
  getOpenAIModels,
  getGoogleModels,
  getBedrockModels,
  getAnthropicModels,
} from './models';

jest.mock('axios');

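// Module mocks: stub the cache, spy on/stub selected '~/utils' helpers
// (processModelData, logAxiosError, resolveHeaders), and silence the
// '@librechat/data-schemas' logger so these tests run without real I/O.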
jest.mock('~/cache', () => ({
  standardCache: jest.fn().mockImplementation(() => ({
    get: jest.fn().mockResolvedValue(undefined),
    set: jest.fn().mockResolvedValue(true),
  })),
}));

jest.mock('~/utils', () => {
  const originalUtils = jest.requireActual('~/utils');
  return {
    ...originalUtils,
    processModelData: jest.fn((...args) => originalUtils.processModelData(...args)),
    logAxiosError: jest.fn(),
    resolveHeaders: jest.fn((options) => options?.headers || {}),
  };
});

jest.mock('@librechat/data-schemas', () => ({
  ...jest.requireActual('@librechat/data-schemas'),
  logger: {
    error: jest.fn(),
    warn: jest.fn(),
    debug: jest.fn(),
  },
}));

const mockedAxios = axios as jest.Mocked<typeof axios>;
const { logAxiosError, resolveHeaders } = jest.requireMock('~/utils');

mockedAxios.get.mockResolvedValue({
  data: {
    data: [{ id: 'model-1' }, { id: 'model-2' }],
  },
});

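// fetchModels: happy-path fetching from an OpenAI-compatible `/models` endpoint,
// the optional `?user=` query parameter, and custom/null/undefined header handling.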
describe('fetchModels', () => {
  it('fetches models successfully from the API', async () => {
    const models = await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      name: 'TestAPI',
    });

    expect(models).toEqual(['model-1', 'model-2']);
    expect(mockedAxios.get).toHaveBeenCalledWith(
      expect.stringContaining('https://api.test.com/models'),
      expect.any(Object),
    );
  });

  it('adds the user ID to the models query when option and ID are passed', async () => {
    const models = await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      userIdQuery: true,
      name: 'TestAPI',
    });

    expect(models).toEqual(['model-1', 'model-2']);
    expect(mockedAxios.get).toHaveBeenCalledWith(
      expect.stringContaining('https://api.test.com/models?user=user123'),
      expect.any(Object),
    );
  });

  it('should pass custom headers to the API request', async () => {
    const customHeaders = {
      'X-Custom-Header': 'custom-value',
      'X-API-Version': 'v2',
    };

    await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      name: 'TestAPI',
      headers: customHeaders,
    });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      expect.stringContaining('https://api.test.com/models'),
      expect.objectContaining({
        headers: expect.objectContaining({
          'X-Custom-Header': 'custom-value',
          'X-API-Version': 'v2',
          Authorization: 'Bearer testApiKey',
        }),
      }),
    );
  });

  it('should handle null headers gracefully', async () => {
    await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      name: 'TestAPI',
      headers: null,
    });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      expect.stringContaining('https://api.test.com/models'),
      expect.objectContaining({
        headers: expect.objectContaining({
          Authorization: 'Bearer testApiKey',
        }),
      }),
    );
  });

  it('should handle undefined headers gracefully', async () => {
    await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      name: 'TestAPI',
      headers: undefined,
    });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      expect.stringContaining('https://api.test.com/models'),
      expect.objectContaining({
        headers: expect.objectContaining({
          Authorization: 'Bearer testApiKey',
        }),
      }),
    );
  });

  afterEach(() => {
    jest.clearAllMocks();
  });
});

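// createTokenConfig: the fetched payload (with pricing and context_length)
// should be forwarded to processModelData to build the token configuration.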
describe('fetchModels with createTokenConfig true', () => {
  const data = {
    data: [
      {
        id: 'model-1',
        pricing: {
          prompt: '0.002',
          completion: '0.001',
        },
        context_length: 1024,
      },
      {
        id: 'model-2',
        pricing: {
          prompt: '0.003',
          completion: '0.0015',
        },
        context_length: 2048,
      },
    ],
  };

  beforeEach(() => {
    mockedAxios.get.mockResolvedValue({ data });
  });

  it('creates and stores token configuration if createTokenConfig is true', async () => {
    await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      createTokenConfig: true,
    });

    const { processModelData } = jest.requireMock('~/utils');
    expect(processModelData).toHaveBeenCalled();
    expect(processModelData).toHaveBeenCalledWith(data);
  });
});

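// getOpenAIModels: the axios mock rejects here, so these cases cover the
// fallback to default models and to the AZURE_OPENAI_MODELS / OPENAI_MODELS
// environment variables, plus proxy agent usage when PROXY is set.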
describe('getOpenAIModels', () => {
  let originalEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    originalEnv = { ...process.env };
    mockedAxios.get.mockRejectedValue(new Error('Network error'));
  });

  afterEach(() => {
    process.env = originalEnv;
    mockedAxios.get.mockReset();
  });

  it('returns default models when no environment configurations are provided (and fetch fails)', async () => {
    const models = await getOpenAIModels({ user: 'user456' });
    expect(models).toContain('gpt-4');
  });

  it('returns `AZURE_OPENAI_MODELS` with `azure` flag (and fetch fails)', async () => {
    process.env.AZURE_OPENAI_MODELS = 'azure-model,azure-model-2';
    const models = await getOpenAIModels({ azure: true });
    expect(models).toEqual(expect.arrayContaining(['azure-model', 'azure-model-2']));
  });

  it('returns `OPENAI_MODELS` with no flags (and fetch fails)', async () => {
    process.env.OPENAI_MODELS = 'openai-model,openai-model-2';
    const models = await getOpenAIModels({});
    expect(models).toEqual(expect.arrayContaining(['openai-model', 'openai-model-2']));
  });

  it('utilizes proxy configuration when PROXY is set', async () => {
    mockedAxios.get.mockResolvedValue({
      data: {
        data: [],
      },
    });
    process.env.PROXY = 'http://localhost:8888';
    process.env.OPENAI_API_KEY = 'mockedApiKey';
    await getOpenAIModels({ user: 'user456' });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      expect.any(String),
      expect.objectContaining({
        httpsAgent: expect.anything(),
      }),
    );
  });
});

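// Sorting behavior: "-instruct" models should always be pushed to the end of the returned list.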
describe('getOpenAIModels sorting behavior', () => {
  let originalEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    originalEnv = { ...process.env };
    process.env.OPENAI_API_KEY = 'mockedApiKey';
    mockedAxios.get.mockResolvedValue({
      data: {
        data: [
          { id: 'gpt-3.5-turbo-instruct-0914' },
          { id: 'gpt-3.5-turbo-instruct' },
          { id: 'gpt-3.5-turbo' },
          { id: 'gpt-4-0314' },
          { id: 'gpt-4-turbo-preview' },
        ],
      },
    });
  });

  afterEach(() => {
    process.env = originalEnv;
    jest.clearAllMocks();
  });

  it('ensures instruct models are listed last', async () => {
    const models = await getOpenAIModels({ user: 'user456' });

    expect(models[models.length - 1]).toMatch(/instruct/);

    const instructIndexes = models
      .map((model, index) => (model.includes('instruct') ? index : -1))
      .filter((index) => index !== -1);
    const nonInstructIndexes = models
      .map((model, index) => (!model.includes('instruct') ? index : -1))
      .filter((index) => index !== -1);

    expect(Math.max(...nonInstructIndexes)).toBeLessThan(Math.min(...instructIndexes));

    const expectedOrder = [
      'gpt-3.5-turbo',
      'gpt-4-0314',
      'gpt-4-turbo-preview',
      'gpt-3.5-turbo-instruct-0914',
      'gpt-3.5-turbo-instruct',
    ];
    expect(models).toEqual(expectedOrder);
  });
});

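// Ollama-specific logic: endpoints named "ollama..." are fetched from `/api/tags`
// with resolved headers, falling back to the OpenAI-compatible `/models` route on failure.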
describe('fetchModels with Ollama specific logic', () => {
  const mockOllamaData = {
    data: {
      models: [{ name: 'Ollama-Base' }, { name: 'Ollama-Advanced' }],
    },
  };

  beforeEach(() => {
    mockedAxios.get.mockResolvedValue(mockOllamaData);
  });

  afterEach(() => {
    jest.clearAllMocks();
  });

  it('should fetch Ollama models when name starts with "ollama"', async () => {
    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      baseURL: 'https://api.ollama.test.com',
      name: 'OllamaAPI',
    });

    expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']);
    expect(mockedAxios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
      headers: {},
      timeout: 5000,
    });
  });

  it('should pass headers and user object to Ollama fetchModels', async () => {
    const customHeaders = {
      'Content-Type': 'application/json',
      Authorization: 'Bearer custom-token',
    };
    const userObject = {
      id: 'user789',
      email: 'test@example.com',
    };

    (resolveHeaders as jest.Mock).mockReturnValueOnce(customHeaders);

    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      baseURL: 'https://api.ollama.test.com',
      name: 'ollama',
      headers: customHeaders,
      userObject,
    });

    expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']);
    expect(resolveHeaders).toHaveBeenCalledWith({
      headers: customHeaders,
      user: userObject,
    });
    expect(mockedAxios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
      headers: customHeaders,
      timeout: 5000,
    });
  });

  it('should handle errors gracefully when fetching Ollama models fails and fall back to OpenAI-compatible fetch', async () => {
    mockedAxios.get.mockRejectedValueOnce(new Error('Ollama API error'));
    mockedAxios.get.mockResolvedValueOnce({
      data: {
        data: [{ id: 'fallback-model-1' }, { id: 'fallback-model-2' }],
      },
    });

    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      baseURL: 'https://api.ollama.test.com',
      name: 'OllamaAPI',
    });

    expect(models).toEqual(['fallback-model-1', 'fallback-model-2']);
    expect(logAxiosError).toHaveBeenCalledWith({
      message:
        'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.',
      error: expect.any(Error),
    });
    expect(mockedAxios.get).toHaveBeenCalledTimes(2);
  });

  it('should return an empty array if no baseURL is provided', async () => {
    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      name: 'OllamaAPI',
    });
    expect(models).toEqual([]);
  });

  it('should not fetch Ollama models if the name does not start with "ollama"', async () => {
    mockedAxios.get.mockResolvedValue({
      data: {
        data: [{ id: 'model-1' }, { id: 'model-2' }],
      },
    });

    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      name: 'TestAPI',
    });

    expect(models).toEqual(['model-1', 'model-2']);
    expect(mockedAxios.get).toHaveBeenCalledWith('https://api.test.com/models', expect.any(Object));
  });
});

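// URL construction: trailing slashes on baseURL must be stripped before
// appending `/models` and any query parameters.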
describe('fetchModels URL construction with trailing slashes', () => {
  beforeEach(() => {
    mockedAxios.get.mockResolvedValue({
      data: {
        data: [{ id: 'model-1' }, { id: 'model-2' }],
      },
    });
  });

  afterEach(() => {
    jest.clearAllMocks();
  });

  it('should not create double slashes when baseURL has a trailing slash', async () => {
    await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com/v1/',
      name: 'TestAPI',
    });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      'https://api.test.com/v1/models',
      expect.any(Object),
    );
  });

  it('should handle baseURL without trailing slash normally', async () => {
    await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com/v1',
      name: 'TestAPI',
    });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      'https://api.test.com/v1/models',
      expect.any(Object),
    );
  });

  it('should handle baseURL with multiple trailing slashes', async () => {
    await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com/v1///',
      name: 'TestAPI',
    });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      'https://api.test.com/v1/models',
      expect.any(Object),
    );
  });

  it('should correctly append query params after stripping trailing slashes', async () => {
    await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com/v1/',
      name: 'TestAPI',
      userIdQuery: true,
    });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      'https://api.test.com/v1/models?user=user123',
      expect.any(Object),
    );
  });
});

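// splitAndTrim: comma-separated env strings are split, trimmed, and stripped of
// empty entries; null/undefined input yields an empty array.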
describe('splitAndTrim', () => {
  it('should split a string by commas and trim each value', () => {
    const input = ' model1, model2 , model3,model4 ';
    const expected = ['model1', 'model2', 'model3', 'model4'];
    expect(splitAndTrim(input)).toEqual(expected);
  });

  it('should return an empty array for empty input', () => {
    expect(splitAndTrim('')).toEqual([]);
  });

  it('should return an empty array for null input', () => {
    expect(splitAndTrim(null)).toEqual([]);
  });

  it('should return an empty array for undefined input', () => {
    expect(splitAndTrim(undefined)).toEqual([]);
  });

  it('should filter out empty values after trimming', () => {
    const input = 'model1,, ,model2,';
    const expected = ['model1', 'model2'];
    expect(splitAndTrim(input)).toEqual(expected);
  });
});

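// getAnthropicModels: defaults vs. the ANTHROPIC_MODELS override, and the
// Anthropic-specific request headers (x-api-key, anthropic-version) used by fetchModels.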
describe('getAnthropicModels', () => {
  let originalEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    originalEnv = { ...process.env };
  });

  afterEach(() => {
    process.env = originalEnv;
    jest.clearAllMocks();
  });

  it('returns default models when ANTHROPIC_MODELS is not set', async () => {
    delete process.env.ANTHROPIC_MODELS;
    const models = await getAnthropicModels();
    expect(models).toEqual(defaultModels[EModelEndpoint.anthropic]);
  });

  it('returns models from ANTHROPIC_MODELS when set', async () => {
    process.env.ANTHROPIC_MODELS = 'claude-1, claude-2 ';
    const models = await getAnthropicModels();
    expect(models).toEqual(['claude-1', 'claude-2']);
  });

  it('should use Anthropic-specific headers when fetching models', async () => {
    delete process.env.ANTHROPIC_MODELS;
    process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';

    mockedAxios.get.mockResolvedValue({
      data: {
        data: [{ id: 'claude-3' }, { id: 'claude-4' }],
      },
    });

    await fetchModels({
      user: 'user123',
      apiKey: 'test-anthropic-key',
      baseURL: 'https://api.anthropic.com/v1',
      name: EModelEndpoint.anthropic,
    });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      expect.any(String),
      expect.objectContaining({
        headers: {
          'x-api-key': 'test-anthropic-key',
          'anthropic-version': expect.any(String),
        },
      }),
    );
  });

  it('should pass custom headers for Anthropic endpoint', async () => {
    const customHeaders = {
      'X-Custom-Header': 'custom-value',
    };

    mockedAxios.get.mockResolvedValue({
      data: {
        data: [{ id: 'claude-3' }],
      },
    });

    await fetchModels({
      user: 'user123',
      apiKey: 'test-anthropic-key',
      baseURL: 'https://api.anthropic.com/v1',
      name: EModelEndpoint.anthropic,
      headers: customHeaders,
    });

    expect(mockedAxios.get).toHaveBeenCalledWith(
      expect.any(String),
      expect.objectContaining({
        headers: {
          'x-api-key': 'test-anthropic-key',
          'anthropic-version': expect.any(String),
        },
      }),
    );
  });
});

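// getGoogleModels: defaults vs. the GOOGLE_MODELS override.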
describe('getGoogleModels', () => {
  let originalEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    originalEnv = { ...process.env };
  });

  afterEach(() => {
    process.env = originalEnv;
  });

  it('returns default models when GOOGLE_MODELS is not set', () => {
    delete process.env.GOOGLE_MODELS;
    const models = getGoogleModels();
    expect(models).toEqual(defaultModels[EModelEndpoint.google]);
  });

  it('returns models from GOOGLE_MODELS when set', () => {
    process.env.GOOGLE_MODELS = 'gemini-pro, bard ';
    const models = getGoogleModels();
    expect(models).toEqual(['gemini-pro', 'bard']);
  });
});

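// getBedrockModels: defaults vs. the BEDROCK_AWS_MODELS override.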
describe('getBedrockModels', () => {
  let originalEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    originalEnv = { ...process.env };
  });

  afterEach(() => {
    process.env = originalEnv;
  });

  it('returns default models when BEDROCK_AWS_MODELS is not set', () => {
    delete process.env.BEDROCK_AWS_MODELS;
    const models = getBedrockModels();
    expect(models).toEqual(defaultModels[EModelEndpoint.bedrock]);
  });

  it('returns models from BEDROCK_AWS_MODELS when set', () => {
    process.env.BEDROCK_AWS_MODELS = 'anthropic.claude-v2, ai21.j2-ultra ';
    const models = getBedrockModels();
    expect(models).toEqual(['anthropic.claude-v2', 'ai21.j2-ultra']);
  });
});