// gptPlugins/initializeClient.spec.js
jest.mock('~/cache/getLogStores');
const { EModelEndpoint, ErrorTypes, validateAzureGroups } = require('librechat-data-provider');
const { getUserKey, getUserKeyValues } = require('~/server/services/UserService');
const initializeClient = require('./initialize');
const { PluginsClient } = require('~/app');

// Mock getUserKey and getUserKeyValues; keep the real checkUserKeyExpiry implementation
jest.mock('~/server/services/UserService', () => ({
  getUserKey: jest.fn(),
  getUserKeyValues: jest.fn(),
  checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry,
}));

describe('gptPlugins/initializeClient', () => {
  // Set up environment variables
  const originalEnvironment = process.env;
  const app = {
    locals: {},
  };
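  // `app` is a minimal stand-in for the Express app; `locals` stays empty here, while the
  // Azure-config test further down supplies its own `req.app` with
  // `locals[EModelEndpoint.azureOpenAI]` populated.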

  const validAzureConfigs = [
    {
      group: 'librechat-westus',
      apiKey: 'WESTUS_API_KEY',
      instanceName: 'librechat-westus',
      version: '2023-12-01-preview',
      models: {
        'gpt-4-vision-preview': {
          deploymentName: 'gpt-4-vision-preview',
          version: '2024-02-15-preview',
        },
        'gpt-3.5-turbo': {
          deploymentName: 'gpt-35-turbo',
        },
        'gpt-3.5-turbo-1106': {
          deploymentName: 'gpt-35-turbo-1106',
        },
        'gpt-4': {
          deploymentName: 'gpt-4',
        },
        'gpt-4-1106-preview': {
          deploymentName: 'gpt-4-1106-preview',
        },
      },
    },
    {
      group: 'librechat-eastus',
      apiKey: 'EASTUS_API_KEY',
      instanceName: 'librechat-eastus',
      deploymentName: 'gpt-4-turbo',
      version: '2024-02-15-preview',
      models: {
        'gpt-4-turbo': true,
      },
      baseURL: 'https://eastus.example.com',
      additionalHeaders: {
        'x-api-key': 'x-api-key-value',
      },
    },
    {
      group: 'mistral-inference',
      apiKey: 'AZURE_MISTRAL_API_KEY',
      baseURL:
        'https://Mistral-large-vnpet-serverless.region.inference.ai.azure.com/v1/chat/completions',
      serverless: true,
      models: {
        'mistral-large': true,
      },
    },
    {
      group: 'llama-70b-chat',
      apiKey: 'AZURE_LLAMA2_70B_API_KEY',
      baseURL:
        'https://Llama-2-70b-chat-qmvyb-serverless.region.inference.ai.azure.com/v1/chat/completions',
      serverless: true,
      models: {
        'llama-70b-chat': true,
      },
    },
  ];

  const { modelNames, modelGroupMap, groupMap } = validateAzureGroups(validAzureConfigs);
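  // Assumed shape (not asserted by this spec): `modelNames` is the flat list of model names
  // across all groups, `modelGroupMap` maps each model name to its group, and `groupMap` maps
  // each group name to its resolved config; the Azure-config test below only passes these
  // through via `app.locals[EModelEndpoint.azureOpenAI]`.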

  beforeEach(() => {
    jest.resetModules(); // Clears the module cache
    process.env = { ...originalEnvironment }; // Make a copy
  });

  afterAll(() => {
    process.env = originalEnvironment; // Restore original env vars
  });

  test('should initialize PluginsClient with OpenAI API key and default options', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.PLUGINS_USE_AZURE = 'false';
    process.env.DEBUG_PLUGINS = 'false';
    process.env.OPENAI_SUMMARIZE = 'false';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client, openAIApiKey } = await initializeClient({ req, res, endpointOption });

    expect(openAIApiKey).toBe('test-openai-api-key');
    expect(client).toBeInstanceOf(PluginsClient);
  });

  test('should initialize PluginsClient with Azure credentials when PLUGINS_USE_AZURE is true', async () => {
    process.env.AZURE_API_KEY = 'test-azure-api-key';
    process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value';
    process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value';
    process.env.AZURE_OPENAI_API_VERSION = 'some-value';
    process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value';
    process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value';
    process.env.PLUGINS_USE_AZURE = 'true';
    process.env.DEBUG_PLUGINS = 'false';
    process.env.OPENAI_SUMMARIZE = 'false';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'test-model' } };

    const { client, azure } = await initializeClient({ req, res, endpointOption });

    expect(azure.azureOpenAIApiKey).toBe('test-azure-api-key');
    expect(client).toBeInstanceOf(PluginsClient);
  });

  test('should use the debug option when DEBUG_PLUGINS is enabled', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.DEBUG_PLUGINS = 'true';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client } = await initializeClient({ req, res, endpointOption });

    expect(client.options.debug).toBe(true);
  });

  test('should set contextStrategy to summarize when OPENAI_SUMMARIZE is enabled', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.OPENAI_SUMMARIZE = 'true';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client } = await initializeClient({ req, res, endpointOption });

    expect(client.options.contextStrategy).toBe('summarize');
  });

  // ... additional tests for reverseProxyUrl, proxy, user-provided keys, etc.

  test('should throw an error if no API keys are provided in the environment', async () => {
    // Clear the environment variables for API keys
    delete process.env.OPENAI_API_KEY;
    delete process.env.AZURE_API_KEY;

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      `${EModelEndpoint.openAI} API key not provided.`,
    );
  });

  // Additional tests for gptPlugins/initializeClient.spec.js

  // ... (previous test setup code)

  test('should handle user-provided OpenAI keys and check expiry', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    process.env.PLUGINS_USE_AZURE = 'false';

    const futureDate = new Date(Date.now() + 10000).toISOString();
    const req = {
      body: { key: futureDate },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    getUserKeyValues.mockResolvedValue({ apiKey: 'test-user-provided-openai-api-key' });

    const { openAIApiKey } = await initializeClient({ req, res, endpointOption });

    expect(openAIApiKey).toBe('test-user-provided-openai-api-key');
  });

  test('should handle user-provided Azure keys and check expiry', async () => {
    process.env.AZURE_API_KEY = 'user_provided';
    process.env.PLUGINS_USE_AZURE = 'true';

    const futureDate = new Date(Date.now() + 10000).toISOString();
    const req = {
      body: { key: futureDate },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'test-model' } };

    getUserKeyValues.mockResolvedValue({
      apiKey: JSON.stringify({
        azureOpenAIApiKey: 'test-user-provided-azure-api-key',
        azureOpenAIApiDeploymentName: 'test-deployment',
      }),
    });

    const { azure } = await initializeClient({ req, res, endpointOption });

    expect(azure.azureOpenAIApiKey).toBe('test-user-provided-azure-api-key');
  });

  test('should throw an error if the user-provided key has expired', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    process.env.PLUGINS_USE_AZURE = 'FALSE';
    const expiresAt = new Date(Date.now() - 10000).toISOString(); // Expired
    const req = {
      body: { key: expiresAt },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /expired_user_key/,
    );
  });

  test('should throw an error if the user-provided Azure key is invalid JSON', async () => {
    process.env.AZURE_API_KEY = 'user_provided';
    process.env.PLUGINS_USE_AZURE = 'true';

    const req = {
      body: { key: new Date(Date.now() + 10000).toISOString() },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    // Simulate an invalid JSON string returned from getUserKey
    getUserKey.mockResolvedValue('invalid-json');
    getUserKeyValues.mockImplementation(() => {
      let userValues = getUserKey();
      try {
        userValues = JSON.parse(userValues);
      } catch (e) {
        throw new Error(
          JSON.stringify({
            type: ErrorTypes.INVALID_USER_KEY,
          }),
        );
      }
      return userValues;
    });

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /invalid_user_key/,
    );
  });

  test('should correctly handle the presence of a reverse proxy', async () => {
    process.env.OPENAI_REVERSE_PROXY = 'http://reverse.proxy';
    process.env.PROXY = 'http://proxy';
    process.env.OPENAI_API_KEY = 'test-openai-api-key';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client } = await initializeClient({ req, res, endpointOption });

    expect(client.options.reverseProxyUrl).toBe('http://reverse.proxy');
    expect(client.options.proxy).toBe('http://proxy');
  });

  test('should throw an error when user-provided values are not valid JSON', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    const req = {
      body: { key: new Date(Date.now() + 10000).toISOString(), endpoint: 'openAI' },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = {};

    // Mock getUserKey to return a non-JSON string
    getUserKey.mockResolvedValue('not-a-json');
    getUserKeyValues.mockImplementation(() => {
      let userValues = getUserKey();
      try {
        userValues = JSON.parse(userValues);
      } catch (e) {
        throw new Error(
          JSON.stringify({
            type: ErrorTypes.INVALID_USER_KEY,
          }),
        );
      }
      return userValues;
    });

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /invalid_user_key/,
    );
  });

  test('should initialize client correctly for Azure OpenAI with valid configuration', async () => {
    const req = {
      body: {
        key: null,
        endpoint: EModelEndpoint.gptPlugins,
        model: modelNames[0],
      },
      user: { id: '123' },
      app: {
        locals: {
          [EModelEndpoint.azureOpenAI]: {
            plugins: true,
            modelNames,
            modelGroupMap,
            groupMap,
          },
        },
      },
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });
    expect(client.client.options.azure).toBeDefined();
  });

  test('should initialize client with default options when certain env vars are not set', async () => {
    delete process.env.OPENAI_SUMMARIZE;
    process.env.OPENAI_API_KEY = 'some-api-key';

    const req = {
      body: { key: null, endpoint: EModelEndpoint.gptPlugins },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });
    expect(client.client.options.contextStrategy).toBe(null);
  });

  test('should correctly use user-provided apiKey and baseURL when provided', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    process.env.OPENAI_REVERSE_PROXY = 'user_provided';
    const req = {
      body: {
        key: new Date(Date.now() + 10000).toISOString(),
        endpoint: 'openAI',
      },
      user: {
        id: '123',
      },
      app,
    };
    const res = {};
    const endpointOption = {};

    getUserKeyValues.mockResolvedValue({
      apiKey: 'test',
      baseURL: 'https://user-provided-url.com',
    });

    const result = await initializeClient({ req, res, endpointOption });

    expect(result.openAIApiKey).toBe('test');
    expect(result.client.options.reverseProxyUrl).toBe('https://user-provided-url.com');
  });
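
  // A sketch of one further case, assuming the Azure path also runs the real
  // checkUserKeyExpiry (required above via requireActual), so an expired user-provided
  // Azure key should be rejected the same way as the expired OpenAI key above.
  test('should throw an error if the user-provided Azure key has expired', async () => {
    process.env.AZURE_API_KEY = 'user_provided';
    process.env.PLUGINS_USE_AZURE = 'true';
    const expiresAt = new Date(Date.now() - 10000).toISOString(); // Already expired
    const req = {
      body: { key: expiresAt },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'test-model' } };

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /expired_user_key/,
    );
  });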
});