Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-21 02:40:14 +01:00)
🔧 refactor: Improve Params Handling, Remove Legacy Items, & Update Configs (#6074)
* chore: include all assets for service worker, remove unused tsconfig.node.json, eslint ignore vite config
* chore: exclude image files from service worker caching
* refactor: simplify googleSchema transformation and error handling
* fix: max output tokens cap for 3.7 models
* fix: skip index fixing in CI, development, and test environments
* ci: add maxOutputTokens handling tests for Claude models
* refactor: drop top_k and top_p parameters for claude-3.7 in AnthropicClient and add tests for new behavior
* refactor: conditionally include top_k and top_p parameters for non-claude-3.7 models
* ci: add unit tests for getLLMConfig function with various model options
* chore: remove all OPENROUTER_API_KEY legacy logic
* refactor: optimize stream chunk handling
* feat: reset model parameters button
* refactor: remove unused examples field from convoSchema and presetSchema
* chore: update librechat-data-provider version to 0.7.6993
* refactor: move excludedKeys set to data-provider for better reusability
* feat: enhance saveMessageToDatabase to handle unset fields and fetched conversation state
* feat: add 'iconURL' and 'greeting' to excludedKeys in data provider config
* fix: add optional chaining to user ID retrieval in getConvo call
Parent: e14df5956a
Commit: be280004cf
25 changed files with 561 additions and 238 deletions
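Several items in the commit message above (moving the shared excludedKeys set into the data provider, adding 'iconURL' and 'greeting' to it, and having saveMessageToDatabase handle unset fields) touch files that are not excerpted below. As a rough sketch of the idea only (the helper name and data shapes here are assumed for illustration and are not taken from this diff):

```js
// Hypothetical sketch, not LibreChat's actual implementation: a shared
// excluded-keys set can be used to strip transient/display-only fields
// before merging an update into persisted conversation state.
const excludedKeys = new Set(['iconURL', 'greeting' /* ...other transient keys */]);

function mergeConversationUpdate(existing, update) {
  const filtered = Object.fromEntries(
    Object.entries(update).filter(([key]) => !excludedKeys.has(key)),
  );
  return { ...existing, ...filtered };
}

// 'greeting' and 'iconURL' are ignored; 'model' is applied.
console.log(
  mergeConversationUpdate(
    { model: 'gpt-4o-mini' },
    { model: 'claude-3-5-sonnet', greeting: 'Hi!', iconURL: 'icon.png' },
  ),
); // -> { model: 'claude-3-5-sonnet' }
```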
```diff
@@ -43,14 +43,21 @@ function getLLMConfig(apiKey, options = {}) {
     model: mergedOptions.model,
     stream: mergedOptions.stream,
     temperature: mergedOptions.temperature,
-    topP: mergedOptions.topP,
-    topK: mergedOptions.topK,
     stopSequences: mergedOptions.stop,
     maxTokens:
       mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
     clientOptions: {},
   };
 
+  if (!/claude-3[-.]7/.test(mergedOptions.model)) {
+    if (mergedOptions.topP !== undefined) {
+      requestOptions.topP = mergedOptions.topP;
+    }
+    if (mergedOptions.topK !== undefined) {
+      requestOptions.topK = mergedOptions.topK;
+    }
+  }
+
   const supportsCacheControl =
     systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model);
   const headers = getClaudeHeaders(requestOptions.model, supportsCacheControl);
```
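To make the branch above explicit, here is a minimal standalone check of the version regex against the model names used in the new tests below (a sketch for illustration, not part of the diff):

```js
// The same pattern used in getLLMConfig: matches both "claude-3-7" and "claude-3.7".
const isClaude37 = (model) => /claude-3[-.]7/.test(model);

console.log(isClaude37('claude-3-7-sonnet')); // true  -> topK/topP are omitted
console.log(isClaude37('claude-3.7-sonnet')); // true  -> topK/topP are omitted
console.log(isClaude37('claude-3-5-sonnet')); // false -> topK/topP are passed through
console.log(isClaude37('claude-3-opus'));     // false -> topK/topP are passed through
```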
api/server/services/Endpoints/anthropic/llm.spec.js (new file, 112 additions)
```js
const { anthropicSettings } = require('librechat-data-provider');
const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');

jest.mock('https-proxy-agent', () => ({
  HttpsProxyAgent: jest.fn().mockImplementation((proxy) => ({ proxy })),
}));

describe('getLLMConfig', () => {
  it('should create a basic configuration with default values', () => {
    const result = getLLMConfig('test-api-key', { modelOptions: {} });

    expect(result.llmConfig).toHaveProperty('apiKey', 'test-api-key');
    expect(result.llmConfig).toHaveProperty('model', anthropicSettings.model.default);
    expect(result.llmConfig).toHaveProperty('stream', true);
    expect(result.llmConfig).toHaveProperty('maxTokens');
  });

  it('should include proxy settings when provided', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {},
      proxy: 'http://proxy:8080',
    });

    expect(result.llmConfig.clientOptions).toHaveProperty('httpAgent');
    expect(result.llmConfig.clientOptions.httpAgent).toHaveProperty('proxy', 'http://proxy:8080');
  });

  it('should include reverse proxy URL when provided', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {},
      reverseProxyUrl: 'http://reverse-proxy',
    });

    expect(result.llmConfig.clientOptions).toHaveProperty('baseURL', 'http://reverse-proxy');
  });

  it('should include topK and topP for non-Claude-3.7 models', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-opus',
        topK: 10,
        topP: 0.9,
      },
    });

    expect(result.llmConfig).toHaveProperty('topK', 10);
    expect(result.llmConfig).toHaveProperty('topP', 0.9);
  });

  it('should include topK and topP for Claude-3.5 models', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-5-sonnet',
        topK: 10,
        topP: 0.9,
      },
    });

    expect(result.llmConfig).toHaveProperty('topK', 10);
    expect(result.llmConfig).toHaveProperty('topP', 0.9);
  });

  it('should NOT include topK and topP for Claude-3-7 models (hyphen notation)', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-7-sonnet',
        topK: 10,
        topP: 0.9,
      },
    });

    expect(result.llmConfig).not.toHaveProperty('topK');
    expect(result.llmConfig).not.toHaveProperty('topP');
  });

  it('should NOT include topK and topP for Claude-3.7 models (decimal notation)', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3.7-sonnet',
        topK: 10,
        topP: 0.9,
      },
    });

    expect(result.llmConfig).not.toHaveProperty('topK');
    expect(result.llmConfig).not.toHaveProperty('topP');
  });

  it('should handle custom maxOutputTokens', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-opus',
        maxOutputTokens: 2048,
      },
    });

    expect(result.llmConfig).toHaveProperty('maxTokens', 2048);
  });

  it('should handle promptCache setting', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-5-sonnet',
        promptCache: true,
      },
    });

    // We're not checking specific header values since that depends on the actual helper function
    // Just verifying that the promptCache setting is processed
    expect(result.llmConfig).toBeDefined();
  });
});
```
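As a usage note, the spec drives getLLMConfig through Jest, but the same behavior can be checked by hand. The snippet below is a hypothetical quick check only; the `~` import is a project-specific path alias resolved by the repo's Jest/module-alias setup, so outside of it a relative path would be needed:

```js
const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');

const { llmConfig } = getLLMConfig('test-api-key', {
  modelOptions: { model: 'claude-3-7-sonnet', topK: 10, topP: 0.9 },
});

// Sampling params are dropped entirely for claude-3.7 models,
// so both reads come back undefined.
console.log(llmConfig.topK, llmConfig.topP);

// With no maxOutputTokens provided, maxTokens falls back to
// anthropicSettings.maxOutputTokens.reset(model).
console.log(llmConfig.maxTokens);
```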
```diff
@@ -129,9 +129,6 @@ const fetchOpenAIModels = async (opts, _models = []) => {
     // .split('/deployments')[0]
     // .concat(`/models?api-version=${azure.azureOpenAIApiVersion}`);
     // apiKey = azureOpenAIApiKey;
-  } else if (process.env.OPENROUTER_API_KEY) {
-    reverseProxyUrl = 'https://openrouter.ai/api/v1';
-    apiKey = process.env.OPENROUTER_API_KEY;
   }
 
   if (reverseProxyUrl) {
```
```diff
@@ -218,7 +215,7 @@ const getOpenAIModels = async (opts) => {
     return models;
   }
 
-  if (userProvidedOpenAI && !process.env.OPENROUTER_API_KEY) {
+  if (userProvidedOpenAI) {
     return models;
   }
 
```
```diff
@@ -161,22 +161,6 @@ describe('getOpenAIModels', () => {
     expect(models).toEqual(expect.arrayContaining(['openai-model', 'openai-model-2']));
   });
 
-  it('attempts to use OPENROUTER_API_KEY if set', async () => {
-    process.env.OPENROUTER_API_KEY = 'test-router-key';
-    const expectedModels = ['model-router-1', 'model-router-2'];
-
-    axios.get.mockResolvedValue({
-      data: {
-        data: expectedModels.map((id) => ({ id })),
-      },
-    });
-
-    const models = await getOpenAIModels({ user: 'user456' });
-
-    expect(models).toEqual(expect.arrayContaining(expectedModels));
-    expect(axios.get).toHaveBeenCalled();
-  });
-
   it('utilizes proxy configuration when PROXY is set', async () => {
     axios.get.mockResolvedValue({
       data: {
```