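// FakeClient is a lightweight test double for BaseClient, used by LibreChat's
// client unit specs: it stubs model-option handling and token counting so
// BaseClient behavior can be exercised without calling a real model provider.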
const { getModelMaxTokens } = require('@librechat/api');
const BaseClient = require('../BaseClient');

class FakeClient extends BaseClient {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.sender = 'AI Assistant';
    this.setOptions(options);
  }
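
  // Merges incoming options (and modelOptions) into any existing ones unless
  // `replaceOptions` is set, then resolves the API key, model defaults, and
  // the context-token limit.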
  setOptions(options) {
    if (this.options && !this.options.replaceOptions) {
      this.options.modelOptions = {
        ...this.options.modelOptions,
        ...options.modelOptions,
      };
      delete options.modelOptions;
      this.options = {
        ...this.options,
        ...options,
      };
    } else {
      this.options = options;
    }

    if (this.options.openaiApiKey) {
      this.apiKey = this.options.openaiApiKey;
    }

    const modelOptions = this.options.modelOptions || {};
    if (!this.modelOptions) {
      this.modelOptions = {
        ...modelOptions,
        model: modelOptions.model || 'gpt-3.5-turbo',
        temperature:
          typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
        top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
        presence_penalty:
          typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
        stop: modelOptions.stop,
      };
    }
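
    // Prefer an explicit override, then the model's known limit from
    // getModelMaxTokens, then a 4097-token fallback.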
    this.maxContextTokens =
      this.options.maxContextTokens ?? getModelMaxTokens(this.modelOptions.model) ?? 4097;
  }

  buildMessages() {}
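
  // Token counting is deliberately naive (one "token" per character) so the
  // specs stay fast and deterministic.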
  getTokenCount(str) {
    return str.length;
  }

  getTokenCountForMessage(message) {
    return message?.content?.length || message.length;
  }
}
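
// Builds a FakeClient whose collaborator methods are jest mocks, so specs can
// drive a full conversation turn against the provided `fakeMessages` fixture.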
const initializeFakeClient = (apiKey, options, fakeMessages) => {
  const TestClient = new FakeClient(apiKey);
  TestClient.options = options;
  TestClient.abortController = { abort: jest.fn() };
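
  // loadHistory resolves the ordered message chain for a conversation, or an
  // empty history when no conversationId is given.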
  TestClient.loadHistory = jest
    .fn()
    .mockImplementation((conversationId, parentMessageId = null) => {
      if (!conversationId) {
        TestClient.currentMessages = [];
        return Promise.resolve([]);
      }

      const orderedMessages = TestClient.constructor.getMessagesForConversation({
        messages: fakeMessages,
        parentMessageId,
      });

      TestClient.currentMessages = orderedMessages;
      return Promise.resolve(orderedMessages);
    });

  TestClient.getSaveOptions = jest.fn().mockImplementation(() => {
    return {};
  });

  TestClient.getBuildMessagesOptions = jest.fn().mockImplementation(() => {
    return {};
  });

  TestClient.sendCompletion = jest.fn(async () => {
    return 'Mock response text';
  });
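
  // getCompletion resolves a fixed OpenAI-style chat-completion payload.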
  TestClient.getCompletion = jest.fn().mockImplementation(async (..._args) => {
    return {
      choices: [
        {
          message: {
            content: 'Mock response text',
          },
        },
      ],
    };
  });
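
  // buildMessages orders the messages by their parent links and normalizes
  // each one to an OpenAI-style { role, content } entry.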
  TestClient.buildMessages = jest.fn(async (messages, parentMessageId) => {
    const orderedMessages = TestClient.constructor.getMessagesForConversation({
      messages,
      parentMessageId,
    });
    const formattedMessages = orderedMessages.map((message) => {
      const { role: _role, sender, text } = message;
      const role = _role ?? sender;
      const content = text ?? '';
      return {
        role: role?.toLowerCase() === 'user' ? 'user' : 'assistant',
        content,
      };
    });
    return {
      prompt: formattedMessages,
      tokenCountMap: null, // Simplified for the mock
    };
  });

  return TestClient;
};

module.exports = { FakeClient, initializeFakeClient };
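
/*
 * Usage sketch: a minimal example, assuming this file sits beside a Jest spec
 * as `FakeClient.js`, and assuming a hypothetical `fakeMessages` fixture array
 * shaped like the records BaseClient.getMessagesForConversation consumes
 * ({ messageId, parentMessageId, sender or role, text }):
 *
 *   const { initializeFakeClient } = require('./FakeClient');
 *
 *   const TestClient = initializeFakeClient(
 *     'test-api-key',
 *     { modelOptions: { model: 'gpt-3.5-turbo' } },
 *     fakeMessages,
 *   );
 *
 *   // 'latest-message-id' is a placeholder for a messageId from the fixture.
 *   const history = await TestClient.loadHistory('fake-convo-id', 'latest-message-id');
 *   const { prompt } = await TestClient.buildMessages(history, 'latest-message-id');
 */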