mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-16 16:30:15 +01:00
* refactor: only remove conversation states from localStorage on login/logout but not on refresh * chore: add debugging log for azure completion url * chore: add api-key to redact regex * fix: do not show endpoint selector if endpoint is falsy * chore: remove logger from genAzureChatCompletion * feat(ci): mock fetchEventSource * refactor(ci): mock all model methods in BaseClient.test, as well as mock the implementation for getCompletion in FakeClient * fix(OpenAIClient): consider chatCompletion if model name includes `gpt` as opposed to `gpt-` * fix(ChatGPTClient/azureOpenAI): Remove 'model' option for Azure compatibility (cannot be sent in payload body) * feat(ci): write new test suite that significantly increases test coverage for OpenAIClient and BaseClient by covering most of the real implementation of the `sendMessage` method - test for the azure edge case where model option is appended to modelOptions, ensuring removal before being sent to the azure endpoint - test for expected azure url being passed to SSE POST request - test for AZURE_OPENAI_DEFAULT_MODEL being set, but not included in the URL deployment name as expected - test getCompletion method to have correct payload fix(ci/OpenAIClient.test.js): correctly mock hanging/async methods * refactor(addTitle): allow azure to title as it aborts signal on completion
124 lines
3.4 KiB
JavaScript
124 lines
3.4 KiB
JavaScript
const BaseClient = require('../BaseClient');
|
|
const { getModelMaxTokens } = require('../../../utils');
|
|
|
|
/**
 * Minimal concrete BaseClient used as a test double.
 * Token counting is intentionally naive (string length) so test suites can
 * reason about context-window arithmetic with simple, predictable numbers.
 */
class FakeClient extends BaseClient {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.sender = 'AI Assistant';
    this.setOptions(options);
  }

  /**
   * Merges (or replaces) client options, mirroring the real clients'
   * setOptions contract, then seeds default modelOptions and derives the
   * max context size for the selected model.
   *
   * @param {Object} options - Incoming options; `modelOptions` is merged
   *   separately and removed from the argument (the parameter is mutated,
   *   matching the behavior of the real clients).
   */
  setOptions(options) {
    if (this.options && !this.options.replaceOptions) {
      // Merge nested modelOptions first so existing keys are not clobbered
      // by the shallow spread of the remaining options below.
      this.options.modelOptions = {
        ...this.options.modelOptions,
        ...options.modelOptions,
      };
      delete options.modelOptions;
      this.options = { ...this.options, ...options };
    } else {
      this.options = options;
    }

    if (this.options.openaiApiKey) {
      this.apiKey = this.options.openaiApiKey;
    }

    const modelOptions = this.options.modelOptions || {};
    if (!this.modelOptions) {
      const { model, temperature, top_p, presence_penalty, stop } = modelOptions;
      this.modelOptions = {
        ...modelOptions,
        model: model || 'gpt-3.5-turbo',
        // `typeof === 'undefined'` (not ??/||) so explicit null or 0 survives.
        temperature: typeof temperature === 'undefined' ? 0.8 : temperature,
        top_p: typeof top_p === 'undefined' ? 1 : top_p,
        presence_penalty: typeof presence_penalty === 'undefined' ? 1 : presence_penalty,
        stop,
      };
    }

    // Fall back to a 4097-token window when the model is unknown to the lookup.
    this.maxContextTokens = getModelMaxTokens(this.modelOptions.model) ?? 4097;
  }

  // Intentionally a no-op; test suites replace this with a jest mock.
  buildMessages() {}

  // Naive tokenizer stand-in: one "token" per character.
  getTokenCount(str) {
    return str.length;
  }

  // Counts the message's content length, falling back to the message itself.
  // NOTE(review): throws if `message` is null/undefined with no content —
  // acceptable for a test double, but callers must pass a message.
  getTokenCountForMessage(message) {
    return message?.content?.length || message.length;
  }
}
|
|
|
|
/**
 * Builds a FakeClient wired with jest mocks for every method that would
 * otherwise touch the network or the database, so BaseClient.sendMessage can
 * be exercised end-to-end in tests.
 *
 * @param {string} apiKey - Key forwarded to the FakeClient constructor.
 * @param {Object} options - Options assigned directly (bypasses setOptions).
 * @param {Array} fakeMessages - In-memory store backing the mocked loadHistory.
 * @returns {FakeClient} the fully mocked client instance.
 */
const initializeFakeClient = (apiKey, options, fakeMessages) => {
  const client = new FakeClient(apiKey);
  client.options = options;
  client.abortController = { abort: jest.fn() };
  client.saveMessageToDatabase = jest.fn();

  // Resolves conversation history from the in-memory fakeMessages store,
  // ordered by walking the parent-message chain.
  client.loadHistory = jest
    .fn()
    .mockImplementation((conversationId, parentMessageId = null) => {
      if (!conversationId) {
        client.currentMessages = [];
        return Promise.resolve([]);
      }

      const orderedMessages = client.constructor.getMessagesForConversation({
        messages: fakeMessages,
        parentMessageId,
      });

      client.currentMessages = orderedMessages;
      return Promise.resolve(orderedMessages);
    });

  client.getSaveOptions = jest.fn().mockImplementation(() => ({}));
  client.getBuildMessagesOptions = jest.fn().mockImplementation(() => ({}));

  client.sendCompletion = jest.fn(async () => 'Mock response text');

  // Shape mirrors an OpenAI chat-completion response body.
  // eslint-disable-next-line no-unused-vars
  client.getCompletion = jest.fn().mockImplementation(async (..._args) => ({
    choices: [{ message: { content: 'Mock response text' } }],
  }));

  // Orders messages by parent chain, then normalizes each into an
  // OpenAI-style { role, content } pair.
  client.buildMessages = jest.fn(async (messages, parentMessageId) => {
    const orderedMessages = client.constructor.getMessagesForConversation({
      messages,
      parentMessageId,
    });

    const prompt = orderedMessages.map(({ role: _role, sender, text }) => {
      const role = _role ?? sender;
      return {
        role: role?.toLowerCase() === 'user' ? 'user' : 'assistant',
        content: text ?? '',
      };
    });

    return {
      prompt,
      tokenCountMap: null, // Simplified for the mock
    };
  });

  return client;
};
|
|
|
|
// Export the fake client class and its jest-mocked factory for the test suites.
module.exports = { FakeClient, initializeFakeClient };
|