LibreChat/api/app/clients/specs/FakeClient.js
Commit c0cb48256e by Danny Avila
🤖 refactor: Improve Agent Handoff Context Tracking (#10553)
* chore: update @librechat/agents dependency to version 3.0.18

* refactor: add optional metadata field to message schema and types

* chore: update @librechat/agents to v3.0.19

* refactor: update return type of sendCompletion method to include metadata

* chore: linting

* chore: update @librechat/agents dependency to v3.0.20

* refactor: implement agent labeling for conversation history in multi-agent scenarios

* refactor: improve error handling for capturing agent ID map in AgentClient

* refactor: clear agentIdMap and related properties during client disposal to prevent memory leaks

* chore: update sendCompletion method for FakeClient to return an object with completion and metadata fields
2025-11-17 16:57:51 -05:00
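
The last bullet is reflected in the file below: sendCompletion now resolves to an object rather than a bare string. A minimal sketch of how a caller might consume the new shape (the calling code and argument names are illustrative, not from the repository):

    const { completion, metadata } = await client.sendCompletion(payload, opts);
    // completion holds the response text; metadata carries optional handoff
    // context and is undefined in this mock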


const { getModelMaxTokens } = require('@librechat/api');
const BaseClient = require('../BaseClient');

class FakeClient extends BaseClient {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.sender = 'AI Assistant';
    this.setOptions(options);
  }
  /** Merges new options into the existing ones unless `replaceOptions` is set. */
  setOptions(options) {
    if (this.options && !this.options.replaceOptions) {
      // Merge nested modelOptions first, then remove them from the incoming
      // object so the shallow spread below cannot clobber the merged result
      this.options.modelOptions = {
        ...this.options.modelOptions,
        ...options.modelOptions,
      };
      delete options.modelOptions;
      this.options = {
        ...this.options,
        ...options,
      };
    } else {
      this.options = options;
    }

    if (this.options.openaiApiKey) {
      this.apiKey = this.options.openaiApiKey;
    }

    // `this.modelOptions` is resolved only once; later calls merge into
    // `this.options.modelOptions` but leave these defaults untouched
    const modelOptions = this.options.modelOptions || {};
    if (!this.modelOptions) {
      this.modelOptions = {
        ...modelOptions,
        model: modelOptions.model || 'gpt-3.5-turbo',
        temperature:
          typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
        top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
        presence_penalty:
          typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
        stop: modelOptions.stop,
      };
    }

    this.maxContextTokens =
      this.options.maxContextTokens ?? getModelMaxTokens(this.modelOptions.model) ?? 4097;
  }
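
  // Illustrative sketch (not from the repository) of the merge behavior above,
  // assuming a freshly constructed client:
  //
  //   const client = new FakeClient('key');
  //   client.setOptions({ modelOptions: { temperature: 0.2 } });
  //   // client.options.modelOptions.temperature -> 0.2
  //   // client.modelOptions.temperature stays at the initial 0.8 default,
  //   // because `this.modelOptions` was already resolved on the first call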
  /** No-op stub; tests replace this with a mock implementation. */
  buildMessages() {}

  /** Crude token count for tests: one token per character. */
  getTokenCount(str) {
    return str.length;
  }

  getTokenCountForMessage(message) {
    // Prefer the content length; fall back to the message itself (e.g. a raw
    // string), and to 0 so a missing message cannot throw
    return message?.content?.length ?? message?.length ?? 0;
  }
}
const initializeFakeClient = (apiKey, options, fakeMessages) => {
  const TestClient = new FakeClient(apiKey);
  TestClient.options = options;
  TestClient.abortController = { abort: jest.fn() };

  TestClient.loadHistory = jest
    .fn()
    .mockImplementation((conversationId, parentMessageId = null) => {
      if (!conversationId) {
        TestClient.currentMessages = [];
        return Promise.resolve([]);
      }

      const orderedMessages = TestClient.constructor.getMessagesForConversation({
        messages: fakeMessages,
        parentMessageId,
      });
      TestClient.currentMessages = orderedMessages;
      return Promise.resolve(orderedMessages);
    });
  TestClient.getSaveOptions = jest.fn().mockImplementation(() => {
    return {};
  });

  TestClient.getBuildMessagesOptions = jest.fn().mockImplementation(() => {
    return {};
  });

  // Matches the updated contract: sendCompletion resolves to an object with
  // `completion` and optional `metadata` rather than a bare string
  TestClient.sendCompletion = jest.fn(async () => {
    return {
      completion: 'Mock response text',
      metadata: undefined,
    };
  });
  TestClient.getCompletion = jest.fn().mockImplementation(async (..._args) => {
    return {
      choices: [
        {
          message: {
            content: 'Mock response text',
          },
        },
      ],
    };
  });
  TestClient.buildMessages = jest.fn(async (messages, parentMessageId) => {
    const orderedMessages = TestClient.constructor.getMessagesForConversation({
      messages,
      parentMessageId,
    });

    const formattedMessages = orderedMessages.map((message) => {
      const { role: _role, sender, text } = message;
      const role = _role ?? sender;
      const content = text ?? '';
      return {
        role: role?.toLowerCase() === 'user' ? 'user' : 'assistant',
        content,
      };
    });

    return {
      prompt: formattedMessages,
      tokenCountMap: null, // Simplified for the mock
    };
  });

  return TestClient;
};
module.exports = { FakeClient, initializeFakeClient };
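
/*
 * Illustrative usage sketch (not a test from the repository; the message
 * shapes and values below are assumed):
 *
 *   const { initializeFakeClient } = require('./FakeClient');
 *
 *   const fakeMessages = [
 *     { messageId: '1', parentMessageId: null, sender: 'User', text: 'Hi' },
 *     { messageId: '2', parentMessageId: '1', sender: 'AI Assistant', text: 'Hello!' },
 *   ];
 *   const TestClient = initializeFakeClient('fake-key', {}, fakeMessages);
 *
 *   const history = await TestClient.loadHistory('convo-1', '2');
 *   const { prompt } = await TestClient.buildMessages(history, '2');
 *   const { completion, metadata } = await TestClient.sendCompletion();
 *   // completion === 'Mock response text'; metadata === undefined
 */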