👤 feat: User Placeholder Variables for Custom Endpoint Headers (#7993)

* 🔧 refactor: move `processMCPEnv` from `librechat-data-provider` to `@librechat/api`

* 🔧 refactor: Update resolveHeaders import paths

* 🔧 refactor: Enhance resolveHeaders to support user and custom variables

- Updated resolveHeaders function to accept user and custom user variables for placeholder replacement.
- Modified header resolution in multiple client and controller files to utilize the enhanced resolveHeaders functionality.
- Added comprehensive tests for resolveHeaders to ensure correct processing of user and custom variables.
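
The enhanced signature is not reproduced in this commit message, so below is a minimal sketch of the described behavior. The placeholder names (e.g. `{{LIBRECHAT_USER_ID}}`) and the three-argument shape are assumptions drawn from the commit message, not the library's exact API:

```ts
// Hypothetical sketch: resolve {{...}} placeholders in endpoint headers
// using the authenticated user and per-user custom variables.
type User = { id: string; email?: string };

function resolveHeaders(
  headers: Record<string, string> = {},
  user?: User,
  customUserVars?: Record<string, string>,
): Record<string, string> {
  const resolved: Record<string, string> = {};
  for (const [key, value] of Object.entries(headers)) {
    let next = value;
    // Custom user variables, supplied per user at runtime (assumed shape).
    for (const [name, varValue] of Object.entries(customUserVars ?? {})) {
      next = next.replaceAll(`{{${name}}}`, varValue);
    }
    // Built-in user placeholder (name assumed from this commit message).
    if (user?.id) {
      next = next.replaceAll('{{LIBRECHAT_USER_ID}}', user.id);
    }
    resolved[key] = next;
  }
  return resolved;
}

// Usage: forward the user's identity and a per-user secret to a custom endpoint.
const headers = resolveHeaders(
  { 'X-User-ID': '{{LIBRECHAT_USER_ID}}', 'X-Api-Key': '{{MY_API_KEY}}' },
  { id: 'abc123' },
  { MY_API_KEY: 'per-user-secret' },
);
```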

* 🔧 fix: Update user ID placeholder processing in env.ts

* 🔧 fix: Remove user arguments that were passing `this.user` rather than `req.user`

- Updated multiple client and controller files to call resolveHeaders without the user parameter

* 🔧 refactor: Enhance processUserPlaceholders to be more readable / less nested

* 🔧 refactor: Update processUserPlaceholders to pass all tests in mcp.spec.ts and env.spec.ts
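
For reference, a "less nested" shape like the one described could use a flat loop with early exits; this is a hypothetical sketch (the field list and placeholder format are assumptions, not the exact implementation):

```ts
// Hypothetical flattened processUserPlaceholders: one flat loop with
// `continue` guards instead of nested conditionals.
const USER_FIELDS = ['id', 'email', 'username'] as const;

function processUserPlaceholders(value: string, user?: Record<string, unknown>): string {
  if (!user || typeof value !== 'string') {
    return value;
  }
  for (const field of USER_FIELDS) {
    const placeholder = `{{LIBRECHAT_USER_${field.toUpperCase()}}}`;
    if (!value.includes(placeholder)) {
      continue; // nothing to replace for this field
    }
    const fieldValue = user[field];
    // Replace with an empty string when the field is missing on the user.
    value = value.replaceAll(placeholder, fieldValue == null ? '' : String(fieldValue));
  }
  return value;
}
```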

* chore: remove legacy ChatGPTClient

* chore: remove LLM initialization code

* chore: initial deprecation removal of `gptPlugins`

* chore: remove cohere-ai dependency from package.json and package-lock.json

* chore: update brace-expansion to version 2.0.2 and add license information

* chore: remove PluginsClient test file

* chore: remove legacy

* ci: remove deprecated sendMessage/getCompletion/chatCompletion tests

---------

Co-authored-by: Dustin Healy <54083382+dustinhealy@users.noreply.github.com>
Danny Avila 2025-06-23 12:39:27 -04:00 committed by GitHub
parent 01e9b196bc
commit a058963a9f
30 changed files with 542 additions and 2844 deletions

View file

@@ -58,7 +58,7 @@ DEBUG_CONSOLE=false
# Endpoints #
#===================================================#
-# ENDPOINTS=openAI,assistants,azureOpenAI,google,gptPlugins,anthropic
+# ENDPOINTS=openAI,assistants,azureOpenAI,google,anthropic
PROXY=

View file

@@ -1,804 +0,0 @@
const { Keyv } = require('keyv');
const crypto = require('crypto');
const { CohereClient } = require('cohere-ai');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
const { constructAzureURL, genAzureChatCompletion } = require('@librechat/api');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
ImageDetail,
EModelEndpoint,
resolveHeaders,
CohereConstants,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { createContextHandlers } = require('./prompts');
const { createCoherePayload } = require('./llm');
const { extractBaseURL } = require('~/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
const CHATGPT_MODEL = 'gpt-3.5-turbo';
const tokenizersCache = {};
class ChatGPTClient extends BaseClient {
constructor(apiKey, options = {}, cacheOptions = {}) {
super(apiKey, options, cacheOptions);
cacheOptions.namespace = cacheOptions.namespace || 'chatgpt';
this.conversationsCache = new Keyv(cacheOptions);
this.setOptions(options);
}
setOptions(options) {
if (this.options && !this.options.replaceOptions) {
// nested options aren't spread properly, so we need to do this manually
this.options.modelOptions = {
...this.options.modelOptions,
...options.modelOptions,
};
delete options.modelOptions;
// now we can merge options
this.options = {
...this.options,
...options,
};
} else {
this.options = options;
}
if (this.options.openaiApiKey) {
this.apiKey = this.options.openaiApiKey;
}
const modelOptions = this.options.modelOptions || {};
this.modelOptions = {
...modelOptions,
// set some good defaults (check for undefined in some cases because they may be 0)
model: modelOptions.model || CHATGPT_MODEL,
temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
presence_penalty:
typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
stop: modelOptions.stop,
};
this.isChatGptModel = this.modelOptions.model.includes('gpt-');
const { isChatGptModel } = this;
this.isUnofficialChatGptModel =
this.modelOptions.model.startsWith('text-chat') ||
this.modelOptions.model.startsWith('text-davinci-002-render');
const { isUnofficialChatGptModel } = this;
// Davinci models have a max context length of 4097 tokens.
this.maxContextTokens = this.options.maxContextTokens || (isChatGptModel ? 4095 : 4097);
// I decided to reserve 1024 tokens for the response.
// The max prompt tokens is determined by the max context tokens minus the max response tokens.
// Earlier messages will be dropped until the prompt is within the limit.
this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
this.maxPromptTokens =
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
throw new Error(
`maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
this.maxPromptTokens + this.maxResponseTokens
}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
);
}
this.userLabel = this.options.userLabel || 'User';
this.chatGptLabel = this.options.chatGptLabel || 'ChatGPT';
if (isChatGptModel) {
// Use these faux tokens to help the AI understand the context since we are building the chat log ourselves.
// Trying to use "<|im_start|>" causes the AI to still generate "<" or "<|" at the end sometimes for some reason,
// without tripping the stop sequences, so I'm using "||>" instead.
this.startToken = '||>';
this.endToken = '';
this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
} else if (isUnofficialChatGptModel) {
this.startToken = '<|im_start|>';
this.endToken = '<|im_end|>';
this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true, {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
});
} else {
// Previously I was trying to use "<|endoftext|>" but there seems to be some bug with OpenAI's token counting
// system that causes only the first "<|endoftext|>" to be counted as 1 token, and the rest are not treated
// as a single token. So we're using this instead.
this.startToken = '||>';
this.endToken = '';
try {
this.gptEncoder = this.constructor.getTokenizer(this.modelOptions.model, true);
} catch {
this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true);
}
}
if (!this.modelOptions.stop) {
const stopTokens = [this.startToken];
if (this.endToken && this.endToken !== this.startToken) {
stopTokens.push(this.endToken);
}
stopTokens.push(`\n${this.userLabel}:`);
stopTokens.push('<|diff_marker|>');
// I chose not to do one for `chatGptLabel` because I've never seen it happen
this.modelOptions.stop = stopTokens;
}
if (this.options.reverseProxyUrl) {
this.completionsUrl = this.options.reverseProxyUrl;
} else if (isChatGptModel) {
this.completionsUrl = 'https://api.openai.com/v1/chat/completions';
} else {
this.completionsUrl = 'https://api.openai.com/v1/completions';
}
return this;
}
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
if (tokenizersCache[encoding]) {
return tokenizersCache[encoding];
}
let tokenizer;
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
tokenizersCache[encoding] = tokenizer;
return tokenizer;
}
/** @type {getCompletion} */
async getCompletion(input, onProgress, onTokenProgress, abortController = null) {
if (!abortController) {
abortController = new AbortController();
}
let modelOptions = { ...this.modelOptions };
if (typeof onProgress === 'function') {
modelOptions.stream = true;
}
if (this.isChatGptModel) {
modelOptions.messages = input;
} else {
modelOptions.prompt = input;
}
if (this.useOpenRouter && modelOptions.prompt) {
delete modelOptions.stop;
}
const { debug } = this.options;
let baseURL = this.completionsUrl;
if (debug) {
console.debug();
console.debug(baseURL);
console.debug(modelOptions);
console.debug();
}
const opts = {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
};
if (this.isVisionModel) {
modelOptions.max_tokens = 4000;
}
/** @type {TAzureConfig | undefined} */
const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
const isAzure = this.azure || this.options.azure;
if (
(isAzure && this.isVisionModel && azureConfig) ||
(azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
serverless,
} = mapModelToAzureConfig({
modelName: modelOptions.model,
modelGroupMap,
groupMap,
});
opts.headers = resolveHeaders(headers);
this.langchainProxy = extractBaseURL(baseURL);
this.apiKey = azureOptions.azureOpenAIApiKey;
const groupName = modelGroupMap[modelOptions.model].group;
this.options.addParams = azureConfig.groupMap[groupName].addParams;
this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
// Note: `forcePrompt` not re-assigned as only chat models are vision models
this.azure = !serverless && azureOptions;
this.azureEndpoint =
!serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
if (serverless === true) {
this.options.defaultQuery = azureOptions.azureOpenAIApiVersion
? { 'api-version': azureOptions.azureOpenAIApiVersion }
: undefined;
this.options.headers['api-key'] = this.apiKey;
}
}
if (this.options.defaultQuery) {
opts.defaultQuery = this.options.defaultQuery;
}
if (this.options.headers) {
opts.headers = { ...opts.headers, ...this.options.headers };
}
if (isAzure) {
// Azure does not accept `model` in the body, so we need to remove it.
delete modelOptions.model;
baseURL = this.langchainProxy
? constructAzureURL({
baseURL: this.langchainProxy,
azureOptions: this.azure,
})
: this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
if (this.options.forcePrompt) {
baseURL += '/completions';
} else {
baseURL += '/chat/completions';
}
opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
opts.headers = { ...opts.headers, 'api-key': this.apiKey };
} else if (this.apiKey) {
opts.headers.Authorization = `Bearer ${this.apiKey}`;
}
if (process.env.OPENAI_ORGANIZATION) {
opts.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
}
if (this.useOpenRouter) {
opts.headers['HTTP-Referer'] = 'https://librechat.ai';
opts.headers['X-Title'] = 'LibreChat';
}
/* hacky fixes for Mistral AI API:
- Re-orders system message to the top of the messages payload, as not allowed anywhere else
- If there is only one message and it's a system message, change the role to user
*/
if (baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
const { messages } = modelOptions;
const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');
if (systemMessageIndex > 0) {
const [systemMessage] = messages.splice(systemMessageIndex, 1);
messages.unshift(systemMessage);
}
modelOptions.messages = messages;
if (messages.length === 1 && messages[0].role === 'system') {
modelOptions.messages[0].role = 'user';
}
}
if (this.options.addParams && typeof this.options.addParams === 'object') {
modelOptions = {
...modelOptions,
...this.options.addParams,
};
logger.debug('[ChatGPTClient] chatCompletion: added params', {
addParams: this.options.addParams,
modelOptions,
});
}
if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
this.options.dropParams.forEach((param) => {
delete modelOptions[param];
});
logger.debug('[ChatGPTClient] chatCompletion: dropped params', {
dropParams: this.options.dropParams,
modelOptions,
});
}
if (baseURL.startsWith(CohereConstants.API_URL)) {
const payload = createCoherePayload({ modelOptions });
return await this.cohereChatCompletion({ payload, onTokenProgress });
}
if (baseURL.includes('v1') && !baseURL.includes('/completions') && !this.isChatCompletion) {
baseURL = baseURL.split('v1')[0] + 'v1/completions';
} else if (
baseURL.includes('v1') &&
!baseURL.includes('/chat/completions') &&
this.isChatCompletion
) {
baseURL = baseURL.split('v1')[0] + 'v1/chat/completions';
}
const BASE_URL = new URL(baseURL);
if (opts.defaultQuery) {
Object.entries(opts.defaultQuery).forEach(([key, value]) => {
BASE_URL.searchParams.append(key, value);
});
delete opts.defaultQuery;
}
const completionsURL = BASE_URL.toString();
opts.body = JSON.stringify(modelOptions);
if (modelOptions.stream) {
return new Promise(async (resolve, reject) => {
try {
let done = false;
await fetchEventSource(completionsURL, {
...opts,
signal: abortController.signal,
async onopen(response) {
if (response.status === 200) {
return;
}
if (debug) {
console.debug(response);
}
let error;
try {
const body = await response.text();
error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
error.status = response.status;
error.json = JSON.parse(body);
} catch {
error = error || new Error(`Failed to send message. HTTP ${response.status}`);
}
throw error;
},
onclose() {
if (debug) {
console.debug('Server closed the connection unexpectedly, returning...');
}
// workaround for private API not sending [DONE] event
if (!done) {
onProgress('[DONE]');
resolve();
}
},
onerror(err) {
if (debug) {
console.debug(err);
}
// rethrow to stop the operation
throw err;
},
onmessage(message) {
if (debug) {
console.debug(message);
}
if (!message.data || message.event === 'ping') {
return;
}
if (message.data === '[DONE]') {
onProgress('[DONE]');
resolve();
done = true;
return;
}
onProgress(JSON.parse(message.data));
},
});
} catch (err) {
reject(err);
}
});
}
const response = await fetch(completionsURL, {
...opts,
signal: abortController.signal,
});
if (response.status !== 200) {
const body = await response.text();
const error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
error.status = response.status;
try {
error.json = JSON.parse(body);
} catch {
error.body = body;
}
throw error;
}
return response.json();
}
/** @type {cohereChatCompletion} */
async cohereChatCompletion({ payload, onTokenProgress }) {
const cohere = new CohereClient({
token: this.apiKey,
environment: this.completionsUrl,
});
if (!payload.stream) {
const chatResponse = await cohere.chat(payload);
return chatResponse.text;
}
const chatStream = await cohere.chatStream(payload);
let reply = '';
for await (const message of chatStream) {
if (!message) {
continue;
}
if (message.eventType === 'text-generation' && message.text) {
onTokenProgress(message.text);
reply += message.text;
}
/*
Cohere API Chinese Unicode character replacement hotfix.
Should be un-commented when the following issue is resolved:
https://github.com/cohere-ai/cohere-typescript/issues/151
else if (message.eventType === 'stream-end' && message.response) {
reply = message.response.text;
}
*/
}
return reply;
}
async generateTitle(userMessage, botMessage) {
const instructionsPayload = {
role: 'system',
content: `Write an extremely concise subtitle for this conversation with no more than a few words. All words should be capitalized. Exclude punctuation.
||>Message:
${userMessage.message}
||>Response:
${botMessage.message}
||>Title:`,
};
const titleGenClientOptions = JSON.parse(JSON.stringify(this.options));
titleGenClientOptions.modelOptions = {
model: 'gpt-3.5-turbo',
temperature: 0,
presence_penalty: 0,
frequency_penalty: 0,
};
const titleGenClient = new ChatGPTClient(this.apiKey, titleGenClientOptions);
const result = await titleGenClient.getCompletion([instructionsPayload], null);
// remove any non-alphanumeric characters, replace multiple spaces with 1, and then trim
return result.choices[0].message.content
.replace(/[^a-zA-Z0-9' ]/g, '')
.replace(/\s+/g, ' ')
.trim();
}
async sendMessage(message, opts = {}) {
if (opts.clientOptions && typeof opts.clientOptions === 'object') {
this.setOptions(opts.clientOptions);
}
const conversationId = opts.conversationId || crypto.randomUUID();
const parentMessageId = opts.parentMessageId || crypto.randomUUID();
let conversation =
typeof opts.conversation === 'object'
? opts.conversation
: await this.conversationsCache.get(conversationId);
let isNewConversation = false;
if (!conversation) {
conversation = {
messages: [],
createdAt: Date.now(),
};
isNewConversation = true;
}
const shouldGenerateTitle = opts.shouldGenerateTitle && isNewConversation;
const userMessage = {
id: crypto.randomUUID(),
parentMessageId,
role: 'User',
message,
};
conversation.messages.push(userMessage);
// Doing it this way instead of having each message be a separate element in the array seems to be more reliable,
// especially when it comes to keeping the AI in character. It also seems to improve coherency and context retention.
const { prompt: payload, context } = await this.buildPrompt(
conversation.messages,
userMessage.id,
{
isChatGptModel: this.isChatGptModel,
promptPrefix: opts.promptPrefix,
},
);
if (this.options.keepNecessaryMessagesOnly) {
conversation.messages = context;
}
let reply = '';
let result = null;
if (typeof opts.onProgress === 'function') {
await this.getCompletion(
payload,
(progressMessage) => {
if (progressMessage === '[DONE]') {
return;
}
const token = this.isChatGptModel
? progressMessage.choices[0].delta.content
: progressMessage.choices[0].text;
// first event's delta content is always undefined
if (!token) {
return;
}
if (this.options.debug) {
console.debug(token);
}
if (token === this.endToken) {
return;
}
opts.onProgress(token);
reply += token;
},
opts.abortController || new AbortController(),
);
} else {
result = await this.getCompletion(
payload,
null,
opts.abortController || new AbortController(),
);
if (this.options.debug) {
console.debug(JSON.stringify(result));
}
if (this.isChatGptModel) {
reply = result.choices[0].message.content;
} else {
reply = result.choices[0].text.replace(this.endToken, '');
}
}
// avoids some rendering issues when using the CLI app
if (this.options.debug) {
console.debug();
}
reply = reply.trim();
const replyMessage = {
id: crypto.randomUUID(),
parentMessageId: userMessage.id,
role: 'ChatGPT',
message: reply,
};
conversation.messages.push(replyMessage);
const returnData = {
response: replyMessage.message,
conversationId,
parentMessageId: replyMessage.parentMessageId,
messageId: replyMessage.id,
details: result || {},
};
if (shouldGenerateTitle) {
conversation.title = await this.generateTitle(userMessage, replyMessage);
returnData.title = conversation.title;
}
await this.conversationsCache.set(conversationId, conversation);
if (this.options.returnConversation) {
returnData.conversation = conversation;
}
return returnData;
}
async buildPrompt(messages, { isChatGptModel = false, promptPrefix = null }) {
promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim();
// Handle attachments and create augmentedPrompt
if (this.options.attachments) {
const attachments = await this.options.attachments;
const lastMessage = messages[messages.length - 1];
if (this.message_file_map) {
this.message_file_map[lastMessage.messageId] = attachments;
} else {
this.message_file_map = {
[lastMessage.messageId]: attachments,
};
}
const files = await this.addImageURLs(lastMessage, attachments);
this.options.attachments = files;
this.contextHandlers = createContextHandlers(this.options.req, lastMessage.text);
}
if (this.message_file_map) {
this.contextHandlers = createContextHandlers(
this.options.req,
messages[messages.length - 1].text,
);
}
// Calculate image token cost and process embedded files
messages.forEach((message, i) => {
if (this.message_file_map && this.message_file_map[message.messageId]) {
const attachments = this.message_file_map[message.messageId];
for (const file of attachments) {
if (file.embedded) {
this.contextHandlers?.processFile(file);
continue;
}
messages[i].tokenCount =
(messages[i].tokenCount || 0) +
this.calculateImageTokenCost({
width: file.width,
height: file.height,
detail: this.options.imageDetail ?? ImageDetail.auto,
});
}
}
});
if (this.contextHandlers) {
this.augmentedPrompt = await this.contextHandlers.createContext();
promptPrefix = this.augmentedPrompt + promptPrefix;
}
if (promptPrefix) {
// If the prompt prefix doesn't end with the end token, add it.
if (!promptPrefix.endsWith(`${this.endToken}`)) {
promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
}
promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
}
const promptSuffix = `${this.startToken}${this.chatGptLabel}:\n`; // Prompt ChatGPT to respond.
const instructionsPayload = {
role: 'system',
content: promptPrefix,
};
const messagePayload = {
role: 'system',
content: promptSuffix,
};
let currentTokenCount;
if (isChatGptModel) {
currentTokenCount =
this.getTokenCountForMessage(instructionsPayload) +
this.getTokenCountForMessage(messagePayload);
} else {
currentTokenCount = this.getTokenCount(`${promptPrefix}${promptSuffix}`);
}
let promptBody = '';
const maxTokenCount = this.maxPromptTokens;
const context = [];
// Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
// Do this within a recursive async function so that it doesn't block the event loop for too long.
const buildPromptBody = async () => {
if (currentTokenCount < maxTokenCount && messages.length > 0) {
const message = messages.pop();
const roleLabel =
message?.isCreatedByUser || message?.role?.toLowerCase() === 'user'
? this.userLabel
: this.chatGptLabel;
const messageString = `${this.startToken}${roleLabel}:\n${
message?.text ?? message?.message
}${this.endToken}\n`;
let newPromptBody;
if (promptBody || isChatGptModel) {
newPromptBody = `${messageString}${promptBody}`;
} else {
// Always insert prompt prefix before the last user message, if not gpt-3.5-turbo.
// This makes the AI obey the prompt instructions better, which is important for custom instructions.
// After a bunch of testing, it doesn't seem to cause the AI any confusion, even if you ask it things
// like "what's the last thing I wrote?".
newPromptBody = `${promptPrefix}${messageString}${promptBody}`;
}
context.unshift(message);
const tokenCountForMessage = this.getTokenCount(messageString);
const newTokenCount = currentTokenCount + tokenCountForMessage;
if (newTokenCount > maxTokenCount) {
if (promptBody) {
// This message would put us over the token limit, so don't add it.
return false;
}
// This is the first message, so we can't add it. Just throw an error.
throw new Error(
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
);
}
promptBody = newPromptBody;
currentTokenCount = newTokenCount;
// wait for next tick to avoid blocking the event loop
await new Promise((resolve) => setImmediate(resolve));
return buildPromptBody();
}
return true;
};
await buildPromptBody();
const prompt = `${promptBody}${promptSuffix}`;
if (isChatGptModel) {
messagePayload.content = prompt;
// Add 3 tokens for Assistant Label priming after all messages have been counted.
currentTokenCount += 3;
}
// Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
this.modelOptions.max_tokens = Math.min(
this.maxContextTokens - currentTokenCount,
this.maxResponseTokens,
);
if (isChatGptModel) {
return { prompt: [instructionsPayload, messagePayload], context };
}
return { prompt, context, promptTokens: currentTokenCount };
}
getTokenCount(text) {
return this.gptEncoder.encode(text, 'all').length;
}
/**
* Algorithm adapted from "6. Counting tokens for chat API calls" of
* https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
*
* An additional 3 tokens need to be added for assistant label priming after all messages have been counted.
*
* @param {Object} message
*/
getTokenCountForMessage(message) {
// Note: gpt-3.5-turbo and gpt-4 may update over time. Use default for these as well as for unknown models
let tokensPerMessage = 3;
let tokensPerName = 1;
if (this.modelOptions.model === 'gpt-3.5-turbo-0301') {
tokensPerMessage = 4;
tokensPerName = -1;
}
let numTokens = tokensPerMessage;
for (let [key, value] of Object.entries(message)) {
numTokens += this.getTokenCount(value);
if (key === 'name') {
numTokens += tokensPerName;
}
}
return numTokens;
}
}
module.exports = ChatGPTClient;

View file

@@ -5,6 +5,7 @@ const {
  isEnabled,
  Tokenizer,
  createFetch,
+  resolveHeaders,
  constructAzureURL,
  genAzureChatCompletion,
  createStreamEventHandlers,
@@ -15,7 +16,6 @@ const {
  ContentTypes,
  parseTextParts,
  EModelEndpoint,
-  resolveHeaders,
  KnownEndpoints,
  openAISettings,
  ImageDetailCost,
@@ -37,7 +37,6 @@ const { addSpaceIfNeeded, sleep } = require('~/server/utils');
const { spendTokens } = require('~/models/spendTokens');
const { handleOpenAIErrors } = require('./tools/util');
const { createLLM, RunManager } = require('./llm');
-const ChatGPTClient = require('./ChatGPTClient');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
const { tokenSplit } = require('./document');
@@ -47,12 +46,6 @@ const { logger } = require('~/config');
class OpenAIClient extends BaseClient {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
-    this.ChatGPTClient = new ChatGPTClient();
-    this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this);
-    /** @type {getCompletion} */
-    this.getCompletion = this.ChatGPTClient.getCompletion.bind(this);
-    /** @type {cohereChatCompletion} */
-    this.cohereChatCompletion = this.ChatGPTClient.cohereChatCompletion.bind(this);
    this.contextStrategy = options.contextStrategy
      ? options.contextStrategy.toLowerCase()
      : 'discard';
@@ -379,23 +372,12 @@ class OpenAIClient extends BaseClient {
    return files;
  }

-  async buildMessages(
-    messages,
-    parentMessageId,
-    { isChatCompletion = false, promptPrefix = null },
-    opts,
-  ) {
+  async buildMessages(messages, parentMessageId, { promptPrefix = null }, opts) {
    let orderedMessages = this.constructor.getMessagesForConversation({
      messages,
      parentMessageId,
      summary: this.shouldSummarize,
    });
-    if (!isChatCompletion) {
-      return await this.buildPrompt(orderedMessages, {
-        isChatGptModel: isChatCompletion,
-        promptPrefix,
-      });
-    }
    let payload;
    let instructions;

View file

@@ -1,542 +0,0 @@
const OpenAIClient = require('./OpenAIClient');
const { CallbackManager } = require('@langchain/core/callbacks/manager');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { processFileURL } = require('~/server/services/Files/process');
const { EModelEndpoint } = require('librechat-data-provider');
const { checkBalance } = require('~/models/balanceMethods');
const { formatLangChainMessages } = require('./prompts');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { logger } = require('~/config');
class PluginsClient extends OpenAIClient {
constructor(apiKey, options = {}) {
super(apiKey, options);
this.sender = options.sender ?? 'Assistant';
this.tools = [];
this.actions = [];
this.setOptions(options);
this.openAIApiKey = this.apiKey;
this.executor = null;
}
setOptions(options) {
this.agentOptions = { ...options.agentOptions };
this.functionsAgent = this.agentOptions?.agent === 'functions';
this.agentIsGpt3 = this.agentOptions?.model?.includes('gpt-3');
super.setOptions(options);
this.isGpt3 = this.modelOptions?.model?.includes('gpt-3');
if (this.options.reverseProxyUrl) {
this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
}
}
getSaveOptions() {
return {
artifacts: this.options.artifacts,
chatGptLabel: this.options.chatGptLabel,
modelLabel: this.options.modelLabel,
promptPrefix: this.options.promptPrefix,
tools: this.options.tools,
...this.modelOptions,
agentOptions: this.agentOptions,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
spec: this.options.spec,
};
}
saveLatestAction(action) {
this.actions.push(action);
}
getFunctionModelName(input) {
if (/-(?!0314)\d{4}/.test(input)) {
return input;
} else if (input.includes('gpt-3.5-turbo')) {
return 'gpt-3.5-turbo';
} else if (input.includes('gpt-4')) {
return 'gpt-4';
} else {
return 'gpt-3.5-turbo';
}
}
getBuildMessagesOptions(opts) {
return {
isChatCompletion: true,
promptPrefix: opts.promptPrefix,
abortController: opts.abortController,
};
}
async initialize({ user, message, onAgentAction, onChainEnd, signal }) {
const modelOptions = {
modelName: this.agentOptions.model,
temperature: this.agentOptions.temperature,
};
const model = this.initializeLLM({
...modelOptions,
context: 'plugins',
initialMessageCount: this.currentMessages.length + 1,
});
logger.debug(
`[PluginsClient] Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}`,
);
// Map Messages to Langchain format
const pastMessages = formatLangChainMessages(this.currentMessages.slice(0, -1), {
userName: this.options?.name,
});
logger.debug('[PluginsClient] pastMessages: ' + pastMessages.length);
// TODO: use readOnly memory, TokenBufferMemory? (both unavailable in LangChainJS)
const memory = new BufferMemory({
llm: model,
chatHistory: new ChatMessageHistory(pastMessages),
});
const { loadedTools } = await loadTools({
user,
model,
tools: this.options.tools,
functions: this.functionsAgent,
options: {
memory,
signal: this.abortController.signal,
openAIApiKey: this.openAIApiKey,
conversationId: this.conversationId,
fileStrategy: this.options.req.app.locals.fileStrategy,
processFileURL,
message,
},
useSpecs: true,
});
if (loadedTools.length === 0) {
return;
}
this.tools = loadedTools;
logger.debug('[PluginsClient] Requested Tools', this.options.tools);
logger.debug(
'[PluginsClient] Loaded Tools',
this.tools.map((tool) => tool.name),
);
const handleAction = (action, runId, callback = null) => {
this.saveLatestAction(action);
logger.debug('[PluginsClient] Latest Agent Action ', this.actions[this.actions.length - 1]);
if (typeof callback === 'function') {
callback(action, runId);
}
};
// initialize agent
const initializer = this.functionsAgent ? initializeFunctionsAgent : initializeCustomAgent;
let customInstructions = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
customInstructions = `${customInstructions ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
this.executor = await initializer({
model,
signal,
pastMessages,
tools: this.tools,
customInstructions,
verbose: this.options.debug,
returnIntermediateSteps: true,
customName: this.options.chatGptLabel,
currentDateString: this.currentDateString,
callbackManager: CallbackManager.fromHandlers({
async handleAgentAction(action, runId) {
handleAction(action, runId, onAgentAction);
},
async handleChainEnd(action) {
if (typeof onChainEnd === 'function') {
onChainEnd(action);
}
},
}),
});
logger.debug('[PluginsClient] Loaded agent.');
}
async executorCall(message, { signal, stream, onToolStart, onToolEnd }) {
let errorMessage = '';
const maxAttempts = 1;
for (let attempts = 1; attempts <= maxAttempts; attempts++) {
const errorInput = buildErrorInput({
message,
errorMessage,
actions: this.actions,
functionsAgent: this.functionsAgent,
});
const input = attempts > 1 ? errorInput : message;
logger.debug(`[PluginsClient] Attempt ${attempts} of ${maxAttempts}`);
if (errorMessage.length > 0) {
logger.debug('[PluginsClient] Caught error, input: ' + JSON.stringify(input));
}
try {
this.result = await this.executor.call({ input, signal }, [
{
async handleToolStart(...args) {
await onToolStart(...args);
},
async handleToolEnd(...args) {
await onToolEnd(...args);
},
async handleLLMEnd(output) {
const { generations } = output;
const { text } = generations[0][0];
if (text && typeof stream === 'function') {
await stream(text);
}
},
},
]);
break; // Exit the loop if the function call is successful
} catch (err) {
logger.error('[PluginsClient] executorCall error:', err);
if (attempts === maxAttempts) {
const { run } = this.runManager.getRunByConversationId(this.conversationId);
const defaultOutput = `Encountered an error while attempting to respond: ${err.message}`;
this.result.output = run && run.error ? run.error : defaultOutput;
this.result.errorMessage = run && run.error ? run.error : err.message;
this.result.intermediateSteps = this.actions;
break;
}
}
}
}
/**
*
* @param {TMessage} responseMessage
* @param {Partial<TMessage>} saveOptions
* @param {string} user
* @returns
*/
async handleResponseMessage(responseMessage, saveOptions, user) {
const { output, errorMessage, ...result } = this.result;
logger.debug('[PluginsClient][handleResponseMessage] Output:', {
output,
errorMessage,
...result,
});
const { error } = responseMessage;
if (!error) {
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
responseMessage.completionTokens = this.getTokenCount(responseMessage.text);
}
// Record usage only when completion is skipped as it is already recorded in the agent phase.
if (!this.agentOptions.skipCompletion && !error) {
await this.recordTokenUsage(responseMessage);
}
const databasePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
delete responseMessage.tokenCount;
return { ...responseMessage, ...result, databasePromise };
}
async sendMessage(message, opts = {}) {
/** @type {Promise<TMessage>} */
let userMessagePromise;
/** @type {{ filteredTools: string[], includedTools: string[] }} */
const { filteredTools = [], includedTools = [] } = this.options.req.app.locals;
if (includedTools.length > 0) {
const tools = this.options.tools.filter((plugin) => includedTools.includes(plugin));
this.options.tools = tools;
} else {
const tools = this.options.tools.filter((plugin) => !filteredTools.includes(plugin));
this.options.tools = tools;
}
// If a message is edited, no tools can be used.
const completionMode = this.options.tools.length === 0 || opts.isEdited;
if (completionMode) {
this.setOptions(opts);
return super.sendMessage(message, opts);
}
logger.debug('[PluginsClient] sendMessage', { userMessageText: message, opts });
const {
user,
conversationId,
responseMessageId,
saveOptions,
userMessage,
onAgentAction,
onChainEnd,
onToolStart,
onToolEnd,
} = await this.handleStartMethods(message, opts);
if (opts.progressCallback) {
opts.onProgress = opts.progressCallback.call(null, {
...(opts.progressOptions ?? {}),
parentMessageId: userMessage.messageId,
messageId: responseMessageId,
});
}
this.currentMessages.push(userMessage);
let {
prompt: payload,
tokenCountMap,
promptTokens,
} = await this.buildMessages(
this.currentMessages,
userMessage.messageId,
this.getBuildMessagesOptions({
promptPrefix: null,
abortController: this.abortController,
}),
);
if (tokenCountMap) {
logger.debug('[PluginsClient] tokenCountMap', { tokenCountMap });
if (tokenCountMap[userMessage.messageId]) {
userMessage.tokenCount = tokenCountMap[userMessage.messageId];
logger.debug('[PluginsClient] userMessage.tokenCount', userMessage.tokenCount);
}
this.handleTokenCountMap(tokenCountMap);
}
this.result = {};
if (payload) {
this.currentMessages = payload;
}
if (!this.skipSaveUserMessage) {
userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
userMessagePromise,
});
}
}
const balance = this.options.req?.app?.locals?.balance;
if (balance?.enabled) {
await checkBalance({
req: this.options.req,
res: this.options.res,
txData: {
user: this.user,
tokenType: 'prompt',
amount: promptTokens,
debug: this.options.debug,
model: this.modelOptions.model,
endpoint: EModelEndpoint.openAI,
},
});
}
const responseMessage = {
endpoint: EModelEndpoint.gptPlugins,
iconURL: this.options.iconURL,
messageId: responseMessageId,
conversationId,
parentMessageId: userMessage.messageId,
isCreatedByUser: false,
model: this.modelOptions.model,
sender: this.sender,
promptTokens,
};
await this.initialize({
user,
message,
onAgentAction,
onChainEnd,
signal: this.abortController.signal,
onProgress: opts.onProgress,
});
// const stream = async (text) => {
// await this.generateTextStream.call(this, text, opts.onProgress, { delay: 1 });
// };
await this.executorCall(message, {
signal: this.abortController.signal,
// stream,
onToolStart,
onToolEnd,
});
// If message was aborted mid-generation
if (this.result?.errorMessage?.length > 0 && this.result?.errorMessage?.includes('cancel')) {
responseMessage.text = 'Cancelled.';
return await this.handleResponseMessage(responseMessage, saveOptions, user);
}
// If error occurred during generation (likely token_balance)
if (this.result?.errorMessage?.length > 0) {
responseMessage.error = true;
responseMessage.text = this.result.output;
return await this.handleResponseMessage(responseMessage, saveOptions, user);
}
if (this.agentOptions.skipCompletion && this.result.output && this.functionsAgent) {
const partialText = opts.getPartialText();
const trimmedPartial = opts.getPartialText().replaceAll(':::plugin:::\n', '');
responseMessage.text =
trimmedPartial.length === 0 ? `${partialText}${this.result.output}` : partialText;
addImages(this.result.intermediateSteps, responseMessage);
await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
return await this.handleResponseMessage(responseMessage, saveOptions, user);
}
if (this.agentOptions.skipCompletion && this.result.output) {
responseMessage.text = this.result.output;
addImages(this.result.intermediateSteps, responseMessage);
await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
return await this.handleResponseMessage(responseMessage, saveOptions, user);
}
logger.debug('[PluginsClient] Completion phase: this.result', this.result);
const promptPrefix = buildPromptPrefix({
result: this.result,
message,
functionsAgent: this.functionsAgent,
});
logger.debug('[PluginsClient]', { promptPrefix });
payload = await this.buildCompletionPrompt({
messages: this.currentMessages,
promptPrefix,
});
logger.debug('[PluginsClient] buildCompletionPrompt Payload', payload);
responseMessage.text = await this.sendCompletion(payload, opts);
return await this.handleResponseMessage(responseMessage, saveOptions, user);
}
async buildCompletionPrompt({ messages, promptPrefix: _promptPrefix }) {
logger.debug('[PluginsClient] buildCompletionPrompt messages', messages);
const orderedMessages = messages;
let promptPrefix = _promptPrefix.trim();
// If the prompt prefix doesn't end with the end token, add it.
if (!promptPrefix.endsWith(`${this.endToken}`)) {
promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
}
promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
const promptSuffix = `${this.startToken}${this.chatGptLabel ?? 'Assistant'}:\n`;
const instructionsPayload = {
role: 'system',
content: promptPrefix,
};
const messagePayload = {
role: 'system',
content: promptSuffix,
};
if (this.isGpt3) {
instructionsPayload.role = 'user';
messagePayload.role = 'user';
instructionsPayload.content += `\n${promptSuffix}`;
}
// testing if this works with browser endpoint
if (!this.isGpt3 && this.options.reverseProxyUrl) {
instructionsPayload.role = 'user';
}
let currentTokenCount =
this.getTokenCountForMessage(instructionsPayload) +
this.getTokenCountForMessage(messagePayload);
let promptBody = '';
const maxTokenCount = this.maxPromptTokens;
// Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
// Do this within a recursive async function so that it doesn't block the event loop for too long.
const buildPromptBody = async () => {
if (currentTokenCount < maxTokenCount && orderedMessages.length > 0) {
const message = orderedMessages.pop();
const isCreatedByUser = message.isCreatedByUser || message.role?.toLowerCase() === 'user';
const roleLabel = isCreatedByUser ? this.userLabel : this.chatGptLabel;
let messageString = `${this.startToken}${roleLabel}:\n${
message.text ?? message.content ?? ''
}${this.endToken}\n`;
let newPromptBody = `${messageString}${promptBody}`;
const tokenCountForMessage = this.getTokenCount(messageString);
const newTokenCount = currentTokenCount + tokenCountForMessage;
if (newTokenCount > maxTokenCount) {
if (promptBody) {
// This message would put us over the token limit, so don't add it.
return false;
}
// This is the first message, so we can't add it. Just throw an error.
throw new Error(
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
);
}
promptBody = newPromptBody;
currentTokenCount = newTokenCount;
// wait for next tick to avoid blocking the event loop
await new Promise((resolve) => setTimeout(resolve, 0));
return buildPromptBody();
}
return true;
};
await buildPromptBody();
const prompt = promptBody;
messagePayload.content = prompt;
// Add 2 tokens for metadata after all messages have been counted.
currentTokenCount += 2;
if (this.isGpt3 && messagePayload.content.length > 0) {
const context = 'Chat History:\n';
messagePayload.content = `${context}${prompt}`;
currentTokenCount += this.getTokenCount(context);
}
// Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
this.modelOptions.max_tokens = Math.min(
this.maxContextTokens - currentTokenCount,
this.maxResponseTokens,
);
if (this.isGpt3) {
messagePayload.content += promptSuffix;
return [instructionsPayload, messagePayload];
}
const result = [messagePayload, instructionsPayload];
if (this.functionsAgent && !this.isGpt3) {
result[1].content = `${result[1].content}\n${this.startToken}${this.chatGptLabel}:\nSure thing! Here is the output you requested:\n`;
}
return result.filter((message) => message.content.length > 0);
}
}
module.exports = PluginsClient;

View file

@@ -1,15 +1,11 @@
-const ChatGPTClient = require('./ChatGPTClient');
const OpenAIClient = require('./OpenAIClient');
-const PluginsClient = require('./PluginsClient');
const GoogleClient = require('./GoogleClient');
const TextStream = require('./TextStream');
const AnthropicClient = require('./AnthropicClient');
const toolUtils = require('./tools/util');

module.exports = {
-  ChatGPTClient,
  OpenAIClient,
-  PluginsClient,
  GoogleClient,
  TextStream,
  AnthropicClient,

View file

@@ -531,44 +531,6 @@ describe('OpenAIClient', () => {
    });
  });

-  describe('sendMessage/getCompletion/chatCompletion', () => {
-    afterEach(() => {
-      delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
-      delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
-    });
-
-    it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {
-      const model = 'text-davinci-003';
-      const onProgress = jest.fn().mockImplementation(() => ({}));
-      const testClient = new OpenAIClient('test-api-key', {
-        ...defaultOptions,
-        modelOptions: { model },
-      });
-      const getCompletion = jest.spyOn(testClient, 'getCompletion');
-      await testClient.sendMessage('Hi mom!', { onProgress });
-      expect(getCompletion).toHaveBeenCalled();
-      expect(getCompletion.mock.calls.length).toBe(1);
-      expect(getCompletion.mock.calls[0][0]).toBe('||>User:\nHi mom!\n||>Assistant:\n');
-      expect(fetchEventSource).toHaveBeenCalled();
-      expect(fetchEventSource.mock.calls.length).toBe(1);
-
-      // Check if the first argument (url) is correct
-      const firstCallArgs = fetchEventSource.mock.calls[0];
-      const expectedURL = 'https://api.openai.com/v1/completions';
-      expect(firstCallArgs[0]).toBe(expectedURL);
-
-      const requestBody = JSON.parse(firstCallArgs[1].body);
-      expect(requestBody).toHaveProperty('model');
-      expect(requestBody.model).toBe(model);
-    });
-  });
-
  describe('checkVisionRequest functionality', () => {
    let client;
    const attachments = [{ type: 'image/png' }];

View file

@@ -1,314 +0,0 @@
const crypto = require('crypto');
const { Constants } = require('librechat-data-provider');
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const PluginsClient = require('../PluginsClient');
jest.mock('~/db/connect');
jest.mock('~/models/Conversation', () => {
return function () {
return {
save: jest.fn(),
deleteConvos: jest.fn(),
};
};
});
const defaultAzureOptions = {
azureOpenAIApiInstanceName: 'your-instance-name',
azureOpenAIApiDeploymentName: 'your-deployment-name',
azureOpenAIApiVersion: '2020-07-01-preview',
};
describe('PluginsClient', () => {
let TestAgent;
let options = {
tools: [],
modelOptions: {
model: 'gpt-3.5-turbo',
temperature: 0,
max_tokens: 2,
},
agentOptions: {
model: 'gpt-3.5-turbo',
},
};
let parentMessageId;
let conversationId;
const fakeMessages = [];
const userMessage = 'Hello, ChatGPT!';
const apiKey = 'fake-api-key';
beforeEach(() => {
TestAgent = new PluginsClient(apiKey, options);
TestAgent.loadHistory = jest
.fn()
.mockImplementation((conversationId, parentMessageId = null) => {
if (!conversationId) {
TestAgent.currentMessages = [];
return Promise.resolve([]);
}
const orderedMessages = TestAgent.constructor.getMessagesForConversation({
messages: fakeMessages,
parentMessageId,
});
const chatMessages = orderedMessages.map((msg) =>
msg?.isCreatedByUser || msg?.role?.toLowerCase() === 'user'
? new HumanMessage(msg.text)
: new AIMessage(msg.text),
);
TestAgent.currentMessages = orderedMessages;
return Promise.resolve(chatMessages);
});
TestAgent.sendMessage = jest.fn().mockImplementation(async (message, opts = {}) => {
if (opts && typeof opts === 'object') {
TestAgent.setOptions(opts);
}
const conversationId = opts.conversationId || crypto.randomUUID();
const parentMessageId = opts.parentMessageId || Constants.NO_PARENT;
const userMessageId = opts.overrideParentMessageId || crypto.randomUUID();
this.pastMessages = await TestAgent.loadHistory(
conversationId,
TestAgent.options?.parentMessageId,
);
const userMessage = {
text: message,
sender: 'ChatGPT',
isCreatedByUser: true,
messageId: userMessageId,
parentMessageId,
conversationId,
};
const response = {
sender: 'ChatGPT',
text: 'Hello, User!',
isCreatedByUser: false,
messageId: crypto.randomUUID(),
parentMessageId: userMessage.messageId,
conversationId,
};
fakeMessages.push(userMessage);
fakeMessages.push(response);
return response;
});
});
test('initializes PluginsClient without crashing', () => {
expect(TestAgent).toBeInstanceOf(PluginsClient);
});
test('check setOptions function', () => {
expect(TestAgent.agentIsGpt3).toBe(true);
});
describe('sendMessage', () => {
test('sendMessage should return a response message', async () => {
const expectedResult = expect.objectContaining({
sender: 'ChatGPT',
text: expect.any(String),
isCreatedByUser: false,
messageId: expect.any(String),
parentMessageId: expect.any(String),
conversationId: expect.any(String),
});
const response = await TestAgent.sendMessage(userMessage);
parentMessageId = response.messageId;
conversationId = response.conversationId;
expect(response).toEqual(expectedResult);
});
test('sendMessage should work with provided conversationId and parentMessageId', async () => {
const userMessage = 'Second message in the conversation';
const opts = {
conversationId,
parentMessageId,
};
const expectedResult = expect.objectContaining({
sender: 'ChatGPT',
text: expect.any(String),
isCreatedByUser: false,
messageId: expect.any(String),
parentMessageId: expect.any(String),
conversationId: opts.conversationId,
});
const response = await TestAgent.sendMessage(userMessage, opts);
parentMessageId = response.messageId;
expect(response.conversationId).toEqual(conversationId);
expect(response).toEqual(expectedResult);
});
test('should return chat history', async () => {
const chatMessages = await TestAgent.loadHistory(conversationId, parentMessageId);
expect(TestAgent.currentMessages).toHaveLength(4);
expect(chatMessages[0].text).toEqual(userMessage);
});
});
describe('getFunctionModelName', () => {
let client;
beforeEach(() => {
client = new PluginsClient('dummy_api_key');
});
test('should return the input when it includes a dash followed by four digits', () => {
expect(client.getFunctionModelName('-1234')).toBe('-1234');
expect(client.getFunctionModelName('gpt-4-5678-preview')).toBe('gpt-4-5678-preview');
});
test('should return the input for all function-capable models (`0613` models and above)', () => {
expect(client.getFunctionModelName('gpt-4-0613')).toBe('gpt-4-0613');
expect(client.getFunctionModelName('gpt-4-32k-0613')).toBe('gpt-4-32k-0613');
expect(client.getFunctionModelName('gpt-3.5-turbo-0613')).toBe('gpt-3.5-turbo-0613');
expect(client.getFunctionModelName('gpt-3.5-turbo-16k-0613')).toBe('gpt-3.5-turbo-16k-0613');
expect(client.getFunctionModelName('gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
expect(client.getFunctionModelName('gpt-4-1106-preview')).toBe('gpt-4-1106-preview');
expect(client.getFunctionModelName('gpt-4-1106')).toBe('gpt-4-1106');
});
test('should return the corresponding model if input is non-function capable (`0314` models)', () => {
expect(client.getFunctionModelName('gpt-4-0314')).toBe('gpt-4');
expect(client.getFunctionModelName('gpt-4-32k-0314')).toBe('gpt-4');
expect(client.getFunctionModelName('gpt-3.5-turbo-0314')).toBe('gpt-3.5-turbo');
expect(client.getFunctionModelName('gpt-3.5-turbo-16k-0314')).toBe('gpt-3.5-turbo');
});
test('should return "gpt-3.5-turbo" when the input includes "gpt-3.5-turbo"', () => {
expect(client.getFunctionModelName('test gpt-3.5-turbo model')).toBe('gpt-3.5-turbo');
});
test('should return "gpt-4" when the input includes "gpt-4"', () => {
expect(client.getFunctionModelName('testing gpt-4')).toBe('gpt-4');
});
test('should return "gpt-3.5-turbo" for input that does not meet any specific condition', () => {
expect(client.getFunctionModelName('random string')).toBe('gpt-3.5-turbo');
expect(client.getFunctionModelName('')).toBe('gpt-3.5-turbo');
});
});
describe('Azure OpenAI tests specific to Plugins', () => {
// TODO: add more tests for Azure OpenAI integration with Plugins
// let client;
// beforeEach(() => {
// client = new PluginsClient('dummy_api_key');
// });
test('should not call getFunctionModelName when azure options are set', () => {
const spy = jest.spyOn(PluginsClient.prototype, 'getFunctionModelName');
const model = 'gpt-4-turbo';
// note, without the azure change in PR #1766, `getFunctionModelName` is called twice
const testClient = new PluginsClient('dummy_api_key', {
agentOptions: {
model,
agent: 'functions',
},
azure: defaultAzureOptions,
});
expect(spy).not.toHaveBeenCalled();
expect(testClient.agentOptions.model).toBe(model);
spy.mockRestore();
});
});
describe('sendMessage with filtered tools', () => {
let TestAgent;
const apiKey = 'fake-api-key';
const mockTools = [{ name: 'tool1' }, { name: 'tool2' }, { name: 'tool3' }, { name: 'tool4' }];
beforeEach(() => {
TestAgent = new PluginsClient(apiKey, {
tools: mockTools,
modelOptions: {
model: 'gpt-3.5-turbo',
temperature: 0,
max_tokens: 2,
},
agentOptions: {
model: 'gpt-3.5-turbo',
},
});
TestAgent.options.req = {
app: {
locals: {},
},
};
TestAgent.sendMessage = jest.fn().mockImplementation(async () => {
const { filteredTools = [], includedTools = [] } = TestAgent.options.req.app.locals;
if (includedTools.length > 0) {
const tools = TestAgent.options.tools.filter((plugin) =>
includedTools.includes(plugin.name),
);
TestAgent.options.tools = tools;
} else {
const tools = TestAgent.options.tools.filter(
(plugin) => !filteredTools.includes(plugin.name),
);
TestAgent.options.tools = tools;
}
return {
text: 'Mocked response',
tools: TestAgent.options.tools,
};
});
});
test('should filter out tools when filteredTools is provided', async () => {
TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(2);
expect(response.tools).toEqual(
expect.arrayContaining([
expect.objectContaining({ name: 'tool2' }),
expect.objectContaining({ name: 'tool4' }),
]),
);
});
test('should only include specified tools when includedTools is provided', async () => {
TestAgent.options.req.app.locals.includedTools = ['tool2', 'tool4'];
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(2);
expect(response.tools).toEqual(
expect.arrayContaining([
expect.objectContaining({ name: 'tool2' }),
expect.objectContaining({ name: 'tool4' }),
]),
);
});
test('should prioritize includedTools over filteredTools', async () => {
TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
TestAgent.options.req.app.locals.includedTools = ['tool1', 'tool2'];
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(2);
expect(response.tools).toEqual(
expect.arrayContaining([
expect.objectContaining({ name: 'tool1' }),
expect.objectContaining({ name: 'tool2' }),
]),
);
});
test('should not modify tools when no filters are provided', async () => {
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(4);
expect(response.tools).toEqual(expect.arrayContaining(mockTools));
});
});
});

View file

@@ -55,7 +55,6 @@
    "@waylaidwanderer/fetch-event-source": "^3.0.1",
    "axios": "^1.8.2",
    "bcryptjs": "^2.4.3",
-    "cohere-ai": "^7.9.1",
    "compression": "^1.7.4",
    "connect-redis": "^7.1.0",
    "cookie": "^0.7.2",

View file

@@ -169,9 +169,6 @@ function disposeClient(client) {
    client.isGenerativeModel = null;
  }
  // Properties specific to OpenAIClient
-  if (client.ChatGPTClient) {
-    client.ChatGPTClient = null;
-  }
  if (client.completionsUrl) {
    client.completionsUrl = null;
  }

View file

@@ -1,106 +0,0 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { resolveHeaders } = require('librechat-data-provider');
const { createLLM } = require('~/app/clients/llm');
/**
* Initializes and returns a Language Learning Model (LLM) instance.
*
* @param {Object} options - Configuration options for the LLM.
* @param {string} options.model - The model identifier.
* @param {string} options.modelName - The specific name of the model.
* @param {number} options.temperature - The temperature setting for the model.
* @param {number} options.presence_penalty - The presence penalty for the model.
* @param {number} options.frequency_penalty - The frequency penalty for the model.
* @param {number} options.max_tokens - The maximum number of tokens for the model output.
* @param {boolean} options.streaming - Whether to use streaming for the model output.
* @param {Object} options.context - The context for the conversation.
* @param {number} options.tokenBuffer - The token buffer size.
* @param {number} options.initialMessageCount - The initial message count.
* @param {string} options.conversationId - The ID of the conversation.
* @param {string} options.user - The user identifier.
* @param {string} options.langchainProxy - The langchain proxy URL.
* @param {boolean} options.useOpenRouter - Whether to use OpenRouter.
* @param {Object} options.options - Additional options.
* @param {Object} options.options.headers - Custom headers for the request.
* @param {string} options.options.proxy - Proxy URL.
* @param {Object} options.options.req - The request object.
* @param {Object} options.options.res - The response object.
* @param {boolean} options.options.debug - Whether to enable debug mode.
* @param {string} options.apiKey - The API key for authentication.
* @param {Object} options.azure - Azure-specific configuration.
* @param {Object} options.abortController - The AbortController instance.
* @returns {Object} The initialized LLM instance.
*/
function initializeLLM(options) {
const {
model,
modelName,
temperature,
presence_penalty,
frequency_penalty,
max_tokens,
streaming,
user,
langchainProxy,
useOpenRouter,
options: { headers, proxy },
apiKey,
azure,
} = options;
const modelOptions = {
modelName: modelName || model,
temperature,
presence_penalty,
frequency_penalty,
user,
};
if (max_tokens) {
modelOptions.max_tokens = max_tokens;
}
const configOptions = {};
if (langchainProxy) {
configOptions.basePath = langchainProxy;
}
if (useOpenRouter) {
configOptions.basePath = 'https://openrouter.ai/api/v1';
configOptions.baseOptions = {
headers: {
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
},
};
}
if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
configOptions.baseOptions = {
headers: resolveHeaders({
...headers,
...configOptions?.baseOptions?.headers,
}),
};
}
if (proxy) {
configOptions.httpAgent = new HttpsProxyAgent(proxy);
configOptions.httpsAgent = new HttpsProxyAgent(proxy);
}
const llm = createLLM({
modelOptions,
configOptions,
openAIApiKey: apiKey,
azure,
streaming,
});
return llm;
}
module.exports = {
initializeLLM,
};


@@ -7,7 +7,6 @@ const {
 } = require('librechat-data-provider');
 const azureAssistants = require('~/server/services/Endpoints/azureAssistants');
 const assistants = require('~/server/services/Endpoints/assistants');
-const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
 const { processFiles } = require('~/server/services/Files/process');
 const anthropic = require('~/server/services/Endpoints/anthropic');
 const bedrock = require('~/server/services/Endpoints/bedrock');
@@ -25,7 +24,6 @@ const buildFunction = {
   [EModelEndpoint.bedrock]: bedrock.buildOptions,
   [EModelEndpoint.azureOpenAI]: openAI.buildOptions,
   [EModelEndpoint.anthropic]: anthropic.buildOptions,
-  [EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
   [EModelEndpoint.assistants]: assistants.buildOptions,
   [EModelEndpoint.azureAssistants]: azureAssistants.buildOptions,
 };
@@ -60,15 +58,6 @@ async function buildEndpointOption(req, res, next) {
       return handleError(res, { text: 'Model spec mismatch' });
     }
-    if (
-      currentModelSpec.preset.endpoint !== EModelEndpoint.gptPlugins &&
-      currentModelSpec.preset.tools
-    ) {
-      return handleError(res, {
-        text: `Only the "${EModelEndpoint.gptPlugins}" endpoint can have tools defined in the preset`,
-      });
-    }
     try {
       currentModelSpec.preset.spec = spec;
       if (currentModelSpec.iconURL != null && currentModelSpec.iconURL !== '') {


@@ -1,207 +0,0 @@
const express = require('express');
const { getResponseSender } = require('librechat-data-provider');
const {
setHeaders,
moderateText,
validateModel,
handleAbortError,
validateEndpoint,
buildEndpointOption,
createAbortController,
} = require('~/server/middleware');
const { sendMessage, createOnProgress, formatSteps, formatAction } = require('~/server/utils');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
const { saveMessage, updateMessage } = require('~/models');
const { validateTools } = require('~/app');
const { logger } = require('~/config');
const router = express.Router();
router.use(moderateText);
router.post(
'/',
validateEndpoint,
validateModel,
buildEndpointOption,
setHeaders,
async (req, res) => {
let {
text,
generation,
endpointOption,
conversationId,
responseMessageId,
isContinued = false,
parentMessageId = null,
overrideParentMessageId = null,
} = req.body;
logger.debug('[/edit/gptPlugins]', {
text,
generation,
isContinued,
conversationId,
...endpointOption,
});
let userMessage;
let userMessagePromise;
let promptTokens;
const sender = getResponseSender({
...endpointOption,
model: endpointOption.modelOptions.model,
});
const userMessageId = parentMessageId;
const user = req.user.id;
const plugin = {
loading: true,
inputs: [],
latest: null,
outputs: null,
};
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
userMessage = data[key];
} else if (key === 'userMessagePromise') {
userMessagePromise = data[key];
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
promptTokens = data[key];
}
}
};
const {
onProgress: progressCallback,
sendIntermediateMessage,
getPartialText,
} = createOnProgress({
generation,
onProgress: () => {
if (plugin.loading === true) {
plugin.loading = false;
}
},
});
const onChainEnd = (data) => {
let { intermediateSteps: steps } = data;
plugin.outputs = steps && steps[0].action ? formatSteps(steps) : 'An error occurred.';
plugin.loading = false;
saveMessage(
req,
{ ...userMessage, user },
{ context: 'api/server/routes/ask/gptPlugins.js - onChainEnd' },
);
sendIntermediateMessage(res, {
plugin,
parentMessageId: userMessage.messageId,
messageId: responseMessageId,
});
// logger.debug('CHAIN END', plugin.outputs);
};
const getAbortData = () => ({
sender,
conversationId,
userMessagePromise,
messageId: responseMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: getPartialText(),
plugin: { ...plugin, loading: false },
userMessage,
promptTokens,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
try {
endpointOption.tools = await validateTools(user, endpointOption.tools);
const { client } = await initializeClient({ req, res, endpointOption });
const onAgentAction = (action, start = false) => {
const formattedAction = formatAction(action);
plugin.inputs.push(formattedAction);
plugin.latest = formattedAction.plugin;
if (!start && !client.skipSaveUserMessage) {
saveMessage(
req,
{ ...userMessage, user },
{ context: 'api/server/routes/ask/gptPlugins.js - onAgentAction' },
);
}
sendIntermediateMessage(res, {
plugin,
parentMessageId: userMessage.messageId,
messageId: responseMessageId,
});
// logger.debug('PLUGIN ACTION', formattedAction);
};
let response = await client.sendMessage(text, {
user,
generation,
isContinued,
isEdited: true,
conversationId,
parentMessageId,
responseMessageId,
overrideParentMessageId,
getReqData,
onAgentAction,
onChainEnd,
onStart,
...endpointOption,
progressCallback,
progressOptions: {
res,
plugin,
// parentMessageId: overrideParentMessageId || userMessageId,
},
abortController,
});
if (overrideParentMessageId) {
response.parentMessageId = overrideParentMessageId;
}
logger.debug('[/edit/gptPlugins] CLIENT RESPONSE', response);
const { conversation = {} } = await response.databasePromise;
delete response.databasePromise;
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
sendMessage(res, {
title: conversation.title,
final: true,
conversation,
requestMessage: userMessage,
responseMessage: response,
});
res.end();
response.plugin = { ...plugin, loading: false };
await updateMessage(
req,
{ ...response, user },
{ context: 'api/server/routes/edit/gptPlugins.js' },
);
} catch (error) {
const partialText = getPartialText();
handleAbortError(res, req, error, {
partialText,
conversationId,
sender,
messageId: responseMessageId,
parentMessageId: userMessageId ?? parentMessageId,
});
}
},
);
module.exports = router;


@@ -3,7 +3,6 @@ const openAI = require('./openAI');
 const custom = require('./custom');
 const google = require('./google');
 const anthropic = require('./anthropic');
-const gptPlugins = require('./gptPlugins');
 const { isEnabled } = require('~/server/utils');
 const { EModelEndpoint } = require('librechat-data-provider');
 const {
@@ -39,7 +38,6 @@ if (isEnabled(LIMIT_MESSAGE_USER)) {
 router.use(validateConvoAccess);
 router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], openAI);
-router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins);
 router.use(`/${EModelEndpoint.anthropic}`, anthropic);
 router.use(`/${EModelEndpoint.google}`, google);
 router.use(`/${EModelEndpoint.custom}`, custom);


@@ -1,12 +1,7 @@
 const OpenAI = require('openai');
 const { HttpsProxyAgent } = require('https-proxy-agent');
-const { constructAzureURL, isUserProvided } = require('@librechat/api');
-const {
-  ErrorTypes,
-  EModelEndpoint,
-  resolveHeaders,
-  mapModelToAzureConfig,
-} = require('librechat-data-provider');
+const { constructAzureURL, isUserProvided, resolveHeaders } = require('@librechat/api');
+const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
 const {
   getUserKeyValues,
   getUserKeyExpiry,
@@ -114,11 +109,14 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClie
     apiKey = azureOptions.azureOpenAIApiKey;
     opts.defaultQuery = { 'api-version': azureOptions.azureOpenAIApiVersion };
-    opts.defaultHeaders = resolveHeaders({
-      ...headers,
-      'api-key': apiKey,
-      'OpenAI-Beta': `assistants=${version}`,
-    });
+    opts.defaultHeaders = resolveHeaders(
+      {
+        ...headers,
+        'api-key': apiKey,
+        'OpenAI-Beta': `assistants=${version}`,
+      },
+      req.user,
+    );
     opts.model = azureOptions.azureOpenAIApiDeploymentName;
     if (initAppClient) {
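Note that resolveHeaders is now imported from @librechat/api and accepts the request user as a second argument, so user placeholders in configured headers are resolved per request. A small illustration with made-up values:

// resolveHeaders({ 'X-User-Id': '{{LIBRECHAT_USER_ID}}' }, { id: 'abc123' })
// → { 'X-User-Id': 'abc123' }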


@@ -1,41 +0,0 @@
const { removeNullishValues } = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');

const buildOptions = (endpoint, parsedBody) => {
  const {
    modelLabel,
    chatGptLabel,
    promptPrefix,
    agentOptions,
    tools = [],
    iconURL,
    greeting,
    spec,
    maxContextTokens,
    artifacts,
    ...modelOptions
  } = parsedBody;
  const endpointOption = removeNullishValues({
    endpoint,
    tools: tools
      .map((tool) => tool?.pluginKey ?? tool)
      .filter((toolName) => typeof toolName === 'string'),
    modelLabel,
    chatGptLabel,
    promptPrefix,
    agentOptions,
    iconURL,
    greeting,
    spec,
    maxContextTokens,
    modelOptions,
  });

  if (typeof artifacts === 'string') {
    endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts });
  }

  return endpointOption;
};

module.exports = buildOptions;


@@ -1,7 +0,0 @@
const buildOptions = require('./build');
const initializeClient = require('./initialize');

module.exports = {
  buildOptions,
  initializeClient,
};


@@ -1,134 +0,0 @@
const {
EModelEndpoint,
resolveHeaders,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { isEnabled, isUserProvided, getAzureCredentials } = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { PluginsClient } = require('~/app');
const initializeClient = async ({ req, res, endpointOption }) => {
const {
PROXY,
OPENAI_API_KEY,
AZURE_API_KEY,
PLUGINS_USE_AZURE,
OPENAI_REVERSE_PROXY,
AZURE_OPENAI_BASEURL,
OPENAI_SUMMARIZE,
DEBUG_PLUGINS,
} = process.env;
const { key: expiresAt, model: modelName } = req.body;
const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
let useAzure = isEnabled(PLUGINS_USE_AZURE);
let endpoint = useAzure ? EModelEndpoint.azureOpenAI : EModelEndpoint.openAI;
/** @type {false | TAzureConfig} */
const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
useAzure = useAzure || azureConfig?.plugins;
if (useAzure && endpoint !== EModelEndpoint.azureOpenAI) {
endpoint = EModelEndpoint.azureOpenAI;
}
const credentials = {
[EModelEndpoint.openAI]: OPENAI_API_KEY,
[EModelEndpoint.azureOpenAI]: AZURE_API_KEY,
};
const baseURLOptions = {
[EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
[EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL,
};
const userProvidesKey = isUserProvided(credentials[endpoint]);
const userProvidesURL = isUserProvided(baseURLOptions[endpoint]);
let userValues = null;
if (expiresAt && (userProvidesKey || userProvidesURL)) {
checkUserKeyExpiry(expiresAt, endpoint);
userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
}
let apiKey = userProvidesKey ? userValues?.apiKey : credentials[endpoint];
let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint];
const clientOptions = {
contextStrategy,
debug: isEnabled(DEBUG_PLUGINS),
reverseProxyUrl: baseURL ? baseURL : null,
proxy: PROXY ?? null,
req,
res,
...endpointOption,
};
if (useAzure && azureConfig) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
serverless,
} = mapModelToAzureConfig({
modelName,
modelGroupMap,
groupMap,
});
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
clientOptions.titleConvo = azureConfig.titleConvo;
clientOptions.titleModel = azureConfig.titleModel;
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
const azureRate = modelName.includes('gpt-4') ? 30 : 17;
clientOptions.streamRate = azureConfig.streamRate ?? azureRate;
const groupName = modelGroupMap[modelName].group;
clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
apiKey = azureOptions.azureOpenAIApiKey;
clientOptions.azure = !serverless && azureOptions;
if (serverless === true) {
clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
? { 'api-version': azureOptions.azureOpenAIApiVersion }
: undefined;
clientOptions.headers['api-key'] = apiKey;
}
} else if (useAzure || (apiKey && apiKey.includes('{"azure') && !clientOptions.azure)) {
clientOptions.azure = userProvidesKey ? JSON.parse(userValues.apiKey) : getAzureCredentials();
apiKey = clientOptions.azure.azureOpenAIApiKey;
}
/** @type {undefined | TBaseEndpoint} */
const pluginsConfig = req.app.locals[EModelEndpoint.gptPlugins];
if (!useAzure && pluginsConfig) {
clientOptions.streamRate = pluginsConfig.streamRate;
}
/** @type {undefined | TBaseEndpoint} */
const allConfig = req.app.locals.all;
if (allConfig) {
clientOptions.streamRate = allConfig.streamRate;
}
if (!apiKey) {
throw new Error(`${endpoint} API key not provided. Please provide it again.`);
}
const client = new PluginsClient(apiKey, clientOptions);
return {
client,
azure: clientOptions.azure,
openAIApiKey: apiKey,
};
};
module.exports = initializeClient;


@@ -1,410 +0,0 @@
// gptPlugins/initializeClient.spec.js
jest.mock('~/cache/getLogStores');
const { EModelEndpoint, ErrorTypes, validateAzureGroups } = require('librechat-data-provider');
const { getUserKey, getUserKeyValues } = require('~/server/services/UserService');
const initializeClient = require('./initialize');
const { PluginsClient } = require('~/app');
// Mock getUserKey since it's the only function we want to mock
jest.mock('~/server/services/UserService', () => ({
getUserKey: jest.fn(),
getUserKeyValues: jest.fn(),
checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry,
}));
describe('gptPlugins/initializeClient', () => {
// Set up environment variables
const originalEnvironment = process.env;
const app = {
locals: {},
};
const validAzureConfigs = [
{
group: 'librechat-westus',
apiKey: 'WESTUS_API_KEY',
instanceName: 'librechat-westus',
version: '2023-12-01-preview',
models: {
'gpt-4-vision-preview': {
deploymentName: 'gpt-4-vision-preview',
version: '2024-02-15-preview',
},
'gpt-3.5-turbo': {
deploymentName: 'gpt-35-turbo',
},
'gpt-3.5-turbo-1106': {
deploymentName: 'gpt-35-turbo-1106',
},
'gpt-4': {
deploymentName: 'gpt-4',
},
'gpt-4-1106-preview': {
deploymentName: 'gpt-4-1106-preview',
},
},
},
{
group: 'librechat-eastus',
apiKey: 'EASTUS_API_KEY',
instanceName: 'librechat-eastus',
deploymentName: 'gpt-4-turbo',
version: '2024-02-15-preview',
models: {
'gpt-4-turbo': true,
},
baseURL: 'https://eastus.example.com',
additionalHeaders: {
'x-api-key': 'x-api-key-value',
},
},
{
group: 'mistral-inference',
apiKey: 'AZURE_MISTRAL_API_KEY',
baseURL:
'https://Mistral-large-vnpet-serverless.region.inference.ai.azure.com/v1/chat/completions',
serverless: true,
models: {
'mistral-large': true,
},
},
{
group: 'llama-70b-chat',
apiKey: 'AZURE_LLAMA2_70B_API_KEY',
baseURL:
'https://Llama-2-70b-chat-qmvyb-serverless.region.inference.ai.azure.com/v1/chat/completions',
serverless: true,
models: {
'llama-70b-chat': true,
},
},
];
const { modelNames, modelGroupMap, groupMap } = validateAzureGroups(validAzureConfigs);
beforeEach(() => {
jest.resetModules(); // Clears the cache
process.env = { ...originalEnvironment }; // Make a copy
});
afterAll(() => {
process.env = originalEnvironment; // Restore original env vars
});
test('should initialize PluginsClient with OpenAI API key and default options', async () => {
process.env.OPENAI_API_KEY = 'test-openai-api-key';
process.env.PLUGINS_USE_AZURE = 'false';
process.env.DEBUG_PLUGINS = 'false';
process.env.OPENAI_SUMMARIZE = 'false';
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
const { client, openAIApiKey } = await initializeClient({ req, res, endpointOption });
expect(openAIApiKey).toBe('test-openai-api-key');
expect(client).toBeInstanceOf(PluginsClient);
});
test('should initialize PluginsClient with Azure credentials when PLUGINS_USE_AZURE is true', async () => {
process.env.AZURE_API_KEY = 'test-azure-api-key';
(process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
(process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
(process.env.PLUGINS_USE_AZURE = 'true');
process.env.DEBUG_PLUGINS = 'false';
process.env.OPENAI_SUMMARIZE = 'false';
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'test-model' } };
const { client, azure } = await initializeClient({ req, res, endpointOption });
expect(azure.azureOpenAIApiKey).toBe('test-azure-api-key');
expect(client).toBeInstanceOf(PluginsClient);
});
test('should use the debug option when DEBUG_PLUGINS is enabled', async () => {
process.env.OPENAI_API_KEY = 'test-openai-api-key';
process.env.DEBUG_PLUGINS = 'true';
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
const { client } = await initializeClient({ req, res, endpointOption });
expect(client.options.debug).toBe(true);
});
test('should set contextStrategy to summarize when OPENAI_SUMMARIZE is enabled', async () => {
process.env.OPENAI_API_KEY = 'test-openai-api-key';
process.env.OPENAI_SUMMARIZE = 'true';
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
const { client } = await initializeClient({ req, res, endpointOption });
expect(client.options.contextStrategy).toBe('summarize');
});
// ... additional tests for reverseProxyUrl, proxy, user-provided keys, etc.
test('should throw an error if no API keys are provided in the environment', async () => {
// Clear the environment variables for API keys
delete process.env.OPENAI_API_KEY;
delete process.env.AZURE_API_KEY;
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
`${EModelEndpoint.openAI} API key not provided.`,
);
});
// Additional tests for gptPlugins/initializeClient.spec.js
// ... (previous test setup code)
test('should handle user-provided OpenAI keys and check expiry', async () => {
process.env.OPENAI_API_KEY = 'user_provided';
process.env.PLUGINS_USE_AZURE = 'false';
const futureDate = new Date(Date.now() + 10000).toISOString();
const req = {
body: { key: futureDate },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
getUserKeyValues.mockResolvedValue({ apiKey: 'test-user-provided-openai-api-key' });
const { openAIApiKey } = await initializeClient({ req, res, endpointOption });
expect(openAIApiKey).toBe('test-user-provided-openai-api-key');
});
test('should handle user-provided Azure keys and check expiry', async () => {
process.env.AZURE_API_KEY = 'user_provided';
process.env.PLUGINS_USE_AZURE = 'true';
const futureDate = new Date(Date.now() + 10000).toISOString();
const req = {
body: { key: futureDate },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'test-model' } };
getUserKeyValues.mockResolvedValue({
apiKey: JSON.stringify({
azureOpenAIApiKey: 'test-user-provided-azure-api-key',
azureOpenAIApiDeploymentName: 'test-deployment',
}),
});
const { azure } = await initializeClient({ req, res, endpointOption });
expect(azure.azureOpenAIApiKey).toBe('test-user-provided-azure-api-key');
});
test('should throw an error if the user-provided key has expired', async () => {
process.env.OPENAI_API_KEY = 'user_provided';
process.env.PLUGINS_USE_AZURE = 'FALSE';
const expiresAt = new Date(Date.now() - 10000).toISOString(); // Expired
const req = {
body: { key: expiresAt },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
/expired_user_key/,
);
});
test('should throw an error if the user-provided Azure key is invalid JSON', async () => {
process.env.AZURE_API_KEY = 'user_provided';
process.env.PLUGINS_USE_AZURE = 'true';
const req = {
body: { key: new Date(Date.now() + 10000).toISOString() },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
// Simulate an invalid JSON string returned from getUserKey
getUserKey.mockResolvedValue('invalid-json');
getUserKeyValues.mockImplementation(() => {
let userValues = getUserKey();
try {
userValues = JSON.parse(userValues);
} catch (e) {
throw new Error(
JSON.stringify({
type: ErrorTypes.INVALID_USER_KEY,
}),
);
}
return userValues;
});
await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
/invalid_user_key/,
);
});
test('should correctly handle the presence of a reverse proxy', async () => {
process.env.OPENAI_REVERSE_PROXY = 'http://reverse.proxy';
process.env.PROXY = 'http://proxy';
process.env.OPENAI_API_KEY = 'test-openai-api-key';
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
const { client } = await initializeClient({ req, res, endpointOption });
expect(client.options.reverseProxyUrl).toBe('http://reverse.proxy');
expect(client.options.proxy).toBe('http://proxy');
});
test('should throw an error when user-provided values are not valid JSON', async () => {
process.env.OPENAI_API_KEY = 'user_provided';
const req = {
body: { key: new Date(Date.now() + 10000).toISOString(), endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
// Mock getUserKey to return a non-JSON string
getUserKey.mockResolvedValue('not-a-json');
getUserKeyValues.mockImplementation(() => {
let userValues = getUserKey();
try {
userValues = JSON.parse(userValues);
} catch (e) {
throw new Error(
JSON.stringify({
type: ErrorTypes.INVALID_USER_KEY,
}),
);
}
return userValues;
});
await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
/invalid_user_key/,
);
});
test('should initialize client correctly for Azure OpenAI with valid configuration', async () => {
const req = {
body: {
key: null,
endpoint: EModelEndpoint.gptPlugins,
model: modelNames[0],
},
user: { id: '123' },
app: {
locals: {
[EModelEndpoint.azureOpenAI]: {
plugins: true,
modelNames,
modelGroupMap,
groupMap,
},
},
},
};
const res = {};
const endpointOption = {};
const client = await initializeClient({ req, res, endpointOption });
expect(client.client.options.azure).toBeDefined();
});
test('should initialize client with default options when certain env vars are not set', async () => {
delete process.env.OPENAI_SUMMARIZE;
process.env.OPENAI_API_KEY = 'some-api-key';
const req = {
body: { key: null, endpoint: EModelEndpoint.gptPlugins },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
const client = await initializeClient({ req, res, endpointOption });
expect(client.client.options.contextStrategy).toBe(null);
});
test('should correctly use user-provided apiKey and baseURL when provided', async () => {
process.env.OPENAI_API_KEY = 'user_provided';
process.env.OPENAI_REVERSE_PROXY = 'user_provided';
const req = {
body: {
key: new Date(Date.now() + 10000).toISOString(),
endpoint: 'openAI',
},
user: {
id: '123',
},
app,
};
const res = {};
const endpointOption = {};
getUserKeyValues.mockResolvedValue({
apiKey: 'test',
baseURL: 'https://user-provided-url.com',
});
const result = await initializeClient({ req, res, endpointOption });
expect(result.openAIApiKey).toBe('test');
expect(result.client.options.reverseProxyUrl).toBe('https://user-provided-url.com');
});
});


@@ -1,11 +1,7 @@
-const {
-  ErrorTypes,
-  EModelEndpoint,
-  resolveHeaders,
-  mapModelToAzureConfig,
-} = require('librechat-data-provider');
+const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
 const {
   isEnabled,
+  resolveHeaders,
   isUserProvided,
   getOpenAIConfig,
   getAzureCredentials,
@@ -84,7 +80,10 @@ const initializeClient = async ({
   });
   clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
-  clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
+  clientOptions.headers = resolveHeaders(
+    { ...headers, ...(clientOptions.headers ?? {}) },
+    req.user,
+  );
   clientOptions.titleConvo = azureConfig.titleConvo;
   clientOptions.titleModel = azureConfig.titleModel;


@@ -1,9 +1,9 @@
 const { logger } = require('@librechat/data-schemas');
-const { CacheKeys, processMCPEnv } = require('librechat-data-provider');
+const { CacheKeys } = require('librechat-data-provider');
+const { findToken, updateToken, createToken, deleteTokens } = require('~/models');
 const { getMCPManager, getFlowStateManager } = require('~/config');
 const { getCachedTools, setCachedTools } = require('./Config');
 const { getLogStores } = require('~/cache');
-const { findToken, updateToken, createToken, deleteTokens } = require('~/models');
 
 /**
  * Initialize MCP servers
@@ -30,7 +30,6 @@ async function initializeMCP(app) {
       createToken,
       deleteTokens,
     },
-    processMCPEnv,
   });
 
   delete app.locals.mcpConfig;


@@ -1503,7 +1503,6 @@
  * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration.
  * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration.
  * @property {boolean|{userProvide: boolean, userProvideURL: boolean, name: string}} [custom] - Custom Endpoint configuration.
- * @property {boolean|GptPlugins} [gptPlugins] - Configuration for GPT plugins.
  * @memberof typedefs
  */

package-lock.json (generated)

@@ -71,7 +71,6 @@
     "@waylaidwanderer/fetch-event-source": "^3.0.1",
     "axios": "^1.8.2",
     "bcryptjs": "^2.4.3",
-    "cohere-ai": "^7.9.1",
     "compression": "^1.7.4",
     "connect-redis": "^7.1.0",
     "cookie": "^0.7.2",
@@ -28101,6 +28100,8 @@
       "version": "7.9.1",
       "resolved": "https://registry.npmjs.org/cohere-ai/-/cohere-ai-7.9.1.tgz",
       "integrity": "sha512-shMz0Bs3p6/Nw5Yi+6Wc9tZ7DCGTtEnf1eAcuesnlyeKoFuZ7+bzeiHkt5E8SvTgAHxN1GCP3UkIoW85QhHKTA==",
+      "optional": true,
+      "peer": true,
       "dependencies": {
         "form-data": "4.0.0",
         "js-base64": "3.7.2",
@@ -28113,6 +28114,8 @@
       "version": "6.11.2",
       "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.2.tgz",
       "integrity": "sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==",
+      "optional": true,
+      "peer": true,
       "dependencies": {
         "side-channel": "^1.0.4"
       },
@@ -34441,7 +34444,9 @@
     "node_modules/js-base64": {
       "version": "3.7.2",
       "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-3.7.2.tgz",
-      "integrity": "sha512-NnRs6dsyqUXejqk/yv2aiXlAvOs56sLkX6nUdeaNezI5LFFLlsZjOThmwnrcwh5ZZRwZlCMnVAY3CvhIhoVEKQ=="
+      "integrity": "sha512-NnRs6dsyqUXejqk/yv2aiXlAvOs56sLkX6nUdeaNezI5LFFLlsZjOThmwnrcwh5ZZRwZlCMnVAY3CvhIhoVEKQ==",
+      "optional": true,
+      "peer": true
     },
     "node_modules/js-cookie": {
       "version": "3.0.5",
@@ -44808,7 +44813,9 @@
     "node_modules/url-join": {
       "version": "4.0.1",
       "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz",
-      "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA=="
+      "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==",
+      "optional": true,
+      "peer": true
     },
     "node_modules/url-parse": {
       "version": "1.5.10",
@@ -46575,10 +46582,11 @@
       }
     },
     "packages/api/node_modules/brace-expansion": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
-      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
       "dev": true,
+      "license": "MIT",
       "dependencies": {
         "balanced-match": "^1.0.0"
       }
@@ -46832,9 +46840,9 @@
       }
     },
     "packages/data-schemas/node_modules/brace-expansion": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
-      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
       "dev": true,
       "license": "MIT",
       "dependencies": {


@@ -1,9 +1,4 @@
-import {
-  ErrorTypes,
-  EModelEndpoint,
-  resolveHeaders,
-  mapModelToAzureConfig,
-} from 'librechat-data-provider';
+import { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } from 'librechat-data-provider';
 import type {
   LLMConfigOptions,
   UserKeyValues,
@@ -13,6 +8,7 @@ import type {
 import { createHandleLLMNewToken } from '~/utils/generators';
 import { getAzureCredentials } from '~/utils/azure';
 import { isUserProvided } from '~/utils/common';
+import { resolveHeaders } from '~/utils/env';
 import { getOpenAIConfig } from './llm';
 
 /**
@@ -91,7 +87,10 @@ export const initializeOpenAI = async ({
   });
   clientOptions.reverseProxyUrl = configBaseURL ?? clientOptions.reverseProxyUrl;
-  clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
+  clientOptions.headers = resolveHeaders(
+    { ...headers, ...(clientOptions.headers ?? {}) },
+    req.user,
+  );
 
   const groupName = modelGroupMap[modelName || '']?.group;
   if (groupName && groupMap[groupName]) {


@@ -2,7 +2,7 @@ import { logger } from '@librechat/data-schemas';
 import { CallToolResultSchema, ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
 import type { RequestOptions } from '@modelcontextprotocol/sdk/shared/protocol.js';
 import type { OAuthClientInformation } from '@modelcontextprotocol/sdk/shared/auth.js';
-import type { JsonSchemaType, MCPOptions, TUser } from 'librechat-data-provider';
+import type { JsonSchemaType, TUser } from 'librechat-data-provider';
 import type { TokenMethods } from '@librechat/data-schemas';
 import type { FlowStateManager } from '~/flow/manager';
 import type { MCPOAuthTokens, MCPOAuthFlowMetadata } from './oauth/types';
@@ -13,6 +13,7 @@ import { MCPOAuthHandler } from './oauth/handler';
 import { MCPTokenStorage } from './oauth/tokens';
 import { formatToolContent } from './parsers';
 import { MCPConnection } from './connection';
+import { processMCPEnv } from '~/utils/env';
 
 export class MCPManager {
   private static instance: MCPManager | null = null;
@@ -24,11 +25,6 @@ export class MCPManager {
   private userLastActivity: Map<string, number> = new Map();
   private readonly USER_CONNECTION_IDLE_TIMEOUT = 15 * 60 * 1000; // 15 minutes (TODO: make configurable)
   private mcpConfigs: t.MCPServers = {};
-  private processMCPEnv?: (
-    obj: MCPOptions,
-    user?: TUser,
-    customUserVars?: Record<string, string>,
-  ) => MCPOptions; // Store the processing function
 
   /** Store MCP server instructions */
   private serverInstructions: Map<string, string> = new Map();
@@ -46,14 +42,11 @@
     mcpServers,
     flowManager,
     tokenMethods,
-    processMCPEnv,
   }: {
     mcpServers: t.MCPServers;
     flowManager: FlowStateManager<MCPOAuthTokens | null>;
     tokenMethods?: TokenMethods;
-    processMCPEnv?: (obj: MCPOptions) => MCPOptions;
   }): Promise<void> {
-    this.processMCPEnv = processMCPEnv; // Store the function
     this.mcpConfigs = mcpServers;
 
     if (!flowManager) {
@@ -68,7 +61,7 @@
     const connectionResults = await Promise.allSettled(
       entries.map(async ([serverName, _config], i) => {
         /** Process env for app-level connections */
-        const config = this.processMCPEnv ? this.processMCPEnv(_config) : _config;
+        const config = processMCPEnv(_config);
 
         /** Existing tokens for system-level connections */
         let tokens: MCPOAuthTokens | null = null;
@@ -444,9 +437,7 @@
       );
     }
 
-    if (this.processMCPEnv) {
-      config = { ...(this.processMCPEnv(config, user, customUserVars) ?? {}) };
-    }
+    config = { ...(processMCPEnv(config, user, customUserVars) ?? {}) };
 
     /** If no in-memory tokens, tokens from persistent storage */
     let tokens: MCPOAuthTokens | null = null;
     if (tokenMethods?.findToken) {
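Design note: because processMCPEnv now lives in the same package (~/utils/env), MCPManager can import it directly instead of storing an injected callback. App-level connections call processMCPEnv(_config) with no user, while user-level connections pass the user object plus any custom user variables.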


@@ -1,10 +1,10 @@
-import type { TUser } from 'librechat-data-provider';
 import {
-  StreamableHTTPOptionsSchema,
-  StdioOptionsSchema,
-  processMCPEnv,
   MCPOptions,
-} from '../src/mcp';
+  StdioOptionsSchema,
+  StreamableHTTPOptionsSchema,
+} from 'librechat-data-provider';
+import type { TUser } from 'librechat-data-provider';
+import { processMCPEnv } from '~/utils/env';
 
 // Helper function to create test user objects
 function createTestUser(


@@ -0,0 +1,317 @@
import { resolveHeaders } from './env';
import type { TUser } from 'librechat-data-provider';
// Helper function to create test user objects
function createTestUser(overrides: Partial<TUser> = {}): TUser {
return {
id: 'test-user-id',
username: 'testuser',
email: 'test@example.com',
name: 'Test User',
avatar: 'https://example.com/avatar.png',
provider: 'email',
role: 'user',
createdAt: new Date('2021-01-01').toISOString(),
updatedAt: new Date('2021-01-01').toISOString(),
...overrides,
};
}
describe('resolveHeaders', () => {
beforeEach(() => {
// Set up test environment variables
process.env.TEST_API_KEY = 'test-api-key-value';
process.env.ANOTHER_SECRET = 'another-secret-value';
});
afterEach(() => {
// Clean up environment variables
delete process.env.TEST_API_KEY;
delete process.env.ANOTHER_SECRET;
});
it('should return empty object when headers is undefined', () => {
const result = resolveHeaders(undefined);
expect(result).toEqual({});
});
it('should return empty object when headers is null', () => {
const result = resolveHeaders(null as unknown as Record<string, string> | undefined);
expect(result).toEqual({});
});
it('should return empty object when headers is empty', () => {
const result = resolveHeaders({});
expect(result).toEqual({});
});
it('should process environment variables in headers', () => {
const headers = {
Authorization: '${TEST_API_KEY}',
'X-Secret': '${ANOTHER_SECRET}',
'Content-Type': 'application/json',
};
const result = resolveHeaders(headers);
expect(result).toEqual({
Authorization: 'test-api-key-value',
'X-Secret': 'another-secret-value',
'Content-Type': 'application/json',
});
});
it('should process user ID placeholder when user has id', () => {
const user = { id: 'test-user-123' };
const headers = {
'User-Id': '{{LIBRECHAT_USER_ID}}',
'Content-Type': 'application/json',
};
const result = resolveHeaders(headers, user);
expect(result).toEqual({
'User-Id': 'test-user-123',
'Content-Type': 'application/json',
});
});
it('should not process user ID placeholder when user is undefined', () => {
const headers = {
'User-Id': '{{LIBRECHAT_USER_ID}}',
'Content-Type': 'application/json',
};
const result = resolveHeaders(headers);
expect(result).toEqual({
'User-Id': '{{LIBRECHAT_USER_ID}}',
'Content-Type': 'application/json',
});
});
it('should not process user ID placeholder when user has no id', () => {
const user = { id: '' };
const headers = {
'User-Id': '{{LIBRECHAT_USER_ID}}',
'Content-Type': 'application/json',
};
const result = resolveHeaders(headers, user);
expect(result).toEqual({
'User-Id': '{{LIBRECHAT_USER_ID}}',
'Content-Type': 'application/json',
});
});
it('should process full user object placeholders', () => {
const user = createTestUser({
id: 'user-123',
email: 'test@example.com',
username: 'testuser',
name: 'Test User',
role: 'admin',
});
const headers = {
'User-Email': '{{LIBRECHAT_USER_EMAIL}}',
'User-Name': '{{LIBRECHAT_USER_NAME}}',
'User-Username': '{{LIBRECHAT_USER_USERNAME}}',
'User-Role': '{{LIBRECHAT_USER_ROLE}}',
'User-Id': '{{LIBRECHAT_USER_ID}}',
'Content-Type': 'application/json',
};
const result = resolveHeaders(headers, user);
expect(result).toEqual({
'User-Email': 'test@example.com',
'User-Name': 'Test User',
'User-Username': 'testuser',
'User-Role': 'admin',
'User-Id': 'user-123',
'Content-Type': 'application/json',
});
});
it('should handle missing user fields gracefully', () => {
const user = createTestUser({
id: 'user-123',
email: 'test@example.com',
username: undefined, // explicitly set to undefined
});
const headers = {
'User-Email': '{{LIBRECHAT_USER_EMAIL}}',
'User-Username': '{{LIBRECHAT_USER_USERNAME}}',
'Non-Existent': '{{LIBRECHAT_USER_NONEXISTENT}}',
};
const result = resolveHeaders(headers, user);
expect(result).toEqual({
'User-Email': 'test@example.com',
'User-Username': '', // Empty string for missing field
'Non-Existent': '{{LIBRECHAT_USER_NONEXISTENT}}', // Unchanged for non-existent field
});
});
it('should process custom user variables', () => {
const user = { id: 'user-123' };
const customUserVars = {
CUSTOM_TOKEN: 'user-specific-token',
REGION: 'us-west-1',
};
const headers = {
Authorization: 'Bearer {{CUSTOM_TOKEN}}',
'X-Region': '{{REGION}}',
'X-System-Key': '${TEST_API_KEY}',
'X-User-Id': '{{LIBRECHAT_USER_ID}}',
};
const result = resolveHeaders(headers, user, customUserVars);
expect(result).toEqual({
Authorization: 'Bearer user-specific-token',
'X-Region': 'us-west-1',
'X-System-Key': 'test-api-key-value',
'X-User-Id': 'user-123',
});
});
it('should prioritize custom user variables over user fields', () => {
const user = createTestUser({
id: 'user-123',
email: 'user-email@example.com',
});
const customUserVars = {
LIBRECHAT_USER_EMAIL: 'custom-email@example.com',
};
const headers = {
'Test-Email': '{{LIBRECHAT_USER_EMAIL}}',
};
const result = resolveHeaders(headers, user, customUserVars);
expect(result).toEqual({
'Test-Email': 'custom-email@example.com',
});
});
it('should handle boolean user fields', () => {
const user = createTestUser({
id: 'user-123',
// Note: TUser doesn't have these boolean fields, so we'll test with string fields
role: 'admin',
});
const headers = {
'User-Role': '{{LIBRECHAT_USER_ROLE}}',
'User-Id': '{{LIBRECHAT_USER_ID}}',
};
const result = resolveHeaders(headers, user);
expect(result).toEqual({
'User-Role': 'admin',
'User-Id': 'user-123',
});
});
it('should handle multiple occurrences of the same placeholder', () => {
const user = createTestUser({
id: 'user-123',
email: 'test@example.com',
});
const headers = {
'Primary-Email': '{{LIBRECHAT_USER_EMAIL}}',
'Secondary-Email': '{{LIBRECHAT_USER_EMAIL}}',
'Backup-Email': '{{LIBRECHAT_USER_EMAIL}}',
};
const result = resolveHeaders(headers, user);
expect(result).toEqual({
'Primary-Email': 'test@example.com',
'Secondary-Email': 'test@example.com',
'Backup-Email': 'test@example.com',
});
});
it('should handle mixed variable types in the same headers object', () => {
const user = createTestUser({
id: 'user-123',
email: 'test@example.com',
});
const customUserVars = {
CUSTOM_TOKEN: 'secret-token',
};
const headers = {
Authorization: 'Bearer {{CUSTOM_TOKEN}}',
'X-User-Id': '{{LIBRECHAT_USER_ID}}',
'X-System-Key': '${TEST_API_KEY}',
'X-User-Email': '{{LIBRECHAT_USER_EMAIL}}',
'Content-Type': 'application/json',
};
const result = resolveHeaders(headers, user, customUserVars);
expect(result).toEqual({
Authorization: 'Bearer secret-token',
'X-User-Id': 'user-123',
'X-System-Key': 'test-api-key-value',
'X-User-Email': 'test@example.com',
'Content-Type': 'application/json',
});
});
it('should not modify the original headers object', () => {
const originalHeaders = {
Authorization: '${TEST_API_KEY}',
'User-Id': '{{LIBRECHAT_USER_ID}}',
};
const user = { id: 'user-123' };
const result = resolveHeaders(originalHeaders, user);
// Verify the result is processed
expect(result).toEqual({
Authorization: 'test-api-key-value',
'User-Id': 'user-123',
});
// Verify the original object is unchanged
expect(originalHeaders).toEqual({
Authorization: '${TEST_API_KEY}',
'User-Id': '{{LIBRECHAT_USER_ID}}',
});
});
it('should handle special characters in custom variable names', () => {
const user = { id: 'user-123' };
const customUserVars = {
'CUSTOM-VAR': 'dash-value',
CUSTOM_VAR: 'underscore-value',
'CUSTOM.VAR': 'dot-value',
};
const headers = {
'Dash-Header': '{{CUSTOM-VAR}}',
'Underscore-Header': '{{CUSTOM_VAR}}',
'Dot-Header': '{{CUSTOM.VAR}}',
};
const result = resolveHeaders(headers, user, customUserVars);
expect(result).toEqual({
'Dash-Header': 'dash-value',
'Underscore-Header': 'underscore-value',
'Dot-Header': 'dot-value',
});
});
});


@@ -0,0 +1,170 @@
import { extractEnvVariable } from 'librechat-data-provider';
import type { TUser, MCPOptions } from 'librechat-data-provider';

/**
 * List of allowed user fields that can be used in MCP environment variables.
 * These are non-sensitive string/boolean fields from the IUser interface.
 */
const ALLOWED_USER_FIELDS = [
  'id',
  'name',
  'username',
  'email',
  'provider',
  'role',
  'googleId',
  'facebookId',
  'openidId',
  'samlId',
  'ldapId',
  'githubId',
  'discordId',
  'appleId',
  'emailVerified',
  'twoFactorEnabled',
  'termsAccepted',
] as const;

/**
 * Processes a string value to replace user field placeholders
 * @param value - The string value to process
 * @param user - The user object
 * @returns The processed string with placeholders replaced
 */
function processUserPlaceholders(value: string, user?: TUser): string {
  if (!user || typeof value !== 'string') {
    return value;
  }
  for (const field of ALLOWED_USER_FIELDS) {
    const placeholder = `{{LIBRECHAT_USER_${field.toUpperCase()}}}`;
    if (!value.includes(placeholder)) {
      continue;
    }

    const fieldValue = user[field as keyof TUser];

    // Skip replacement if field doesn't exist in user object
    if (!(field in user)) {
      continue;
    }

    // Special case for 'id' field: skip if undefined or empty
    if (field === 'id' && (fieldValue === undefined || fieldValue === '')) {
      continue;
    }

    const replacementValue = fieldValue == null ? '' : String(fieldValue);
    value = value.replace(new RegExp(placeholder, 'g'), replacementValue);
  }
  return value;
}

/**
 * Processes a single string value by replacing various types of placeholders
 * @param originalValue - The original string value to process
 * @param customUserVars - Optional custom user variables to replace placeholders
 * @param user - Optional user object for replacing user field placeholders
 * @returns The processed string with all placeholders replaced
 */
function processSingleValue({
  originalValue,
  customUserVars,
  user,
}: {
  originalValue: string;
  customUserVars?: Record<string, string>;
  user?: TUser;
}): string {
  let value = originalValue;

  // 1. Replace custom user variables
  if (customUserVars) {
    for (const [varName, varVal] of Object.entries(customUserVars)) {
      /** Escaped varName for use in regex to avoid issues with special characters */
      const escapedVarName = varName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
      const placeholderRegex = new RegExp(`\\{\\{${escapedVarName}\\}\\}`, 'g');
      value = value.replace(placeholderRegex, varVal);
    }
  }

  // 2. Replace user field placeholders (e.g., {{LIBRECHAT_USER_EMAIL}}, {{LIBRECHAT_USER_ID}})
  value = processUserPlaceholders(value, user);

  // 3. Replace system environment variables
  value = extractEnvVariable(value);

  return value;
}

/**
 * Recursively processes an object to replace environment variables in string values
 * @param obj - The object to process
 * @param user - The user object containing all user fields
 * @param customUserVars - vars that user set in settings
 * @returns - The processed object with environment variables replaced
 */
export function processMCPEnv(
  obj: Readonly<MCPOptions>,
  user?: TUser,
  customUserVars?: Record<string, string>,
): MCPOptions {
  if (obj === null || obj === undefined) {
    return obj;
  }

  const newObj: MCPOptions = structuredClone(obj);

  if ('env' in newObj && newObj.env) {
    const processedEnv: Record<string, string> = {};
    for (const [key, originalValue] of Object.entries(newObj.env)) {
      processedEnv[key] = processSingleValue({ originalValue, customUserVars, user });
    }
    newObj.env = processedEnv;
  }

  // Process headers if they exist (for WebSocket, SSE, StreamableHTTP types)
  // Note: `env` and `headers` are on different branches of the MCPOptions union type.
  if ('headers' in newObj && newObj.headers) {
    const processedHeaders: Record<string, string> = {};
    for (const [key, originalValue] of Object.entries(newObj.headers)) {
      processedHeaders[key] = processSingleValue({ originalValue, customUserVars, user });
    }
    newObj.headers = processedHeaders;
  }

  // Process URL if it exists (for WebSocket, SSE, StreamableHTTP types)
  if ('url' in newObj && newObj.url) {
    newObj.url = processSingleValue({ originalValue: newObj.url, customUserVars, user });
  }

  return newObj;
}

/**
 * Resolves header values by replacing user placeholders, custom variables, and environment variables
 * @param headers - The headers object to process
 * @param user - Optional user object for replacing user field placeholders (can be partial with just id)
 * @param customUserVars - Optional custom user variables to replace placeholders
 * @returns - The processed headers with all placeholders replaced
 */
export function resolveHeaders(
  headers: Record<string, string> | undefined,
  user?: Partial<TUser> | { id: string },
  customUserVars?: Record<string, string>,
) {
  const resolvedHeaders = { ...(headers ?? {}) };

  if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
    Object.keys(headers).forEach((key) => {
      resolvedHeaders[key] = processSingleValue({
        originalValue: headers[key],
        customUserVars,
        user: user as TUser,
      });
    });
  }

  return resolvedHeaders;
}
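Taken together, a minimal usage sketch of the relocated helpers (illustrative values mirroring the tests above; both functions are re-exported from @librechat/api via the utils index):

import { resolveHeaders } from '@librechat/api';

process.env.TEST_API_KEY = 'test-api-key-value';

const headers = resolveHeaders(
  {
    Authorization: 'Bearer {{CUSTOM_TOKEN}}',
    'X-User-Email': '{{LIBRECHAT_USER_EMAIL}}',
    'X-System-Key': '${TEST_API_KEY}',
  },
  { id: 'user-123', email: 'test@example.com' },
  { CUSTOM_TOKEN: 'secret-token' },
);
// → {
//   Authorization: 'Bearer secret-token',
//   'X-User-Email': 'test@example.com',
//   'X-System-Key': 'test-api-key-value',
// }

Custom variables are applied first, then user fields, then system environment variables, which is why a custom variable named LIBRECHAT_USER_EMAIL overrides the user's actual email (as the priority test above verifies).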


@@ -1,6 +1,7 @@
 export * from './axios';
 export * from './azure';
 export * from './common';
+export * from './env';
 export * from './events';
 export * from './files';
 export * from './generators';


@@ -1,7 +1,6 @@
 import { z } from 'zod';
-import type { TUser } from './types';
-import { extractEnvVariable } from './utils';
 import { TokenExchangeMethodEnum } from './types/agents';
+import { extractEnvVariable } from './utils';
 
 const BaseOptionsSchema = z.object({
   iconPath: z.string().optional(),
@@ -153,130 +152,3 @@ export const MCPOptionsSchema = z.union([
 export const MCPServersSchema = z.record(z.string(), MCPOptionsSchema);
 
 export type MCPOptions = z.infer<typeof MCPOptionsSchema>;
/**
* List of allowed user fields that can be used in MCP environment variables.
* These are non-sensitive string/boolean fields from the IUser interface.
*/
const ALLOWED_USER_FIELDS = [
'name',
'username',
'email',
'provider',
'role',
'googleId',
'facebookId',
'openidId',
'samlId',
'ldapId',
'githubId',
'discordId',
'appleId',
'emailVerified',
'twoFactorEnabled',
'termsAccepted',
] as const;
/**
* Processes a string value to replace user field placeholders
* @param value - The string value to process
* @param user - The user object
* @returns The processed string with placeholders replaced
*/
function processUserPlaceholders(value: string, user?: TUser): string {
if (!user || typeof value !== 'string') {
return value;
}
for (const field of ALLOWED_USER_FIELDS) {
const placeholder = `{{LIBRECHAT_USER_${field.toUpperCase()}}}`;
if (value.includes(placeholder)) {
const fieldValue = user[field as keyof TUser];
const replacementValue = fieldValue != null ? String(fieldValue) : '';
value = value.replace(new RegExp(placeholder, 'g'), replacementValue);
}
}
return value;
}
function processSingleValue({
originalValue,
customUserVars,
user,
}: {
originalValue: string;
customUserVars?: Record<string, string>;
user?: TUser;
}): string {
let value = originalValue;
// 1. Replace custom user variables
if (customUserVars) {
for (const [varName, varVal] of Object.entries(customUserVars)) {
/** Escaped varName for use in regex to avoid issues with special characters */
const escapedVarName = varName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
const placeholderRegex = new RegExp(`\\{\\{${escapedVarName}\\}\\}`, 'g');
value = value.replace(placeholderRegex, varVal);
}
}
// 2.A. Special handling for LIBRECHAT_USER_ID placeholder
// This ensures {{LIBRECHAT_USER_ID}} is replaced only if user.id is available.
// If user.id is null/undefined, the placeholder remains
if (user && user.id != null && value.includes('{{LIBRECHAT_USER_ID}}')) {
value = value.replace(/\{\{LIBRECHAT_USER_ID\}\}/g, String(user.id));
}
// 2.B. Replace other standard user field placeholders (e.g., {{LIBRECHAT_USER_EMAIL}})
value = processUserPlaceholders(value, user);
// 3. Replace system environment variables
value = extractEnvVariable(value);
return value;
}
/**
* Recursively processes an object to replace environment variables in string values
* @param obj - The object to process
* @param user - The user object containing all user fields
* @param customUserVars - vars that user set in settings
* @returns - The processed object with environment variables replaced
*/
export function processMCPEnv(
obj: Readonly<MCPOptions>,
user?: TUser,
customUserVars?: Record<string, string>,
): MCPOptions {
if (obj === null || obj === undefined) {
return obj;
}
const newObj: MCPOptions = structuredClone(obj);
if ('env' in newObj && newObj.env) {
const processedEnv: Record<string, string> = {};
for (const [key, originalValue] of Object.entries(newObj.env)) {
processedEnv[key] = processSingleValue({ originalValue, customUserVars, user });
}
newObj.env = processedEnv;
}
// Process headers if they exist (for WebSocket, SSE, StreamableHTTP types)
// Note: `env` and `headers` are on different branches of the MCPOptions union type.
if ('headers' in newObj && newObj.headers) {
const processedHeaders: Record<string, string> = {};
for (const [key, originalValue] of Object.entries(newObj.headers)) {
processedHeaders[key] = processSingleValue({ originalValue, customUserVars, user });
}
newObj.headers = processedHeaders;
}
// Process URL if it exists (for WebSocket, SSE, StreamableHTTP types)
if ('url' in newObj && newObj.url) {
newObj.url = processSingleValue({ originalValue: newObj.url, customUserVars, user });
}
return newObj;
}


@@ -122,19 +122,6 @@ export function errorsToString(errors: ZodIssue[]) {
     .join(' ');
 }
 
-/** Resolves header values to env variables if detected */
-export function resolveHeaders(headers: Record<string, string> | undefined) {
-  const resolvedHeaders = { ...(headers ?? {}) };
-  if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
-    Object.keys(headers).forEach((key) => {
-      resolvedHeaders[key] = extractEnvVariable(headers[key]);
-    });
-  }
-  return resolvedHeaders;
-}
-
 export function getFirstDefinedValue(possibleValues: string[]) {
   let returnValue;
   for (const value of possibleValues) {
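The removed helper only substituted ${ENV_VAR} references; its replacement in packages/api/src/utils/env.ts (above) additionally resolves {{LIBRECHAT_USER_*}} fields and custom user variables.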