Mirror of https://github.com/danny-avila/LibreChat.git, synced 2026-02-02 07:41:49 +01:00
🔧 fix: Update Token Calculations/Mapping, MCP env Initialization (#6406)
* fix: Enhance MCP initialization to process environment variables
* fix: only build tokenCountMap with messages that are being used in the payload
* fix: Adjust maxContextTokens calculation to account for maxOutputTokens
* refactor: Make processMCPEnv optional in MCPManager initialization
* chore: Bump version of librechat-data-provider to 0.7.73
This commit is contained in:
parent d6a17784dc
commit efb616d600

8 changed files with 46 additions and 20 deletions
@@ -366,17 +366,14 @@ class BaseClient {
   * context: TMessage[],
   * remainingContextTokens: number,
   * messagesToRefine: TMessage[],
-  * summaryIndex: number,
-  * }>} An object with four properties: `context`, `summaryIndex`, `remainingContextTokens`, and `messagesToRefine`.
+  * }>} An object with three properties: `context`, `remainingContextTokens`, and `messagesToRefine`.
   * `context` is an array of messages that fit within the token limit.
-  * `summaryIndex` is the index of the first message in the `messagesToRefine` array.
   * `remainingContextTokens` is the number of tokens remaining within the limit after adding the messages to the context.
   * `messagesToRefine` is an array of messages that were not added to the context because they would have exceeded the token limit.
   */
  async getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, instructions }) {
    // Every reply is primed with <|start|>assistant<|message|>, so we
    // start with 3 tokens for the label after all messages have been counted.
-   let summaryIndex = -1;
    let currentTokenCount = 3;
    const instructionsTokenCount = instructions?.tokenCount ?? 0;
    let remainingContextTokens =
@@ -409,14 +406,12 @@ class BaseClient {
    }

    const prunedMemory = messages;
-   summaryIndex = prunedMemory.length - 1;
    remainingContextTokens -= currentTokenCount;

    return {
      context: context.reverse(),
      remainingContextTokens,
      messagesToRefine: prunedMemory,
-     summaryIndex,
    };
  }

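The dropped `summaryIndex` stays recoverable by callers, since `getMessagesWithinTokenLimit` still returns the pruned messages. A minimal sketch of deriving it from the new return shape; the `client` instance and surrounding variables here are hypothetical, not from the commit:

const { context, remainingContextTokens, messagesToRefine } =
  await client.getMessagesWithinTokenLimit({ messages, maxContextTokens, instructions });

// Equivalent to the removed field: the index of the last pruned message,
// which is where summarization picks up (see the tokenCountMap hunk and
// test updates below).
const summaryIndex = messagesToRefine.length - 1;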
@@ -459,7 +454,7 @@ class BaseClient {

    let orderedWithInstructions = this.addInstructions(orderedMessages, instructions);

-   let { context, remainingContextTokens, messagesToRefine, summaryIndex } =
+   let { context, remainingContextTokens, messagesToRefine } =
      await this.getMessagesWithinTokenLimit({
        messages: orderedWithInstructions,
        instructions,
@@ -529,7 +524,7 @@ class BaseClient {
    }

    // Make sure to only continue summarization logic if the summary message was generated
-   shouldSummarize = summaryMessage && shouldSummarize;
+   shouldSummarize = summaryMessage != null && shouldSummarize === true;

    logger.debug('[BaseClient] Context Count (2/2)', {
      remainingContextTokens,
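The old assignment used `&&`, which evaluates to one of its operands rather than a boolean, so `shouldSummarize` could end up holding `undefined` instead of `false`. A standalone illustration of the difference (values are made up):

let summaryMessage; // generation failed, so this stays undefined
let shouldSummarize = true;

// Old form: && yields the first falsy operand itself.
console.log(summaryMessage && shouldSummarize); // undefined, not false

// New form: the explicit comparisons force a strict boolean either way.
console.log(summaryMessage != null && shouldSummarize === true); // false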
@@ -539,17 +534,18 @@ class BaseClient {
    /** @type {Record<string, number> | undefined} */
    let tokenCountMap;
    if (buildTokenMap) {
-     tokenCountMap = orderedWithInstructions.reduce((map, message, index) => {
+     const currentPayload = shouldSummarize ? orderedWithInstructions : context;
+     tokenCountMap = currentPayload.reduce((map, message, index) => {
        const { messageId } = message;
        if (!messageId) {
          return map;
        }

-       if (shouldSummarize && index === summaryIndex && !usePrevSummary) {
+       if (shouldSummarize && index === messagesToRefine.length - 1 && !usePrevSummary) {
          map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount };
        }

-       map[messageId] = orderedWithInstructions[index].tokenCount;
+       map[messageId] = currentPayload[index].tokenCount;
        return map;
      }, {});
    }
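Previously the map was always built over `orderedWithInstructions`, so messages pruned out of the payload still contributed entries. A standalone sketch of the corrected selection with made-up data; the names mirror the diff, but this is not the actual BaseClient code:

const context = [
  { messageId: 'm2', tokenCount: 20 },
  { messageId: 'm3', tokenCount: 30 },
];
// 'm1' was pruned for exceeding the limit and is not sent to the model.
const orderedWithInstructions = [{ messageId: 'm1', tokenCount: 500 }, ...context];
const shouldSummarize = false;

// Count only the messages that actually go to the model.
const currentPayload = shouldSummarize ? orderedWithInstructions : context;
const tokenCountMap = currentPayload.reduce((map, { messageId, tokenCount }) => {
  if (messageId) {
    map[messageId] = tokenCount;
  }
  return map;
}, {});

console.log(tokenCountMap); // { m2: 20, m3: 30 } (no entry for the pruned 'm1')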
@@ -164,7 +164,7 @@ describe('BaseClient', () => {
    const result = await TestClient.getMessagesWithinTokenLimit({ messages });

    expect(result.context).toEqual(expectedContext);
-   expect(result.summaryIndex).toEqual(expectedIndex);
+   expect(result.messagesToRefine.length - 1).toEqual(expectedIndex);
    expect(result.remainingContextTokens).toBe(expectedRemainingContextTokens);
    expect(result.messagesToRefine).toEqual(expectedMessagesToRefine);
  });
@@ -200,7 +200,7 @@ describe('BaseClient', () => {
    const result = await TestClient.getMessagesWithinTokenLimit({ messages });

    expect(result.context).toEqual(expectedContext);
-   expect(result.summaryIndex).toEqual(expectedIndex);
+   expect(result.messagesToRefine.length - 1).toEqual(expectedIndex);
    expect(result.remainingContextTokens).toBe(expectedRemainingContextTokens);
    expect(result.messagesToRefine).toEqual(expectedMessagesToRefine);
  });
@@ -2,6 +2,7 @@ const {
   FileSources,
   EModelEndpoint,
   loadOCRConfig,
+  processMCPEnv,
   getConfigDefaults,
 } = require('librechat-data-provider');
 const { checkVariables, checkHealth, checkConfig, checkAzureVariables } = require('./start/checks');
@@ -54,7 +55,7 @@ const AppService = async (app) => {

   if (config.mcpServers != null) {
     const mcpManager = await getMCPManager();
-    await mcpManager.initializeMCP(config.mcpServers);
+    await mcpManager.initializeMCP(config.mcpServers, processMCPEnv);
     await mcpManager.mapAvailableTools(availableTools);
   }

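Passing `processMCPEnv` in as an argument, rather than importing it inside MCPManager, is what makes the hook optional per the refactor note in the commit message. A rough sketch of that pattern follows; the `${VAR}` substitution shown is an assumption about what the hook does, not its actual implementation, and both function names are hypothetical:

// Hypothetical stand-in for MCPManager's handling of the optional hook.
function initializeServers(mcpServers, processEnvFn) {
  return Object.fromEntries(
    Object.entries(mcpServers).map(([name, config]) => [
      name,
      // Hook is optional: configs pass through untouched without it.
      typeof processEnvFn === 'function' ? processEnvFn(config) : config,
    ]),
  );
}

// Assumed behavior: resolve `${VAR}` placeholders in env values from process.env.
const substituteEnv = (config) => ({
  ...config,
  env: Object.fromEntries(
    Object.entries(config.env ?? {}).map(([key, value]) => [
      key,
      typeof value === 'string'
        ? value.replace(/\$\{(\w+)\}/g, (_, name) => process.env[name] ?? '')
        : value,
    ]),
  ),
});

initializeServers({ myServer: { env: { API_KEY: '${MY_API_KEY}' } } }, substituteEnv);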
@@ -178,6 +178,7 @@ const initializeAgentOptions = async ({
    agent.provider = options.provider;
  }

+ /** @type {import('@librechat/agents').ClientOptions} */
  agent.model_parameters = Object.assign(model_parameters, options.llmConfig);
  if (options.configOptions) {
    agent.model_parameters.configuration = options.configOptions;
@@ -196,6 +197,7 @@ const initializeAgentOptions = async ({

  const tokensModel =
    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : agent.model_parameters.model;
+ const maxTokens = agent.model_parameters.maxOutputTokens ?? agent.model_parameters.maxTokens ?? 0;

  return {
    ...agent,
@@ -204,7 +206,7 @@ const initializeAgentOptions = async ({
    toolContextMap,
    maxContextTokens:
      agent.max_context_tokens ??
-     (getModelMaxTokens(tokensModel, providerEndpointMap[provider]) ?? 4000) * 0.9,
+     ((getModelMaxTokens(tokensModel, providerEndpointMap[provider]) ?? 4000) - maxTokens) * 0.9,
  };
};

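The old budget multiplied the model's full context window by 0.9, ignoring any reserved output tokens; the new formula subtracts the output budget first. Worked numbers, illustrative only and not from the commit:

const modelMax = 128000; // assumed context window for some model
const maxTokens = 8192;  // maxOutputTokens reserved for the reply

const before = modelMax * 0.9;              // 115200: input could crowd out the reply
const after = (modelMax - maxTokens) * 0.9; // 107827.2: output budget is carved out first

console.log({ before, after });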