2023-12-11 14:48:40 -05:00
|
|
|
const { EModelEndpoint } = require('librechat-data-provider');
|
2025-08-07 15:03:19 -04:00
|
|
|
const {
|
2025-09-08 11:35:29 -07:00
|
|
|
maxTokensMap,
|
|
|
|
matchModelName,
|
|
|
|
processModelData,
|
|
|
|
getModelMaxTokens,
|
2025-08-07 15:03:19 -04:00
|
|
|
maxOutputTokensMap,
|
2025-08-16 13:36:46 -04:00
|
|
|
findMatchingPattern,
|
2025-09-08 11:35:29 -07:00
|
|
|
} = require('@librechat/api');
|
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was point to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers
* fix: forgot to spread formatMessages also took opportunity to pluralize filename
* refactor: pass memory to tools, namely openapi specs. not used and may never be used by new method but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: test fail
* fix: only pass instructions to payload when defined or not empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00
|
|
|
|
|
|
|
describe('getModelMaxTokens', () => {
|
|
|
|
test('should return correct tokens for exact match', () => {
|
2023-12-10 14:54:13 -05:00
|
|
|
expect(getModelMaxTokens('gpt-4-32k-0613')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-32k-0613'],
|
|
|
|
);
|
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was point to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers
* fix: forgot to spread formatMessages also took opportunity to pluralize filename
* refactor: pass memory to tools, namely openapi specs. not used and may never be used by new method but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: test fail
* fix: only pass instructions to payload when defined or not empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for partial match', () => {
|
2023-12-10 14:54:13 -05:00
|
|
|
expect(getModelMaxTokens('gpt-4-32k-unknown')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-32k'],
|
|
|
|
);
|
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was point to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers
* fix: forgot to spread formatMessages also took opportunity to pluralize filename
* refactor: pass memory to tools, namely openapi specs. not used and may never be used by new method but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: test fail
* fix: only pass instructions to payload when defined or not empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for partial match (OpenRouter)', () => {
|
2023-12-10 14:54:13 -05:00
|
|
|
expect(getModelMaxTokens('openai/gpt-4-32k')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-32k'],
|
|
|
|
);
|
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was point to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers
* fix: forgot to spread formatMessages also took opportunity to pluralize filename
* refactor: pass memory to tools, namely openapi specs. not used and may never be used by new method but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: test fail
* fix: only pass instructions to payload when defined or not empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
test('should return undefined for no match', () => {
|
|
|
|
expect(getModelMaxTokens('unknown-model')).toBeUndefined();
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for another exact match', () => {
|
2023-11-06 15:26:16 -05:00
|
|
|
expect(getModelMaxTokens('gpt-3.5-turbo-16k-0613')).toBe(
|
2023-12-10 14:54:13 -05:00
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-16k-0613'],
|
2023-11-06 15:26:16 -05:00
|
|
|
);
|
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was point to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers
* fix: forgot to spread formatMessages also took opportunity to pluralize filename
* refactor: pass memory to tools, namely openapi specs. not used and may never be used by new method but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: test fail
* fix: only pass instructions to payload when defined or not empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for another partial match', () => {
|
2023-12-10 14:54:13 -05:00
|
|
|
expect(getModelMaxTokens('gpt-3.5-turbo-unknown')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo'],
|
|
|
|
);
|
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was point to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers
* fix: forgot to spread formatMessages also took opportunity to pluralize filename
* refactor: pass memory to tools, namely openapi specs. not used and may never be used by new method but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: test fail
* fix: only pass instructions to payload when defined or not empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
test('should return undefined for undefined input', () => {
|
|
|
|
expect(getModelMaxTokens(undefined)).toBeUndefined();
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return undefined for null input', () => {
|
|
|
|
expect(getModelMaxTokens(null)).toBeUndefined();
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return undefined for number input', () => {
|
|
|
|
expect(getModelMaxTokens(123)).toBeUndefined();
|
|
|
|
});
|
2023-11-06 15:26:16 -05:00
|
|
|
|
|
|
|
// 11/06 Update
|
|
|
|
test('should return correct tokens for gpt-3.5-turbo-1106 exact match', () => {
|
2023-12-10 14:54:13 -05:00
|
|
|
expect(getModelMaxTokens('gpt-3.5-turbo-1106')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-1106'],
|
|
|
|
);
|
2023-11-06 15:26:16 -05:00
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for gpt-4-1106 exact match', () => {
|
2023-12-10 14:54:13 -05:00
|
|
|
expect(getModelMaxTokens('gpt-4-1106')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4-1106']);
|
2023-11-06 15:26:16 -05:00
|
|
|
});
|
|
|
|
|
2024-04-23 08:57:20 -04:00
|
|
|
test('should return correct tokens for gpt-4-vision exact match', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-4-vision')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-vision'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
2023-11-06 15:26:16 -05:00
|
|
|
test('should return correct tokens for gpt-3.5-turbo-1106 partial match', () => {
|
|
|
|
expect(getModelMaxTokens('something-/gpt-3.5-turbo-1106')).toBe(
|
2023-12-10 14:54:13 -05:00
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-1106'],
|
2023-11-06 15:26:16 -05:00
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gpt-3.5-turbo-1106/something-/')).toBe(
|
2023-12-10 14:54:13 -05:00
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-1106'],
|
2023-11-06 15:26:16 -05:00
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for gpt-4-1106 partial match', () => {
|
2023-12-10 14:54:13 -05:00
|
|
|
expect(getModelMaxTokens('gpt-4-1106/something')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-1106'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gpt-4-1106-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-1106'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gpt-4-1106-vision-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-1106'],
|
|
|
|
);
|
2023-11-06 15:26:16 -05:00
|
|
|
});
|
2023-11-26 14:44:57 -05:00
|
|
|
|
2024-01-25 22:57:18 -05:00
|
|
|
// 01/25 Update
|
|
|
|
test('should return correct tokens for gpt-4-turbo/0125 matches', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-4-turbo')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-turbo'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gpt-4-turbo-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-turbo'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gpt-4-0125')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4-0125']);
|
|
|
|
expect(getModelMaxTokens('gpt-4-0125-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4-0125'],
|
|
|
|
);
|
2024-02-02 01:01:11 -05:00
|
|
|
expect(getModelMaxTokens('gpt-3.5-turbo-0125')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-0125'],
|
|
|
|
);
|
2024-01-25 22:57:18 -05:00
|
|
|
});
|
|
|
|
|
2025-02-28 12:19:21 -05:00
|
|
|
test('should return correct tokens for gpt-4.5 matches', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-4.5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4.5']);
|
|
|
|
expect(getModelMaxTokens('gpt-4.5-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.5'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('openai/gpt-4.5-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.5'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
2025-04-14 14:55:59 -04:00
|
|
|
test('should return correct tokens for gpt-4.1 matches', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-4.1')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4.1']);
|
|
|
|
expect(getModelMaxTokens('gpt-4.1-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.1'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('openai/gpt-4.1')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.1'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gpt-4.1-2024-08-06')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.1'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for gpt-4.1-mini matches', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-4.1-mini')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-mini'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gpt-4.1-mini-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-mini'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('openai/gpt-4.1-mini')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-mini'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for gpt-4.1-nano matches', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-4.1-nano')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-nano'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gpt-4.1-nano-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-nano'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('openai/gpt-4.1-nano')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-nano'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
2025-08-07 16:01:29 -04:00
|
|
|
test('should return correct tokens for gpt-5 matches', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
|
|
|
|
expect(getModelMaxTokens('gpt-5-preview')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
|
|
|
|
expect(getModelMaxTokens('openai/gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
|
|
|
|
expect(getModelMaxTokens('gpt-5-2025-01-30')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-5'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for gpt-5-mini matches', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-5-mini')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini']);
|
|
|
|
expect(getModelMaxTokens('gpt-5-mini-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('openai/gpt-5-mini')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for gpt-5-nano matches', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-5-nano')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano']);
|
|
|
|
expect(getModelMaxTokens('gpt-5-nano-preview')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('openai/gpt-5-nano')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
2023-11-26 14:44:57 -05:00
|
|
|
test('should return correct tokens for Anthropic models', () => {
|
|
|
|
const models = [
|
|
|
|
'claude-2.1',
|
|
|
|
'claude-2',
|
|
|
|
'claude-1.2',
|
|
|
|
'claude-1',
|
|
|
|
'claude-1-100k',
|
|
|
|
'claude-instant-1',
|
|
|
|
'claude-instant-1-100k',
|
2024-06-20 20:48:15 -04:00
|
|
|
'claude-3-haiku',
|
|
|
|
'claude-3-sonnet',
|
|
|
|
'claude-3-opus',
|
|
|
|
'claude-3-5-sonnet',
|
2025-02-24 20:08:55 -05:00
|
|
|
'claude-3-7-sonnet',
|
2023-11-26 14:44:57 -05:00
|
|
|
];
|
|
|
|
|
2024-06-20 20:48:15 -04:00
|
|
|
const maxTokens = {
|
|
|
|
'claude-': maxTokensMap[EModelEndpoint.anthropic]['claude-'],
|
|
|
|
'claude-2.1': maxTokensMap[EModelEndpoint.anthropic]['claude-2.1'],
|
|
|
|
'claude-3': maxTokensMap[EModelEndpoint.anthropic]['claude-3-sonnet'],
|
|
|
|
};
|
|
|
|
|
2023-11-26 14:44:57 -05:00
|
|
|
models.forEach((model) => {
|
2024-06-20 20:48:15 -04:00
|
|
|
let expectedTokens;
|
|
|
|
|
|
|
|
if (model === 'claude-2.1') {
|
|
|
|
expectedTokens = maxTokens['claude-2.1'];
|
|
|
|
} else if (model.startsWith('claude-3')) {
|
|
|
|
expectedTokens = maxTokens['claude-3'];
|
|
|
|
} else {
|
|
|
|
expectedTokens = maxTokens['claude-'];
|
|
|
|
}
|
|
|
|
|
2023-12-10 14:54:13 -05:00
|
|
|
expect(getModelMaxTokens(model, EModelEndpoint.anthropic)).toEqual(expectedTokens);
|
2023-11-26 14:44:57 -05:00
|
|
|
});
|
|
|
|
});
|
2023-12-10 14:54:13 -05:00
|
|
|
|
|
|
|
// Tests for Google models
|
|
|
|
test('should return correct tokens for exact match - Google models', () => {
|
|
|
|
expect(getModelMaxTokens('text-bison-32k', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['text-bison-32k'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('codechat-bison-32k', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['codechat-bison-32k'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return undefined for no match - Google models', () => {
|
|
|
|
expect(getModelMaxTokens('unknown-google-model', EModelEndpoint.google)).toBeUndefined();
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for partial match - Google models', () => {
|
2025-02-06 18:13:18 -05:00
|
|
|
expect(getModelMaxTokens('gemini-2.0-flash-lite-preview-02-05', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini-2.0-flash-lite'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gemini-2.0-flash-001', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini-2.0-flash'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gemini-2.0-flash-exp', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini-2.0-flash'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gemini-2.0-pro-exp-02-05', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini-2.0'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gemini-1.5-flash-8b', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini-1.5-flash-8b'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gemini-1.5-flash-thinking', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini-1.5-flash'],
|
|
|
|
);
|
2024-04-16 08:32:40 -04:00
|
|
|
expect(getModelMaxTokens('gemini-1.5-pro-latest', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini-1.5'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gemini-1.5-pro-preview-0409', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini-1.5'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gemini-pro-vision', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini-pro-vision'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('gemini-1.0', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini'],
|
|
|
|
);
|
2023-12-15 02:18:07 -05:00
|
|
|
expect(getModelMaxTokens('gemini-pro', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['gemini'],
|
|
|
|
);
|
2023-12-10 14:54:13 -05:00
|
|
|
expect(getModelMaxTokens('code-', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['code-'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('chat-', EModelEndpoint.google)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.google]['chat-'],
|
|
|
|
);
|
|
|
|
});
|
2024-02-02 23:53:50 -05:00
|
|
|
|
2024-04-16 08:32:40 -04:00
|
|
|
test('should return correct tokens for partial match - Cohere models', () => {
|
|
|
|
expect(getModelMaxTokens('command', EModelEndpoint.custom)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.custom]['command'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('command-r-plus', EModelEndpoint.custom)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.custom]['command-r-plus'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
2024-02-02 23:53:50 -05:00
|
|
|
test('should return correct tokens when using a custom endpointTokenConfig', () => {
|
|
|
|
const customTokenConfig = {
|
|
|
|
'custom-model': 12345,
|
|
|
|
};
|
|
|
|
expect(getModelMaxTokens('custom-model', EModelEndpoint.openAI, customTokenConfig)).toBe(12345);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should prioritize endpointTokenConfig over the default configuration', () => {
|
|
|
|
const customTokenConfig = {
|
|
|
|
'gpt-4-32k': 9999,
|
|
|
|
};
|
|
|
|
expect(getModelMaxTokens('gpt-4-32k', EModelEndpoint.openAI, customTokenConfig)).toBe(9999);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return undefined if the model is not found in custom endpointTokenConfig', () => {
|
|
|
|
const customTokenConfig = {
|
|
|
|
'custom-model': 12345,
|
|
|
|
};
|
|
|
|
expect(
|
|
|
|
getModelMaxTokens('nonexistent-model', EModelEndpoint.openAI, customTokenConfig),
|
|
|
|
).toBeUndefined();
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for exact match in azureOpenAI models', () => {
|
|
|
|
expect(getModelMaxTokens('gpt-4-turbo', EModelEndpoint.azureOpenAI)).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.azureOpenAI]['gpt-4-turbo'],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return undefined for no match in azureOpenAI models', () => {
|
|
|
|
expect(
|
|
|
|
getModelMaxTokens('nonexistent-azure-model', EModelEndpoint.azureOpenAI),
|
|
|
|
).toBeUndefined();
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return undefined for undefined, null, or number model argument with azureOpenAI endpoint', () => {
|
|
|
|
expect(getModelMaxTokens(undefined, EModelEndpoint.azureOpenAI)).toBeUndefined();
|
|
|
|
expect(getModelMaxTokens(null, EModelEndpoint.azureOpenAI)).toBeUndefined();
|
|
|
|
expect(getModelMaxTokens(1234, EModelEndpoint.azureOpenAI)).toBeUndefined();
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should respect custom endpointTokenConfig over azureOpenAI defaults', () => {
|
|
|
|
const customTokenConfig = {
|
|
|
|
'custom-azure-model': 4096,
|
|
|
|
};
|
|
|
|
expect(
|
|
|
|
getModelMaxTokens('custom-azure-model', EModelEndpoint.azureOpenAI, customTokenConfig),
|
|
|
|
).toBe(4096);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for partial match with custom endpointTokenConfig in azureOpenAI', () => {
|
|
|
|
const customTokenConfig = {
|
|
|
|
'azure-custom-': 1024,
|
|
|
|
};
|
|
|
|
expect(
|
|
|
|
getModelMaxTokens('azure-custom-gpt-3', EModelEndpoint.azureOpenAI, customTokenConfig),
|
|
|
|
).toBe(1024);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return undefined for a model when using an unsupported endpoint', () => {
|
|
|
|
expect(getModelMaxTokens('azure-gpt-3', 'unsupportedEndpoint')).toBeUndefined();
|
|
|
|
});
|
2024-12-18 14:40:58 -05:00
|
|
|
|
|
|
|
test('should return correct max context tokens for o1-series models', () => {
|
|
|
|
// Standard o1 variations
|
|
|
|
const o1Tokens = maxTokensMap[EModelEndpoint.openAI]['o1'];
|
|
|
|
expect(getModelMaxTokens('o1')).toBe(o1Tokens);
|
|
|
|
expect(getModelMaxTokens('o1-latest')).toBe(o1Tokens);
|
|
|
|
expect(getModelMaxTokens('o1-2024-12-17')).toBe(o1Tokens);
|
|
|
|
expect(getModelMaxTokens('o1-something-else')).toBe(o1Tokens);
|
|
|
|
expect(getModelMaxTokens('openai/o1-something-else')).toBe(o1Tokens);
|
|
|
|
|
|
|
|
// Mini variations
|
|
|
|
const o1MiniTokens = maxTokensMap[EModelEndpoint.openAI]['o1-mini'];
|
|
|
|
expect(getModelMaxTokens('o1-mini')).toBe(o1MiniTokens);
|
|
|
|
expect(getModelMaxTokens('o1-mini-latest')).toBe(o1MiniTokens);
|
|
|
|
expect(getModelMaxTokens('o1-mini-2024-09-12')).toBe(o1MiniTokens);
|
|
|
|
expect(getModelMaxTokens('o1-mini-something')).toBe(o1MiniTokens);
|
|
|
|
expect(getModelMaxTokens('openai/o1-mini-something')).toBe(o1MiniTokens);
|
|
|
|
|
|
|
|
// Preview variations
|
|
|
|
const o1PreviewTokens = maxTokensMap[EModelEndpoint.openAI]['o1-preview'];
|
|
|
|
expect(getModelMaxTokens('o1-preview')).toBe(o1PreviewTokens);
|
|
|
|
expect(getModelMaxTokens('o1-preview-latest')).toBe(o1PreviewTokens);
|
|
|
|
expect(getModelMaxTokens('o1-preview-2024-09-12')).toBe(o1PreviewTokens);
|
|
|
|
expect(getModelMaxTokens('o1-preview-something')).toBe(o1PreviewTokens);
|
|
|
|
expect(getModelMaxTokens('openai/o1-preview-something')).toBe(o1PreviewTokens);
|
|
|
|
});
|
2025-04-17 00:40:26 -04:00
|
|
|
|
|
|
|
test('should return correct max context tokens for o4-mini and o3', () => {
|
|
|
|
const o4MiniTokens = maxTokensMap[EModelEndpoint.openAI]['o4-mini'];
|
|
|
|
const o3Tokens = maxTokensMap[EModelEndpoint.openAI]['o3'];
|
|
|
|
expect(getModelMaxTokens('o4-mini')).toBe(o4MiniTokens);
|
|
|
|
expect(getModelMaxTokens('openai/o4-mini')).toBe(o4MiniTokens);
|
|
|
|
expect(getModelMaxTokens('o3')).toBe(o3Tokens);
|
|
|
|
expect(getModelMaxTokens('openai/o3')).toBe(o3Tokens);
|
|
|
|
});
|
2025-08-07 15:03:19 -04:00
|
|
|
|
|
|
|
test('should return correct tokens for GPT-OSS models', () => {
|
|
|
|
const expected = maxTokensMap[EModelEndpoint.openAI]['gpt-oss-20b'];
|
|
|
|
['gpt-oss-20b', 'gpt-oss-120b', 'openai/gpt-oss-20b', 'openai/gpt-oss-120b'].forEach((name) => {
|
|
|
|
expect(getModelMaxTokens(name)).toBe(expected);
|
|
|
|
});
|
|
|
|
});
|
|
|
|
|
2025-08-07 16:01:29 -04:00
|
|
|
test('should return correct max output tokens for GPT-5 models', () => {
|
2025-09-08 11:35:29 -07:00
|
|
|
const { getModelMaxOutputTokens } = require('@librechat/api');
|
2025-08-07 16:01:29 -04:00
|
|
|
['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => {
|
|
|
|
expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
|
|
|
|
expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
|
|
|
|
maxOutputTokensMap[EModelEndpoint.openAI][model],
|
|
|
|
);
|
|
|
|
expect(getModelMaxOutputTokens(model, EModelEndpoint.azureOpenAI)).toBe(
|
|
|
|
maxOutputTokensMap[EModelEndpoint.azureOpenAI][model],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
});
|
|
|
|
|
2025-08-07 15:03:19 -04:00
|
|
|
test('should return correct max output tokens for GPT-OSS models', () => {
|
2025-09-08 11:35:29 -07:00
|
|
|
const { getModelMaxOutputTokens } = require('@librechat/api');
|
2025-08-07 15:03:19 -04:00
|
|
|
['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
|
|
|
|
expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
|
|
|
|
expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
|
|
|
|
maxOutputTokensMap[EModelEndpoint.openAI][model],
|
|
|
|
);
|
|
|
|
expect(getModelMaxOutputTokens(model, EModelEndpoint.azureOpenAI)).toBe(
|
|
|
|
maxOutputTokensMap[EModelEndpoint.azureOpenAI][model],
|
|
|
|
);
|
|
|
|
});
|
|
|
|
});
|
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was point to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers
* fix: forgot to spread formatMessages also took opportunity to pluralize filename
* refactor: pass memory to tools, namely openapi specs. not used and may never be used by new method but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: test fail
* fix: only pass instructions to payload when defined or not empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00
|
|
|
});
|
feat: Accurate Token Usage Tracking & Optional Balance (#1018)
* refactor(Chains/llms): allow passing callbacks
* refactor(BaseClient): accurately count completion tokens as generation only
* refactor(OpenAIClient): remove unused getTokenCountForResponse, pass streaming var and callbacks in initializeLLM
* wip: summary prompt tokens
* refactor(summarizeMessages): new cut-off strategy that generates a better summary by adding context from beginning, truncating the middle, and providing the end
wip: draft out relevant providers and variables for token tracing
* refactor(createLLM): make streaming prop false by default
* chore: remove use of getTokenCountForResponse
* refactor(agents): use BufferMemory as ConversationSummaryBufferMemory token usage not easy to trace
* chore: remove passing of streaming prop, also console log useful vars for tracing
* feat: formatFromLangChain helper function to count tokens for ChatModelStart
* refactor(initializeLLM): add role for LLM tracing
* chore(formatFromLangChain): update JSDoc
* feat(formatMessages): formats langChain messages into OpenAI payload format
* chore: install openai-chat-tokens
* refactor(formatMessage): optimize conditional langChain logic
fix(formatFromLangChain): fix destructuring
* feat: accurate prompt tokens for ChatModelStart before generation
* refactor(handleChatModelStart): move to callbacks dir, use factory function
* refactor(initializeLLM): rename 'role' to 'context'
* feat(Balance/Transaction): new schema/models for tracking token spend
refactor(Key): factor out model export to separate file
* refactor(initializeClient): add req,res objects to client options
* feat: add-balance script to add to an existing users' token balance
refactor(Transaction): use multiplier map/function, return balance update
* refactor(Tx): update enum for tokenType, return 1 for multiplier if no map match
* refactor(Tx): add fair fallback value multiplier incase the config result is undefined
* refactor(Balance): rename 'tokens' to 'tokenCredits'
* feat: balance check, add tx.js for new tx-related methods and tests
* chore(summaryPrompts): update prompt token count
* refactor(callbacks): pass req, res
wip: check balance
* refactor(Tx): make convoId a String type, fix(calculateTokenValue)
* refactor(BaseClient): add conversationId as client prop when assigned
* feat(RunManager): track LLM runs with manager, track token spend from LLM,
refactor(OpenAIClient): use RunManager to create callbacks, pass user prop to langchain api calls
* feat(spendTokens): helper to spend prompt/completion tokens
* feat(checkBalance): add helper to check, log, deny request if balance doesn't have enough funds
refactor(Balance): static check method to return object instead of boolean now
wip(OpenAIClient): implement use of checkBalance
* refactor(initializeLLM): add token buffer to assure summary isn't generated when subsequent payload is too large
refactor(OpenAIClient): add checkBalance
refactor(createStartHandler): add checkBalance
* chore: remove prompt and completion token logging from route handler
* chore(spendTokens): add JSDoc
* feat(logTokenCost): record transactions for basic api calls
* chore(ask/edit): invoke getResponseSender only once per API call
* refactor(ask/edit): pass promptTokens to getIds and include in abort data
* refactor(getIds -> getReqData): rename function
* refactor(Tx): increase value if incomplete message
* feat: record tokenUsage when message is aborted
* refactor: subtract tokens when payload includes function_call
* refactor: add namespace for token_balance
* fix(spendTokens): only execute if corresponding token type amounts are defined
* refactor(checkBalance): throws Error if not enough token credits
* refactor(runTitleChain): pass and use signal, spread object props in create helpers, and use 'call' instead of 'run'
* fix(abortMiddleware): circular dependency, and default to empty string for completionTokens
* fix: properly cancel title requests when there isn't enough tokens to generate
* feat(predictNewSummary): custom chain for summaries to allow signal passing
refactor(summaryBuffer): use new custom chain
* feat(RunManager): add getRunByConversationId method, refactor: remove run and throw llm error on handleLLMError
* refactor(createStartHandler): if summary, add error details to runs
* fix(OpenAIClient): support aborting from summarization & showing error to user
refactor(summarizeMessages): remove unnecessary operations counting summaryPromptTokens and note for alternative, pass signal to summaryBuffer
* refactor(logTokenCost -> recordTokenUsage): rename
* refactor(checkBalance): include promptTokens in errorMessage
* refactor(checkBalance/spendTokens): move to models dir
* fix(createLanguageChain): correctly pass config
* refactor(initializeLLM/title): add tokenBuffer of 150 for balance check
* refactor(openAPIPlugin): pass signal and memory, filter functions by the one being called
* refactor(createStartHandler): add error to run if context is plugins as well
* refactor(RunManager/handleLLMError): throw error immediately if plugins, don't remove run
* refactor(PluginsClient): pass memory and signal to tools, cleanup error handling logic
* chore: use absolute equality for addTitle condition
* refactor(checkBalance): move checkBalance to execute after userMessage and tokenCounts are saved, also make conditional
* style: icon changes to match official
* fix(BaseClient): getTokenCountForResponse -> getTokenCount
* fix(formatLangChainMessages): add kwargs as fallback prop from lc_kwargs, update JSDoc
* refactor(Tx.create): does not update balance if CHECK_BALANCE is not enabled
* fix(e2e/cleanUp): cleanup new collections, import all model methods from index
* fix(config/add-balance): add uncaughtException listener
* fix: circular dependency
* refactor(initializeLLM/checkBalance): append new generations to errorMessage if cost exceeds balance
* fix(handleResponseMessage): only record token usage in this method if not error and completion is not skipped
* fix(createStartHandler): correct condition for generations
* chore: bump postcss due to moderate severity vulnerability
* chore: bump zod due to low severity vulnerability
* chore: bump openai & data-provider version
* feat(types): OpenAI Message types
* chore: update bun lockfile
* refactor(CodeBlock): add error block formatting
* refactor(utils/Plugin): factor out formatJSON and cn to separate files (json.ts and cn.ts), add extractJSON
* chore(logViolation): delete user_id after error is logged
* refactor(getMessageError -> Error): change to React.FC, add token_balance handling, use extractJSON to determine JSON instead of regex
* fix(DALL-E): use latest openai SDK
* chore: reorganize imports, fix type issue
* feat(server): add balance route
* fix(api/models): add auth
* feat(data-provider): /api/balance query
* feat: show balance if checking is enabled, refetch on final message or error
* chore: update docs, .env.example with token_usage info, add balance script command
* fix(Balance): fallback to empty obj for balance query
* style: slight adjustment of balance element
* docs(token_usage): add PR notes
2023-10-05 18:34:10 -04:00
|
|
|
|
|
|
|
describe('matchModelName', () => {
|
|
|
|
it('should return the exact model name if it exists in maxTokensMap', () => {
|
|
|
|
expect(matchModelName('gpt-4-32k-0613')).toBe('gpt-4-32k-0613');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the closest matching key for partial matches', () => {
|
|
|
|
expect(matchModelName('gpt-4-32k-unknown')).toBe('gpt-4-32k');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the input model name if no match is found', () => {
|
|
|
|
expect(matchModelName('unknown-model')).toBe('unknown-model');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return undefined for non-string inputs', () => {
|
|
|
|
expect(matchModelName(undefined)).toBeUndefined();
|
|
|
|
expect(matchModelName(null)).toBeUndefined();
|
|
|
|
expect(matchModelName(123)).toBeUndefined();
|
|
|
|
expect(matchModelName({})).toBeUndefined();
|
|
|
|
});
|
2023-11-06 15:26:16 -05:00
|
|
|
|
|
|
|
// 11/06 Update
|
|
|
|
it('should return the exact model name for gpt-3.5-turbo-1106 if it exists in maxTokensMap', () => {
|
|
|
|
expect(matchModelName('gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the exact model name for gpt-4-1106 if it exists in maxTokensMap', () => {
|
|
|
|
expect(matchModelName('gpt-4-1106')).toBe('gpt-4-1106');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the closest matching key for gpt-3.5-turbo-1106 partial matches', () => {
|
|
|
|
expect(matchModelName('gpt-3.5-turbo-1106/something')).toBe('gpt-3.5-turbo-1106');
|
|
|
|
expect(matchModelName('something/gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the closest matching key for gpt-4-1106 partial matches', () => {
|
2025-07-11 03:24:13 -04:00
|
|
|
expect(matchModelName('gpt-4-1106/something')).toBe('gpt-4-1106');
|
2023-11-06 15:26:16 -05:00
|
|
|
expect(matchModelName('gpt-4-1106-preview')).toBe('gpt-4-1106');
|
|
|
|
expect(matchModelName('gpt-4-1106-vision-preview')).toBe('gpt-4-1106');
|
|
|
|
});
|
2023-12-10 14:54:13 -05:00
|
|
|
|
2024-01-25 22:57:18 -05:00
|
|
|
// 01/25 Update
|
|
|
|
it('should return the closest matching key for gpt-4-turbo/0125 matches', () => {
|
|
|
|
expect(matchModelName('openai/gpt-4-0125')).toBe('gpt-4-0125');
|
|
|
|
expect(matchModelName('gpt-4-turbo-preview')).toBe('gpt-4-turbo');
|
|
|
|
expect(matchModelName('gpt-4-turbo-vision-preview')).toBe('gpt-4-turbo');
|
|
|
|
expect(matchModelName('gpt-4-0125')).toBe('gpt-4-0125');
|
|
|
|
expect(matchModelName('gpt-4-0125-preview')).toBe('gpt-4-0125');
|
|
|
|
expect(matchModelName('gpt-4-0125-vision-preview')).toBe('gpt-4-0125');
|
|
|
|
});
|
|
|
|
|
2025-04-14 14:55:59 -04:00
|
|
|
it('should return the closest matching key for gpt-4.1 matches', () => {
|
|
|
|
expect(matchModelName('openai/gpt-4.1')).toBe('gpt-4.1');
|
|
|
|
expect(matchModelName('gpt-4.1-preview')).toBe('gpt-4.1');
|
|
|
|
expect(matchModelName('gpt-4.1-2024-08-06')).toBe('gpt-4.1');
|
|
|
|
expect(matchModelName('gpt-4.1-2024-08-06-0718')).toBe('gpt-4.1');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the closest matching key for gpt-4.1-mini matches', () => {
|
|
|
|
expect(matchModelName('openai/gpt-4.1-mini')).toBe('gpt-4.1-mini');
|
|
|
|
expect(matchModelName('gpt-4.1-mini-preview')).toBe('gpt-4.1-mini');
|
|
|
|
expect(matchModelName('gpt-4.1-mini-2024-08-06')).toBe('gpt-4.1-mini');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the closest matching key for gpt-4.1-nano matches', () => {
|
|
|
|
expect(matchModelName('openai/gpt-4.1-nano')).toBe('gpt-4.1-nano');
|
|
|
|
expect(matchModelName('gpt-4.1-nano-preview')).toBe('gpt-4.1-nano');
|
|
|
|
expect(matchModelName('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano');
|
|
|
|
});
|
|
|
|
|
2025-08-07 16:01:29 -04:00
|
|
|
it('should return the closest matching key for gpt-5 matches', () => {
|
|
|
|
expect(matchModelName('openai/gpt-5')).toBe('gpt-5');
|
|
|
|
expect(matchModelName('gpt-5-preview')).toBe('gpt-5');
|
|
|
|
expect(matchModelName('gpt-5-2025-01-30')).toBe('gpt-5');
|
|
|
|
expect(matchModelName('gpt-5-2025-01-30-0130')).toBe('gpt-5');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the closest matching key for gpt-5-mini matches', () => {
|
|
|
|
expect(matchModelName('openai/gpt-5-mini')).toBe('gpt-5-mini');
|
|
|
|
expect(matchModelName('gpt-5-mini-preview')).toBe('gpt-5-mini');
|
|
|
|
expect(matchModelName('gpt-5-mini-2025-01-30')).toBe('gpt-5-mini');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the closest matching key for gpt-5-nano matches', () => {
|
|
|
|
expect(matchModelName('openai/gpt-5-nano')).toBe('gpt-5-nano');
|
|
|
|
expect(matchModelName('gpt-5-nano-preview')).toBe('gpt-5-nano');
|
|
|
|
expect(matchModelName('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano');
|
|
|
|
});
|
|
|
|
|
2023-12-10 14:54:13 -05:00
|
|
|
// Tests for Google models
|
|
|
|
it('should return the exact model name if it exists in maxTokensMap - Google models', () => {
|
|
|
|
expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k');
|
|
|
|
expect(matchModelName('codechat-bison-32k', EModelEndpoint.google)).toBe('codechat-bison-32k');
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the input model name if no match is found - Google models', () => {
|
|
|
|
expect(matchModelName('unknown-google-model', EModelEndpoint.google)).toBe(
|
|
|
|
'unknown-google-model',
|
|
|
|
);
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should return the closest matching key for partial matches - Google models', () => {
|
|
|
|
expect(matchModelName('code-', EModelEndpoint.google)).toBe('code-');
|
|
|
|
expect(matchModelName('chat-', EModelEndpoint.google)).toBe('chat-');
|
|
|
|
});
|
feat: Accurate Token Usage Tracking & Optional Balance (#1018)
* refactor(Chains/llms): allow passing callbacks
* refactor(BaseClient): accurately count completion tokens as generation only
* refactor(OpenAIClient): remove unused getTokenCountForResponse, pass streaming var and callbacks in initializeLLM
* wip: summary prompt tokens
* refactor(summarizeMessages): new cut-off strategy that generates a better summary by adding context from beginning, truncating the middle, and providing the end
wip: draft out relevant providers and variables for token tracing
* refactor(createLLM): make streaming prop false by default
* chore: remove use of getTokenCountForResponse
* refactor(agents): use BufferMemory as ConversationSummaryBufferMemory token usage not easy to trace
* chore: remove passing of streaming prop, also console log useful vars for tracing
* feat: formatFromLangChain helper function to count tokens for ChatModelStart
* refactor(initializeLLM): add role for LLM tracing
* chore(formatFromLangChain): update JSDoc
* feat(formatMessages): formats langChain messages into OpenAI payload format
* chore: install openai-chat-tokens
* refactor(formatMessage): optimize conditional langChain logic
fix(formatFromLangChain): fix destructuring
* feat: accurate prompt tokens for ChatModelStart before generation
* refactor(handleChatModelStart): move to callbacks dir, use factory function
* refactor(initializeLLM): rename 'role' to 'context'
* feat(Balance/Transaction): new schema/models for tracking token spend
refactor(Key): factor out model export to separate file
* refactor(initializeClient): add req,res objects to client options
* feat: add-balance script to add to an existing users' token balance
refactor(Transaction): use multiplier map/function, return balance update
* refactor(Tx): update enum for tokenType, return 1 for multiplier if no map match
* refactor(Tx): add fair fallback value multiplier incase the config result is undefined
* refactor(Balance): rename 'tokens' to 'tokenCredits'
* feat: balance check, add tx.js for new tx-related methods and tests
* chore(summaryPrompts): update prompt token count
* refactor(callbacks): pass req, res
wip: check balance
* refactor(Tx): make convoId a String type, fix(calculateTokenValue)
* refactor(BaseClient): add conversationId as client prop when assigned
* feat(RunManager): track LLM runs with manager, track token spend from LLM,
refactor(OpenAIClient): use RunManager to create callbacks, pass user prop to langchain api calls
* feat(spendTokens): helper to spend prompt/completion tokens
* feat(checkBalance): add helper to check, log, deny request if balance doesn't have enough funds
refactor(Balance): static check method to return object instead of boolean now
wip(OpenAIClient): implement use of checkBalance
* refactor(initializeLLM): add token buffer to assure summary isn't generated when subsequent payload is too large
refactor(OpenAIClient): add checkBalance
refactor(createStartHandler): add checkBalance
* chore: remove prompt and completion token logging from route handler
* chore(spendTokens): add JSDoc
* feat(logTokenCost): record transactions for basic api calls
* chore(ask/edit): invoke getResponseSender only once per API call
* refactor(ask/edit): pass promptTokens to getIds and include in abort data
* refactor(getIds -> getReqData): rename function
* refactor(Tx): increase value if incomplete message
* feat: record tokenUsage when message is aborted
* refactor: subtract tokens when payload includes function_call
* refactor: add namespace for token_balance
* fix(spendTokens): only execute if corresponding token type amounts are defined
* refactor(checkBalance): throws Error if not enough token credits
* refactor(runTitleChain): pass and use signal, spread object props in create helpers, and use 'call' instead of 'run'
* fix(abortMiddleware): circular dependency, and default to empty string for completionTokens
* fix: properly cancel title requests when there isn't enough tokens to generate
* feat(predictNewSummary): custom chain for summaries to allow signal passing
refactor(summaryBuffer): use new custom chain
* feat(RunManager): add getRunByConversationId method, refactor: remove run and throw llm error on handleLLMError
* refactor(createStartHandler): if summary, add error details to runs
* fix(OpenAIClient): support aborting from summarization & showing error to user
refactor(summarizeMessages): remove unnecessary operations counting summaryPromptTokens and note for alternative, pass signal to summaryBuffer
* refactor(logTokenCost -> recordTokenUsage): rename
* refactor(checkBalance): include promptTokens in errorMessage
* refactor(checkBalance/spendTokens): move to models dir
* fix(createLanguageChain): correctly pass config
* refactor(initializeLLM/title): add tokenBuffer of 150 for balance check
* refactor(openAPIPlugin): pass signal and memory, filter functions by the one being called
* refactor(createStartHandler): add error to run if context is plugins as well
* refactor(RunManager/handleLLMError): throw error immediately if plugins, don't remove run
* refactor(PluginsClient): pass memory and signal to tools, cleanup error handling logic
* chore: use absolute equality for addTitle condition
* refactor(checkBalance): move checkBalance to execute after userMessage and tokenCounts are saved, also make conditional
* style: icon changes to match official
* fix(BaseClient): getTokenCountForResponse -> getTokenCount
* fix(formatLangChainMessages): add kwargs as fallback prop from lc_kwargs, update JSDoc
* refactor(Tx.create): does not update balance if CHECK_BALANCE is not enabled
* fix(e2e/cleanUp): cleanup new collections, import all model methods from index
* fix(config/add-balance): add uncaughtException listener
* fix: circular dependency
* refactor(initializeLLM/checkBalance): append new generations to errorMessage if cost exceeds balance
* fix(handleResponseMessage): only record token usage in this method if not error and completion is not skipped
* fix(createStartHandler): correct condition for generations
* chore: bump postcss due to moderate severity vulnerability
* chore: bump zod due to low severity vulnerability
* chore: bump openai & data-provider version
* feat(types): OpenAI Message types
* chore: update bun lockfile
* refactor(CodeBlock): add error block formatting
* refactor(utils/Plugin): factor out formatJSON and cn to separate files (json.ts and cn.ts), add extractJSON
* chore(logViolation): delete user_id after error is logged
* refactor(getMessageError -> Error): change to React.FC, add token_balance handling, use extractJSON to determine JSON instead of regex
* fix(DALL-E): use latest openai SDK
* chore: reorganize imports, fix type issue
* feat(server): add balance route
* fix(api/models): add auth
* feat(data-provider): /api/balance query
* feat: show balance if checking is enabled, refetch on final message or error
* chore: update docs, .env.example with token_usage info, add balance script command
* fix(Balance): fallback to empty obj for balance query
* style: slight adjustment of balance element
* docs(token_usage): add PR notes
2023-10-05 18:34:10 -04:00
|
|
|
});
|
2024-11-01 18:36:39 -04:00
|
|
|
|
|
|
|
describe('Meta Models Tests', () => {
|
|
|
|
describe('getModelMaxTokens', () => {
|
|
|
|
test('should return correct tokens for LLaMa 2 models', () => {
|
|
|
|
expect(getModelMaxTokens('llama2')).toBe(4000);
|
|
|
|
expect(getModelMaxTokens('llama2.70b')).toBe(4000);
|
|
|
|
expect(getModelMaxTokens('llama2-13b')).toBe(4000);
|
|
|
|
expect(getModelMaxTokens('llama2-70b')).toBe(4000);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for LLaMa 3 models', () => {
|
|
|
|
expect(getModelMaxTokens('llama3')).toBe(8000);
|
|
|
|
expect(getModelMaxTokens('llama3.8b')).toBe(8000);
|
|
|
|
expect(getModelMaxTokens('llama3.70b')).toBe(8000);
|
|
|
|
expect(getModelMaxTokens('llama3-8b')).toBe(8000);
|
|
|
|
expect(getModelMaxTokens('llama3-70b')).toBe(8000);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for LLaMa 3.1 models', () => {
|
|
|
|
expect(getModelMaxTokens('llama3.1:8b')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('llama3.1:70b')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('llama3.1:405b')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('llama3-1-8b')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('llama3-1-70b')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('llama3-1-405b')).toBe(127500);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should handle partial matches for Meta models', () => {
|
|
|
|
// Test with full model names
|
|
|
|
expect(getModelMaxTokens('meta/llama3.1:405b')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('meta/llama3.1:70b')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('meta/llama3.1:8b')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('meta/llama3-1-8b')).toBe(127500);
|
|
|
|
|
|
|
|
// Test base versions
|
|
|
|
expect(getModelMaxTokens('meta/llama3.1')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('meta/llama3-1')).toBe(127500);
|
|
|
|
expect(getModelMaxTokens('meta/llama3')).toBe(8000);
|
|
|
|
expect(getModelMaxTokens('meta/llama2')).toBe(4000);
|
|
|
|
});
|
2024-11-04 12:59:04 -05:00
|
|
|
|
|
|
|
test('should match Deepseek model variations', () => {
|
2025-01-22 07:50:09 -05:00
|
|
|
expect(getModelMaxTokens('deepseek-chat')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['deepseek'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('deepseek-coder')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['deepseek'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('deepseek-reasoner')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['deepseek-reasoner'],
|
|
|
|
);
|
2025-03-17 16:43:44 -04:00
|
|
|
expect(getModelMaxTokens('deepseek.r1')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.openAI]['deepseek.r1'],
|
|
|
|
);
|
2024-11-04 12:59:04 -05:00
|
|
|
});
|
2024-11-01 18:36:39 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
describe('matchModelName', () => {
|
|
|
|
test('should match exact LLaMa model names', () => {
|
|
|
|
expect(matchModelName('llama2')).toBe('llama2');
|
|
|
|
expect(matchModelName('llama3')).toBe('llama3');
|
|
|
|
expect(matchModelName('llama3.1:8b')).toBe('llama3.1:8b');
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should match LLaMa model variations', () => {
|
|
|
|
// Test full model names
|
|
|
|
expect(matchModelName('meta/llama3.1:405b')).toBe('llama3.1:405b');
|
|
|
|
expect(matchModelName('meta/llama3.1:70b')).toBe('llama3.1:70b');
|
|
|
|
expect(matchModelName('meta/llama3.1:8b')).toBe('llama3.1:8b');
|
|
|
|
expect(matchModelName('meta/llama3-1-8b')).toBe('llama3-1-8b');
|
|
|
|
|
|
|
|
// Test base versions
|
|
|
|
expect(matchModelName('meta/llama3.1')).toBe('llama3.1');
|
|
|
|
expect(matchModelName('meta/llama3-1')).toBe('llama3-1');
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should handle custom endpoint for Meta models', () => {
|
|
|
|
expect(matchModelName('llama2', EModelEndpoint.bedrock)).toBe('llama2');
|
|
|
|
expect(matchModelName('llama3', EModelEndpoint.bedrock)).toBe('llama3');
|
|
|
|
expect(matchModelName('llama3.1:8b', EModelEndpoint.bedrock)).toBe('llama3.1:8b');
|
|
|
|
});
|
2024-11-04 12:59:04 -05:00
|
|
|
|
|
|
|
test('should match Deepseek model variations', () => {
|
|
|
|
expect(matchModelName('deepseek-chat')).toBe('deepseek');
|
|
|
|
expect(matchModelName('deepseek-coder')).toBe('deepseek');
|
|
|
|
});
|
2024-11-01 18:36:39 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
describe('processModelData with Meta models', () => {
|
|
|
|
test('should process Meta model data correctly', () => {
|
|
|
|
const input = {
|
|
|
|
data: [
|
|
|
|
{
|
|
|
|
id: 'llama2',
|
|
|
|
pricing: {
|
|
|
|
prompt: '0.00001',
|
|
|
|
completion: '0.00003',
|
|
|
|
},
|
|
|
|
context_length: 4000,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
id: 'llama3',
|
|
|
|
pricing: {
|
|
|
|
prompt: '0.00002',
|
|
|
|
completion: '0.00004',
|
|
|
|
},
|
|
|
|
context_length: 8000,
|
|
|
|
},
|
|
|
|
],
|
|
|
|
};
|
|
|
|
|
|
|
|
const result = processModelData(input);
|
|
|
|
expect(result.llama2).toEqual({
|
|
|
|
prompt: 10,
|
|
|
|
completion: 30,
|
|
|
|
context: 4000,
|
|
|
|
});
|
|
|
|
expect(result.llama3).toEqual({
|
|
|
|
prompt: 20,
|
|
|
|
completion: 40,
|
|
|
|
context: 8000,
|
|
|
|
});
|
|
|
|
});
|
|
|
|
});
|
|
|
|
});
|
2025-02-24 20:08:55 -05:00
|
|
|
|
|
|
|
describe('Grok Model Tests - Tokens', () => {
|
|
|
|
describe('getModelMaxTokens', () => {
|
|
|
|
test('should return correct tokens for Grok vision models', () => {
|
|
|
|
expect(getModelMaxTokens('grok-2-vision-1212')).toBe(32768);
|
|
|
|
expect(getModelMaxTokens('grok-2-vision')).toBe(32768);
|
|
|
|
expect(getModelMaxTokens('grok-2-vision-latest')).toBe(32768);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for Grok beta models', () => {
|
|
|
|
expect(getModelMaxTokens('grok-vision-beta')).toBe(8192);
|
|
|
|
expect(getModelMaxTokens('grok-beta')).toBe(131072);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for Grok text models', () => {
|
|
|
|
expect(getModelMaxTokens('grok-2-1212')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('grok-2')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('grok-2-latest')).toBe(131072);
|
|
|
|
});
|
|
|
|
|
2025-04-12 18:46:36 -04:00
|
|
|
test('should return correct tokens for Grok 3 series models', () => {
|
|
|
|
expect(getModelMaxTokens('grok-3')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('grok-3-fast')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('grok-3-mini')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('grok-3-mini-fast')).toBe(131072);
|
|
|
|
});
|
|
|
|
|
2025-07-11 03:24:13 -04:00
|
|
|
test('should return correct tokens for Grok 4 model', () => {
|
|
|
|
expect(getModelMaxTokens('grok-4-0709')).toBe(256000);
|
|
|
|
});
|
|
|
|
|
2025-02-24 20:08:55 -05:00
|
|
|
test('should handle partial matches for Grok models with prefixes', () => {
|
|
|
|
// Vision models should match before general models
|
2025-04-12 18:46:36 -04:00
|
|
|
expect(getModelMaxTokens('xai/grok-2-vision-1212')).toBe(32768);
|
|
|
|
expect(getModelMaxTokens('xai/grok-2-vision')).toBe(32768);
|
|
|
|
expect(getModelMaxTokens('xai/grok-2-vision-latest')).toBe(32768);
|
2025-02-24 20:08:55 -05:00
|
|
|
// Beta models
|
2025-04-12 18:46:36 -04:00
|
|
|
expect(getModelMaxTokens('xai/grok-vision-beta')).toBe(8192);
|
|
|
|
expect(getModelMaxTokens('xai/grok-beta')).toBe(131072);
|
2025-02-24 20:08:55 -05:00
|
|
|
// Text models
|
2025-04-12 18:46:36 -04:00
|
|
|
expect(getModelMaxTokens('xai/grok-2-1212')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('xai/grok-2')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('xai/grok-2-latest')).toBe(131072);
|
|
|
|
// Grok 3 models
|
|
|
|
expect(getModelMaxTokens('xai/grok-3')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('xai/grok-3-fast')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('xai/grok-3-mini')).toBe(131072);
|
|
|
|
expect(getModelMaxTokens('xai/grok-3-mini-fast')).toBe(131072);
|
2025-07-11 03:24:13 -04:00
|
|
|
// Grok 4 model
|
|
|
|
expect(getModelMaxTokens('xai/grok-4-0709')).toBe(256000);
|
2025-02-24 20:08:55 -05:00
|
|
|
});
|
|
|
|
});
|
|
|
|
|
|
|
|
describe('matchModelName', () => {
|
|
|
|
test('should match exact Grok model names', () => {
|
|
|
|
// Vision models
|
|
|
|
expect(matchModelName('grok-2-vision-1212')).toBe('grok-2-vision-1212');
|
|
|
|
expect(matchModelName('grok-2-vision')).toBe('grok-2-vision');
|
|
|
|
expect(matchModelName('grok-2-vision-latest')).toBe('grok-2-vision-latest');
|
|
|
|
// Beta models
|
|
|
|
expect(matchModelName('grok-vision-beta')).toBe('grok-vision-beta');
|
|
|
|
expect(matchModelName('grok-beta')).toBe('grok-beta');
|
|
|
|
// Text models
|
|
|
|
expect(matchModelName('grok-2-1212')).toBe('grok-2-1212');
|
|
|
|
expect(matchModelName('grok-2')).toBe('grok-2');
|
|
|
|
expect(matchModelName('grok-2-latest')).toBe('grok-2-latest');
|
2025-04-12 18:46:36 -04:00
|
|
|
// Grok 3 models
|
|
|
|
expect(matchModelName('grok-3')).toBe('grok-3');
|
|
|
|
expect(matchModelName('grok-3-fast')).toBe('grok-3-fast');
|
|
|
|
expect(matchModelName('grok-3-mini')).toBe('grok-3-mini');
|
|
|
|
expect(matchModelName('grok-3-mini-fast')).toBe('grok-3-mini-fast');
|
2025-07-11 03:24:13 -04:00
|
|
|
// Grok 4 model
|
|
|
|
expect(matchModelName('grok-4-0709')).toBe('grok-4');
|
2025-02-24 20:08:55 -05:00
|
|
|
});
|
|
|
|
|
|
|
|
test('should match Grok model variations with prefixes', () => {
|
|
|
|
// Vision models should match before general models
|
2025-04-12 18:46:36 -04:00
|
|
|
expect(matchModelName('xai/grok-2-vision-1212')).toBe('grok-2-vision-1212');
|
|
|
|
expect(matchModelName('xai/grok-2-vision')).toBe('grok-2-vision');
|
|
|
|
expect(matchModelName('xai/grok-2-vision-latest')).toBe('grok-2-vision-latest');
|
2025-02-24 20:08:55 -05:00
|
|
|
// Beta models
|
2025-04-12 18:46:36 -04:00
|
|
|
expect(matchModelName('xai/grok-vision-beta')).toBe('grok-vision-beta');
|
|
|
|
expect(matchModelName('xai/grok-beta')).toBe('grok-beta');
|
2025-02-24 20:08:55 -05:00
|
|
|
// Text models
|
2025-04-12 18:46:36 -04:00
|
|
|
expect(matchModelName('xai/grok-2-1212')).toBe('grok-2-1212');
|
|
|
|
expect(matchModelName('xai/grok-2')).toBe('grok-2');
|
|
|
|
expect(matchModelName('xai/grok-2-latest')).toBe('grok-2-latest');
|
|
|
|
// Grok 3 models
|
|
|
|
expect(matchModelName('xai/grok-3')).toBe('grok-3');
|
|
|
|
expect(matchModelName('xai/grok-3-fast')).toBe('grok-3-fast');
|
|
|
|
expect(matchModelName('xai/grok-3-mini')).toBe('grok-3-mini');
|
|
|
|
expect(matchModelName('xai/grok-3-mini-fast')).toBe('grok-3-mini-fast');
|
2025-07-11 03:24:13 -04:00
|
|
|
// Grok 4 model
|
|
|
|
expect(matchModelName('xai/grok-4-0709')).toBe('grok-4');
|
2025-02-24 20:08:55 -05:00
|
|
|
});
|
|
|
|
});
|
|
|
|
});
|
2025-05-22 15:00:44 -04:00
|
|
|
|
|
|
|
describe('Claude Model Tests', () => {
|
|
|
|
it('should return correct context length for Claude 4 models', () => {
|
2025-08-16 13:36:46 -04:00
|
|
|
expect(getModelMaxTokens('claude-sonnet-4')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.anthropic]['claude-sonnet-4'],
|
|
|
|
);
|
|
|
|
expect(getModelMaxTokens('claude-opus-4')).toBe(
|
|
|
|
maxTokensMap[EModelEndpoint.anthropic]['claude-opus-4'],
|
|
|
|
);
|
2025-05-22 15:00:44 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
it('should handle Claude 4 model name variations with different prefixes and suffixes', () => {
|
|
|
|
const modelVariations = [
|
|
|
|
'claude-sonnet-4',
|
|
|
|
'claude-sonnet-4-20240229',
|
|
|
|
'claude-sonnet-4-latest',
|
|
|
|
'anthropic/claude-sonnet-4',
|
|
|
|
'claude-sonnet-4/anthropic',
|
|
|
|
'claude-sonnet-4-preview',
|
|
|
|
'claude-sonnet-4-20240229-preview',
|
|
|
|
'claude-opus-4',
|
|
|
|
'claude-opus-4-20240229',
|
|
|
|
'claude-opus-4-latest',
|
|
|
|
'anthropic/claude-opus-4',
|
|
|
|
'claude-opus-4/anthropic',
|
|
|
|
'claude-opus-4-preview',
|
|
|
|
'claude-opus-4-20240229-preview',
|
|
|
|
];
|
|
|
|
|
|
|
|
modelVariations.forEach((model) => {
|
2025-08-16 13:36:46 -04:00
|
|
|
const modelKey = findMatchingPattern(model, maxTokensMap[EModelEndpoint.anthropic]);
|
|
|
|
expect(getModelMaxTokens(model)).toBe(maxTokensMap[EModelEndpoint.anthropic][modelKey]);
|
2025-05-22 15:00:44 -04:00
|
|
|
});
|
|
|
|
});
|
|
|
|
|
|
|
|
it('should match model names correctly for Claude 4 models', () => {
|
|
|
|
const modelVariations = [
|
|
|
|
'claude-sonnet-4',
|
|
|
|
'claude-sonnet-4-20240229',
|
|
|
|
'claude-sonnet-4-latest',
|
|
|
|
'anthropic/claude-sonnet-4',
|
|
|
|
'claude-sonnet-4/anthropic',
|
|
|
|
'claude-sonnet-4-preview',
|
|
|
|
'claude-sonnet-4-20240229-preview',
|
|
|
|
'claude-opus-4',
|
|
|
|
'claude-opus-4-20240229',
|
|
|
|
'claude-opus-4-latest',
|
|
|
|
'anthropic/claude-opus-4',
|
|
|
|
'claude-opus-4/anthropic',
|
|
|
|
'claude-opus-4-preview',
|
|
|
|
'claude-opus-4-20240229-preview',
|
|
|
|
];
|
|
|
|
|
|
|
|
modelVariations.forEach((model) => {
|
|
|
|
const isSonnet = model.includes('sonnet');
|
|
|
|
const expectedModel = isSonnet ? 'claude-sonnet-4' : 'claude-opus-4';
|
|
|
|
expect(matchModelName(model, EModelEndpoint.anthropic)).toBe(expectedModel);
|
|
|
|
});
|
|
|
|
});
|
|
|
|
});
|
2025-07-22 15:52:54 -04:00
|
|
|
|
|
|
|
describe('Kimi Model Tests', () => {
|
|
|
|
describe('getModelMaxTokens', () => {
|
|
|
|
test('should return correct tokens for Kimi models', () => {
|
|
|
|
expect(getModelMaxTokens('kimi')).toBe(131000);
|
|
|
|
expect(getModelMaxTokens('kimi-k2')).toBe(131000);
|
|
|
|
expect(getModelMaxTokens('kimi-vl')).toBe(131000);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should return correct tokens for Kimi models with provider prefix', () => {
|
|
|
|
expect(getModelMaxTokens('moonshotai/kimi-k2')).toBe(131000);
|
|
|
|
expect(getModelMaxTokens('moonshotai/kimi')).toBe(131000);
|
|
|
|
expect(getModelMaxTokens('moonshotai/kimi-vl')).toBe(131000);
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should handle partial matches for Kimi models', () => {
|
|
|
|
expect(getModelMaxTokens('kimi-k2-latest')).toBe(131000);
|
|
|
|
expect(getModelMaxTokens('kimi-vl-preview')).toBe(131000);
|
|
|
|
expect(getModelMaxTokens('kimi-2024')).toBe(131000);
|
|
|
|
});
|
|
|
|
});
|
|
|
|
|
|
|
|
describe('matchModelName', () => {
|
|
|
|
test('should match exact Kimi model names', () => {
|
|
|
|
expect(matchModelName('kimi')).toBe('kimi');
|
|
|
|
expect(matchModelName('kimi-k2')).toBe('kimi');
|
|
|
|
expect(matchModelName('kimi-vl')).toBe('kimi');
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should match Kimi model variations with provider prefix', () => {
|
|
|
|
expect(matchModelName('moonshotai/kimi')).toBe('kimi');
|
|
|
|
expect(matchModelName('moonshotai/kimi-k2')).toBe('kimi');
|
|
|
|
expect(matchModelName('moonshotai/kimi-vl')).toBe('kimi');
|
|
|
|
});
|
|
|
|
|
|
|
|
test('should match Kimi model variations with suffixes', () => {
|
|
|
|
expect(matchModelName('kimi-k2-latest')).toBe('kimi');
|
|
|
|
expect(matchModelName('kimi-vl-preview')).toBe('kimi');
|
|
|
|
expect(matchModelName('kimi-2024')).toBe('kimi');
|
|
|
|
});
|
|
|
|
});
|
|
|
|
});
|