🔀 feat: moonshotai/kimi Context and OpenRouter Endpoint Token Config (#8604)

* feat: Enhance agent initialization with endpoint token configuration and round max context tokens

* feat: recognize moonshot/kimi model context window

* chore: remove unused i18n key
Danny Avila 2025-07-22 15:52:54 -04:00 committed by GitHub
parent 259224d986
commit f797e90d79
5 changed files with 55 additions and 6 deletions

View file

@@ -131,7 +131,7 @@ const initializeAgent = async ({
  );
  const agentMaxContextTokens = optionalChainWithEmptyCheck(
    maxContextTokens,
-    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
+    getModelMaxTokens(tokensModel, providerEndpointMap[provider], options.endpointTokenConfig),
    4096,
  );
@@ -191,7 +191,7 @@ const initializeAgent = async ({
    resendFiles,
    toolContextMap,
    useLegacyContent: !!options.useLegacyContent,
-    maxContextTokens: (agentMaxContextTokens - maxTokens) * 0.9,
+    maxContextTokens: Math.round((agentMaxContextTokens - maxTokens) * 0.9),
  };
};
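
Taken together, the two hunks above let the agent's max context come from a per-endpoint token config (for example, limits fetched for an OpenRouter custom endpoint) instead of only the static token map, and the 90% headroom calculation is rounded to a whole number of tokens. A minimal sketch of that resolution order, assuming a simple model-keyed shape for the config (the real shape and lookup live inside getModelMaxTokens):

// Sketch only: the config shape and helper below are assumptions for illustration.
const endpointTokenConfig = {
  'moonshotai/kimi-k2': { context: 131000 }, // e.g. limits reported by the provider
};

function resolveAgentMaxContext(model, endpoint, tokenConfig, maxTokens = 1024) {
  const fromEndpoint = tokenConfig?.[model]?.context; // per-endpoint override, if provided
  const fromMap = getModelMaxTokens(model, endpoint); // static maxTokensMap lookup
  const agentMaxContextTokens = fromEndpoint ?? fromMap ?? 4096; // same fallback as above
  // Without Math.round this can be fractional: (131000 - 1024) * 0.9 === 116978.4
  return Math.round((agentMaxContextTokens - maxTokens) * 0.9);
}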

View file

@@ -141,8 +141,9 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
  const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
  if (options != null) {
    options.useLegacyContent = true;
+    options.endpointTokenConfig = endpointTokenConfig;
  }
-  if (!customOptions.streamRate) {
+  if (!clientOptions.streamRate) {
    return options;
  }
  options.llmConfig.callbacks = [
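
With this change, the custom-endpoint initializer attaches the fetched token config to the returned options so the agent initialization above can forward it to getModelMaxTokens; the stream-rate guard also reads clientOptions.streamRate, matching the object passed to getOpenAIConfig a few lines earlier. A hypothetical consumer-side sketch (the endpoint key and model id are illustrative):

// Sketch only, not the actual call sites.
const options = await initializeClient({ req, res, endpointOption, optionsOnly: true });
if (options?.endpointTokenConfig) {
  // per-endpoint limits ride along with the client options and reach
  // getModelMaxTokens during agent initialization
  const max = getModelMaxTokens('moonshotai/kimi-k2', 'custom', options.endpointTokenConfig);
}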

View file

@@ -226,7 +226,14 @@ const xAIModels = {
  'grok-4': 256000, // 256K context
};

-const aggregateModels = { ...openAIModels, ...googleModels, ...bedrockModels, ...xAIModels };
+const aggregateModels = {
+  ...openAIModels,
+  ...googleModels,
+  ...bedrockModels,
+  ...xAIModels,
+  // misc.
+  kimi: 131000,
+};

const maxTokensMap = {
  [EModelEndpoint.azureOpenAI]: openAIModels,
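
The single kimi entry relies on the token lookup's partial matching, which is why one key covers the kimi-k2 and kimi-vl variants as well as the moonshotai/-prefixed ids exercised in the tests below. A rough approximation of that matching, for illustration only (the real logic lives in the existing matchModelName/getModelMaxTokens helpers):

// Approximation, not the actual implementation: strip a provider prefix,
// then pick the longest map key contained in the model name.
function findMatchingKey(modelName, modelMap) {
  const name = modelName.includes('/') ? modelName.split('/').pop() : modelName;
  return Object.keys(modelMap)
    .sort((a, b) => b.length - a.length)
    .find((key) => name.includes(key));
}

findMatchingKey('moonshotai/kimi-k2-latest', aggregateModels); // → 'kimi'
aggregateModels[findMatchingKey('kimi-vl-preview', aggregateModels)]; // → 131000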

View file

@@ -714,3 +714,45 @@ describe('Claude Model Tests', () => {
    });
  });
});
+
+describe('Kimi Model Tests', () => {
+  describe('getModelMaxTokens', () => {
+    test('should return correct tokens for Kimi models', () => {
+      expect(getModelMaxTokens('kimi')).toBe(131000);
+      expect(getModelMaxTokens('kimi-k2')).toBe(131000);
+      expect(getModelMaxTokens('kimi-vl')).toBe(131000);
+    });
+
+    test('should return correct tokens for Kimi models with provider prefix', () => {
+      expect(getModelMaxTokens('moonshotai/kimi-k2')).toBe(131000);
+      expect(getModelMaxTokens('moonshotai/kimi')).toBe(131000);
+      expect(getModelMaxTokens('moonshotai/kimi-vl')).toBe(131000);
+    });
+
+    test('should handle partial matches for Kimi models', () => {
+      expect(getModelMaxTokens('kimi-k2-latest')).toBe(131000);
+      expect(getModelMaxTokens('kimi-vl-preview')).toBe(131000);
+      expect(getModelMaxTokens('kimi-2024')).toBe(131000);
+    });
+  });
+
+  describe('matchModelName', () => {
+    test('should match exact Kimi model names', () => {
+      expect(matchModelName('kimi')).toBe('kimi');
+      expect(matchModelName('kimi-k2')).toBe('kimi');
+      expect(matchModelName('kimi-vl')).toBe('kimi');
+    });
+
+    test('should match Kimi model variations with provider prefix', () => {
+      expect(matchModelName('moonshotai/kimi')).toBe('kimi');
+      expect(matchModelName('moonshotai/kimi-k2')).toBe('kimi');
+      expect(matchModelName('moonshotai/kimi-vl')).toBe('kimi');
+    });
+
+    test('should match Kimi model variations with suffixes', () => {
+      expect(matchModelName('kimi-k2-latest')).toBe('kimi');
+      expect(matchModelName('kimi-vl-preview')).toBe('kimi');
+      expect(matchModelName('kimi-2024')).toBe('kimi');
+    });
+  });
+});

View file

@@ -466,7 +466,6 @@
  "com_nav_send_message": "Send message",
  "com_nav_setting_account": "Account",
  "com_nav_setting_balance": "Balance",
-  "com_nav_setting_beta": "Beta features",
  "com_nav_setting_chat": "Chat",
  "com_nav_setting_data": "Data controls",
  "com_nav_setting_general": "General",
@@ -1090,4 +1089,4 @@
  "com_ui_yes": "Yes",
  "com_ui_zoom": "Zoom",
  "com_user_message": "You"
-}
+}