feat: GPT-OSS models Token Limits & Rates

commit d95d8032cc (parent b9f72f4869)
Author: Danny Avila
Date:   2025-08-07 15:03:19 -04:00
4 changed files with 51 additions and 4 deletions


@@ -1,4 +1,4 @@
-const { matchModelName } = require('../utils');
+const { matchModelName } = require('../utils/tokens');
 const defaultRate = 6;
 /**
@@ -147,6 +147,9 @@ const tokenValues = Object.assign(
     codestral: { prompt: 0.3, completion: 0.9 },
     'ministral-8b': { prompt: 0.1, completion: 0.1 },
     'ministral-3b': { prompt: 0.04, completion: 0.04 },
+    // GPT-OSS models
+    'gpt-oss-20b': { prompt: 0.05, completion: 0.2 },
+    'gpt-oss-120b': { prompt: 0.15, completion: 0.6 },
   },
   bedrockValues,
 );
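Each entry above is a pair of rate multipliers applied to prompt and completion token counts. A minimal sketch of how the new GPT-OSS rates feed into a cost calculation, using the getMultiplier call shape exercised by the tests below; the tokensToCost helper and the './tx' require path are illustrative assumptions, not part of this commit:

const { getMultiplier } = require('./tx'); // assumed module path

// Hypothetical helper: scale a raw token count by the per-model rate.
function tokensToCost(model, tokenType, tokens) {
  const multiplier = getMultiplier({ model, tokenType });
  return tokens * multiplier;
}

// 10,000 prompt tokens at the new 'gpt-oss-20b' prompt rate of 0.05:
console.log(tokensToCost('gpt-oss-20b', 'prompt', 10000)); // 500
// 10,000 completion tokens at the 0.2 completion rate:
console.log(tokensToCost('gpt-oss-20b', 'completion', 10000)); // 2000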


@@ -311,6 +311,18 @@ describe('getMultiplier', () => {
       defaultRate,
     );
   });
+  it('should return correct multipliers for GPT-OSS models', () => {
+    const models = ['gpt-oss-20b', 'gpt-oss-120b'];
+    models.forEach((key) => {
+      const expectedPrompt = tokenValues[key].prompt;
+      const expectedCompletion = tokenValues[key].completion;
+      expect(getMultiplier({ valueKey: key, tokenType: 'prompt' })).toBe(expectedPrompt);
+      expect(getMultiplier({ valueKey: key, tokenType: 'completion' })).toBe(expectedCompletion);
+      expect(getMultiplier({ model: key, tokenType: 'prompt' })).toBe(expectedPrompt);
+      expect(getMultiplier({ model: key, tokenType: 'completion' })).toBe(expectedCompletion);
+    });
+  });
 });
 describe('AWS Bedrock Model Tests', () => {
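The new test asserts both lookup paths: valueKey reads tokenValues directly, while model resolves the name to a key first. Models with no tokenValues entry fall back to defaultRate (6, per the first file), which is what the preceding assertion's defaultRate expectation covers and why explicit GPT-OSS rates matter. A short sketch of that difference, with an assumed require path:

const { getMultiplier } = require('./tx'); // assumed module path

// Registered model: uses its tokenValues entry.
console.log(getMultiplier({ model: 'gpt-oss-20b', tokenType: 'prompt' })); // 0.05

// Unregistered model: falls back to defaultRate = 6.
console.log(getMultiplier({ model: 'not-a-known-model', tokenType: 'prompt' })); // 6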


@@ -234,6 +234,9 @@ const aggregateModels = {
   ...xAIModels,
   // misc.
   kimi: 131000,
+  // GPT-OSS
+  'gpt-oss-20b': 131000,
+  'gpt-oss-120b': 131000,
 };
 const maxTokensMap = {
@@ -250,6 +253,8 @@ const modelMaxOutputs = {
   o1: 32268, // -500 from max: 32,768
   'o1-mini': 65136, // -500 from max: 65,536
   'o1-preview': 32268, // -500 from max: 32,768
+  'gpt-oss-20b': 131000,
+  'gpt-oss-120b': 131000,
   system_default: 1024,
 };
@@ -468,10 +473,11 @@ const tiktokenModels = new Set([
 ]);
 module.exports = {
-  tiktokenModels,
-  maxTokensMap,
   inputSchema,
   modelSchema,
+  maxTokensMap,
+  tiktokenModels,
+  maxOutputTokensMap,
   matchModelName,
   processModelData,
   getModelMaxTokens,
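Both limit maps now cover GPT-OSS, and maxOutputTokensMap joins the exports. A sketch of the lookups the spec file below asserts, with an assumed require path:

const { EModelEndpoint } = require('librechat-data-provider');
const { getModelMaxTokens, getModelMaxOutputTokens } = require('./tokens'); // assumed path

// Context window: 131,000 tokens; provider-prefixed names resolve to the
// same entry.
console.log(getModelMaxTokens('gpt-oss-20b')); // 131000
console.log(getModelMaxTokens('openai/gpt-oss-120b')); // 131000

// Max output tokens: the endpoint argument defaults to openAI when omitted;
// the spec below asserts the azureOpenAI map agrees.
console.log(getModelMaxOutputTokens('gpt-oss-20b')); // 131000
console.log(getModelMaxOutputTokens('gpt-oss-120b', EModelEndpoint.openAI)); // 131000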


@@ -1,5 +1,11 @@
 const { EModelEndpoint } = require('librechat-data-provider');
-const { getModelMaxTokens, processModelData, matchModelName, maxTokensMap } = require('./tokens');
+const {
+  maxOutputTokensMap,
+  getModelMaxTokens,
+  processModelData,
+  matchModelName,
+  maxTokensMap,
+} = require('./tokens');
 describe('getModelMaxTokens', () => {
   test('should return correct tokens for exact match', () => {
@@ -349,6 +355,26 @@ describe('getModelMaxTokens', () => {
     expect(getModelMaxTokens('o3')).toBe(o3Tokens);
     expect(getModelMaxTokens('openai/o3')).toBe(o3Tokens);
   });
+  test('should return correct tokens for GPT-OSS models', () => {
+    const expected = maxTokensMap[EModelEndpoint.openAI]['gpt-oss-20b'];
+    ['gpt-oss-20b', 'gpt-oss-120b', 'openai/gpt-oss-20b', 'openai/gpt-oss-120b'].forEach((name) => {
+      expect(getModelMaxTokens(name)).toBe(expected);
+    });
+  });
+  test('should return correct max output tokens for GPT-OSS models', () => {
+    const { getModelMaxOutputTokens } = require('./tokens');
+    ['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
+      expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
+      expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
+        maxOutputTokensMap[EModelEndpoint.openAI][model],
+      );
+      expect(getModelMaxOutputTokens(model, EModelEndpoint.azureOpenAI)).toBe(
+        maxOutputTokensMap[EModelEndpoint.azureOpenAI][model],
+      );
+    });
+  });
 });
 describe('matchModelName', () => {