refactor: remove type re-exports from @librechat/api tokens

Update all imports of TokenConfig and EndpointTokenConfig to import
directly from librechat-data-provider instead of re-exporting through
packages/api/src/types/tokens.ts. Remove the now-unnecessary re-export
file and its barrel export.
This commit is contained in:
Marco Beretta 2026-02-08 17:36:44 +01:00
parent 3e9ddbf073
commit 9ff227a5b2
No known key found for this signature in database
GPG key ID: D918033D8E74CC11
12 changed files with 584 additions and 592 deletions

View file

@@ -1,13 +1,13 @@
/** Note: No hard-coded values should be used in this file. */
const { EModelEndpoint } = require('librechat-data-provider');
const {
EModelEndpoint,
maxTokensMap,
matchModelName,
processModelData,
getModelMaxTokens,
maxOutputTokensMap,
findMatchingPattern,
} = require('@librechat/api');
} = require('librechat-data-provider');
const { processModelData } = require('@librechat/api');
describe('getModelMaxTokens', () => {
test('should return correct tokens for exact match', () => {
@@ -485,7 +485,7 @@ describe('getModelMaxTokens', () => {
});
test('should return correct max output tokens for GPT-5 models', () => {
const { getModelMaxOutputTokens } = require('@librechat/api');
const { getModelMaxOutputTokens } = require('librechat-data-provider');
['gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'gpt-5-pro'].forEach((model) => {
expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
@@ -498,7 +498,7 @@ describe('getModelMaxTokens', () => {
});
test('should return correct max output tokens for GPT-OSS models', () => {
const { getModelMaxOutputTokens } = require('@librechat/api');
const { getModelMaxOutputTokens } = require('librechat-data-provider');
['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
@@ -745,7 +745,7 @@ describe('Meta Models Tests', () => {
});
describe('DeepSeek Max Output Tokens', () => {
const { getModelMaxOutputTokens } = require('@librechat/api');
const { getModelMaxOutputTokens } = require('librechat-data-provider');
test('should return correct max output tokens for deepseek-chat', () => {
const expected = maxOutputTokensMap[EModelEndpoint.openAI]['deepseek-chat'];
@@ -1123,7 +1123,7 @@ describe('Claude Model Tests', () => {
});
it('should return correct max output tokens for Claude Opus 4.6 (128K)', () => {
const { getModelMaxOutputTokens } = require('@librechat/api');
const { getModelMaxOutputTokens } = require('librechat-data-provider');
expect(getModelMaxOutputTokens('claude-opus-4-6', EModelEndpoint.anthropic)).toBe(
maxOutputTokensMap[EModelEndpoint.anthropic]['claude-opus-4-6'],
);