refactor: move tokens.js over to packages/api and update imports

Dustin Healy 2025-08-30 15:25:13 -07:00 committed by Danny Avila
parent efdad28b70
commit d1d4c2eb27
No known key found for this signature in database
GPG key ID: BF31EEB2C5CA0956
17 changed files with 39 additions and 34 deletions
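In short: the token helpers (getModelMaxTokens, getModelMaxOutputTokens, matchModelName, processModelData, inputSchema, and friends) now live in the @librechat/api package, so every consumer swaps its require path. A representative before/after sketch of the pattern repeated in the hunks below:

// Before: helpers pulled from the server's local utils barrel
const { getModelMaxTokens, matchModelName } = require('~/utils');

// After: the same helpers re-exported from the shared workspace package
const { getModelMaxTokens, matchModelName } = require('@librechat/api');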

View file

@@ -10,7 +10,14 @@ const {
   validateVisionModel,
 } = require('librechat-data-provider');
 const { SplitStreamHandler: _Handler } = require('@librechat/agents');
-const { Tokenizer, createFetch, createStreamEventHandlers } = require('@librechat/api');
+const {
+  Tokenizer,
+  createFetch,
+  matchModelName,
+  getModelMaxTokens,
+  getModelMaxOutputTokens,
+  createStreamEventHandlers,
+} = require('@librechat/api');
 const {
   truncateText,
   formatMessage,
@@ -24,7 +31,6 @@ const {
   configureReasoning,
   checkPromptCacheSupport,
 } = require('~/server/services/Endpoints/anthropic/helpers');
-const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
 const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
 const { encodeAndFormat } = require('~/server/services/Files/images/encode');
 const { sleep } = require('~/server/utils');

View file

@@ -1,4 +1,5 @@
 const { google } = require('googleapis');
+const { getModelMaxTokens } = require('@librechat/api');
 const { concat } = require('@langchain/core/utils/stream');
 const { ChatVertexAI } = require('@langchain/google-vertexai');
 const { Tokenizer, getSafetySettings } = require('@librechat/api');
@@ -21,7 +22,6 @@ const {
 } = require('librechat-data-provider');
 const { encodeAndFormat } = require('~/server/services/Files/images');
 const { spendTokens } = require('~/models/spendTokens');
-const { getModelMaxTokens } = require('~/utils');
 const { sleep } = require('~/server/utils');
 const { logger } = require('~/config');
 const {

View file

@@ -7,7 +7,9 @@ const {
   createFetch,
   resolveHeaders,
   constructAzureURL,
+  getModelMaxTokens,
   genAzureChatCompletion,
+  getModelMaxOutputTokens,
   createStreamEventHandlers,
 } = require('@librechat/api');
 const {
@@ -31,13 +33,13 @@ const {
   titleInstruction,
   createContextHandlers,
 } = require('./prompts');
-const { extractBaseURL, getModelMaxTokens, getModelMaxOutputTokens } = require('~/utils');
 const { encodeAndFormat } = require('~/server/services/Files/images/encode');
 const { addSpaceIfNeeded, sleep } = require('~/server/utils');
 const { spendTokens } = require('~/models/spendTokens');
 const { handleOpenAIErrors } = require('./tools/util');
 const { summaryBuffer } = require('./memory');
 const { runTitleChain } = require('./chains');
+const { extractBaseURL } = require('~/utils');
 const { tokenSplit } = require('./document');
 const BaseClient = require('./BaseClient');
 const { createLLM } = require('./llm');

View file

@@ -1,5 +1,5 @@
+const { getModelMaxTokens } = require('@librechat/api');
 const BaseClient = require('../BaseClient');
-const { getModelMaxTokens } = require('../../../utils');
 
 class FakeClient extends BaseClient {
   constructor(apiKey, options = {}) {

View file

@@ -1,4 +1,4 @@
-const { matchModelName } = require('../utils/tokens');
+const { matchModelName } = require('@librechat/api');
 const defaultRate = 6;
 
 /**

View file

@@ -1,7 +1,7 @@
 const { v4 } = require('uuid');
 const { sleep } = require('@librechat/agents');
 const { logger } = require('@librechat/data-schemas');
-const { sendEvent, getBalanceConfig } = require('@librechat/api');
+const { sendEvent, getBalanceConfig, getModelMaxTokens } = require('@librechat/api');
 const {
   Time,
   Constants,
@@ -34,7 +34,6 @@ const { checkBalance } = require('~/models/balanceMethods');
 const { getConvo } = require('~/models/Conversation');
 const getLogStores = require('~/cache/getLogStores');
 const { countTokens } = require('~/server/utils');
-const { getModelMaxTokens } = require('~/utils');
 const { getOpenAIClient } = require('./helpers');
 
 /**

View file

@@ -1,7 +1,7 @@
 const { v4 } = require('uuid');
 const { sleep } = require('@librechat/agents');
 const { logger } = require('@librechat/data-schemas');
-const { sendEvent, getBalanceConfig } = require('@librechat/api');
+const { sendEvent, getBalanceConfig, getModelMaxTokens } = require('@librechat/api');
 const {
   Time,
   Constants,
@@ -31,7 +31,6 @@ const { checkBalance } = require('~/models/balanceMethods');
 const { getConvo } = require('~/models/Conversation');
 const getLogStores = require('~/cache/getLogStores');
 const { countTokens } = require('~/server/utils');
-const { getModelMaxTokens } = require('~/utils');
 const { getOpenAIClient } = require('./helpers');
 
 /**

View file

@@ -1,6 +1,7 @@
 const { Providers } = require('@librechat/agents');
 const {
   primeResources,
+  getModelMaxTokens,
   extractLibreChatParams,
   optionalChainWithEmptyCheck,
 } = require('@librechat/api');
@@ -17,7 +18,6 @@ const { getProviderConfig } = require('~/server/services/Endpoints');
 const { processFiles } = require('~/server/services/Files/process');
 const { getFiles, getToolFilesByIds } = require('~/models/File');
 const { getConvoFiles } = require('~/models/Conversation');
-const { getModelMaxTokens } = require('~/utils');
 
 /**
  * @param {object} params

View file

@@ -1,5 +1,5 @@
+const { matchModelName } = require('@librechat/api');
 const { EModelEndpoint, anthropicSettings } = require('librechat-data-provider');
-const { matchModelName } = require('~/utils');
 const { logger } = require('~/config');
 
 /**

View file

@@ -1,3 +1,4 @@
+const { getModelMaxTokens } = require('@librechat/api');
 const { createContentAggregator } = require('@librechat/agents');
 const {
   EModelEndpoint,
@@ -7,7 +8,6 @@ const {
 const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks');
 const getOptions = require('~/server/services/Endpoints/bedrock/options');
 const AgentClient = require('~/server/controllers/agents/client');
-const { getModelMaxTokens } = require('~/utils');
 
 const initializeClient = async ({ req, res, endpointOption }) => {
   if (!endpointOption) {

View file

@@ -1,13 +1,13 @@
 const axios = require('axios');
 const { Providers } = require('@librechat/agents');
-const { logAxiosError } = require('@librechat/api');
 const { logger } = require('@librechat/data-schemas');
 const { HttpsProxyAgent } = require('https-proxy-agent');
+const { logAxiosError, inputSchema, processModelData } = require('@librechat/api');
 const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
-const { inputSchema, extractBaseURL, processModelData } = require('~/utils');
 const { OllamaClient } = require('~/app/clients/OllamaClient');
 const { isUserProvided } = require('~/server/utils');
 const getLogStores = require('~/cache/getLogStores');
+const { extractBaseURL } = require('~/utils');
 
 /**
  * Splits a string by commas and trims each resulting value.

View file

@@ -11,8 +11,8 @@ const {
   getAnthropicModels,
 } = require('./ModelService');
 
-jest.mock('~/utils', () => {
-  const originalUtils = jest.requireActual('~/utils');
+jest.mock('@librechat/api', () => {
+  const originalUtils = jest.requireActual('@librechat/api');
   return {
     ...originalUtils,
     processModelData: jest.fn((...args) => {
@@ -108,7 +108,7 @@ describe('fetchModels with createTokenConfig true', () => {
   beforeEach(() => {
     // Clears the mock's history before each test
-    const _utils = require('~/utils');
+    const _utils = require('@librechat/api');
     axios.get.mockResolvedValue({ data });
   });
@@ -120,7 +120,7 @@ describe('fetchModels with createTokenConfig true', () => {
       createTokenConfig: true,
     });
 
-    const { processModelData } = require('~/utils');
+    const { processModelData } = require('@librechat/api');
    expect(processModelData).toHaveBeenCalled();
    expect(processModelData).toHaveBeenCalledWith(data);
  });

View file

@@ -1,7 +1,7 @@
 const axios = require('axios');
 const deriveBaseURL = require('./deriveBaseURL');
-jest.mock('~/utils', () => {
-  const originalUtils = jest.requireActual('~/utils');
+jest.mock('@librechat/api', () => {
+  const originalUtils = jest.requireActual('@librechat/api');
   return {
     ...originalUtils,
     processModelData: jest.fn((...args) => {

View file

@@ -1,4 +1,3 @@
-const tokenHelpers = require('./tokens');
 const deriveBaseURL = require('./deriveBaseURL');
 const extractBaseURL = require('./extractBaseURL');
 const findMessageContent = require('./findMessageContent');
@@ -6,6 +5,5 @@ const findMessageContent = require('./findMessageContent');
 module.exports = {
   deriveBaseURL,
   extractBaseURL,
-  ...tokenHelpers,
   findMessageContent,
 };
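Note the consequence for any straggler imports: since ~/utils no longer spreads tokenHelpers, a stale require would now silently yield undefined rather than throw. A quick illustration (the stale import below is hypothetical, shown only for emphasis):

// Hypothetical stale import left behind after this commit:
const { getModelMaxTokens } = require('~/utils');
console.log(getModelMaxTokens); // undefined — the helper now lives in @librechat/api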

View file

@@ -1,12 +1,12 @@
 const { EModelEndpoint } = require('librechat-data-provider');
 const {
+  maxTokensMap,
+  matchModelName,
+  processModelData,
+  getModelMaxTokens,
   maxOutputTokensMap,
   findMatchingPattern,
-  getModelMaxTokens,
-  processModelData,
-  matchModelName,
-  maxTokensMap,
-} = require('./tokens');
+} = require('@librechat/api');
 
 describe('getModelMaxTokens', () => {
   test('should return correct tokens for exact match', () => {
@@ -394,7 +394,7 @@ describe('getModelMaxTokens', () => {
   });
 
   test('should return correct max output tokens for GPT-5 models', () => {
-    const { getModelMaxOutputTokens } = require('./tokens');
+    const { getModelMaxOutputTokens } = require('@librechat/api');
     ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => {
       expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
       expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
@@ -407,7 +407,7 @@ describe('getModelMaxTokens', () => {
   });
 
   test('should return correct max output tokens for GPT-OSS models', () => {
-    const { getModelMaxOutputTokens } = require('./tokens');
+    const { getModelMaxOutputTokens } = require('@librechat/api');
     ['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
       expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
       expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(

View file

@@ -15,3 +15,4 @@ export * from './text';
 export { default as Tokenizer } from './tokenizer';
 export * from './yaml';
 export * from './http';
+export * from './tokens';

View file

@@ -1,5 +1,5 @@
-const z = require('zod');
-const { EModelEndpoint } = require('librechat-data-provider');
+import z from 'zod';
+import { EModelEndpoint } from 'librechat-data-provider';
 
 const openAIModels = {
   'o4-mini': 200000,
@@ -478,15 +478,15 @@ const tiktokenModels = new Set([
   'gpt-3.5-turbo-0301',
 ]);
 
-module.exports = {
+export {
   inputSchema,
   modelSchema,
   maxTokensMap,
   tiktokenModels,
+  maxOutputTokensMap,
   matchModelName,
   processModelData,
   getModelMaxTokens,
-  maxOutputTokensMap,
   getModelTokenValue,
   findMatchingPattern,
   getModelMaxOutputTokens,
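With tokens.ts re-exported from the package barrel (the index.ts hunk above), callers resolve model limits directly from @librechat/api. A minimal usage sketch, assuming the (modelName, endpoint) call signature exercised in the spec hunks above:

const { EModelEndpoint } = require('librechat-data-provider');
const { getModelMaxTokens, getModelMaxOutputTokens } = require('@librechat/api');

// Context-window lookup backed by maxTokensMap ('o4-mini' maps to 200000 above).
const contextWindow = getModelMaxTokens('o4-mini', EModelEndpoint.openAI);

// Completion-cap lookup backed by maxOutputTokensMap, as in the GPT-5 tests.
const maxOutput = getModelMaxOutputTokens('gpt-5', EModelEndpoint.openAI);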