Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-09-21 21:50:49 +02:00)
👤 feat: User ID in Model Query; chore: cleanup ModelService (#1753)
* feat: send the LibreChat user ID as a query param when fetching the list of models
* chore: update bun
* chore: change bun command for building data-provider
* refactor: prefer use of `getCustomConfig` to access custom config, also move to `server/services/Config`
* refactor: make endpoints/custom option for the config optional, add userIdQuery, and use modelQueries log store in ModelService
* refactor(ModelService): use env variables at runtime, use default models from data-provider, and add tests
* docs: add `userIdQuery`
* fix(ci): import changed
Parent: d06e5d2e02
Commit: ff057152e2

17 changed files with 339 additions and 83 deletions
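For context, the feature this commit adds is opt-in per custom endpoint. A minimal librechat.yaml sketch (the endpoint name, base URL, and key variable here are hypothetical; `fetch` and `userIdQuery` are the options documented in the docs change below):

endpoints:
  custom:
    - name: 'ExampleProvider'
      apiKey: '${EXAMPLE_API_KEY}'
      baseURL: 'https://api.example.com/v1'
      models:
        default: ['example-model']
        fetch: true
        userIdQuery: true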
api/cache/getLogStores.js (vendored, 5 changes)

@@ -31,6 +31,10 @@ const genTitle = isEnabled(USE_REDIS) // ttl: 2 minutes
   ? new Keyv({ store: keyvRedis, ttl: 120000 })
   : new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: 120000 });
 
+const modelQueries = isEnabled(process.env.USE_REDIS)
+  ? new Keyv({ store: keyvRedis })
+  : new Keyv({ namespace: 'models' });
+
 const namespaces = {
   [CacheKeys.CONFIG_STORE]: config,
   pending_req,
@@ -44,6 +48,7 @@ const namespaces = {
   logins: createViolationInstance('logins'),
   [CacheKeys.TOKEN_CONFIG]: tokenConfig,
   [CacheKeys.GEN_TITLE]: genTitle,
+  [CacheKeys.MODEL_QUERIES]: modelQueries,
 };
 
 /**
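The new `modelQueries` store gives fetched model lists their own cache namespace. A minimal sketch of how a consumer reads it (the helper name is hypothetical; `getLogStores` and `CacheKeys.MODEL_QUERIES` are the additions above, and the ModelService changes below key entries by base URL):

// Hypothetical helper: read a cached model list for a base URL, if present.
const { CacheKeys } = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');

async function getCachedModelList(baseURL) {
  const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);
  // Keyv-backed stores resolve to `undefined` on a cache miss.
  return await modelsCache.get(baseURL);
}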
@@ -9,8 +9,8 @@ async function modelController(req, res) {
     res.send(cachedModelsConfig);
     return;
   }
-  const defaultModelsConfig = await loadDefaultModels();
-  const customModelsConfig = await loadConfigModels();
+  const defaultModelsConfig = await loadDefaultModels(req);
+  const customModelsConfig = await loadConfigModels(req);
 
   const modelConfig = { ...defaultModelsConfig, ...customModelsConfig };
 
@@ -1,7 +1,7 @@
 const crypto = require('crypto');
 const bcrypt = require('bcryptjs');
 const { registerSchema, errorsToString } = require('~/strategies/validators');
-const getCustomConfig = require('~/cache/getCustomConfig');
+const getCustomConfig = require('~/server/services/Config/getCustomConfig');
 const Token = require('~/models/schema/tokenSchema');
 const { sendEmail } = require('~/server/utils');
 const Session = require('~/models/Session');
@@ -1,7 +1,7 @@
-const getCustomConfig = require('~/cache/getCustomConfig');
+const getCustomConfig = require('~/server/services/Config/getCustomConfig');
 const { isDomainAllowed } = require('./AuthService');
 
-jest.mock('~/cache/getCustomConfig', () => jest.fn());
+jest.mock('~/server/services/Config/getCustomConfig', () => jest.fn());
 
 describe('isDomainAllowed', () => {
   it('should allow domain when customConfig is not available', async () => {
@@ -1,6 +1,6 @@
 const { CacheKeys } = require('librechat-data-provider');
-const loadCustomConfig = require('~/server/services/Config/loadCustomConfig');
-const getLogStores = require('./getLogStores');
+const loadCustomConfig = require('./loadCustomConfig');
+const getLogStores = require('~/cache/getLogStores');
 
 /**
  * Retrieves the configuration object
@@ -1,4 +1,5 @@
 const { config } = require('./EndpointService');
+const getCustomConfig = require('./getCustomConfig');
 const loadCustomConfig = require('./loadCustomConfig');
 const loadConfigModels = require('./loadConfigModels');
 const loadDefaultModels = require('./loadDefaultModels');
@@ -9,6 +10,7 @@ const loadDefaultEndpointsConfig = require('./loadDefaultEConfig');
 
 module.exports = {
   config,
+  getCustomConfig,
   loadCustomConfig,
   loadConfigModels,
   loadDefaultModels,
@@ -1,18 +1,12 @@
-const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
+const { EModelEndpoint } = require('librechat-data-provider');
 const { isUserProvided, extractEnvVariable } = require('~/server/utils');
-const loadCustomConfig = require('./loadCustomConfig');
-const { getLogStores } = require('~/cache');
+const getCustomConfig = require('./getCustomConfig');
 
 /**
  * Load config endpoints from the cached configuration object
  * @function loadConfigEndpoints */
 async function loadConfigEndpoints() {
-  const cache = getLogStores(CacheKeys.CONFIG_STORE);
-  let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG);
-
-  if (!customConfig) {
-    customConfig = await loadCustomConfig();
-  }
+  const customConfig = await getCustomConfig();
 
   if (!customConfig) {
     return {};
@@ -1,19 +1,15 @@
-const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
+const { EModelEndpoint } = require('librechat-data-provider');
 const { isUserProvided, extractEnvVariable } = require('~/server/utils');
 const { fetchModels } = require('~/server/services/ModelService');
-const loadCustomConfig = require('./loadCustomConfig');
-const { getLogStores } = require('~/cache');
+const getCustomConfig = require('./getCustomConfig');
 
 /**
  * Load config endpoints from the cached configuration object
- * @function loadConfigModels */
-async function loadConfigModels() {
-  const cache = getLogStores(CacheKeys.CONFIG_STORE);
-  let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG);
-
-  if (!customConfig) {
-    customConfig = await loadCustomConfig();
-  }
+ * @function loadConfigModels
+ * @param {Express.Request} req - The Express request object.
+ */
+async function loadConfigModels(req) {
+  const customConfig = await getCustomConfig();
 
   if (!customConfig) {
     return {};
@@ -49,7 +45,14 @@ async function loadConfigModels() {
 
   if (models.fetch && !isUserProvided(API_KEY) && !isUserProvided(BASE_URL)) {
     fetchPromisesMap[BASE_URL] =
-      fetchPromisesMap[BASE_URL] || fetchModels({ baseURL: BASE_URL, apiKey: API_KEY, name });
+      fetchPromisesMap[BASE_URL] ||
+      fetchModels({
+        user: req.user.id,
+        baseURL: BASE_URL,
+        apiKey: API_KEY,
+        name,
+        userIdQuery: models.userIdQuery,
+      });
     baseUrlToNameMap[BASE_URL] = baseUrlToNameMap[BASE_URL] || [];
     baseUrlToNameMap[BASE_URL].push(name);
     continue;
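The `fetchPromisesMap` assignment above memoizes the in-flight request per base URL, so several custom endpoints sharing one `baseURL` trigger a single models fetch. A standalone sketch of that pattern, with hypothetical names:

// First caller for a key starts the fetch; later callers await the same promise.
const inflightFetches = {};

function fetchModelsOnce(baseURL, doFetch) {
  inflightFetches[baseURL] = inflightFetches[baseURL] || doFetch(baseURL);
  return inflightFetches[baseURL];
}

// Two endpoints with the same base URL share one underlying request:
// await fetchModelsOnce('https://api.example.com/v1', fetchFromApi);
// await fetchModelsOnce('https://api.example.com/v1', fetchFromApi);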
@@ -17,6 +17,7 @@ const configPath = path.resolve(projectRoot, 'librechat.yaml');
 async function loadCustomConfig() {
   const customConfig = loadYaml(configPath);
   if (!customConfig) {
+    logger.info('Custom config file missing or YAML format invalid.');
     return null;
   }
 
@@ -25,7 +26,7 @@ async function loadCustomConfig() {
     logger.error(`Invalid custom config file at ${configPath}`, result.error);
     return null;
   } else {
-    logger.info('Loaded custom config file:');
+    logger.info('Custom config file loaded:');
     logger.info(JSON.stringify(customConfig, null, 2));
   }
 
@@ -11,13 +11,23 @@ const fitlerAssistantModels = (str) => {
   return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
 };
 
-async function loadDefaultModels() {
+/**
+ * Loads the default models for the application.
+ * @async
+ * @function
+ * @param {Express.Request} req - The Express request object.
+ */
+async function loadDefaultModels(req) {
   const google = getGoogleModels();
-  const openAI = await getOpenAIModels();
+  const openAI = await getOpenAIModels({ user: req.user.id });
   const anthropic = getAnthropicModels();
   const chatGPTBrowser = getChatGPTBrowserModels();
-  const azureOpenAI = await getOpenAIModels({ azure: true });
-  const gptPlugins = await getOpenAIModels({ azure: useAzurePlugins, plugins: true });
+  const azureOpenAI = await getOpenAIModels({ user: req.user.id, azure: true });
+  const gptPlugins = await getOpenAIModels({
+    user: req.user.id,
+    azure: useAzurePlugins,
+    plugins: true,
+  });
 
   return {
     [EModelEndpoint.openAI]: openAI,
@@ -1,8 +1,8 @@
 const { EModelEndpoint, CacheKeys } = require('librechat-data-provider');
 const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
+const getCustomConfig = require('~/server/services/Config/getCustomConfig');
 const { isUserProvided, extractEnvVariable } = require('~/server/utils');
 const { fetchModels } = require('~/server/services/ModelService');
-const getCustomConfig = require('~/cache/getCustomConfig');
 const getLogStores = require('~/cache/getLogStores');
 const { OpenAIClient } = require('~/app');
 
@@ -1,47 +1,35 @@
-const Keyv = require('keyv');
 const axios = require('axios');
-const HttpsProxyAgent = require('https-proxy-agent');
+const { HttpsProxyAgent } = require('https-proxy-agent');
 const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
 const { extractBaseURL, inputSchema, processModelData } = require('~/utils');
 const getLogStores = require('~/cache/getLogStores');
-const { isEnabled } = require('~/server/utils');
-const keyvRedis = require('~/cache/keyvRedis');
 const { logger } = require('~/config');
 
 // const { getAzureCredentials, genAzureChatCompletion } = require('~/utils/');
 
 const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService').config;
 
-const modelsCache = isEnabled(process.env.USE_REDIS)
-  ? new Keyv({ store: keyvRedis })
-  : new Keyv({ namespace: 'models' });
-
-const {
-  OPENROUTER_API_KEY,
-  OPENAI_REVERSE_PROXY,
-  CHATGPT_MODELS,
-  ANTHROPIC_MODELS,
-  GOOGLE_MODELS,
-  PROXY,
-} = process.env ?? {};
-
 /**
  * Fetches OpenAI models from the specified base API path or Azure, based on the provided configuration.
  *
  * @param {Object} params - The parameters for fetching the models.
+ * @param {Object} params.user - The user ID to send to the API.
  * @param {string} params.apiKey - The API key for authentication with the API.
  * @param {string} params.baseURL - The base path URL for the API.
  * @param {string} [params.name='OpenAI'] - The name of the API; defaults to 'OpenAI'.
  * @param {boolean} [params.azure=false] - Whether to fetch models from Azure.
+ * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
  * @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response.
  * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
  * @async
 */
 const fetchModels = async ({
+  user,
   apiKey,
   baseURL,
   name = 'OpenAI',
   azure = false,
+  userIdQuery = false,
   createTokenConfig = true,
 }) => {
   let models = [];
@@ -51,21 +39,26 @@ const fetchModels = async ({
   }
 
   try {
-    const payload = {
+    const options = {
       headers: {
        Authorization: `Bearer ${apiKey}`,
       },
     };
 
-    if (PROXY) {
-      payload.httpsAgent = new HttpsProxyAgent(PROXY);
+    if (process.env.PROXY) {
+      options.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
     }
 
     if (process.env.OPENAI_ORGANIZATION && baseURL.includes('openai')) {
-      payload.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
+      options.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
     }
 
-    const res = await axios.get(`${baseURL}${azure ? '' : '/models'}`, payload);
+    const url = new URL(`${baseURL}${azure ? '' : '/models'}`);
+    if (user && userIdQuery) {
+      url.searchParams.append('user', user);
+    }
+    const res = await axios.get(url.toString(), options);
 
     /** @type {z.infer<typeof inputSchema>} */
     const input = res.data;
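For illustration, the `URL` construction above produces exactly the request shape the new spec asserts: the user ID rides along only when both `user` and `userIdQuery` are set. A small sketch using Node's WHATWG URL API:

// Mirrors the logic above; values match the test fixtures in the new spec file.
const url = new URL('https://api.test.com/models');
const user = 'user123';
const userIdQuery = true;
if (user && userIdQuery) {
  url.searchParams.append('user', user);
}
console.log(url.toString()); // https://api.test.com/models?user=user123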
@@ -83,11 +76,22 @@ const fetchModels = async ({
   return models;
 };
 
-const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _models = []) => {
+/**
+ * Fetches models from the specified API path or Azure, based on the provided options.
+ * @async
+ * @function
+ * @param {object} opts - The options for fetching the models.
+ * @param {string} opts.user - The user ID to send to the API.
+ * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
+ * @param {boolean} [opts.plugins=false] - Whether to fetch models from the plugins.
+ * @param {string[]} [_models=[]] - The models to use as a fallback.
+ */
+const fetchOpenAIModels = async (opts, _models = []) => {
   let models = _models.slice() ?? [];
   let apiKey = openAIApiKey;
-  let baseURL = 'https://api.openai.com/v1';
-  let reverseProxyUrl = OPENAI_REVERSE_PROXY;
+  const openaiBaseURL = 'https://api.openai.com/v1';
+  let baseURL = openaiBaseURL;
+  let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY;
   if (opts.azure) {
     return models;
     // const azure = getAzureCredentials();
@@ -95,15 +99,17 @@ const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _model
     // .split('/deployments')[0]
     // .concat(`/models?api-version=${azure.azureOpenAIApiVersion}`);
     // apiKey = azureOpenAIApiKey;
-  } else if (OPENROUTER_API_KEY) {
+  } else if (process.env.OPENROUTER_API_KEY) {
     reverseProxyUrl = 'https://openrouter.ai/api/v1';
-    apiKey = OPENROUTER_API_KEY;
+    apiKey = process.env.OPENROUTER_API_KEY;
   }
 
   if (reverseProxyUrl) {
     baseURL = extractBaseURL(reverseProxyUrl);
   }
 
+  const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);
+
   const cachedModels = await modelsCache.get(baseURL);
   if (cachedModels) {
     return cachedModels;
@@ -114,10 +120,15 @@ const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _model
       apiKey,
       baseURL,
       azure: opts.azure,
+      user: opts.user,
     });
   }
 
-  if (!reverseProxyUrl) {
+  if (models.length === 0) {
+    return _models;
+  }
+
+  if (baseURL === openaiBaseURL) {
     const regex = /(text-davinci-003|gpt-)/;
     models = models.filter((model) => regex.test(model));
   }
@@ -126,18 +137,27 @@ const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _model
   return models;
 };
 
-const getOpenAIModels = async (opts = { azure: false, plugins: false }) => {
-  let models = [
-    'gpt-4',
-    'gpt-4-0613',
-    'gpt-3.5-turbo',
-    'gpt-3.5-turbo-16k',
-    'gpt-3.5-turbo-0613',
-    'gpt-3.5-turbo-0301',
-  ];
+/**
+ * Loads the default models for the application.
+ * @async
+ * @function
+ * @param {object} opts - The options for fetching the models.
+ * @param {string} opts.user - The user ID to send to the API.
+ * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
+ * @param {boolean} [opts.plugins=false] - Whether to fetch models from the plugins.
+ */
+const getOpenAIModels = async (opts) => {
+  let models = defaultModels.openAI;
 
-  if (!opts.plugins) {
-    models.push('text-davinci-003');
+  if (opts.plugins) {
+    models = models.filter(
+      (model) =>
+        !model.includes('text-davinci') &&
+        !model.includes('instruct') &&
+        !model.includes('0613') &&
+        !model.includes('0314') &&
+        !model.includes('0301'),
+    );
   }
 
   let key;
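The rewritten `plugins` branch now filters the shared default list down to plugin-safe models instead of appending `text-davinci-003`. Applying the same predicate to a hypothetical sample list shows the effect:

// Hypothetical sample input; the predicate is the one used above.
const sample = ['gpt-4', 'gpt-4-0613', 'gpt-3.5-turbo', 'gpt-3.5-turbo-instruct', 'text-davinci-003'];
const filtered = sample.filter(
  (model) =>
    !model.includes('text-davinci') &&
    !model.includes('instruct') &&
    !model.includes('0613') &&
    !model.includes('0314') &&
    !model.includes('0301'),
);
console.log(filtered); // ['gpt-4', 'gpt-3.5-turbo']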
@@ -154,7 +174,7 @@ const getOpenAIModels = async (opts = { azure: false, plugins: false }) => {
     return models;
   }
 
-  if (userProvidedOpenAI && !OPENROUTER_API_KEY) {
+  if (userProvidedOpenAI && !process.env.OPENROUTER_API_KEY) {
     return models;
   }
 
@@ -163,8 +183,8 @@ const getOpenAIModels = async (opts = { azure: false, plugins: false }) => {
 const getChatGPTBrowserModels = () => {
   let models = ['text-davinci-002-render-sha', 'gpt-4'];
-  if (CHATGPT_MODELS) {
-    models = String(CHATGPT_MODELS).split(',');
+  if (process.env.CHATGPT_MODELS) {
+    models = String(process.env.CHATGPT_MODELS).split(',');
   }
 
   return models;
@@ -172,8 +192,8 @@ const getChatGPTBrowserModels = () => {
 const getAnthropicModels = () => {
   let models = defaultModels[EModelEndpoint.anthropic];
-  if (ANTHROPIC_MODELS) {
-    models = String(ANTHROPIC_MODELS).split(',');
+  if (process.env.ANTHROPIC_MODELS) {
+    models = String(process.env.ANTHROPIC_MODELS).split(',');
   }
 
   return models;
@@ -181,8 +201,8 @@ const getAnthropicModels = () => {
 const getGoogleModels = () => {
   let models = defaultModels[EModelEndpoint.google];
-  if (GOOGLE_MODELS) {
-    models = String(GOOGLE_MODELS).split(',');
+  if (process.env.GOOGLE_MODELS) {
+    models = String(process.env.GOOGLE_MODELS).split(',');
  }
 
   return models;
api/server/services/ModelService.spec.js (new file, 212 lines)

@@ -0,0 +1,212 @@
+const axios = require('axios');
+
+const { fetchModels, getOpenAIModels } = require('./ModelService');
+jest.mock('~/utils', () => {
+  const originalUtils = jest.requireActual('~/utils');
+  return {
+    ...originalUtils,
+    processModelData: jest.fn((...args) => {
+      return originalUtils.processModelData(...args);
+    }),
+  };
+});
+
+jest.mock('axios');
+jest.mock('~/cache/getLogStores', () =>
+  jest.fn().mockImplementation(() => ({
+    get: jest.fn().mockResolvedValue(undefined),
+    set: jest.fn().mockResolvedValue(true),
+  })),
+);
+jest.mock('~/config', () => ({
+  logger: {
+    error: jest.fn(),
+  },
+}));
+jest.mock('./Config/EndpointService', () => ({
+  config: {
+    openAIApiKey: 'mockedApiKey',
+    userProvidedOpenAI: false,
+  },
+}));
+
+axios.get.mockResolvedValue({
+  data: {
+    data: [{ id: 'model-1' }, { id: 'model-2' }],
+  },
+});
+
+describe('fetchModels', () => {
+  it('fetches models successfully from the API', async () => {
+    const models = await fetchModels({
+      user: 'user123',
+      apiKey: 'testApiKey',
+      baseURL: 'https://api.test.com',
+      name: 'TestAPI',
+    });
+
+    expect(models).toEqual(['model-1', 'model-2']);
+    expect(axios.get).toHaveBeenCalledWith(
+      expect.stringContaining('https://api.test.com/models'),
+      expect.any(Object),
+    );
+  });
+
+  it('adds the user ID to the models query when option and ID are passed', async () => {
+    const models = await fetchModels({
+      user: 'user123',
+      apiKey: 'testApiKey',
+      baseURL: 'https://api.test.com',
+      userIdQuery: true,
+      name: 'TestAPI',
+    });
+
+    expect(models).toEqual(['model-1', 'model-2']);
+    expect(axios.get).toHaveBeenCalledWith(
+      expect.stringContaining('https://api.test.com/models?user=user123'),
+      expect.any(Object),
+    );
+  });
+
+  afterEach(() => {
+    jest.clearAllMocks();
+  });
+});
+
+describe('fetchModels with createTokenConfig true', () => {
+  const data = {
+    data: [
+      {
+        id: 'model-1',
+        pricing: {
+          prompt: '0.002',
+          completion: '0.001',
+        },
+        context_length: 1024,
+      },
+      {
+        id: 'model-2',
+        pricing: {
+          prompt: '0.003',
+          completion: '0.0015',
+        },
+        context_length: 2048,
+      },
+    ],
+  };
+
+  beforeEach(() => {
+    // Clears the mock's history before each test
+    const _utils = require('~/utils');
+    axios.get.mockResolvedValue({ data });
+  });
+
+  it('creates and stores token configuration if createTokenConfig is true', async () => {
+    await fetchModels({
+      user: 'user123',
+      apiKey: 'testApiKey',
+      baseURL: 'https://api.test.com',
+      createTokenConfig: true,
+    });
+
+    const { processModelData } = require('~/utils');
+    expect(processModelData).toHaveBeenCalled();
+    expect(processModelData).toHaveBeenCalledWith(data);
+  });
+});
+
+describe('getOpenAIModels', () => {
+  let originalEnv;
+
+  beforeEach(() => {
+    originalEnv = { ...process.env };
+    axios.get.mockRejectedValue(new Error('Network error'));
+  });
+
+  afterEach(() => {
+    process.env = originalEnv;
+    axios.get.mockReset();
+  });
+
+  it('returns default models when no environment configurations are provided (and fetch fails)', async () => {
+    const models = await getOpenAIModels({ user: 'user456' });
+    expect(models).toContain('gpt-4');
+  });
+
+  it('returns `AZURE_OPENAI_MODELS` with `azure` flag (and fetch fails)', async () => {
+    process.env.AZURE_OPENAI_MODELS = 'azure-model,azure-model-2';
+    const models = await getOpenAIModels({ azure: true });
+    expect(models).toEqual(expect.arrayContaining(['azure-model', 'azure-model-2']));
+  });
+
+  it('returns `PLUGIN_MODELS` with `plugins` flag (and fetch fails)', async () => {
+    process.env.PLUGIN_MODELS = 'plugins-model,plugins-model-2';
+    const models = await getOpenAIModels({ plugins: true });
+    expect(models).toEqual(expect.arrayContaining(['plugins-model', 'plugins-model-2']));
+  });
+
+  it('returns `OPENAI_MODELS` with no flags (and fetch fails)', async () => {
+    process.env.OPENAI_MODELS = 'openai-model,openai-model-2';
+    const models = await getOpenAIModels({});
+    expect(models).toEqual(expect.arrayContaining(['openai-model', 'openai-model-2']));
+  });
+
+  it('attempts to use OPENROUTER_API_KEY if set', async () => {
+    process.env.OPENROUTER_API_KEY = 'test-router-key';
+    const expectedModels = ['model-router-1', 'model-router-2'];
+
+    axios.get.mockResolvedValue({
+      data: {
+        data: expectedModels.map((id) => ({ id })),
+      },
+    });
+
+    const models = await getOpenAIModels({ user: 'user456' });
+
+    expect(models).toEqual(expect.arrayContaining(expectedModels));
+    expect(axios.get).toHaveBeenCalled();
+  });
+
+  it('utilizes proxy configuration when PROXY is set', async () => {
+    axios.get.mockResolvedValue({
+      data: {
+        data: [],
+      },
+    });
+    process.env.PROXY = 'http://localhost:8888';
+    await getOpenAIModels({ user: 'user456' });
+
+    expect(axios.get).toHaveBeenCalledWith(
+      expect.any(String),
+      expect.objectContaining({
+        httpsAgent: expect.anything(),
+      }),
+    );
+  });
+});
+
+describe('getOpenAIModels with mocked config', () => {
+  it('uses alternative behavior when userProvidedOpenAI is true', async () => {
+    jest.mock('./Config/EndpointService', () => ({
+      config: {
+        openAIApiKey: 'mockedApiKey',
+        userProvidedOpenAI: true,
+      },
+    }));
+    jest.mock('librechat-data-provider', () => {
+      const original = jest.requireActual('librechat-data-provider');
+      return {
+        ...original,
+        defaultModels: {
+          [original.EModelEndpoint.openAI]: ['some-default-model'],
+        },
+      };
+    });
+
+    jest.resetModules();
+    const { getOpenAIModels } = require('./ModelService');
+
+    const models = await getOpenAIModels({ user: 'user456' });
+    expect(models).toContain('some-default-model');
+  });
+});
bun.lockb (binary file not shown)
@@ -242,6 +242,9 @@ endpoints:
 - Type: Boolean
 - Example: `fetch: true`
 - **Note**: May cause slowdowns during initial use of the app if the response is delayed. Defaults to `false`.
+- **userIdQuery**: When set to `true`, adds the LibreChat user ID as a query parameter to the API models request.
+- Type: Boolean
+- Example: `userIdQuery: true`
 
 ### **titleConvo**:
 
@@ -50,8 +50,8 @@
     "format": "prettier-eslint --write \"{,!(node_modules)/**/}*.{js,jsx,ts,tsx}\"",
     "b:api": "NODE_ENV=production bun run api/server/index.js",
     "b:api:dev": "NODE_ENV=production bun run --watch api/server/index.js",
-    "b:data-provider": "cd packages/data-provider && bun run b:build",
-    "b:client": "bun --bun run b:data-provider && cd client && bun --bun run b:build",
+    "b:data": "cd packages/data-provider && bun run b:build",
+    "b:client": "bun --bun run b:data && cd client && bun --bun run b:build",
     "b:client:dev": "cd client && bun run b:dev",
     "b:test:client": "cd client && bun run b:test",
     "b:test:api": "cd api && bun run b:test",
@@ -15,6 +15,7 @@ export const endpointSchema = z.object({
   models: z.object({
     default: z.array(z.string()).min(1),
     fetch: z.boolean().optional(),
+    userIdQuery: z.boolean().optional(),
   }),
   titleConvo: z.boolean().optional(),
   titleMethod: z.union([z.literal('completion'), z.literal('functions')]).optional(),
@@ -40,7 +41,8 @@ export const configSchema = z.object({
     .object({
       custom: z.array(endpointSchema.partial()),
     })
-    .strict(),
+    .strict()
+    .optional(),
 });
 
 export type TCustomConfig = z.infer<typeof configSchema>;
@@ -177,6 +179,10 @@ export enum CacheKeys {
    * Key for the model config cache.
    */
   MODELS_CONFIG = 'modelsConfig',
+  /**
+   * Key for the model queries cache.
+   */
+  MODEL_QUERIES = 'modelQueries',
   /**
    * Key for the default endpoint config cache.
    */