mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-09-22 06:00:56 +02:00
refactor: Speed up Config fetching and Setup Config Groundwork 👷🚧 (#1297)
* refactor: move endpoint services to own directory * refactor: make endpointconfig handling more concise, separate logic, and cache result for subsequent serving * refactor: ModelController gets same treatment as EndpointController, draft OverrideController * wip: flesh out override controller more to return real value * refactor: client/api changes in anticipation of override
This commit is contained in:
parent
9b2359fc27
commit
0bae503a0a
27 changed files with 405 additions and 138 deletions
8
api/cache/getLogStores.js
vendored
8
api/cache/getLogStores.js
vendored
|
@ -1,7 +1,8 @@
|
|||
const Keyv = require('keyv');
|
||||
const keyvMongo = require('./keyvMongo');
|
||||
const keyvRedis = require('./keyvRedis');
|
||||
const { math, isEnabled } = require('../server/utils');
|
||||
const { CacheKeys } = require('~/common/enums');
|
||||
const { math, isEnabled } = require('~/server/utils');
|
||||
const { logFile, violationFile } = require('./keyvFiles');
|
||||
const { BAN_DURATION, USE_REDIS } = process.env ?? {};
|
||||
|
||||
|
@ -17,7 +18,12 @@ const pending_req = isEnabled(USE_REDIS)
|
|||
? new Keyv({ store: keyvRedis })
|
||||
: new Keyv({ namespace: 'pending_req' });
|
||||
|
||||
const config = isEnabled(USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis })
|
||||
: new Keyv({ namespace: CacheKeys.CONFIG });
|
||||
|
||||
const namespaces = {
|
||||
config,
|
||||
pending_req,
|
||||
ban: new Keyv({ store: keyvMongo, namespace: 'bans', ttl: duration }),
|
||||
general: new Keyv({ store: logFile, namespace: 'violations' }),
|
||||
|
|
15
api/common/enums.js
Normal file
15
api/common/enums.js
Normal file
|
@ -0,0 +1,15 @@
|
|||
/**
|
||||
* @typedef {Object} CacheKeys
|
||||
* @property {'config'} CONFIG - Key for the config cache.
|
||||
* @property {'modelsConfig'} MODELS_CONFIG - Key for the model config cache.
|
||||
* @property {'defaultConfig'} DEFAULT_CONFIG - Key for the default config cache.
|
||||
* @property {'overrideConfig'} OVERRIDE_CONFIG - Key for the override config cache.
|
||||
*/
|
||||
const CacheKeys = {
|
||||
CONFIG: 'config',
|
||||
MODELS_CONFIG: 'modelsConfig',
|
||||
DEFAULT_CONFIG: 'defaultConfig',
|
||||
OVERRIDE_CONFIG: 'overrideConfig',
|
||||
};
|
||||
|
||||
module.exports = { CacheKeys };
|
|
@ -1,95 +1,17 @@
|
|||
const { EModelEndpoint } = require('~/server/routes/endpoints/schemas');
|
||||
const { availableTools } = require('~/app/clients/tools');
|
||||
const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
|
||||
const {
|
||||
openAIApiKey,
|
||||
azureOpenAIApiKey,
|
||||
useAzurePlugins,
|
||||
userProvidedOpenAI,
|
||||
palmKey,
|
||||
openAI,
|
||||
// assistant,
|
||||
azureOpenAI,
|
||||
bingAI,
|
||||
chatGPTBrowser,
|
||||
anthropic,
|
||||
} = require('~/server/services/EndpointService').config;
|
||||
const { getLogStores } = require('~/cache');
|
||||
const { CacheKeys } = require('~/common/enums');
|
||||
const { loadDefaultEndpointsConfig } = require('~/server/services/Config');
|
||||
|
||||
let i = 0;
|
||||
async function endpointController(req, res) {
|
||||
let key, palmUser;
|
||||
try {
|
||||
key = require('~/data/auth.json');
|
||||
} catch (e) {
|
||||
if (i === 0) {
|
||||
i++;
|
||||
const cache = getLogStores(CacheKeys.CONFIG);
|
||||
const config = await cache.get(CacheKeys.DEFAULT_CONFIG);
|
||||
if (config) {
|
||||
res.send(config);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (palmKey === 'user_provided') {
|
||||
palmUser = true;
|
||||
if (i <= 1) {
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
const tools = await addOpenAPISpecs(availableTools);
|
||||
function transformToolsToMap(tools) {
|
||||
return tools.reduce((map, obj) => {
|
||||
map[obj.pluginKey] = obj.name;
|
||||
return map;
|
||||
}, {});
|
||||
}
|
||||
const plugins = transformToolsToMap(tools);
|
||||
|
||||
const google = key || palmUser ? { userProvide: palmUser } : false;
|
||||
|
||||
const gptPlugins =
|
||||
openAIApiKey || azureOpenAIApiKey
|
||||
? {
|
||||
plugins,
|
||||
availableAgents: ['classic', 'functions'],
|
||||
userProvide: userProvidedOpenAI,
|
||||
azure: useAzurePlugins,
|
||||
}
|
||||
: false;
|
||||
|
||||
let enabledEndpoints = [
|
||||
EModelEndpoint.openAI,
|
||||
EModelEndpoint.azureOpenAI,
|
||||
EModelEndpoint.google,
|
||||
EModelEndpoint.bingAI,
|
||||
EModelEndpoint.chatGPTBrowser,
|
||||
EModelEndpoint.gptPlugins,
|
||||
EModelEndpoint.anthropic,
|
||||
];
|
||||
|
||||
const endpointsEnv = process.env.ENDPOINTS || '';
|
||||
if (endpointsEnv) {
|
||||
enabledEndpoints = endpointsEnv
|
||||
.split(',')
|
||||
.filter((endpoint) => endpoint?.trim())
|
||||
.map((endpoint) => endpoint.trim());
|
||||
}
|
||||
|
||||
const endpointConfig = {
|
||||
[EModelEndpoint.openAI]: openAI,
|
||||
[EModelEndpoint.azureOpenAI]: azureOpenAI,
|
||||
[EModelEndpoint.google]: google,
|
||||
[EModelEndpoint.bingAI]: bingAI,
|
||||
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
|
||||
[EModelEndpoint.gptPlugins]: gptPlugins,
|
||||
[EModelEndpoint.anthropic]: anthropic,
|
||||
};
|
||||
|
||||
const orderedAndFilteredEndpoints = enabledEndpoints.reduce((config, key, index) => {
|
||||
if (endpointConfig[key]) {
|
||||
config[key] = { ...(endpointConfig[key] ?? {}), order: index };
|
||||
}
|
||||
return config;
|
||||
}, {});
|
||||
|
||||
res.send(JSON.stringify(orderedAndFilteredEndpoints));
|
||||
const defaultConfig = await loadDefaultEndpointsConfig();
|
||||
await cache.set(CacheKeys.DEFAULT_CONFIG, defaultConfig);
|
||||
res.send(JSON.stringify(defaultConfig));
|
||||
}
|
||||
|
||||
module.exports = endpointController;
|
||||
|
|
|
@ -1,35 +1,17 @@
|
|||
const { EModelEndpoint } = require('../routes/endpoints/schemas');
|
||||
const {
|
||||
getOpenAIModels,
|
||||
getChatGPTBrowserModels,
|
||||
getAnthropicModels,
|
||||
} = require('../services/ModelService');
|
||||
|
||||
const { useAzurePlugins } = require('../services/EndpointService').config;
|
||||
|
||||
const fitlerAssistantModels = (str) => {
|
||||
return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
|
||||
};
|
||||
const { getLogStores } = require('~/cache');
|
||||
const { CacheKeys } = require('~/common/enums');
|
||||
const { loadDefaultModels } = require('~/server/services/Config');
|
||||
|
||||
async function modelController(req, res) {
|
||||
const openAI = await getOpenAIModels();
|
||||
const azureOpenAI = await getOpenAIModels({ azure: true });
|
||||
const gptPlugins = await getOpenAIModels({ azure: useAzurePlugins, plugins: true });
|
||||
const chatGPTBrowser = getChatGPTBrowserModels();
|
||||
const anthropic = getAnthropicModels();
|
||||
|
||||
res.send(
|
||||
JSON.stringify({
|
||||
[EModelEndpoint.openAI]: openAI,
|
||||
[EModelEndpoint.azureOpenAI]: azureOpenAI,
|
||||
[EModelEndpoint.assistant]: openAI.filter(fitlerAssistantModels),
|
||||
[EModelEndpoint.google]: ['chat-bison', 'text-bison', 'codechat-bison'],
|
||||
[EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
|
||||
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
|
||||
[EModelEndpoint.gptPlugins]: gptPlugins,
|
||||
[EModelEndpoint.anthropic]: anthropic,
|
||||
}),
|
||||
);
|
||||
const cache = getLogStores(CacheKeys.CONFIG);
|
||||
let modelConfig = await cache.get(CacheKeys.MODELS_CONFIG);
|
||||
if (modelConfig) {
|
||||
res.send(modelConfig);
|
||||
return;
|
||||
}
|
||||
modelConfig = await loadDefaultModels();
|
||||
await cache.set(CacheKeys.MODELS_CONFIG, modelConfig);
|
||||
res.send(modelConfig);
|
||||
}
|
||||
|
||||
module.exports = modelController;
|
||||
|
|
27
api/server/controllers/OverrideController.js
Normal file
27
api/server/controllers/OverrideController.js
Normal file
|
@ -0,0 +1,27 @@
|
|||
const { getLogStores } = require('~/cache');
|
||||
const { CacheKeys } = require('~/common/enums');
|
||||
const { loadOverrideConfig } = require('~/server/services/Config');
|
||||
|
||||
async function overrideController(req, res) {
|
||||
const cache = getLogStores(CacheKeys.CONFIG);
|
||||
let overrideConfig = await cache.get(CacheKeys.OVERRIDE_CONFIG);
|
||||
if (overrideConfig) {
|
||||
res.send(overrideConfig);
|
||||
return;
|
||||
} else if (overrideConfig === false) {
|
||||
res.send(false);
|
||||
return;
|
||||
}
|
||||
overrideConfig = await loadOverrideConfig();
|
||||
const { endpointsConfig, modelsConfig } = overrideConfig;
|
||||
if (endpointsConfig) {
|
||||
await cache.set(CacheKeys.DEFAULT_CONFIG, endpointsConfig);
|
||||
}
|
||||
if (modelsConfig) {
|
||||
await cache.set(CacheKeys.MODELS_CONFIG, modelsConfig);
|
||||
}
|
||||
await cache.set(CacheKeys.OVERRIDE_CONFIG, overrideConfig);
|
||||
res.send(JSON.stringify(overrideConfig));
|
||||
}
|
||||
|
||||
module.exports = overrideController;
|
|
@ -1,7 +1,9 @@
|
|||
const express = require('express');
|
||||
const router = express.Router();
|
||||
const endpointController = require('../controllers/EndpointController');
|
||||
const endpointController = require('~/server/controllers/EndpointController');
|
||||
const overrideController = require('~/server/controllers/OverrideController');
|
||||
|
||||
router.get('/', endpointController);
|
||||
router.get('/config/override', overrideController);
|
||||
|
||||
module.exports = router;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
const { EModelEndpoint } = require('../routes/endpoints/schemas');
|
||||
const { EModelEndpoint } = require('~/server/routes/endpoints/schemas');
|
||||
|
||||
const {
|
||||
OPENAI_API_KEY: openAIApiKey,
|
13
api/server/services/Config/index.js
Normal file
13
api/server/services/Config/index.js
Normal file
|
@ -0,0 +1,13 @@
|
|||
const { config } = require('./EndpointService');
|
||||
const loadDefaultModels = require('./loadDefaultModels');
|
||||
const loadOverrideConfig = require('./loadOverrideConfig');
|
||||
const loadAsyncEndpoints = require('./loadAsyncEndpoints');
|
||||
const loadDefaultEndpointsConfig = require('./loadDefaultEConfig');
|
||||
|
||||
module.exports = {
|
||||
config,
|
||||
loadDefaultModels,
|
||||
loadOverrideConfig,
|
||||
loadAsyncEndpoints,
|
||||
loadDefaultEndpointsConfig,
|
||||
};
|
51
api/server/services/Config/loadAsyncEndpoints.js
Normal file
51
api/server/services/Config/loadAsyncEndpoints.js
Normal file
|
@ -0,0 +1,51 @@
|
|||
const { availableTools } = require('~/app/clients/tools');
|
||||
const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
|
||||
const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, palmKey } =
|
||||
require('./EndpointService').config;
|
||||
|
||||
/**
|
||||
* Load async endpoints and return a configuration object
|
||||
*/
|
||||
async function loadAsyncEndpoints() {
|
||||
let i = 0;
|
||||
let key, palmUser;
|
||||
try {
|
||||
key = require('~/data/auth.json');
|
||||
} catch (e) {
|
||||
if (i === 0) {
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
if (palmKey === 'user_provided') {
|
||||
palmUser = true;
|
||||
if (i <= 1) {
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
const tools = await addOpenAPISpecs(availableTools);
|
||||
function transformToolsToMap(tools) {
|
||||
return tools.reduce((map, obj) => {
|
||||
map[obj.pluginKey] = obj.name;
|
||||
return map;
|
||||
}, {});
|
||||
}
|
||||
const plugins = transformToolsToMap(tools);
|
||||
|
||||
const google = key || palmUser ? { userProvide: palmUser } : false;
|
||||
|
||||
const gptPlugins =
|
||||
openAIApiKey || azureOpenAIApiKey
|
||||
? {
|
||||
plugins,
|
||||
availableAgents: ['classic', 'functions'],
|
||||
userProvide: userProvidedOpenAI,
|
||||
azure: useAzurePlugins,
|
||||
}
|
||||
: false;
|
||||
|
||||
return { google, gptPlugins };
|
||||
}
|
||||
|
||||
module.exports = loadAsyncEndpoints;
|
52
api/server/services/Config/loadDefaultEConfig.js
Normal file
52
api/server/services/Config/loadDefaultEConfig.js
Normal file
|
@ -0,0 +1,52 @@
|
|||
const { EModelEndpoint } = require('~/server/routes/endpoints/schemas');
|
||||
const loadAsyncEndpoints = require('./loadAsyncEndpoints');
|
||||
const { config } = require('./EndpointService');
|
||||
|
||||
/**
|
||||
* Load async endpoints and return a configuration object
|
||||
* @function loadDefaultEndpointsConfig
|
||||
* @returns {Promise<Object.<string, EndpointWithOrder>>} An object whose keys are endpoint names and values are objects that contain the endpoint configuration and an order.
|
||||
*/
|
||||
async function loadDefaultEndpointsConfig() {
|
||||
const { google, gptPlugins } = await loadAsyncEndpoints();
|
||||
const { openAI, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config;
|
||||
|
||||
let enabledEndpoints = [
|
||||
EModelEndpoint.openAI,
|
||||
EModelEndpoint.azureOpenAI,
|
||||
EModelEndpoint.google,
|
||||
EModelEndpoint.bingAI,
|
||||
EModelEndpoint.chatGPTBrowser,
|
||||
EModelEndpoint.gptPlugins,
|
||||
EModelEndpoint.anthropic,
|
||||
];
|
||||
|
||||
const endpointsEnv = process.env.ENDPOINTS || '';
|
||||
if (endpointsEnv) {
|
||||
enabledEndpoints = endpointsEnv
|
||||
.split(',')
|
||||
.filter((endpoint) => endpoint?.trim())
|
||||
.map((endpoint) => endpoint.trim());
|
||||
}
|
||||
|
||||
const endpointConfig = {
|
||||
[EModelEndpoint.openAI]: openAI,
|
||||
[EModelEndpoint.azureOpenAI]: azureOpenAI,
|
||||
[EModelEndpoint.google]: google,
|
||||
[EModelEndpoint.bingAI]: bingAI,
|
||||
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
|
||||
[EModelEndpoint.gptPlugins]: gptPlugins,
|
||||
[EModelEndpoint.anthropic]: anthropic,
|
||||
};
|
||||
|
||||
const orderedAndFilteredEndpoints = enabledEndpoints.reduce((config, key, index) => {
|
||||
if (endpointConfig[key]) {
|
||||
config[key] = { ...(endpointConfig[key] ?? {}), order: index };
|
||||
}
|
||||
return config;
|
||||
}, {});
|
||||
|
||||
return orderedAndFilteredEndpoints;
|
||||
}
|
||||
|
||||
module.exports = loadDefaultEndpointsConfig;
|
32
api/server/services/Config/loadDefaultModels.js
Normal file
32
api/server/services/Config/loadDefaultModels.js
Normal file
|
@ -0,0 +1,32 @@
|
|||
const {
|
||||
getOpenAIModels,
|
||||
getChatGPTBrowserModels,
|
||||
getAnthropicModels,
|
||||
} = require('~/server/services/ModelService');
|
||||
const { EModelEndpoint } = require('~/server/routes/endpoints/schemas');
|
||||
const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;
|
||||
|
||||
const fitlerAssistantModels = (str) => {
|
||||
return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
|
||||
};
|
||||
|
||||
async function loadDefaultModels() {
|
||||
const openAI = await getOpenAIModels();
|
||||
const anthropic = getAnthropicModels();
|
||||
const chatGPTBrowser = getChatGPTBrowserModels();
|
||||
const azureOpenAI = await getOpenAIModels({ azure: true });
|
||||
const gptPlugins = await getOpenAIModels({ azure: useAzurePlugins, plugins: true });
|
||||
|
||||
return {
|
||||
[EModelEndpoint.openAI]: openAI,
|
||||
[EModelEndpoint.azureOpenAI]: azureOpenAI,
|
||||
[EModelEndpoint.assistant]: openAI.filter(fitlerAssistantModels),
|
||||
[EModelEndpoint.google]: ['chat-bison', 'text-bison', 'codechat-bison'],
|
||||
[EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
|
||||
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
|
||||
[EModelEndpoint.gptPlugins]: gptPlugins,
|
||||
[EModelEndpoint.anthropic]: anthropic,
|
||||
};
|
||||
}
|
||||
|
||||
module.exports = loadDefaultModels;
|
6
api/server/services/Config/loadOverrideConfig.js
Normal file
6
api/server/services/Config/loadOverrideConfig.js
Normal file
|
@ -0,0 +1,6 @@
|
|||
// fetch some remote config
|
||||
async function loadOverrideConfig() {
|
||||
return false;
|
||||
}
|
||||
|
||||
module.exports = loadOverrideConfig;
|
|
@ -1,11 +1,11 @@
|
|||
const HttpsProxyAgent = require('https-proxy-agent');
|
||||
const axios = require('axios');
|
||||
const Keyv = require('keyv');
|
||||
const { isEnabled } = require('../utils');
|
||||
const { extractBaseURL } = require('../../utils');
|
||||
const keyvRedis = require('../../cache/keyvRedis');
|
||||
// const { getAzureCredentials, genAzureChatCompletion } = require('../../utils/');
|
||||
const { openAIApiKey, userProvidedOpenAI } = require('./EndpointService').config;
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
const { extractBaseURL } = require('~/utils');
|
||||
const keyvRedis = require('~/cache/keyvRedis');
|
||||
// const { getAzureCredentials, genAzureChatCompletion } = require('~/utils/');
|
||||
const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService').config;
|
||||
|
||||
const modelsCache = isEnabled(process.env.USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis })
|
||||
|
|
|
@ -239,3 +239,68 @@
|
|||
* @typedef {AgentAction & { toolCallId: string; run_id: string; thread_id: string; }} OpenAIAssistantAction
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports EndpointServiceConfig
|
||||
* @typedef {Object} EndpointServiceConfig
|
||||
* @property {string} openAIApiKey - The API key for OpenAI.
|
||||
* @property {string} azureOpenAIApiKey - The API key for Azure OpenAI.
|
||||
* @property {boolean} useAzurePlugins - Flag to indicate if Azure plugins are used.
|
||||
* @property {boolean} userProvidedOpenAI - Flag to indicate if OpenAI API key is user provided.
|
||||
* @property {string} palmKey - The Palm key.
|
||||
* @property {boolean|{userProvide: boolean}} [openAI] - Flag to indicate if OpenAI endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [assistant] - Flag to indicate if Assistant endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [azureOpenAI] - Flag to indicate if Azure OpenAI endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [chatGPTBrowser] - Flag to indicate if ChatGPT Browser endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration.
|
||||
* @property {boolean|GptPlugins} [gptPlugins] - Configuration for GPT plugins.
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports Plugin
|
||||
* @typedef {Object} Plugin
|
||||
* @property {string} pluginKey - The key of the plugin.
|
||||
* @property {string} name - The name of the plugin.
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports GptPlugins
|
||||
* @typedef {Object} GptPlugins
|
||||
* @property {Plugin[]} plugins - An array of plugins available.
|
||||
* @property {string[]} availableAgents - Available agents, 'classic' or 'functions'.
|
||||
* @property {boolean} userProvide - A flag indicating if the user has provided the data.
|
||||
* @property {boolean} azure - A flag indicating if azure plugins are used.
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports DefaultConfig
|
||||
* @typedef {Object} DefaultConfig
|
||||
* @property {boolean|{userProvide: boolean}} [openAI] - Flag to indicate if OpenAI endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [assistant] - Flag to indicate if Assistant endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [azureOpenAI] - Flag to indicate if Azure OpenAI endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [chatGPTBrowser] - Flag to indicate if ChatGPT Browser endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
|
||||
* @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration.
|
||||
* @property {boolean|GptPlugins} [gptPlugins] - Configuration for GPT plugins.
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports EndpointConfig
|
||||
* @typedef {boolean|{userProvide: boolean}|GptPlugins} EndpointConfig
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports EndpointWithOrder
|
||||
* @typedef {Object} EndpointWithOrder
|
||||
* @property {EndpointConfig} config - The configuration of the endpoint.
|
||||
* @property {number} order - The order of the endpoint.
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
|
|
@ -1 +1,2 @@
|
|||
export * from './mutations';
|
||||
export * from './queries';
|
||||
|
|
17
client/src/data-provider/queries.ts
Normal file
17
client/src/data-provider/queries.ts
Normal file
|
@ -0,0 +1,17 @@
|
|||
import { UseQueryOptions, useQuery, QueryObserverResult } from '@tanstack/react-query';
|
||||
import { QueryKeys, dataService } from 'librechat-data-provider';
|
||||
|
||||
export const useGetEndpointsConfigOverride = <TData = unknown | boolean>(
|
||||
config?: UseQueryOptions<unknown | boolean, unknown, TData>,
|
||||
): QueryObserverResult<TData> => {
|
||||
return useQuery<unknown | boolean, unknown, TData>(
|
||||
[QueryKeys.endpointsConfigOverride],
|
||||
() => dataService.getEndpointsConfigOverride(),
|
||||
{
|
||||
refetchOnWindowFocus: false,
|
||||
refetchOnReconnect: false,
|
||||
refetchOnMount: false,
|
||||
...config,
|
||||
},
|
||||
);
|
||||
};
|
1
client/src/hooks/Config/index.ts
Normal file
1
client/src/hooks/Config/index.ts
Normal file
|
@ -0,0 +1 @@
|
|||
export { default as useConfigOverride } from './useConfigOverride';
|
47
client/src/hooks/Config/useConfigOverride.ts
Normal file
47
client/src/hooks/Config/useConfigOverride.ts
Normal file
|
@ -0,0 +1,47 @@
|
|||
import { useSetRecoilState } from 'recoil';
|
||||
import { useEffect, useCallback } from 'react';
|
||||
import { useQueryClient } from '@tanstack/react-query';
|
||||
import type { TEndpointsConfig, TModelsConfig } from 'librechat-data-provider';
|
||||
import { useGetEndpointsConfigOverride } from '~/data-provider';
|
||||
import { QueryKeys } from 'librechat-data-provider';
|
||||
import store from '~/store';
|
||||
|
||||
type TempOverrideType = Record<string, unknown> & {
|
||||
endpointsConfig: TEndpointsConfig;
|
||||
modelsConfig: TModelsConfig;
|
||||
combinedOptions: unknown[];
|
||||
combined: boolean;
|
||||
};
|
||||
|
||||
export default function useConfigOverride() {
|
||||
const setModelsConfig = useSetRecoilState(store.modelsConfig);
|
||||
const setEndpointsQueryEnabled = useSetRecoilState(store.endpointsQueryEnabled);
|
||||
const overrideQuery = useGetEndpointsConfigOverride({
|
||||
staleTime: Infinity,
|
||||
});
|
||||
|
||||
const queryClient = useQueryClient();
|
||||
|
||||
const handleOverride = useCallback(
|
||||
async (data: unknown | boolean) => {
|
||||
const { endpointsConfig, modelsConfig } = data as TempOverrideType;
|
||||
if (endpointsConfig) {
|
||||
setEndpointsQueryEnabled(false);
|
||||
await queryClient.cancelQueries([QueryKeys.endpoints]);
|
||||
queryClient.setQueryData([QueryKeys.endpoints], endpointsConfig);
|
||||
}
|
||||
if (modelsConfig) {
|
||||
await queryClient.cancelQueries([QueryKeys.models]);
|
||||
queryClient.setQueryData([QueryKeys.models], modelsConfig);
|
||||
setModelsConfig(modelsConfig);
|
||||
}
|
||||
},
|
||||
[queryClient, setEndpointsQueryEnabled, setModelsConfig],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
if (overrideQuery.data) {
|
||||
handleOverride(overrideQuery.data);
|
||||
}
|
||||
}, [overrideQuery.data, handleOverride]);
|
||||
}
|
|
@ -1,4 +1,5 @@
|
|||
export * from './Messages';
|
||||
export * from './Config';
|
||||
export * from './Input';
|
||||
export * from './Conversations';
|
||||
|
||||
|
|
|
@ -1,20 +1,23 @@
|
|||
import { useRecoilValue } from 'recoil';
|
||||
import { useEffect, useRef } from 'react';
|
||||
import { useParams } from 'react-router-dom';
|
||||
import { useGetConvoIdQuery, useGetModelsQuery } from 'librechat-data-provider';
|
||||
import { useNewConvo, useConfigOverride } from '~/hooks';
|
||||
import ChatView from '~/components/Chat/ChatView';
|
||||
import useAuthRedirect from './useAuthRedirect';
|
||||
import { useNewConvo } from '~/hooks';
|
||||
import store from '~/store';
|
||||
|
||||
export default function ChatRoute() {
|
||||
const index = 0;
|
||||
useConfigOverride();
|
||||
const { conversationId } = useParams();
|
||||
const { conversation } = store.useCreateConversationAtom(index);
|
||||
const modelsQueryEnabled = useRecoilValue(store.modelsQueryEnabled);
|
||||
const { isAuthenticated } = useAuthRedirect();
|
||||
const { newConversation } = useNewConvo();
|
||||
const hasSetConversation = useRef(false);
|
||||
|
||||
const modelsQuery = useGetModelsQuery({ enabled: isAuthenticated });
|
||||
const modelsQuery = useGetModelsQuery({ enabled: isAuthenticated && modelsQueryEnabled });
|
||||
const initialConvoQuery = useGetConvoIdQuery(conversationId ?? '', {
|
||||
enabled: isAuthenticated && conversationId !== 'new',
|
||||
});
|
||||
|
|
|
@ -20,11 +20,12 @@ export default function Root() {
|
|||
const submission = useRecoilValue(store.submission);
|
||||
useServerStream(submission ?? null);
|
||||
|
||||
const modelsQueryEnabled = useRecoilValue(store.modelsQueryEnabled);
|
||||
const setIsSearchEnabled = useSetRecoilState(store.isSearchEnabled);
|
||||
const setModelsConfig = useSetRecoilState(store.modelsConfig);
|
||||
|
||||
const searchEnabledQuery = useGetSearchEnabledQuery({ enabled: isAuthenticated });
|
||||
const modelsQuery = useGetModelsQuery({ enabled: isAuthenticated });
|
||||
const modelsQuery = useGetModelsQuery({ enabled: isAuthenticated && modelsQueryEnabled });
|
||||
|
||||
useEffect(() => {
|
||||
localStorage.setItem('navVisible', JSON.stringify(navVisible));
|
||||
|
|
|
@ -17,6 +17,11 @@ const endpointsConfig = atom<TEndpointsConfig>({
|
|||
default: defaultConfig,
|
||||
});
|
||||
|
||||
const endpointsQueryEnabled = atom<boolean>({
|
||||
key: 'endpointsQueryEnabled',
|
||||
default: true,
|
||||
});
|
||||
|
||||
const plugins = selector({
|
||||
key: 'plugins',
|
||||
get: ({ get }) => {
|
||||
|
@ -62,4 +67,5 @@ export default {
|
|||
endpointsFilter,
|
||||
availableEndpoints,
|
||||
defaultConfig,
|
||||
endpointsQueryEnabled,
|
||||
};
|
||||
|
|
|
@ -25,6 +25,12 @@ const modelsConfig = atom<TModelsConfig>({
|
|||
},
|
||||
});
|
||||
|
||||
const modelsQueryEnabled = atom<boolean>({
|
||||
key: 'modelsQueryEnabled',
|
||||
default: true,
|
||||
});
|
||||
|
||||
export default {
|
||||
modelsConfig,
|
||||
modelsQueryEnabled,
|
||||
};
|
||||
|
|
|
@ -38,6 +38,8 @@ export const deletePreset = () => '/api/presets/delete';
|
|||
|
||||
export const aiEndpoints = () => '/api/endpoints';
|
||||
|
||||
export const endpointsConfigOverride = () => '/api/endpoints/config/override';
|
||||
|
||||
export const models = () => '/api/models';
|
||||
|
||||
export const tokenizer = () => '/api/tokenizer';
|
||||
|
|
|
@ -105,14 +105,6 @@ export const searchConversations = async (
|
|||
return request.get(endpoints.search(q, pageNumber));
|
||||
};
|
||||
|
||||
export const getAIEndpoints = (): Promise<t.TEndpointsConfig> => {
|
||||
return request.get(endpoints.aiEndpoints());
|
||||
};
|
||||
|
||||
export const getModels = async (): Promise<t.TModelsConfig> => {
|
||||
return request.get(endpoints.models());
|
||||
};
|
||||
|
||||
export const updateTokenCount = (text: string) => {
|
||||
return request.post(endpoints.tokenizer(), { arg: text });
|
||||
};
|
||||
|
@ -154,10 +146,24 @@ export const updateUserPlugins = (payload: t.TUpdateUserPlugins) => {
|
|||
return request.post(endpoints.userPlugins(), payload);
|
||||
};
|
||||
|
||||
/* Config */
|
||||
|
||||
export const getStartupConfig = (): Promise<t.TStartupConfig> => {
|
||||
return request.get(endpoints.config());
|
||||
};
|
||||
|
||||
export const getAIEndpoints = (): Promise<t.TEndpointsConfig> => {
|
||||
return request.get(endpoints.aiEndpoints());
|
||||
};
|
||||
|
||||
export const getModels = async (): Promise<t.TModelsConfig> => {
|
||||
return request.get(endpoints.models());
|
||||
};
|
||||
|
||||
export const getEndpointsConfigOverride = (): Promise<unknown | boolean> => {
|
||||
return request.get(endpoints.endpointsConfigOverride());
|
||||
};
|
||||
|
||||
/* Assistants */
|
||||
|
||||
export const createAssistant = (data: a.AssistantCreateParams): Promise<a.Assistant> => {
|
||||
|
|
|
@ -15,6 +15,7 @@ export enum QueryKeys {
|
|||
startupConfig = 'startupConfig',
|
||||
assistants = 'assistants',
|
||||
assistant = 'assistant',
|
||||
endpointsConfigOverride = 'endpointsConfigOverride',
|
||||
}
|
||||
|
||||
export enum MutationKeys {
|
||||
|
|
|
@ -257,6 +257,7 @@ export const useGetEndpointsQuery = <TData = t.TEndpointsConfig>(
|
|||
[QueryKeys.endpoints],
|
||||
() => dataService.getAIEndpoints(),
|
||||
{
|
||||
staleTime: Infinity,
|
||||
refetchOnWindowFocus: false,
|
||||
refetchOnReconnect: false,
|
||||
refetchOnMount: false,
|
||||
|
@ -269,6 +270,7 @@ export const useGetModelsQuery = (
|
|||
config?: UseQueryOptions<t.TModelsConfig>,
|
||||
): QueryObserverResult<t.TModelsConfig> => {
|
||||
return useQuery<t.TModelsConfig>([QueryKeys.models], () => dataService.getModels(), {
|
||||
staleTime: Infinity,
|
||||
refetchOnWindowFocus: false,
|
||||
refetchOnReconnect: false,
|
||||
refetchOnMount: false,
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue