💫 feat: Config File & Custom Endpoints (#1474)

* WIP(backend/api): custom endpoint

* WIP(frontend/client): custom endpoint

* chore: adjust typedefs for configs

* refactor: use data-provider for cache keys and rename enums and custom endpoint for better clarity and compatibility

* feat: loadYaml utility

* refactor: rename back to  from  and proof-of-concept for creating schemas from user-defined defaults

* refactor: remove custom endpoint from default endpointsConfig as it will be exclusively managed by yaml config

* refactor(EndpointController): rename variables for clarity

* feat: initial load custom config

* feat(server/utils): add simple `isUserProvided` helper

* chore(types): update TConfig type

* refactor: remove custom endpoint handling from model services, as it will be handled by config; modularize fetching of models

* feat: loadCustomConfig, loadConfigEndpoints, loadConfigModels

* chore: reorganize server init imports, invoke loadCustomConfig

* refactor(loadConfigEndpoints/Models): return each custom endpoint as standalone endpoint

* refactor(Endpoint/ModelController): spread config values after default (temporary)

* chore(client): fix type issues

* WIP: first pass for multiple custom endpoints
- add endpointType to Conversation schema
- add/update zod schemas for both convo/presets to allow non-EModelEndpoint values as endpoint (also using type assertion)
- use `endpointType` value as `endpoint` where mapping to type is necessary using this field
- use custom defined `endpoint` value and not type for mapping to modelsConfig
- misc: add return type to `getDefaultEndpoint`
- in `useNewConvo`, add the endpointType if it wasn't already added to conversation
- EndpointsMenu: use user-defined endpoint name as Title in menu
- TODO: custom icon via custom config, change unknown to robot icon

* refactor(parseConvo): pass args as an object and change where used accordingly; chore: comment out 'create schema' code

* chore: remove unused availableModels field in TConfig type

* refactor(parseCompactConvo): pass args as an object and change where used accordingly

* feat: chat through custom endpoint

* chore(message/convoSchemas): avoid saving empty arrays

* fix(BaseClient/saveMessageToDatabase): save endpointType

* refactor(ChatRoute): show Spinner if endpointsQuery or modelsQuery are still loading, which is apparent with slow fetching of models/remote config on first serve

* fix(useConversation): assign endpointType if it's missing

* fix(SaveAsPreset): pass real endpoint and endpointType when saving a Preset

* chore: reorganize type order for TConfig, add `iconURL`

* feat: custom endpoint icon support:
- use UnknownIcon in all icon contexts
- add mistral and openrouter as known endpoints, and add their icons
- iconURL support

* fix(presetSchema): move endpointType to default schema definitions shared between convoSchema and defaults

* refactor(Settings/OpenAI): remove legacy `isOpenAI` flag

* fix(OpenAIClient): do not invoke abortCompletion on completion error

* feat: add responseSender/label support for custom endpoints:
- use defaultModelLabel field in endpointOption
- add model defaults for custom endpoints in `getResponseSender`
- add `useGetSender` hook which uses EndpointsQuery to determine `defaultModelLabel`
- include defaultModelLabel from endpointConfig in custom endpoint client options
- pass `endpointType` to `getResponseSender`

* feat(OpenAIClient): use custom options from config file

* refactor: rename `defaultModelLabel` to `modelDisplayLabel`

* refactor(data-provider): separate concerns from `schemas` into `parsers`, `config`, and fix imports elsewhere

* feat: `iconURL` and extract environment variables from custom endpoint config values

* feat: custom config validation via zod schema, rename and move to `./projectRoot/librechat.yaml`
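
  As a sketch (not part of the commit message), a minimal `librechat.yaml` using only fields this PR reads in the `loadConfigEndpoints`/`loadConfigModels` changes below: `cache` at the root, plus an `endpoints.custom` list with `name`, `apiKey`, `baseURL`, and `models`. The Mistral values are illustrative assumptions:

  ```yaml
  cache: true
  endpoints:
    custom:
      - name: 'Mistral' # must not collide with a default endpoint name
        apiKey: '${MISTRAL_API_KEY}' # resolved via extractEnvVariable; 'user_provided' defers to the user
        baseURL: 'https://api.mistral.ai/v1'
        models:
          default: ['mistral-tiny', 'mistral-small', 'mistral-medium'] # illustrative model IDs
          fetch: true # fetch the model list when key/URL are not user-provided
  ```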

* docs: custom config docs and examples

* fix(OpenAIClient/mistral): Mistral does not allow a singular system message; also add `useChatCompletion` flag to use openai-node for title completions

* fix(custom/initializeClient): extract env var and use `isUserProvided` function

* Update librechat.example.yaml

* feat(InputWithLabel): add className props, and forwardRef

* fix(streamResponse): handle error edge case where either messages or convos query throws an error

* fix(useSSE): handle errorHandler edge cases where the error response is or is not properly formatted by the API, especially when a conversationId is not yet provided, ensuring the stream is properly closed on error

* feat: user_provided keys for custom endpoints

* fix(config/endpointSchema): do not allow default endpoint values in custom endpoint `name`

* feat(loadConfigModels): extract env variables and optimize fetching models

* feat: support custom endpoint iconURL for messages and Nav

* feat(OpenAIClient): add/dropParams support
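
  A hedged sketch of `addParams`/`dropParams` in the same config (parameter names are illustrative): per the `OpenAIClient` changes below, `addParams` entries are spread into `modelOptions`, and each `dropParams` key is deleted from the payload before the request is sent.

  ```yaml
  - name: 'Mistral'
    # ...same endpoint entry as above...
    addParams:
      safe_mode: true # extra body parameter merged into modelOptions (illustrative)
    dropParams: ['stop', 'frequency_penalty'] # keys removed from the request payload
  ```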

* docs: update docs with default params, add/dropParams, and notes to use config file instead of `OPENAI_REVERSE_PROXY`

* docs: update docs with additional notes

* feat(maxTokensMap): add mistral models (32k context)

* docs: update openrouter notes

* Update ai_setup.md

* docs(custom_config): add table of contents and fix note about custom name

* docs(custom_config): reorder ToC

* Update custom_config.md

* Add note about `max_tokens` field in custom_config.md
Danny Avila 2024-01-03 09:22:48 -05:00 committed by GitHub
parent 3f98f92d4c
commit 29473a72db
100 changed files with 2146 additions and 627 deletions

.gitignore

@@ -48,6 +48,9 @@ bower_components/
 .floo
 .flooignore

+#config file
+librechat.yaml
+
 # Environment
 .npmrc
 .env*

@@ -520,6 +520,7 @@ class BaseClient {
       await saveConvo(user, {
         conversationId: message.conversationId,
         endpoint: this.options.endpoint,
+        endpointType: this.options.endpointType,
         ...endpointOptions,
       });
     }

@@ -1,6 +1,6 @@
 const OpenAI = require('openai');
 const { HttpsProxyAgent } = require('https-proxy-agent');
-const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
+const { getResponseSender } = require('librechat-data-provider');
 const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
 const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
 const { getModelMaxTokens, genAzureChatCompletion, extractBaseURL } = require('~/utils');

@@ -94,10 +94,23 @@
     }

     const { reverseProxyUrl: reverseProxy } = this.options;

+    if (
+      !this.useOpenRouter &&
+      reverseProxy &&
+      reverseProxy.includes('https://openrouter.ai/api/v1')
+    ) {
+      this.useOpenRouter = true;
+    }
+
     this.FORCE_PROMPT =
       isEnabled(OPENAI_FORCE_PROMPT) ||
       (reverseProxy && reverseProxy.includes('completions') && !reverseProxy.includes('chat'));

+    if (typeof this.options.forcePrompt === 'boolean') {
+      this.FORCE_PROMPT = this.options.forcePrompt;
+    }
+
     if (this.azure && process.env.AZURE_OPENAI_DEFAULT_MODEL) {
       this.azureEndpoint = genAzureChatCompletion(this.azure, this.modelOptions.model);
       this.modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;

@@ -146,8 +159,10 @@ class OpenAIClient extends BaseClient {
       this.options.sender ??
       getResponseSender({
         model: this.modelOptions.model,
-        endpoint: EModelEndpoint.openAI,
+        endpoint: this.options.endpoint,
+        endpointType: this.options.endpointType,
         chatGptLabel: this.options.chatGptLabel,
+        modelDisplayLabel: this.options.modelDisplayLabel,
       });
     this.userLabel = this.options.userLabel || 'User';

@@ -434,7 +449,7 @@
       },
       opts.abortController || new AbortController(),
     );
-  } else if (typeof opts.onProgress === 'function') {
+  } else if (typeof opts.onProgress === 'function' || this.options.useChatCompletion) {
     reply = await this.chatCompletion({
       payload,
       clientOptions: opts,

@@ -530,6 +545,19 @@
     return llm;
   }

+  /**
+   * Generates a concise title for a conversation based on the user's input text and response.
+   * Uses either specified method or starts with the OpenAI `functions` method (using LangChain).
+   * If the `functions` method fails, it falls back to the `completion` method,
+   * which involves sending a chat completion request with specific instructions for title generation.
+   *
+   * @param {Object} params - The parameters for the conversation title generation.
+   * @param {string} params.text - The user's input.
+   * @param {string} [params.responseText=''] - The AI's immediate response to the user.
+   *
+   * @returns {Promise<string | 'New Chat'>} A promise that resolves to the generated conversation title.
+   * In case of failure, it will return the default title, "New Chat".
+   */
   async titleConvo({ text, responseText = '' }) {
     let title = 'New Chat';
     const convo = `||>User:

@@ -539,32 +567,25 @@
     const { OPENAI_TITLE_MODEL } = process.env ?? {};

+    const model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
+
     const modelOptions = {
-      model: OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo',
+      // TODO: remove the gpt fallback and make it specific to endpoint
+      model,
       temperature: 0.2,
       presence_penalty: 0,
       frequency_penalty: 0,
       max_tokens: 16,
     };

-    try {
-      this.abortController = new AbortController();
-      const llm = this.initializeLLM({ ...modelOptions, context: 'title', tokenBuffer: 150 });
-      title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
-    } catch (e) {
-      if (e?.message?.toLowerCase()?.includes('abort')) {
-        logger.debug('[OpenAIClient] Aborted title generation');
-        return;
-      }
-      logger.error(
-        '[OpenAIClient] There was an issue generating title with LangChain, trying the old method...',
-        e,
-      );
-      modelOptions.model = OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
+    const titleChatCompletion = async () => {
+      modelOptions.model = model;

       if (this.azure) {
         modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL ?? modelOptions.model;
         this.azureEndpoint = genAzureChatCompletion(this.azure, modelOptions.model);
       }

       const instructionsPayload = [
         {
           role: 'system',

@@ -578,10 +599,38 @@ ${convo}
       ];

       try {
-        title = (await this.sendPayload(instructionsPayload, { modelOptions })).replaceAll('"', '');
+        title = (
+          await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion: true })
+        ).replaceAll('"', '');
       } catch (e) {
-        logger.error('[OpenAIClient] There was another issue generating the title', e);
+        logger.error(
+          '[OpenAIClient] There was an issue generating the title with the completion method',
+          e,
+        );
       }
+    };
+
+    if (this.options.titleMethod === 'completion') {
+      await titleChatCompletion();
+      logger.debug('[OpenAIClient] Convo Title: ' + title);
+      return title;
+    }
+
+    try {
+      this.abortController = new AbortController();
+      const llm = this.initializeLLM({ ...modelOptions, context: 'title', tokenBuffer: 150 });
+      title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
+    } catch (e) {
+      if (e?.message?.toLowerCase()?.includes('abort')) {
+        logger.debug('[OpenAIClient] Aborted title generation');
+        return;
+      }
+      logger.error(
+        '[OpenAIClient] There was an issue generating title with LangChain, trying completion method...',
+        e,
+      );
+      await titleChatCompletion();
     }

     logger.debug('[OpenAIClient] Convo Title: ' + title);

@@ -593,8 +642,11 @@ ${convo}
     let context = messagesToRefine;
     let prompt;

+    // TODO: remove the gpt fallback and make it specific to endpoint
     const { OPENAI_SUMMARY_MODEL = 'gpt-3.5-turbo' } = process.env ?? {};
-    const maxContextTokens = getModelMaxTokens(OPENAI_SUMMARY_MODEL) ?? 4095;
+    const model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
+    const maxContextTokens = getModelMaxTokens(model) ?? 4095;

     // 3 tokens for the assistant label, and 98 for the summarizer prompt (101)
     let promptBuffer = 101;

@@ -644,7 +696,7 @@ ${convo}
     logger.debug('[OpenAIClient] initialPromptTokens', initialPromptTokens);

     const llm = this.initializeLLM({
-      model: OPENAI_SUMMARY_MODEL,
+      model,
       temperature: 0.2,
       context: 'summary',
       tokenBuffer: initialPromptTokens,

@@ -719,7 +771,9 @@ ${convo}
     if (!abortController) {
       abortController = new AbortController();
     }
-    const modelOptions = { ...this.modelOptions };
+
+    let modelOptions = { ...this.modelOptions };
+
     if (typeof onProgress === 'function') {
       modelOptions.stream = true;
     }

@@ -779,6 +833,27 @@ ${convo}
       ...opts,
     });

+    /* hacky fix for Mistral AI API not allowing a singular system message in payload */
+    if (opts.baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
+      const { messages } = modelOptions;
+      if (messages.length === 1 && messages[0].role === 'system') {
+        modelOptions.messages[0].role = 'user';
+      }
+    }
+
+    if (this.options.addParams && typeof this.options.addParams === 'object') {
+      modelOptions = {
+        ...modelOptions,
+        ...this.options.addParams,
+      };
+    }
+
+    if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
+      this.options.dropParams.forEach((param) => {
+        delete modelOptions[param];
+      });
+    }
+
     let UnexpectedRoleError = false;
     if (modelOptions.stream) {
       const stream = await openai.beta.chat.completions

api/cache/getCustomConfig.js (new file)

@@ -0,0 +1,23 @@
+const { CacheKeys } = require('librechat-data-provider');
+const loadCustomConfig = require('~/server/services/Config/loadCustomConfig');
+const getLogStores = require('./getLogStores');
+
+/**
+ * Retrieves the configuration object
+ * @function getCustomConfig
+ */
+async function getCustomConfig() {
+  const cache = getLogStores(CacheKeys.CONFIG_STORE);
+  let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG);
+
+  if (!customConfig) {
+    customConfig = await loadCustomConfig();
+  }
+
+  if (!customConfig) {
+    return null;
+  }
+
+  return customConfig;
+}
+
+module.exports = getCustomConfig;

@@ -1,9 +1,10 @@
 const Keyv = require('keyv');
-const keyvMongo = require('./keyvMongo');
-const keyvRedis = require('./keyvRedis');
-const { CacheKeys } = require('~/common/enums');
-const { math, isEnabled } = require('~/server/utils');
+const { CacheKeys } = require('librechat-data-provider');
 const { logFile, violationFile } = require('./keyvFiles');
+const { math, isEnabled } = require('~/server/utils');
+const keyvRedis = require('./keyvRedis');
+const keyvMongo = require('./keyvMongo');

 const { BAN_DURATION, USE_REDIS } = process.env ?? {};

 const duration = math(BAN_DURATION, 7200000);

@@ -20,10 +21,10 @@ const pending_req = isEnabled(USE_REDIS)
 const config = isEnabled(USE_REDIS)
   ? new Keyv({ store: keyvRedis })
-  : new Keyv({ namespace: CacheKeys.CONFIG });
+  : new Keyv({ namespace: CacheKeys.CONFIG_STORE });

 const namespaces = {
-  config,
+  [CacheKeys.CONFIG_STORE]: config,
   pending_req,
   ban: new Keyv({ store: keyvMongo, namespace: 'bans', ttl: duration }),
   general: new Keyv({ store: logFile, namespace: 'violations' }),

@@ -39,19 +40,15 @@ const namespaces = {
  * Returns the keyv cache specified by type.
  * If an invalid type is passed, an error will be thrown.
  *
- * @module getLogStores
- * @requires keyv - a simple key-value storage that allows you to easily switch out storage adapters.
- * @requires keyvFiles - a module that includes the logFile and violationFile.
- *
- * @param {string} type - The type of violation, which can be 'concurrent', 'message_limit', 'registrations' or 'logins'.
- * @returns {Keyv} - If a valid type is passed, returns an object containing the logs for violations of the specified type.
- * @throws Will throw an error if an invalid violation type is passed.
+ * @param {string} key - The key for the namespace to access
+ * @returns {Keyv} - If a valid key is passed, returns an object containing the cache store of the specified key.
+ * @throws Will throw an error if an invalid key is passed.
  */
-const getLogStores = (type) => {
-  if (!type || !namespaces[type]) {
-    throw new Error(`Invalid store type: ${type}`);
+const getLogStores = (key) => {
+  if (!key || !namespaces[key]) {
+    throw new Error(`Invalid store key: ${key}`);
   }
-  return namespaces[type];
+  return namespaces[key];
 };

 module.exports = getLogStores;

@@ -1,17 +0,0 @@
-/**
- * @typedef {Object} CacheKeys
- * @property {'config'} CONFIG - Key for the config cache.
- * @property {'plugins'} PLUGINS - Key for the plugins cache.
- * @property {'modelsConfig'} MODELS_CONFIG - Key for the model config cache.
- * @property {'defaultConfig'} DEFAULT_CONFIG - Key for the default config cache.
- * @property {'overrideConfig'} OVERRIDE_CONFIG - Key for the override config cache.
- */
-const CacheKeys = {
-  CONFIG: 'config',
-  PLUGINS: 'plugins',
-  MODELS_CONFIG: 'modelsConfig',
-  DEFAULT_CONFIG: 'defaultConfig',
-  OVERRIDE_CONFIG: 'overrideConfig',
-};
-
-module.exports = { CacheKeys };

@@ -18,36 +18,29 @@ const convoSchema = mongoose.Schema(
     user: {
       type: String,
       index: true,
-      // default: null,
     },
     messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }],
     // google only
-    examples: [{ type: mongoose.Schema.Types.Mixed }],
+    examples: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
     agentOptions: {
       type: mongoose.Schema.Types.Mixed,
-      // default: null,
     },
     ...conversationPreset,
     // for bingAI only
     bingConversationId: {
       type: String,
-      // default: null,
     },
     jailbreakConversationId: {
       type: String,
-      // default: null,
     },
     conversationSignature: {
       type: String,
-      // default: null,
     },
     clientId: {
       type: String,
-      // default: null,
     },
     invocationId: {
       type: Number,
-      // default: 1,
     },
   },
   { timestamps: true },

@@ -5,6 +5,9 @@ const conversationPreset = {
     default: null,
     required: true,
   },
+  endpointType: {
+    type: String,
+  },
   // for azureOpenAI, openAI, chatGPTBrowser only
   model: {
     type: String,

@@ -95,7 +98,6 @@ const agentOptions = {
     // default: null,
     required: false,
   },
-  // for google only
   modelLabel: {
     type: String,
     // default: null,

@@ -82,22 +82,26 @@ const messageSchema = mongoose.Schema(
       select: false,
       default: false,
     },
-    files: [{ type: mongoose.Schema.Types.Mixed }],
+    files: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
     plugin: {
-      latest: {
-        type: String,
-        required: false,
-      },
-      inputs: {
-        type: [mongoose.Schema.Types.Mixed],
-        required: false,
-      },
-      outputs: {
-        type: String,
-        required: false,
+      type: {
+        latest: {
+          type: String,
+          required: false,
+        },
+        inputs: {
+          type: [mongoose.Schema.Types.Mixed],
+          required: false,
+          default: undefined,
+        },
+        outputs: {
+          type: String,
+          required: false,
+        },
       },
+      default: undefined,
     },
-    plugins: [{ type: mongoose.Schema.Types.Mixed }],
+    plugins: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
   },
   { timestamps: true },
 );

@@ -9,6 +9,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
     text,
     endpointOption,
     conversationId,
+    modelDisplayLabel,
     parentMessageId = null,
     overrideParentMessageId = null,
   } = req.body;

@@ -22,7 +23,11 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
   let responseMessageId;
   let lastSavedTimestamp = 0;
   let saveDelay = 100;
-  const sender = getResponseSender({ ...endpointOption, model: endpointOption.modelOptions.model });
+  const sender = getResponseSender({
+    ...endpointOption,
+    model: endpointOption.modelOptions.model,
+    modelDisplayLabel,
+  });
   const newConvo = !conversationId;
   const user = req.user.id;

@@ -10,6 +10,7 @@ const EditController = async (req, res, next, initializeClient) => {
     generation,
     endpointOption,
     conversationId,
+    modelDisplayLabel,
     responseMessageId,
     isContinued = false,
     parentMessageId = null,

@@ -29,7 +30,11 @@ const EditController = async (req, res, next, initializeClient) => {
   let promptTokens;
   let lastSavedTimestamp = 0;
   let saveDelay = 100;
-  const sender = getResponseSender({ ...endpointOption, model: endpointOption.modelOptions.model });
+  const sender = getResponseSender({
+    ...endpointOption,
+    model: endpointOption.modelOptions.model,
+    modelDisplayLabel,
+  });
   const userMessageId = parentMessageId;
   const user = req.user.id;

@@ -1,17 +1,22 @@
+const { CacheKeys } = require('librechat-data-provider');
+const { loadDefaultEndpointsConfig, loadConfigEndpoints } = require('~/server/services/Config');
 const { getLogStores } = require('~/cache');
-const { CacheKeys } = require('~/common/enums');
-const { loadDefaultEndpointsConfig } = require('~/server/services/Config');

 async function endpointController(req, res) {
-  const cache = getLogStores(CacheKeys.CONFIG);
-  const config = await cache.get(CacheKeys.DEFAULT_CONFIG);
-  if (config) {
-    res.send(config);
+  const cache = getLogStores(CacheKeys.CONFIG_STORE);
+  const cachedEndpointsConfig = await cache.get(CacheKeys.ENDPOINT_CONFIG);
+  if (cachedEndpointsConfig) {
+    res.send(cachedEndpointsConfig);
     return;
   }
-  const defaultConfig = await loadDefaultEndpointsConfig();
-  await cache.set(CacheKeys.DEFAULT_CONFIG, defaultConfig);
-  res.send(JSON.stringify(defaultConfig));
+
+  const defaultEndpointsConfig = await loadDefaultEndpointsConfig();
+  const customConfigEndpoints = await loadConfigEndpoints();
+
+  const endpointsConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
+  await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig);
+  res.send(JSON.stringify(endpointsConfig));
 }

 module.exports = endpointController;

@@ -1,15 +1,19 @@
+const { CacheKeys } = require('librechat-data-provider');
+const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config');
 const { getLogStores } = require('~/cache');
-const { CacheKeys } = require('~/common/enums');
-const { loadDefaultModels } = require('~/server/services/Config');

 async function modelController(req, res) {
-  const cache = getLogStores(CacheKeys.CONFIG);
-  let modelConfig = await cache.get(CacheKeys.MODELS_CONFIG);
-  if (modelConfig) {
-    res.send(modelConfig);
+  const cache = getLogStores(CacheKeys.CONFIG_STORE);
+  const cachedModelsConfig = await cache.get(CacheKeys.MODELS_CONFIG);
+  if (cachedModelsConfig) {
+    res.send(cachedModelsConfig);
     return;
   }
-  modelConfig = await loadDefaultModels();
+  const defaultModelsConfig = await loadDefaultModels();
+  const customModelsConfig = await loadConfigModels();
+  const modelConfig = { ...defaultModelsConfig, ...customModelsConfig };

   await cache.set(CacheKeys.MODELS_CONFIG, modelConfig);
   res.send(modelConfig);
 }

@@ -1,9 +1,9 @@
-const { getLogStores } = require('~/cache');
-const { CacheKeys } = require('~/common/enums');
+const { CacheKeys } = require('librechat-data-provider');
 const { loadOverrideConfig } = require('~/server/services/Config');
+const { getLogStores } = require('~/cache');

 async function overrideController(req, res) {
-  const cache = getLogStores(CacheKeys.CONFIG);
+  const cache = getLogStores(CacheKeys.CONFIG_STORE);
   let overrideConfig = await cache.get(CacheKeys.OVERRIDE_CONFIG);
   if (overrideConfig) {
     res.send(overrideConfig);

@@ -15,7 +15,7 @@ async function overrideController(req, res) {
   overrideConfig = await loadOverrideConfig();
   const { endpointsConfig, modelsConfig } = overrideConfig;
   if (endpointsConfig) {
-    await cache.set(CacheKeys.DEFAULT_CONFIG, endpointsConfig);
+    await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig);
   }
   if (modelsConfig) {
     await cache.set(CacheKeys.MODELS_CONFIG, modelsConfig);

@@ -1,7 +1,7 @@
 const path = require('path');
 const { promises: fs } = require('fs');
+const { CacheKeys } = require('librechat-data-provider');
 const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
-const { CacheKeys } = require('~/common/enums');
 const { getLogStores } = require('~/cache');

 const filterUniquePlugins = (plugins) => {

@@ -29,7 +29,7 @@ const isPluginAuthenticated = (plugin) => {
 const getAvailablePluginsController = async (req, res) => {
   try {
-    const cache = getLogStores(CacheKeys.CONFIG);
+    const cache = getLogStores(CacheKeys.CONFIG_STORE);
     const cachedPlugins = await cache.get(CacheKeys.PLUGINS);
     if (cachedPlugins) {
       res.status(200).json(cachedPlugins);

@@ -5,14 +5,15 @@ const express = require('express');
 const passport = require('passport');
 const mongoSanitize = require('express-mongo-sanitize');
 const { initializeFirebase } = require('~/server/services/Files/Firebase/initialize');
-const errorController = require('./controllers/ErrorController');
-const configureSocialLogins = require('./socialLogins');
+const loadCustomConfig = require('~/server/services/Config/loadCustomConfig');
+const errorController = require('~/server/controllers/ErrorController');
+const configureSocialLogins = require('~/server/socialLogins');
+const noIndex = require('~/server/middleware/noIndex');
 const { connectDb, indexSync } = require('~/lib/db');
 const { logger } = require('~/config');
-const noIndex = require('./middleware/noIndex');
+const routes = require('~/server/routes');
 const paths = require('~/config/paths');
-const routes = require('./routes');

 const { PORT, HOST, ALLOW_SOCIAL_LOGIN } = process.env ?? {};

@@ -24,6 +25,7 @@ const { jwtLogin, passportLogin } = require('~/strategies');
 const startServer = async () => {
   await connectDb();
   logger.info('Connected to MongoDB');
+  await loadCustomConfig();
   initializeFirebase();
   await indexSync();

@@ -1,5 +1,6 @@
 const { processFiles } = require('~/server/services/Files');
 const openAI = require('~/server/services/Endpoints/openAI');
+const custom = require('~/server/services/Endpoints/custom');
 const google = require('~/server/services/Endpoints/google');
 const anthropic = require('~/server/services/Endpoints/anthropic');
 const gptPlugins = require('~/server/services/Endpoints/gptPlugins');

@@ -8,15 +9,20 @@ const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
 const buildFunction = {
   [EModelEndpoint.openAI]: openAI.buildOptions,
   [EModelEndpoint.google]: google.buildOptions,
+  [EModelEndpoint.custom]: custom.buildOptions,
   [EModelEndpoint.azureOpenAI]: openAI.buildOptions,
   [EModelEndpoint.anthropic]: anthropic.buildOptions,
   [EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
 };

 function buildEndpointOption(req, res, next) {
-  const { endpoint } = req.body;
-  const parsedBody = parseConvo(endpoint, req.body);
-  req.body.endpointOption = buildFunction[endpoint](endpoint, parsedBody);
+  const { endpoint, endpointType } = req.body;
+  const parsedBody = parseConvo({ endpoint, endpointType, conversation: req.body });
+  req.body.endpointOption = buildFunction[endpointType ?? endpoint](
+    endpoint,
+    parsedBody,
+    endpointType,
+  );
   if (req.body.files) {
     // hold the promise
     req.body.endpointOption.attachments = processFiles(req.body.files);

@@ -1,7 +1,8 @@
 const { handleError } = require('../utils');

 function validateEndpoint(req, res, next) {
-  const { endpoint } = req.body;
+  const { endpoint: _endpoint, endpointType } = req.body;
+  const endpoint = endpointType ?? _endpoint;

   if (!req.body.text || req.body.text.length === 0) {
     return handleError(res, { text: 'Prompt empty or too short' });

@@ -0,0 +1,20 @@
+const express = require('express');
+const AskController = require('~/server/controllers/AskController');
+const { initializeClient } = require('~/server/services/Endpoints/custom');
+const { addTitle } = require('~/server/services/Endpoints/openAI');
+const {
+  handleAbort,
+  setHeaders,
+  validateEndpoint,
+  buildEndpointOption,
+} = require('~/server/middleware');
+
+const router = express.Router();
+
+router.post('/abort', handleAbort());
+
+router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
+  await AskController(req, res, next, initializeClient, addTitle);
+});
+
+module.exports = router;

@@ -1,5 +1,6 @@
 const express = require('express');
 const openAI = require('./openAI');
+const custom = require('./custom');
 const google = require('./google');
 const bingAI = require('./bingAI');
 const anthropic = require('./anthropic');

@@ -42,5 +43,6 @@ router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins);
 router.use(`/${EModelEndpoint.anthropic}`, anthropic);
 router.use(`/${EModelEndpoint.google}`, google);
 router.use(`/${EModelEndpoint.bingAI}`, bingAI);
+router.use(`/${EModelEndpoint.custom}`, custom);

 module.exports = router;

@@ -0,0 +1,20 @@
+const express = require('express');
+const EditController = require('~/server/controllers/EditController');
+const { initializeClient } = require('~/server/services/Endpoints/custom');
+const { addTitle } = require('~/server/services/Endpoints/openAI');
+const {
+  handleAbort,
+  setHeaders,
+  validateEndpoint,
+  buildEndpointOption,
+} = require('~/server/middleware');
+
+const router = express.Router();
+
+router.post('/abort', handleAbort());
+
+router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
+  await EditController(req, res, next, initializeClient, addTitle);
+});
+
+module.exports = router;

@@ -1,5 +1,6 @@
 const express = require('express');
 const openAI = require('./openAI');
+const custom = require('./custom');
 const google = require('./google');
 const anthropic = require('./anthropic');
 const gptPlugins = require('./gptPlugins');

@@ -38,5 +39,6 @@ router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], open
 router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins);
 router.use(`/${EModelEndpoint.anthropic}`, anthropic);
 router.use(`/${EModelEndpoint.google}`, google);
+router.use(`/${EModelEndpoint.custom}`, custom);

 module.exports = router;

@@ -1,13 +1,19 @@
 const { config } = require('./EndpointService');
+const loadCustomConfig = require('./loadCustomConfig');
+const loadConfigModels = require('./loadConfigModels');
 const loadDefaultModels = require('./loadDefaultModels');
 const loadOverrideConfig = require('./loadOverrideConfig');
 const loadAsyncEndpoints = require('./loadAsyncEndpoints');
+const loadConfigEndpoints = require('./loadConfigEndpoints');
 const loadDefaultEndpointsConfig = require('./loadDefaultEConfig');

 module.exports = {
   config,
+  loadCustomConfig,
+  loadConfigModels,
   loadDefaultModels,
   loadOverrideConfig,
   loadAsyncEndpoints,
+  loadConfigEndpoints,
   loadDefaultEndpointsConfig,
 };

@@ -0,0 +1,54 @@
+const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
+const { isUserProvided, extractEnvVariable } = require('~/server/utils');
+const loadCustomConfig = require('./loadCustomConfig');
+const { getLogStores } = require('~/cache');
+
+/**
+ * Load config endpoints from the cached configuration object
+ * @function loadConfigEndpoints
+ */
+async function loadConfigEndpoints() {
+  const cache = getLogStores(CacheKeys.CONFIG_STORE);
+  let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG);
+
+  if (!customConfig) {
+    customConfig = await loadCustomConfig();
+  }
+
+  if (!customConfig) {
+    return {};
+  }
+
+  const { endpoints = {} } = customConfig ?? {};
+  const endpointsConfig = {};
+
+  if (Array.isArray(endpoints[EModelEndpoint.custom])) {
+    const customEndpoints = endpoints[EModelEndpoint.custom].filter(
+      (endpoint) =>
+        endpoint.baseURL &&
+        endpoint.apiKey &&
+        endpoint.name &&
+        endpoint.models &&
+        (endpoint.models.fetch || endpoint.models.default),
+    );
+
+    for (let i = 0; i < customEndpoints.length; i++) {
+      const endpoint = customEndpoints[i];
+      const { baseURL, apiKey, name, iconURL, modelDisplayLabel } = endpoint;
+
+      const resolvedApiKey = extractEnvVariable(apiKey);
+      const resolvedBaseURL = extractEnvVariable(baseURL);
+
+      endpointsConfig[name] = {
+        type: EModelEndpoint.custom,
+        userProvide: isUserProvided(resolvedApiKey),
+        userProvideURL: isUserProvided(resolvedBaseURL),
+        modelDisplayLabel,
+        iconURL,
+      };
+    }
+  }
+
+  return endpointsConfig;
+}
+
+module.exports = loadConfigEndpoints;

@@ -0,0 +1,79 @@
+const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
+const { isUserProvided, extractEnvVariable } = require('~/server/utils');
+const { fetchModels } = require('~/server/services/ModelService');
+const loadCustomConfig = require('./loadCustomConfig');
+const { getLogStores } = require('~/cache');
+
+/**
+ * Load config endpoints from the cached configuration object
+ * @function loadConfigModels
+ */
+async function loadConfigModels() {
+  const cache = getLogStores(CacheKeys.CONFIG_STORE);
+  let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG);
+
+  if (!customConfig) {
+    customConfig = await loadCustomConfig();
+  }
+
+  if (!customConfig) {
+    return {};
+  }
+
+  const { endpoints = {} } = customConfig ?? {};
+  const modelsConfig = {};
+
+  if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
+    return modelsConfig;
+  }
+
+  const customEndpoints = endpoints[EModelEndpoint.custom].filter(
+    (endpoint) =>
+      endpoint.baseURL &&
+      endpoint.apiKey &&
+      endpoint.name &&
+      endpoint.models &&
+      (endpoint.models.fetch || endpoint.models.default),
+  );
+
+  const fetchPromisesMap = {}; // Map for promises keyed by baseURL
+  const baseUrlToNameMap = {}; // Map to associate baseURLs with names
+
+  for (let i = 0; i < customEndpoints.length; i++) {
+    const endpoint = customEndpoints[i];
+    const { models, name, baseURL, apiKey } = endpoint;
+
+    const API_KEY = extractEnvVariable(apiKey);
+    const BASE_URL = extractEnvVariable(baseURL);
+
+    modelsConfig[name] = [];
+
+    if (models.fetch && !isUserProvided(API_KEY) && !isUserProvided(BASE_URL)) {
+      fetchPromisesMap[BASE_URL] =
+        fetchPromisesMap[BASE_URL] || fetchModels({ baseURL: BASE_URL, apiKey: API_KEY });
+      baseUrlToNameMap[BASE_URL] = baseUrlToNameMap[BASE_URL] || [];
+      baseUrlToNameMap[BASE_URL].push(name);
+      continue;
+    }
+
+    if (Array.isArray(models.default)) {
+      modelsConfig[name] = models.default;
+    }
+  }
+
+  const fetchedData = await Promise.all(Object.values(fetchPromisesMap));
+  const baseUrls = Object.keys(fetchPromisesMap);
+
+  for (let i = 0; i < fetchedData.length; i++) {
+    const currentBaseUrl = baseUrls[i];
+    const modelData = fetchedData[i];
+    const associatedNames = baseUrlToNameMap[currentBaseUrl];
+
+    for (const name of associatedNames) {
+      modelsConfig[name] = modelData;
+    }
+  }
+
+  return modelsConfig;
+}
+
+module.exports = loadConfigModels;

@@ -0,0 +1,41 @@
+const path = require('path');
+const { CacheKeys, configSchema } = require('librechat-data-provider');
+const loadYaml = require('~/utils/loadYaml');
+const { getLogStores } = require('~/cache');
+const { logger } = require('~/config');
+
+const projectRoot = path.resolve(__dirname, '..', '..', '..', '..');
+const configPath = path.resolve(projectRoot, 'librechat.yaml');
+
+/**
+ * Load custom configuration files and caches the object if the `cache` field at root is true.
+ * Validation via parsing the config file with the config schema.
+ * @function loadCustomConfig
+ * @returns {Promise<null | Object>} A promise that resolves to null or the custom config object.
+ */
+async function loadCustomConfig() {
+  const customConfig = loadYaml(configPath);
+  if (!customConfig) {
+    return null;
+  }
+
+  const result = configSchema.strict().safeParse(customConfig);
+  if (!result.success) {
+    logger.error(`Invalid custom config file at ${configPath}`, result.error);
+    return null;
+  } else {
+    logger.info('Loaded custom config file');
+  }
+
+  if (customConfig.cache) {
+    const cache = getLogStores(CacheKeys.CONFIG_STORE);
+    await cache.set(CacheKeys.CUSTOM_CONFIG, customConfig);
+  }
+
+  // TODO: handle remote config
+
+  return customConfig;
+}
+
+module.exports = loadCustomConfig;

@@ -0,0 +1,16 @@
+const buildOptions = (endpoint, parsedBody, endpointType) => {
+  const { chatGptLabel, promptPrefix, ...rest } = parsedBody;
+  const endpointOption = {
+    endpoint,
+    endpointType,
+    chatGptLabel,
+    promptPrefix,
+    modelOptions: {
+      ...rest,
+    },
+  };
+
+  return endpointOption;
+};
+
+module.exports = buildOptions;

@@ -0,0 +1,7 @@
+const initializeClient = require('./initializeClient');
+const buildOptions = require('./buildOptions');
+
+module.exports = {
+  initializeClient,
+  buildOptions,
+};

@@ -0,0 +1,79 @@
+const { EModelEndpoint } = require('librechat-data-provider');
+const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
+const { isUserProvided, extractEnvVariable } = require('~/server/utils');
+const getCustomConfig = require('~/cache/getCustomConfig');
+const { OpenAIClient } = require('~/app');
+
+const { PROXY } = process.env;
+
+const initializeClient = async ({ req, res, endpointOption }) => {
+  const { key: expiresAt, endpoint } = req.body;
+  const customConfig = await getCustomConfig();
+  if (!customConfig) {
+    throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
+  }
+
+  const { endpoints = {} } = customConfig;
+  const customEndpoints = endpoints[EModelEndpoint.custom] ?? [];
+  const endpointConfig = customEndpoints.find((endpointConfig) => endpointConfig.name === endpoint);
+
+  const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey);
+  const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL);
+
+  const customOptions = {
+    addParams: endpointConfig.addParams,
+    dropParams: endpointConfig.dropParams,
+    titleConvo: endpointConfig.titleConvo,
+    titleModel: endpointConfig.titleModel,
+    forcePrompt: endpointConfig.forcePrompt,
+    summaryModel: endpointConfig.summaryModel,
+    modelDisplayLabel: endpointConfig.modelDisplayLabel,
+    titleMethod: endpointConfig.titleMethod ?? 'completion',
+    contextStrategy: endpointConfig.summarize ? 'summarize' : null,
+  };
+
+  const useUserKey = isUserProvided(CUSTOM_API_KEY);
+  const useUserURL = isUserProvided(CUSTOM_BASE_URL);
+
+  let userValues = null;
+  if (expiresAt && (useUserKey || useUserURL)) {
+    checkUserKeyExpiry(
+      expiresAt,
+      `Your API values for ${endpoint} have expired. Please configure them again.`,
+    );
+    userValues = await getUserKey({ userId: req.user.id, name: endpoint });
+    try {
+      userValues = JSON.parse(userValues);
+    } catch (e) {
+      throw new Error(`Invalid JSON provided for ${endpoint} user values.`);
+    }
+  }
+
+  let apiKey = useUserKey ? userValues.apiKey : CUSTOM_API_KEY;
+  let baseURL = useUserURL ? userValues.baseURL : CUSTOM_BASE_URL;
+
+  if (!apiKey) {
+    throw new Error(`${endpoint} API key not provided.`);
+  }
+
+  if (!baseURL) {
+    throw new Error(`${endpoint} Base URL not provided.`);
+  }
+
+  const clientOptions = {
+    reverseProxyUrl: baseURL ?? null,
+    proxy: PROXY ?? null,
+    req,
+    res,
+    ...customOptions,
+    ...endpointOption,
+  };
+
+  const client = new OpenAIClient(apiKey, clientOptions);
+  return {
+    client,
+    openAIApiKey: apiKey,
+  };
+};
+
+module.exports = initializeClient;
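Since `initializeClient` parses `userValues` with `JSON.parse` and then reads `.apiKey`/`.baseURL`, the user-provided credentials for a custom endpoint are evidently stored as a JSON string of roughly this shape (a sketch; the values are placeholders):

```json
{ "apiKey": "sk-...", "baseURL": "https://example-provider.com/v1" }
```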

@@ -7,6 +7,10 @@ const addTitle = async (req, { text, response, client }) => {
     return;
   }

+  if (client.options.titleConvo === false) {
+    return;
+  }
+
   // If the request was aborted and is not azure, don't generate the title.
   if (!client.azure && client.abortController.signal.aborted) {
     return;

@@ -24,15 +24,53 @@
   PROXY,
 } = process.env ?? {};

+/**
+ * Fetches OpenAI models from the specified base API path or Azure, based on the provided configuration.
+ *
+ * @param {Object} params - The parameters for fetching the models.
+ * @param {string} params.apiKey - The API key for authentication with the API.
+ * @param {string} params.baseURL - The base path URL for the API.
+ * @param {string} [params.name='OpenAI'] - The name of the API; defaults to 'OpenAI'.
+ * @param {boolean} [params.azure=false] - Whether to fetch models from Azure.
+ * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
+ * @async
+ */
+const fetchModels = async ({ apiKey, baseURL, name = 'OpenAI', azure = false }) => {
+  let models = [];
+
+  if (!baseURL && !azure) {
+    return models;
+  }
+
+  try {
+    const payload = {
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+      },
+    };
+
+    if (PROXY) {
+      payload.httpsAgent = new HttpsProxyAgent(PROXY);
+    }
+
+    const res = await axios.get(`${baseURL}${azure ? '' : '/models'}`, payload);
+    models = res.data.data.map((item) => item.id);
+  } catch (err) {
+    logger.error(`Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`, err);
+  }
+
+  return models;
+};
+
 const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _models = []) => {
   let models = _models.slice() ?? [];
   let apiKey = openAIApiKey;
-  let basePath = 'https://api.openai.com/v1';
+  let baseURL = 'https://api.openai.com/v1';
   let reverseProxyUrl = OPENAI_REVERSE_PROXY;
   if (opts.azure) {
     return models;
     // const azure = getAzureCredentials();
-    // basePath = (genAzureChatCompletion(azure))
+    // baseURL = (genAzureChatCompletion(azure))
     //   .split('/deployments')[0]
     //   .concat(`/models?api-version=${azure.azureOpenAIApiVersion}`);
     // apiKey = azureOpenAIApiKey;

@@ -42,32 +80,20 @@ const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _model
   }

   if (reverseProxyUrl) {
-    basePath = extractBaseURL(reverseProxyUrl);
+    baseURL = extractBaseURL(reverseProxyUrl);
   }

-  const cachedModels = await modelsCache.get(basePath);
+  const cachedModels = await modelsCache.get(baseURL);
   if (cachedModels) {
     return cachedModels;
   }

-  if (basePath || opts.azure) {
-    try {
-      const payload = {
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-        },
-      };
-
-      if (PROXY) {
-        payload.httpsAgent = new HttpsProxyAgent(PROXY);
-      }
-
-      const res = await axios.get(`${basePath}${opts.azure ? '' : '/models'}`, payload);
-      models = res.data.data.map((item) => item.id);
-      // logger.debug(`Fetched ${models.length} models from ${opts.azure ? 'Azure ' : ''}OpenAI API`);
-    } catch (err) {
-      logger.error(`Failed to fetch models from ${opts.azure ? 'Azure ' : ''}OpenAI API`, err);
-    }
+  if (baseURL || opts.azure) {
+    models = await fetchModels({
+      apiKey,
+      baseURL,
+      azure: opts.azure,
+    });
   }

   if (!reverseProxyUrl) {

@@ -75,7 +101,7 @@ const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _model
     models = models.filter((model) => regex.test(model));
   }

-  await modelsCache.set(basePath, models);
+  await modelsCache.set(baseURL, models);
   return models;
 };

@@ -142,6 +168,7 @@ const getGoogleModels = () => {
 };

 module.exports = {
+  fetchModels,
   getOpenAIModels,
   getChatGPTBrowserModels,
   getAnthropicModels,
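A usage sketch for the newly exported `fetchModels` helper; the endpoint values are illustrative, and error handling is internal (it resolves to `[]` on failure):

```js
const { fetchModels } = require('~/server/services/ModelService');

async function listMistralModels() {
  // GETs `${baseURL}/models` with a Bearer token and maps the response to model IDs.
  return fetchModels({
    baseURL: 'https://api.mistral.ai/v1', // illustrative
    apiKey: process.env.MISTRAL_API_KEY,
    name: 'Mistral',
  });
}
```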

@@ -165,6 +165,27 @@ function isEnabled(value) {
   return false;
 }

+/**
+ * Checks if the provided value is 'user_provided'.
+ *
+ * @param {string} value - The value to check.
+ * @returns {boolean} - Returns true if the value is 'user_provided', otherwise false.
+ */
+const isUserProvided = (value) => value === 'user_provided';
+
+/**
+ * Extracts the value of an environment variable from a string.
+ * @param {string} value - The value to be processed, possibly containing an env variable placeholder.
+ * @returns {string} - The actual value from the environment variable or the original value.
+ */
+function extractEnvVariable(value) {
+  const envVarMatch = value.match(/^\${(.+)}$/);
+  if (envVarMatch) {
+    return process.env[envVarMatch[1]] || value;
+  }
+  return value;
+}
+
 module.exports = {
   createOnProgress,
   isEnabled,

@@ -172,4 +193,6 @@ module.exports = {
   formatSteps,
   formatAction,
   addSpaceIfNeeded,
+  isUserProvided,
+  extractEnvVariable,
 };

@@ -1,4 +1,4 @@
-const { isEnabled } = require('./handleText');
+const { isEnabled, extractEnvVariable } = require('./handleText');

 describe('isEnabled', () => {
   test('should return true when input is "true"', () => {

@@ -48,4 +48,51 @@ describe('isEnabled', () => {
   test('should return false when input is an array', () => {
     expect(isEnabled([])).toBe(false);
   });
+
+  describe('extractEnvVariable', () => {
+    const originalEnv = process.env;
+
+    beforeEach(() => {
+      jest.resetModules();
+      process.env = { ...originalEnv };
+    });
+
+    afterAll(() => {
+      process.env = originalEnv;
+    });
+
+    test('should return the value of the environment variable', () => {
+      process.env.TEST_VAR = 'test_value';
+      expect(extractEnvVariable('${TEST_VAR}')).toBe('test_value');
+    });
+
+    test('should return the original string if the environment variable is not defined correctly', () => {
+      process.env.TEST_VAR = 'test_value';
+      expect(extractEnvVariable('${ TEST_VAR }')).toBe('${ TEST_VAR }');
+    });
+
+    test('should return the original string if environment variable is not set', () => {
+      expect(extractEnvVariable('${NON_EXISTENT_VAR}')).toBe('${NON_EXISTENT_VAR}');
+    });
+
+    test('should return the original string if it does not contain an environment variable', () => {
+      expect(extractEnvVariable('some_string')).toBe('some_string');
+    });
+
+    test('should handle empty strings', () => {
+      expect(extractEnvVariable('')).toBe('');
+    });
+
+    test('should handle strings without variable format', () => {
+      expect(extractEnvVariable('no_var_here')).toBe('no_var_here');
+    });
+
+    test('should not process multiple variable formats', () => {
+      process.env.FIRST_VAR = 'first';
+      process.env.SECOND_VAR = 'second';
+      expect(extractEnvVariable('${FIRST_VAR} and ${SECOND_VAR}')).toBe(
+        '${FIRST_VAR} and ${SECOND_VAR}',
+      );
+    });
+  });
 });

@@ -1,6 +1,8 @@
 const crypto = require('crypto');
+const { parseConvo } = require('librechat-data-provider');
 const { saveMessage, getMessages } = require('~/models/Message');
 const { getConvo } = require('~/models/Conversation');
+const { logger } = require('~/config');

 /**
  * Sends error data in Server Sent Events format and ends the response.

@@ -65,12 +67,21 @@ const sendError = async (res, options, callback) => {
   if (!errorMessage.error) {
     const requestMessage = { messageId: parentMessageId, conversationId };
-    const query = await getMessages(requestMessage);
+    let query = [],
+      convo = {};
+    try {
+      query = await getMessages(requestMessage);
+      convo = await getConvo(user, conversationId);
+    } catch (err) {
+      logger.error('[sendError] Error retrieving conversation data:', err);
+      convo = parseConvo(errorMessage);
+    }
+
     return sendMessage(res, {
       final: true,
       requestMessage: query?.[0] ? query[0] : requestMessage,
       responseMessage: errorMessage,
-      conversation: await getConvo(user, conversationId),
+      conversation: convo,
     });
   }

View file

@ -20,6 +20,12 @@
* @memberof typedefs * @memberof typedefs
*/ */
/**
* @exports TConfig
* @typedef {import('librechat-data-provider').TConfig} TConfig
* @memberof typedefs
*/
/** /**
* @exports ImageMetadata * @exports ImageMetadata
* @typedef {Object} ImageMetadata * @typedef {Object} ImageMetadata
@ -280,8 +286,8 @@
* @property {boolean|{userProvide: boolean}} [chatGPTBrowser] - Flag to indicate if ChatGPT Browser endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [chatGPTBrowser] - Flag to indicate if ChatGPT Browser endpoint is user provided, or its configuration.
* @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration.
* @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
* @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
* @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean, userProvideURL: boolean, name: string}} [custom] - Custom Endpoint configuration.
* @memberof typedefs * @memberof typedefs
*/ */
@ -313,13 +319,14 @@
* @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration.
* @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
* @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration.
* @property {boolean|{userProvide: boolean, userProvideURL: boolean, name: string}} [custom] - Custom Endpoint configuration.
* @property {boolean|GptPlugins} [gptPlugins] - Configuration for GPT plugins. * @property {boolean|GptPlugins} [gptPlugins] - Configuration for GPT plugins.
* @memberof typedefs * @memberof typedefs
*/ */
/** /**
* @exports EndpointConfig * @exports EndpointConfig
* @typedef {boolean|{userProvide: boolean}|GptPlugins} EndpointConfig * @typedef {boolean|TConfig} EndpointConfig
* @memberof typedefs * @memberof typedefs
*/ */
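
For orientation, a custom endpoint's entry in the merged endpoints config might look like the following; the values are illustrative, not taken from this diff:

const endpointsConfig = {
  openrouter: {
    type: 'custom', // marks the entry as user-defined; the UI maps behavior through this
    userProvide: true, // the user supplies the API key
    userProvideURL: false, // baseURL comes from the config file, not the user
    iconURL: '/assets/openrouter.png',
    modelDisplayLabel: 'OpenRouter',
  },
};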


@ -1,3 +1,4 @@
const loadYaml = require('./loadYaml');
const tokenHelpers = require('./tokens'); const tokenHelpers = require('./tokens');
const azureUtils = require('./azureUtils'); const azureUtils = require('./azureUtils');
const extractBaseURL = require('./extractBaseURL'); const extractBaseURL = require('./extractBaseURL');
@ -8,4 +9,5 @@ module.exports = {
...tokenHelpers, ...tokenHelpers,
extractBaseURL, extractBaseURL,
findMessageContent, findMessageContent,
loadYaml,
}; };

api/utils/loadYaml.js (new file, 13 lines)

@ -0,0 +1,13 @@
const fs = require('fs');
const yaml = require('js-yaml');
function loadYaml(filepath) {
try {
let fileContents = fs.readFileSync(filepath, 'utf8');
return yaml.load(fileContents);
} catch (e) {
console.error(e);
}
}
module.exports = loadYaml;
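
A minimal usage sketch (the require path and config shape are assumptions; note that loadYaml returns undefined when the file is missing or malformed, since the catch block only logs):

const { loadYaml } = require('./api/utils'); // hypothetical path from the project root
const customConfig = loadYaml('./librechat.yaml');
if (!customConfig) {
  // fall back to the default endpoints configuration
}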


@ -39,22 +39,26 @@ const models = [
'gpt-3.5-turbo-0301', 'gpt-3.5-turbo-0301',
]; ];
const openAIModels = {
'gpt-4': 8191,
'gpt-4-0613': 8191,
'gpt-4-32k': 32767,
'gpt-4-32k-0314': 32767,
'gpt-4-32k-0613': 32767,
'gpt-3.5-turbo': 4095,
'gpt-3.5-turbo-0613': 4095,
'gpt-3.5-turbo-0301': 4095,
'gpt-3.5-turbo-16k': 15999,
'gpt-3.5-turbo-16k-0613': 15999,
'gpt-3.5-turbo-1106': 16380, // -5 from max
'gpt-4-1106': 127995, // -5 from max
'mistral-': 31995, // -5 from max
};
// Order is important here: by model series and context size (gpt-4 then gpt-3, ascending) // Order is important here: by model series and context size (gpt-4 then gpt-3, ascending)
const maxTokensMap = { const maxTokensMap = {
[EModelEndpoint.openAI]: { [EModelEndpoint.openAI]: openAIModels,
'gpt-4': 8191, [EModelEndpoint.custom]: openAIModels,
'gpt-4-0613': 8191,
'gpt-4-32k': 32767,
'gpt-4-32k-0314': 32767,
'gpt-4-32k-0613': 32767,
'gpt-3.5-turbo': 4095,
'gpt-3.5-turbo-0613': 4095,
'gpt-3.5-turbo-0301': 4095,
'gpt-3.5-turbo-16k': 15999,
'gpt-3.5-turbo-16k-0613': 15999,
'gpt-3.5-turbo-1106': 16380, // -5 from max
'gpt-4-1106': 127995, // -5 from max
},
[EModelEndpoint.google]: { [EModelEndpoint.google]: {
/* Max I/O is combined so we subtract the amount from max response tokens for actual total */ /* Max I/O is combined so we subtract the amount from max response tokens for actual total */
gemini: 32750, // -10 from max gemini: 32750, // -10 from max
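
The trailing dash in the 'mistral-' key implies prefix matching rather than exact lookup. A sketch of how such a key could resolve, assuming the real helper in api/utils/tokens.js behaves similarly:

function getMaxTokens(model, endpoint = EModelEndpoint.openAI) {
  const map = maxTokensMap[endpoint] ?? {};
  if (map[model] !== undefined) {
    return map[model]; // exact match, e.g. 'gpt-4-0613' -> 8191
  }
  // otherwise use the first key that prefixes the model name, e.g. 'mistral-'
  const prefix = Object.keys(map).find((key) => model.startsWith(key));
  return prefix ? map[prefix] : undefined; // 'mistral-7b-instruct' -> 31995
}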

Two binary image assets added (548 B and 15 KiB); contents not shown in the diff.


@ -1,4 +1,11 @@
import type { TConversation, TMessage, TPreset, TLoginUser, TUser } from 'librechat-data-provider'; import type {
TConversation,
TMessage,
TPreset,
TLoginUser,
TUser,
EModelEndpoint,
} from 'librechat-data-provider';
import type { UseMutationResult } from '@tanstack/react-query'; import type { UseMutationResult } from '@tanstack/react-query';
export type TSetOption = (param: number | string) => (newValue: number | string | boolean) => void; export type TSetOption = (param: number | string) => (newValue: number | string | boolean) => void;
@ -141,7 +148,7 @@ export type TDisplayProps = TText &
export type TConfigProps = { export type TConfigProps = {
userKey: string; userKey: string;
setUserKey: React.Dispatch<React.SetStateAction<string>>; setUserKey: React.Dispatch<React.SetStateAction<string>>;
endpoint: string; endpoint: EModelEndpoint | string;
}; };
export type TDangerButtonProps = { export type TDangerButtonProps = {
@ -194,9 +201,11 @@ export type IconProps = Pick<TMessage, 'isCreatedByUser' | 'model' | 'error'> &
Pick<TConversation, 'chatGptLabel' | 'modelLabel' | 'jailbreak'> & { Pick<TConversation, 'chatGptLabel' | 'modelLabel' | 'jailbreak'> & {
size?: number; size?: number;
button?: boolean; button?: boolean;
iconURL?: string;
message?: boolean; message?: boolean;
className?: string; className?: string;
endpoint?: string | null; endpoint?: EModelEndpoint | string | null;
endpointType?: EModelEndpoint | null;
}; };
export type Option = Record<string, unknown> & { export type Option = Record<string, unknown> & {


@ -30,6 +30,8 @@ export default function ChatForm({ index = 0 }) {
}; };
const { requiresKey } = useRequiresKey(); const { requiresKey } = useRequiresKey();
const { endpoint: _endpoint, endpointType } = conversation ?? { endpoint: null };
const endpoint = endpointType ?? _endpoint;
return ( return (
<form <form
@ -49,9 +51,9 @@ export default function ChatForm({ index = 0 }) {
onChange={(e: ChangeEvent<HTMLTextAreaElement>) => setText(e.target.value)} onChange={(e: ChangeEvent<HTMLTextAreaElement>) => setText(e.target.value)}
setText={setText} setText={setText}
submitMessage={submitMessage} submitMessage={submitMessage}
endpoint={conversation?.endpoint} endpoint={endpoint}
/> />
<AttachFile endpoint={conversation?.endpoint ?? ''} disabled={requiresKey} /> <AttachFile endpoint={endpoint ?? ''} disabled={requiresKey} />
{isSubmitting && showStopButton ? ( {isSubmitting && showStopButton ? (
<StopButton stop={handleStopGenerating} setShowStopButton={setShowStopButton} /> <StopButton stop={handleStopGenerating} setShowStopButton={setShowStopButton} />
) : ( ) : (
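
The endpointType ?? endpoint fallback introduced here recurs throughout the client changes below: a custom endpoint keeps its user-defined name in endpoint, while endpointType records the underlying implementation. With an illustrative conversation of { endpoint: 'openrouter', endpointType: 'custom' }:

// settings, options, and input components resolve by implementation:
const componentKey = conversation.endpointType ?? conversation.endpoint; // 'custom'
// model lists and per-endpoint config still key off the user-defined name:
const models = modelsConfig?.[conversation.endpoint] ?? []; // modelsConfig['openrouter']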


@ -2,7 +2,8 @@ import { useRecoilState } from 'recoil';
import { Settings2 } from 'lucide-react'; import { Settings2 } from 'lucide-react';
import { Root, Anchor } from '@radix-ui/react-popover'; import { Root, Anchor } from '@radix-ui/react-popover';
import { useState, useEffect, useMemo } from 'react'; import { useState, useEffect, useMemo } from 'react';
import { tPresetSchema, EModelEndpoint } from 'librechat-data-provider'; import { tPresetUpdateSchema, EModelEndpoint } from 'librechat-data-provider';
import type { TPreset } from 'librechat-data-provider';
import { EndpointSettings, SaveAsPresetDialog } from '~/components/Endpoints'; import { EndpointSettings, SaveAsPresetDialog } from '~/components/Endpoints';
import { ModelSelect } from '~/components/Input/ModelSelect'; import { ModelSelect } from '~/components/Input/ModelSelect';
import { PluginStoreDialog } from '~/components'; import { PluginStoreDialog } from '~/components';
@ -106,7 +107,11 @@ export default function OptionsBar() {
<SaveAsPresetDialog <SaveAsPresetDialog
open={saveAsDialogShow} open={saveAsDialogShow}
onOpenChange={setSaveAsDialogShow} onOpenChange={setSaveAsDialogShow}
preset={tPresetSchema.parse({ ...conversation })} preset={
tPresetUpdateSchema.parse({
...conversation,
}) as TPreset
}
/> />
<PluginStoreDialog <PluginStoreDialog
isOpen={showPluginStoreDialog} isOpen={showPluginStoreDialog}
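
tPresetSchema constrains endpoint to known EModelEndpoint values, so a conversation on a user-defined endpoint would fail to parse; tPresetUpdateSchema lets arbitrary endpoint names through, and the type assertion narrows the result back to TPreset. A sketch with an illustrative endpoint name:

const preset = tPresetUpdateSchema.parse({
  endpoint: 'openrouter', // not an EModelEndpoint member; accepted by the update schema
  endpointType: EModelEndpoint.custom,
}) as TPreset;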


@ -1,7 +1,8 @@
import { useRecoilState } from 'recoil'; import { useRecoilState } from 'recoil';
import { Settings2 } from 'lucide-react'; import { Settings2 } from 'lucide-react';
import { useState, useEffect, useMemo } from 'react'; import { useState, useEffect, useMemo } from 'react';
import { tPresetSchema, EModelEndpoint } from 'librechat-data-provider'; import { tPresetUpdateSchema, EModelEndpoint } from 'librechat-data-provider';
import type { TPreset } from 'librechat-data-provider';
import { PluginStoreDialog } from '~/components'; import { PluginStoreDialog } from '~/components';
import { import {
EndpointSettings, EndpointSettings,
@ -24,14 +25,8 @@ export default function OptionsBar({ messagesTree }) {
store.showPluginStoreDialog, store.showPluginStoreDialog,
); );
const { const { showPopover, conversation, latestMessage, setShowPopover, setShowBingToneSetting } =
showPopover, useChatContext();
conversation,
latestMessage,
setShowPopover,
setShowBingToneSetting,
textareaHeight,
} = useChatContext();
const { setOption } = useSetIndexOptions(); const { setOption } = useSetIndexOptions();
const { endpoint, conversationId, jailbreak } = conversation ?? {}; const { endpoint, conversationId, jailbreak } = conversation ?? {};
@ -81,14 +76,7 @@ export default function OptionsBar({ messagesTree }) {
? altSettings[endpoint] ? altSettings[endpoint]
: () => setShowPopover((prev) => !prev); : () => setShowPopover((prev) => !prev);
return ( return (
<div <div className="absolute left-0 right-0 mx-auto mb-2 last:mb-2 md:mx-4 md:last:mb-6 lg:mx-auto lg:max-w-2xl xl:max-w-3xl">
className="absolute left-0 right-0 mx-auto mb-2 last:mb-2 md:mx-4 md:last:mb-6 lg:mx-auto lg:max-w-2xl xl:max-w-3xl"
style={{
// TODO: option to hide footer and handle this
// bottom: `${80 + (textareaHeight - 56)}px`, // without footer
bottom: `${85 + (textareaHeight - 56)}px`, // with footer
}}
>
<GenerationButtons <GenerationButtons
endpoint={endpoint} endpoint={endpoint}
showPopover={showPopover} showPopover={showPopover}
@ -151,7 +139,7 @@ export default function OptionsBar({ messagesTree }) {
visible={showPopover} visible={showPopover}
saveAsPreset={saveAsPreset} saveAsPreset={saveAsPreset}
closePopover={() => setShowPopover(false)} closePopover={() => setShowPopover(false)}
PopoverButtons={<PopoverButtons endpoint={endpoint} />} PopoverButtons={<PopoverButtons />}
> >
<div className="px-4 py-4"> <div className="px-4 py-4">
<EndpointSettings <EndpointSettings
@ -164,7 +152,11 @@ export default function OptionsBar({ messagesTree }) {
<SaveAsPresetDialog <SaveAsPresetDialog
open={saveAsDialogShow} open={saveAsDialogShow}
onOpenChange={setSaveAsDialogShow} onOpenChange={setSaveAsDialogShow}
preset={tPresetSchema.parse({ ...conversation })} preset={
tPresetUpdateSchema.parse({
...conversation,
}) as TPreset
}
/> />
<PluginStoreDialog isOpen={showPluginStoreDialog} setIsOpen={setShowPluginStoreDialog} /> <PluginStoreDialog isOpen={showPluginStoreDialog} setIsOpen={setShowPluginStoreDialog} />
</span> </span>


@ -27,7 +27,8 @@ export default function PopoverButtons({
setShowAgentSettings, setShowAgentSettings,
} = useChatContext(); } = useChatContext();
const { model, endpoint } = conversation ?? {}; const { model, endpoint: _endpoint, endpointType } = conversation ?? {};
const endpoint = endpointType ?? _endpoint;
const isGenerativeModel = model?.toLowerCase()?.includes('gemini'); const isGenerativeModel = model?.toLowerCase()?.includes('gemini');
const isChatModel = !isGenerativeModel && model?.toLowerCase()?.includes('chat'); const isChatModel = !isGenerativeModel && model?.toLowerCase()?.includes('chat');
const isTextModel = !isGenerativeModel && !isChatModel && /code|text/.test(model ?? ''); const isTextModel = !isGenerativeModel && !isChatModel && /code|text/.test(model ?? '');


@ -1,10 +1,12 @@
import type { ReactNode } from 'react'; import type { ReactNode } from 'react';
import { useGetEndpointsQuery } from 'librechat-data-provider/react-query';
import { EModelEndpoint } from 'librechat-data-provider'; import { EModelEndpoint } from 'librechat-data-provider';
import { icons } from './Menus/Endpoints/Icons'; import { icons } from './Menus/Endpoints/Icons';
import { useChatContext } from '~/Providers'; import { useChatContext } from '~/Providers';
import { useLocalize } from '~/hooks'; import { useLocalize } from '~/hooks';
export default function Landing({ Header }: { Header?: ReactNode }) { export default function Landing({ Header }: { Header?: ReactNode }) {
const { data: endpointsConfig } = useGetEndpointsQuery();
const { conversation } = useChatContext(); const { conversation } = useChatContext();
const localize = useLocalize(); const localize = useLocalize();
let { endpoint } = conversation ?? {}; let { endpoint } = conversation ?? {};
@ -16,13 +18,22 @@ export default function Landing({ Header }: { Header?: ReactNode }) {
) { ) {
endpoint = EModelEndpoint.openAI; endpoint = EModelEndpoint.openAI;
} }
const iconKey = endpointsConfig?.[endpoint ?? '']?.type ? 'unknown' : endpoint ?? 'unknown';
return ( return (
<div className="relative h-full"> <div className="relative h-full">
<div className="absolute left-0 right-0">{Header && Header}</div> <div className="absolute left-0 right-0">{Header && Header}</div>
<div className="flex h-full flex-col items-center justify-center"> <div className="flex h-full flex-col items-center justify-center">
<div className="mb-3 h-[72px] w-[72px]"> <div className="mb-3 h-[72px] w-[72px]">
<div className="gizmo-shadow-stroke relative flex h-full items-center justify-center rounded-full bg-white text-black"> <div className="gizmo-shadow-stroke relative flex h-full items-center justify-center rounded-full bg-white text-black">
{icons[endpoint ?? 'unknown']({ size: 41, className: 'h-2/3 w-2/3' })} {icons[iconKey]({
size: 41,
context: 'landing',
className: 'h-2/3 w-2/3',
endpoint: endpoint as EModelEndpoint | string,
iconURL: endpointsConfig?.[endpoint ?? '']?.iconURL,
})}
</div> </div>
</div> </div>
<div className="mb-5 text-2xl font-medium dark:text-white"> <div className="mb-5 text-2xl font-medium dark:text-white">


@ -6,8 +6,10 @@ import {
AzureMinimalIcon, AzureMinimalIcon,
BingAIMinimalIcon, BingAIMinimalIcon,
GoogleMinimalIcon, GoogleMinimalIcon,
CustomMinimalIcon,
LightningIcon, LightningIcon,
} from '~/components/svg'; } from '~/components/svg';
import UnknownIcon from './UnknownIcon';
import { cn } from '~/utils'; import { cn } from '~/utils';
export const icons = { export const icons = {
@ -18,6 +20,7 @@ export const icons = {
[EModelEndpoint.chatGPTBrowser]: LightningIcon, [EModelEndpoint.chatGPTBrowser]: LightningIcon,
[EModelEndpoint.google]: GoogleMinimalIcon, [EModelEndpoint.google]: GoogleMinimalIcon,
[EModelEndpoint.bingAI]: BingAIMinimalIcon, [EModelEndpoint.bingAI]: BingAIMinimalIcon,
[EModelEndpoint.custom]: CustomMinimalIcon,
[EModelEndpoint.assistant]: ({ className = '' }) => ( [EModelEndpoint.assistant]: ({ className = '' }) => (
<svg <svg
width="24" width="24"
@ -39,5 +42,5 @@ export const icons = {
></path> ></path>
</svg> </svg>
), ),
unknown: GPTIcon, unknown: UnknownIcon,
}; };
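
The consumers of this map (Landing above, MenuItem and PresetItems below) share one resolution rule: an endpoint whose config entry carries a type is user-defined, so it falls through to UnknownIcon, which can render a configured iconURL:

const iconKey = endpointsConfig?.[endpoint ?? '']?.type ? 'unknown' : endpoint ?? 'unknown';
const Icon = icons[iconKey];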


@ -1,6 +1,7 @@
import { useState } from 'react'; import { useState } from 'react';
import { Settings } from 'lucide-react'; import { Settings } from 'lucide-react';
import { EModelEndpoint } from 'librechat-data-provider'; import { EModelEndpoint } from 'librechat-data-provider';
import { useGetEndpointsQuery } from 'librechat-data-provider/react-query';
import type { FC } from 'react'; import type { FC } from 'react';
import { useLocalize, useUserKey } from '~/hooks'; import { useLocalize, useUserKey } from '~/hooks';
import { SetKeyDialog } from '~/components/Input/SetKeyDialog'; import { SetKeyDialog } from '~/components/Input/SetKeyDialog';
@ -26,7 +27,8 @@ const MenuItem: FC<MenuItemProps> = ({
userProvidesKey, userProvidesKey,
...rest ...rest
}) => { }) => {
const Icon = icons[endpoint] ?? icons.unknown; const { data: endpointsConfig } = useGetEndpointsQuery();
const [isDialogOpen, setDialogOpen] = useState(false); const [isDialogOpen, setDialogOpen] = useState(false);
const { newConversation } = useChatContext(); const { newConversation } = useChatContext();
const { getExpiry } = useUserKey(endpoint); const { getExpiry } = useUserKey(endpoint);
@ -44,6 +46,10 @@ const MenuItem: FC<MenuItemProps> = ({
} }
}; };
const endpointType = endpointsConfig?.[endpoint ?? '']?.type;
const iconKey = endpointType ? 'unknown' : endpoint ?? 'unknown';
const Icon = icons[iconKey];
return ( return (
<> <>
<div <div
@ -56,7 +62,15 @@ const MenuItem: FC<MenuItemProps> = ({
<div className="flex grow items-center justify-between gap-2"> <div className="flex grow items-center justify-between gap-2">
<div> <div>
<div className="flex items-center gap-2"> <div className="flex items-center gap-2">
{<Icon size={18} className="icon-md shrink-0 dark:text-white" />} {
<Icon
size={18}
endpoint={endpoint}
context={'menu-item'}
className="icon-md shrink-0 dark:text-white"
iconURL={endpointsConfig?.[endpoint ?? '']?.iconURL}
/>
}
<div> <div>
{title} {title}
<div className="text-token-text-tertiary">{description}</div> <div className="text-token-text-tertiary">{description}</div>
@ -128,7 +142,13 @@ const MenuItem: FC<MenuItemProps> = ({
</div> </div>
</div> </div>
{userProvidesKey && ( {userProvidesKey && (
<SetKeyDialog open={isDialogOpen} onOpenChange={setDialogOpen} endpoint={endpoint} /> <SetKeyDialog
open={isDialogOpen}
endpoint={endpoint}
endpointType={endpointType}
onOpenChange={setDialogOpen}
userProvideURL={endpointsConfig?.[endpoint ?? '']?.userProvideURL}
/>
)} )}
</> </>
); );


@ -0,0 +1,36 @@
import { EModelEndpoint, KnownEndpoints } from 'librechat-data-provider';
import { CustomMinimalIcon } from '~/components/svg';
export default function UnknownIcon({
className = '',
endpoint,
iconURL,
context,
}: {
iconURL?: string;
className?: string;
endpoint: EModelEndpoint | string | null;
context?: 'landing' | 'menu-item' | 'nav' | 'message';
}) {
if (!endpoint) {
return <CustomMinimalIcon className={className} />;
}
const currentEndpoint = endpoint.toLowerCase();
if (iconURL) {
return <img className={className} src={iconURL} alt={`${endpoint} Icon`} />;
} else if (currentEndpoint === KnownEndpoints.mistral) {
return (
<img
className={context === 'landing' ? '' : className}
src="/assets/mistral.png"
alt="Mistral AI Icon"
/>
);
} else if (currentEndpoint === KnownEndpoints.openrouter) {
return <img className={className} src="/assets/openrouter.png" alt="OpenRouter Icon" />;
}
return <CustomMinimalIcon className={className} />;
}
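
Usage sketch (the endpoint name is illustrative; iconURL would normally come from the endpoints config in the consuming component):

<UnknownIcon
  context="menu-item"
  endpoint="perplexity"
  iconURL={endpointsConfig?.['perplexity']?.iconURL}
  className="icon-md shrink-0 dark:text-white"
/>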


@ -21,7 +21,7 @@ const EndpointsMenu: FC = () => {
} }
return ( return (
<Root> <Root>
<TitleButton primaryText={(alternateName[selected] ?? '') + ' '} /> <TitleButton primaryText={(alternateName[selected] ?? selected ?? '') + ' '} />
<Portal> <Portal>
<div <div
style={{ style={{


@ -77,7 +77,6 @@ const EditPresetDialog = ({
{''} {''}
</Label> </Label>
<PopoverButtons <PopoverButtons
endpoint={endpoint}
buttonClass="ml-0 w-full dark:bg-gray-700 dark:hover:bg-gray-800 p-2 h-[40px] justify-center mt-0" buttonClass="ml-0 w-full dark:bg-gray-700 dark:hover:bg-gray-800 p-2 h-[40px] justify-center mt-0"
iconClass="hidden lg:block w-4" iconClass="hidden lg:block w-4"
/> />


@ -2,6 +2,7 @@ import { Trash2 } from 'lucide-react';
import { useRecoilValue } from 'recoil'; import { useRecoilValue } from 'recoil';
import { Close } from '@radix-ui/react-popover'; import { Close } from '@radix-ui/react-popover';
import { Flipper, Flipped } from 'react-flip-toolkit'; import { Flipper, Flipped } from 'react-flip-toolkit';
import { useGetEndpointsQuery } from 'librechat-data-provider/react-query';
import type { FC } from 'react'; import type { FC } from 'react';
import type { TPreset } from 'librechat-data-provider'; import type { TPreset } from 'librechat-data-provider';
import FileUpload from '~/components/Input/EndpointMenu/FileUpload'; import FileUpload from '~/components/Input/EndpointMenu/FileUpload';
@ -31,6 +32,7 @@ const PresetItems: FC<{
clearAllPresets, clearAllPresets,
onFileSelected, onFileSelected,
}) => { }) => {
const { data: endpointsConfig } = useGetEndpointsQuery();
const defaultPreset = useRecoilValue(store.defaultPreset); const defaultPreset = useRecoilValue(store.defaultPreset);
const localize = useLocalize(); const localize = useLocalize();
return ( return (
@ -93,6 +95,10 @@ const PresetItems: FC<{
return null; return null;
} }
const iconKey = endpointsConfig?.[preset.endpoint ?? '']?.type
? 'unknown'
: preset.endpoint ?? 'unknown';
return ( return (
<Close asChild key={`preset-${preset.presetId}`}> <Close asChild key={`preset-${preset.presetId}`}>
<div key={`preset-${preset.presetId}`}> <div key={`preset-${preset.presetId}`}>
@ -103,8 +109,11 @@ const PresetItems: FC<{
title={getPresetTitle(preset)} title={getPresetTitle(preset)}
disableHover={true} disableHover={true}
onClick={() => onSelectPreset(preset)} onClick={() => onSelectPreset(preset)}
icon={icons[preset.endpoint ?? 'unknown']({ icon={icons[iconKey]({
context: 'menu-item',
iconURL: endpointsConfig?.[preset.endpoint ?? '']?.iconURL,
className: 'icon-md mr-1 dark:text-white', className: 'icon-md mr-1 dark:text-white',
endpoint: preset.endpoint,
})} })}
selected={false} selected={false}
data-testid={`preset-item-${preset}`} data-testid={`preset-item-${preset}`}


@ -3,6 +3,7 @@ import { BookCopy } from 'lucide-react';
import { Content, Portal, Root, Trigger } from '@radix-ui/react-popover'; import { Content, Portal, Root, Trigger } from '@radix-ui/react-popover';
import { EditPresetDialog, PresetItems } from './Presets'; import { EditPresetDialog, PresetItems } from './Presets';
import { useLocalize, usePresets } from '~/hooks'; import { useLocalize, usePresets } from '~/hooks';
import { useChatContext } from '~/Providers';
import { cn } from '~/utils'; import { cn } from '~/utils';
const PresetsMenu: FC = () => { const PresetsMenu: FC = () => {
@ -18,6 +19,7 @@ const PresetsMenu: FC = () => {
submitPreset, submitPreset,
exportPreset, exportPreset,
} = usePresets(); } = usePresets();
const { preset } = useChatContext();
const presets = presetsQuery.data || []; const presets = presetsQuery.data || [];
return ( return (
@ -64,7 +66,7 @@ const PresetsMenu: FC = () => {
</Content> </Content>
</div> </div>
</Portal> </Portal>
<EditPresetDialog submitPreset={submitPreset} exportPreset={exportPreset} /> {preset && <EditPresetDialog submitPreset={submitPreset} exportPreset={exportPreset} />}
</Root> </Root>
); );
}; };


@ -19,7 +19,8 @@ const EditMessage = ({
const textEditor = useRef<HTMLDivElement | null>(null); const textEditor = useRef<HTMLDivElement | null>(null);
const { conversationId, parentMessageId, messageId } = message; const { conversationId, parentMessageId, messageId } = message;
const { endpoint } = conversation ?? { endpoint: null }; const { endpoint: _endpoint, endpointType } = conversation ?? { endpoint: null };
const endpoint = endpointType ?? _endpoint;
const updateMessageMutation = useUpdateMessageMutation(conversationId ?? ''); const updateMessageMutation = useUpdateMessageMutation(conversationId ?? '');
const localize = useLocalize(); const localize = useLocalize();


@ -1,7 +1,7 @@
import { useState } from 'react'; import { useState } from 'react';
import type { TConversation, TMessage } from 'librechat-data-provider'; import type { TConversation, TMessage } from 'librechat-data-provider';
import { Clipboard, CheckMark, EditIcon, RegenerateIcon, ContinueIcon } from '~/components/svg'; import { Clipboard, CheckMark, EditIcon, RegenerateIcon, ContinueIcon } from '~/components/svg';
import { useGenerations, useLocalize } from '~/hooks'; import { useGenerationsByLatest, useLocalize } from '~/hooks';
import { cn } from '~/utils'; import { cn } from '~/utils';
type THoverButtons = { type THoverButtons = {
@ -28,9 +28,10 @@ export default function HoverButtons({
latestMessage, latestMessage,
}: THoverButtons) { }: THoverButtons) {
const localize = useLocalize(); const localize = useLocalize();
const { endpoint } = conversation ?? {}; const { endpoint: _endpoint, endpointType } = conversation ?? {};
const endpoint = endpointType ?? _endpoint;
const [isCopied, setIsCopied] = useState(false); const [isCopied, setIsCopied] = useState(false);
const { hideEditButton, regenerateEnabled, continueSupported } = useGenerations({ const { hideEditButton, regenerateEnabled, continueSupported } = useGenerationsByLatest({
isEditing, isEditing,
isSubmitting, isSubmitting,
message, message,


@ -1,7 +1,10 @@
import { useRecoilValue } from 'recoil'; import { useRecoilValue } from 'recoil';
import { useState, useRef } from 'react'; import { useState, useRef } from 'react';
import { useParams } from 'react-router-dom'; import { useParams } from 'react-router-dom';
import { useUpdateConversationMutation } from 'librechat-data-provider/react-query'; import {
useGetEndpointsQuery,
useUpdateConversationMutation,
} from 'librechat-data-provider/react-query';
import type { MouseEvent, FocusEvent, KeyboardEvent } from 'react'; import type { MouseEvent, FocusEvent, KeyboardEvent } from 'react';
import { useConversations, useNavigateToConvo } from '~/hooks'; import { useConversations, useNavigateToConvo } from '~/hooks';
import { MinimalIcon } from '~/components/Endpoints'; import { MinimalIcon } from '~/components/Endpoints';
@ -15,8 +18,9 @@ type KeyEvent = KeyboardEvent<HTMLInputElement>;
export default function Conversation({ conversation, retainView, toggleNav, i }) { export default function Conversation({ conversation, retainView, toggleNav, i }) {
const { conversationId: currentConvoId } = useParams(); const { conversationId: currentConvoId } = useParams();
const activeConvos = useRecoilValue(store.allConversationsSelector);
const updateConvoMutation = useUpdateConversationMutation(currentConvoId ?? ''); const updateConvoMutation = useUpdateConversationMutation(currentConvoId ?? '');
const activeConvos = useRecoilValue(store.allConversationsSelector);
const { data: endpointsConfig } = useGetEndpointsQuery();
const { refreshConversations } = useConversations(); const { refreshConversations } = useConversations();
const { navigateToConvo } = useNavigateToConvo(); const { navigateToConvo } = useNavigateToConvo();
const { showToast } = useToastContext(); const { showToast } = useToastContext();
@ -86,7 +90,9 @@ export default function Conversation({ conversation, retainView, toggleNav, i })
const icon = MinimalIcon({ const icon = MinimalIcon({
size: 20, size: 20,
iconURL: endpointsConfig?.[conversation.endpoint ?? '']?.iconURL,
endpoint: conversation.endpoint, endpoint: conversation.endpoint,
endpointType: conversation.endpointType,
model: conversation.model, model: conversation.model,
error: false, error: false,
className: 'mr-0', className: 'mr-0',


@ -17,8 +17,9 @@ export default function Settings({
} }
const { settings, multiViewSettings } = getSettings(isMultiChat); const { settings, multiViewSettings } = getSettings(isMultiChat);
const { endpoint } = conversation; const { endpoint: _endpoint, endpointType } = conversation;
const models = modelsConfig?.[endpoint] ?? []; const models = modelsConfig?.[_endpoint] ?? [];
const endpoint = endpointType ?? _endpoint;
const OptionComponent = settings[endpoint]; const OptionComponent = settings[endpoint];
if (OptionComponent) { if (OptionComponent) {


@ -1,9 +1,11 @@
import { EModelEndpoint } from 'librechat-data-provider'; import { EModelEndpoint } from 'librechat-data-provider';
import UnknownIcon from '~/components/Chat/Menus/Endpoints/UnknownIcon';
import { import {
Plugin, Plugin,
GPTIcon, GPTIcon,
AnthropicIcon, AnthropicIcon,
AzureMinimalIcon, AzureMinimalIcon,
CustomMinimalIcon,
PaLMIcon, PaLMIcon,
CodeyIcon, CodeyIcon,
GeminiIcon, GeminiIcon,
@ -13,9 +15,8 @@ import { IconProps } from '~/common';
import { cn } from '~/utils'; import { cn } from '~/utils';
const Icon: React.FC<IconProps> = (props) => { const Icon: React.FC<IconProps> = (props) => {
const { size = 30, isCreatedByUser, button, model = '', endpoint, error, jailbreak } = props;
const { user } = useAuthContext(); const { user } = useAuthContext();
const { size = 30, isCreatedByUser, button, model = '', endpoint, error, jailbreak } = props;
if (isCreatedByUser) { if (isCreatedByUser) {
const username = user?.name || 'User'; const username = user?.name || 'User';
@ -94,8 +95,22 @@ const Icon: React.FC<IconProps> = (props) => {
: `rgba(0, 163, 255, ${button ? 0.75 : 1})`, : `rgba(0, 163, 255, ${button ? 0.75 : 1})`,
name: 'ChatGPT', name: 'ChatGPT',
}, },
[EModelEndpoint.custom]: {
icon: <CustomMinimalIcon size={size * 0.7} />,
name: 'Custom',
},
null: { icon: <GPTIcon size={size * 0.7} />, bg: 'grey', name: 'N/A' }, null: { icon: <GPTIcon size={size * 0.7} />, bg: 'grey', name: 'N/A' },
default: { icon: <GPTIcon size={size * 0.7} />, bg: 'grey', name: 'UNKNOWN' }, default: {
icon: (
<UnknownIcon
iconURL={props.iconURL}
endpoint={endpoint ?? ''}
className="icon-sm"
context="message"
/>
),
name: endpoint,
},
}; };
const { icon, bg, name } = const { icon, bg, name } =


@ -1,4 +1,5 @@
import { EModelEndpoint } from 'librechat-data-provider'; import { EModelEndpoint } from 'librechat-data-provider';
import UnknownIcon from '~/components/Chat/Menus/Endpoints/UnknownIcon';
import { import {
AzureMinimalIcon, AzureMinimalIcon,
OpenAIMinimalIcon, OpenAIMinimalIcon,
@ -6,6 +7,7 @@ import {
PluginMinimalIcon, PluginMinimalIcon,
BingAIMinimalIcon, BingAIMinimalIcon,
GoogleMinimalIcon, GoogleMinimalIcon,
CustomMinimalIcon,
AnthropicIcon, AnthropicIcon,
} from '~/components/svg'; } from '~/components/svg';
import { cn } from '~/utils'; import { cn } from '~/utils';
@ -32,9 +34,23 @@ const MinimalIcon: React.FC<IconProps> = (props) => {
icon: <AnthropicIcon className="icon-md shrink-0 dark:text-white" />, icon: <AnthropicIcon className="icon-md shrink-0 dark:text-white" />,
name: props.modelLabel || 'Claude', name: props.modelLabel || 'Claude',
}, },
[EModelEndpoint.custom]: {
icon: <CustomMinimalIcon />,
name: 'Custom',
},
[EModelEndpoint.bingAI]: { icon: <BingAIMinimalIcon />, name: 'BingAI' }, [EModelEndpoint.bingAI]: { icon: <BingAIMinimalIcon />, name: 'BingAI' },
[EModelEndpoint.chatGPTBrowser]: { icon: <LightningIcon />, name: 'ChatGPT' }, [EModelEndpoint.chatGPTBrowser]: { icon: <LightningIcon />, name: 'ChatGPT' },
default: { icon: <OpenAIMinimalIcon />, name: 'UNKNOWN' }, default: {
icon: (
<UnknownIcon
iconURL={props.iconURL}
endpoint={endpoint}
className="icon-sm"
context="nav"
/>
),
name: endpoint,
},
}; };
const { icon, name } = endpointIcons[endpoint] ?? endpointIcons.default; const { icon, name } = endpointIcons[endpoint] ?? endpointIcons.default;


@ -1,6 +1,4 @@
import TextareaAutosize from 'react-textarea-autosize'; import TextareaAutosize from 'react-textarea-autosize';
import type { TModelSelectProps } from '~/common';
import { ESide } from '~/common';
import { import {
SelectDropDown, SelectDropDown,
Input, Input,
@ -10,9 +8,11 @@ import {
HoverCard, HoverCard,
HoverCardTrigger, HoverCardTrigger,
} from '~/components/ui'; } from '~/components/ui';
import OptionHover from './OptionHover';
import { cn, defaultTextProps, optionText, removeFocusOutlines } from '~/utils/'; import { cn, defaultTextProps, optionText, removeFocusOutlines } from '~/utils/';
import type { TModelSelectProps } from '~/common';
import OptionHover from './OptionHover';
import { useLocalize } from '~/hooks'; import { useLocalize } from '~/hooks';
import { ESide } from '~/common';
export default function Settings({ conversation, setOption, models, readonly }: TModelSelectProps) { export default function Settings({ conversation, setOption, models, readonly }: TModelSelectProps) {
const localize = useLocalize(); const localize = useLocalize();
@ -28,9 +28,6 @@ export default function Settings({ conversation, setOption, models, readonly }:
frequency_penalty: freqP, frequency_penalty: freqP,
presence_penalty: presP, presence_penalty: presP,
} = conversation; } = conversation;
const endpoint = conversation.endpoint || 'openAI';
const isOpenAI = endpoint === 'openAI' || endpoint === 'azureOpenAI';
const setModel = setOption('model'); const setModel = setOption('model');
const setChatGptLabel = setOption('chatGptLabel'); const setChatGptLabel = setOption('chatGptLabel');
const setPromptPrefix = setOption('promptPrefix'); const setPromptPrefix = setOption('promptPrefix');
@ -52,47 +49,43 @@ export default function Settings({ conversation, setOption, models, readonly }:
containerClassName="flex w-full resize-none" containerClassName="flex w-full resize-none"
/> />
</div> </div>
{isOpenAI && ( <div className="grid w-full items-center gap-2">
<> <Label htmlFor="chatGptLabel" className="text-left text-sm font-medium">
<div className="grid w-full items-center gap-2"> {localize('com_endpoint_custom_name')}{' '}
<Label htmlFor="chatGptLabel" className="text-left text-sm font-medium"> <small className="opacity-40">({localize('com_endpoint_default_blank')})</small>
{localize('com_endpoint_custom_name')}{' '} </Label>
<small className="opacity-40">({localize('com_endpoint_default_blank')})</small> <Input
</Label> id="chatGptLabel"
<Input disabled={readonly}
id="chatGptLabel" value={chatGptLabel || ''}
disabled={readonly} onChange={(e) => setChatGptLabel(e.target.value ?? null)}
value={chatGptLabel || ''} placeholder={localize('com_endpoint_openai_custom_name_placeholder')}
onChange={(e) => setChatGptLabel(e.target.value ?? null)} className={cn(
placeholder={localize('com_endpoint_openai_custom_name_placeholder')} defaultTextProps,
className={cn( 'dark:bg-gray-700 dark:hover:bg-gray-700/60 dark:focus:bg-gray-700',
defaultTextProps, 'flex h-10 max-h-10 w-full resize-none px-3 py-2',
'dark:bg-gray-700 dark:hover:bg-gray-700/60 dark:focus:bg-gray-700', removeFocusOutlines,
'flex h-10 max-h-10 w-full resize-none px-3 py-2', )}
removeFocusOutlines, />
)} </div>
/> <div className="grid w-full items-center gap-2">
</div> <Label htmlFor="promptPrefix" className="text-left text-sm font-medium">
<div className="grid w-full items-center gap-2"> {localize('com_endpoint_prompt_prefix')}{' '}
<Label htmlFor="promptPrefix" className="text-left text-sm font-medium"> <small className="opacity-40">({localize('com_endpoint_default_blank')})</small>
{localize('com_endpoint_prompt_prefix')}{' '} </Label>
<small className="opacity-40">({localize('com_endpoint_default_blank')})</small> <TextareaAutosize
</Label> id="promptPrefix"
<TextareaAutosize disabled={readonly}
id="promptPrefix" value={promptPrefix || ''}
disabled={readonly} onChange={(e) => setPromptPrefix(e.target.value ?? null)}
value={promptPrefix || ''} placeholder={localize('com_endpoint_openai_prompt_prefix_placeholder')}
onChange={(e) => setPromptPrefix(e.target.value ?? null)} className={cn(
placeholder={localize('com_endpoint_openai_prompt_prefix_placeholder')} defaultTextProps,
className={cn( 'dark:bg-gray-700 dark:hover:bg-gray-700/60 dark:focus:bg-gray-700',
defaultTextProps, 'flex max-h-[138px] min-h-[100px] w-full resize-none px-3 py-2 ',
'dark:bg-gray-700 dark:hover:bg-gray-700/60 dark:focus:bg-gray-700', )}
'flex max-h-[138px] min-h-[100px] w-full resize-none px-3 py-2 ', />
)} </div>
/>
</div>
</>
)}
</div> </div>
<div className="col-span-5 flex flex-col items-center justify-start gap-6 px-3 sm:col-span-2"> <div className="col-span-5 flex flex-col items-center justify-start gap-6 px-3 sm:col-span-2">
<HoverCard openDelay={300}> <HoverCard openDelay={300}>
@ -101,7 +94,7 @@ export default function Settings({ conversation, setOption, models, readonly }:
<Label htmlFor="temp-int" className="text-left text-sm font-medium"> <Label htmlFor="temp-int" className="text-left text-sm font-medium">
{localize('com_endpoint_temperature')}{' '} {localize('com_endpoint_temperature')}{' '}
<small className="opacity-40"> <small className="opacity-40">
({localize('com_endpoint_default_with_num', isOpenAI ? '1' : '0')}) ({localize('com_endpoint_default_with_num', '1')})
</small> </small>
</Label> </Label>
<InputNumber <InputNumber


@ -1,13 +1,14 @@
import { EModelEndpoint } from 'librechat-data-provider'; import { EModelEndpoint } from 'librechat-data-provider';
import OpenAISettings from './OpenAI';
import BingAISettings from './BingAI';
import AnthropicSettings from './Anthropic';
import { Google, Plugins, GoogleSettings, PluginSettings } from './MultiView';
import type { FC } from 'react'; import type { FC } from 'react';
import type { TModelSelectProps, TBaseSettingsProps, TModels } from '~/common'; import type { TModelSelectProps, TBaseSettingsProps, TModels } from '~/common';
import { Google, Plugins, GoogleSettings, PluginSettings } from './MultiView';
import AnthropicSettings from './Anthropic';
import BingAISettings from './BingAI';
import OpenAISettings from './OpenAI';
const settings: { [key: string]: FC<TModelSelectProps> } = { const settings: { [key: string]: FC<TModelSelectProps> } = {
[EModelEndpoint.openAI]: OpenAISettings, [EModelEndpoint.openAI]: OpenAISettings,
[EModelEndpoint.custom]: OpenAISettings,
[EModelEndpoint.azureOpenAI]: OpenAISettings, [EModelEndpoint.azureOpenAI]: OpenAISettings,
[EModelEndpoint.bingAI]: BingAISettings, [EModelEndpoint.bingAI]: BingAISettings,
[EModelEndpoint.anthropic]: AnthropicSettings, [EModelEndpoint.anthropic]: AnthropicSettings,


@ -28,9 +28,11 @@ export default function ModelSelect({
return null; return null;
} }
const { endpoint } = conversation; const { endpoint: _endpoint, endpointType } = conversation;
const models = modelsConfig?.[_endpoint] ?? [];
const endpoint = endpointType ?? _endpoint;
const OptionComponent = isMultiChat ? multiChatOptions[endpoint] : options[endpoint]; const OptionComponent = isMultiChat ? multiChatOptions[endpoint] : options[endpoint];
const models = modelsConfig?.[endpoint] ?? [];
if (!OptionComponent) { if (!OptionComponent) {
return null; return null;


@ -12,6 +12,7 @@ import PluginsByIndex from './PluginsByIndex';
export const options: { [key: string]: FC<TModelSelectProps> } = { export const options: { [key: string]: FC<TModelSelectProps> } = {
[EModelEndpoint.openAI]: OpenAI, [EModelEndpoint.openAI]: OpenAI,
[EModelEndpoint.custom]: OpenAI,
[EModelEndpoint.azureOpenAI]: OpenAI, [EModelEndpoint.azureOpenAI]: OpenAI,
[EModelEndpoint.bingAI]: BingAI, [EModelEndpoint.bingAI]: BingAI,
[EModelEndpoint.google]: Google, [EModelEndpoint.google]: Google,


@ -0,0 +1,46 @@
import { EModelEndpoint } from 'librechat-data-provider';
import { useFormContext, Controller } from 'react-hook-form';
import InputWithLabel from './InputWithLabel';
const CustomEndpoint = ({
endpoint,
userProvideURL,
}: {
endpoint: EModelEndpoint | string;
userProvideURL?: boolean | null;
}) => {
const { control } = useFormContext();
return (
<form className="flex-wrap">
<Controller
name="apiKey"
control={control}
render={({ field }) => (
<InputWithLabel
id="apiKey"
{...field}
label={`${endpoint} API Key`}
labelClassName="mb-1"
inputClassName="mb-2"
/>
)}
/>
{userProvideURL && (
<Controller
name="baseURL"
control={control}
render={({ field }) => (
<InputWithLabel
id="baseURL"
{...field}
label={`${endpoint} API URL`}
labelClassName="mb-1"
/>
)}
/>
)}
</form>
);
};
export default CustomEndpoint;


@ -1,21 +1,26 @@
import React, { ChangeEvent, FC } from 'react'; import { forwardRef } from 'react';
import type { ChangeEvent, FC, Ref } from 'react';
import { cn, defaultTextPropsLabel, removeFocusOutlines } from '~/utils/'; import { cn, defaultTextPropsLabel, removeFocusOutlines } from '~/utils/';
import { Input, Label } from '~/components/ui'; import { Input, Label } from '~/components/ui';
import { useLocalize } from '~/hooks'; import { useLocalize } from '~/hooks';
interface InputWithLabelProps { interface InputWithLabelProps {
id: string;
value: string; value: string;
onChange: (event: ChangeEvent<HTMLInputElement>) => void;
label: string; label: string;
subLabel?: string; subLabel?: string;
id: string; onChange: (event: ChangeEvent<HTMLInputElement>) => void;
labelClassName?: string;
inputClassName?: string;
ref?: Ref<HTMLInputElement>;
} }
const InputWithLabel: FC<InputWithLabelProps> = ({ value, onChange, label, subLabel, id }) => { const InputWithLabel: FC<InputWithLabelProps> = forwardRef((props, ref) => {
const { id, value, label, subLabel, onChange, labelClassName = '', inputClassName = '' } = props;
const localize = useLocalize(); const localize = useLocalize();
return ( return (
<> <>
<div className="flex flex-row"> <div className={cn('flex flex-row', labelClassName)}>
<Label htmlFor={id} className="text-left text-sm font-medium"> <Label htmlFor={id} className="text-left text-sm font-medium">
{label} {label}
</Label> </Label>
@ -24,21 +29,22 @@ const InputWithLabel: FC<InputWithLabelProps> = ({ value, onChange, label, subLa
)} )}
<br /> <br />
</div> </div>
<Input <Input
id={id} id={id}
data-testid={`input-${id}`} data-testid={`input-${id}`}
value={value ?? ''} value={value ?? ''}
onChange={onChange} onChange={onChange}
ref={ref}
placeholder={`${localize('com_endpoint_config_value')} ${label}`} placeholder={`${localize('com_endpoint_config_value')} ${label}`}
className={cn( className={cn(
defaultTextPropsLabel, defaultTextPropsLabel,
'flex h-10 max-h-10 w-full resize-none px-3 py-2', 'flex h-10 max-h-10 w-full resize-none px-3 py-2',
removeFocusOutlines, removeFocusOutlines,
inputClassName,
)} )}
/> />
</> </>
); );
}; });
export default InputWithLabel; export default InputWithLabel;


@ -1,10 +1,13 @@
import React, { useState } from 'react'; import React, { useState } from 'react';
import { useForm, FormProvider } from 'react-hook-form';
import { EModelEndpoint, alternateName } from 'librechat-data-provider'; import { EModelEndpoint, alternateName } from 'librechat-data-provider';
import type { TDialogProps } from '~/common'; import type { TDialogProps } from '~/common';
import DialogTemplate from '~/components/ui/DialogTemplate'; import DialogTemplate from '~/components/ui/DialogTemplate';
import { RevokeKeysButton } from '~/components/Nav'; import { RevokeKeysButton } from '~/components/Nav';
import { Dialog, Dropdown } from '~/components/ui'; import { Dialog, Dropdown } from '~/components/ui';
import { useUserKey, useLocalize } from '~/hooks'; import { useUserKey, useLocalize } from '~/hooks';
import { useToastContext } from '~/Providers';
import CustomConfig from './CustomEndpoint';
import GoogleConfig from './GoogleConfig'; import GoogleConfig from './GoogleConfig';
import OpenAIConfig from './OpenAIConfig'; import OpenAIConfig from './OpenAIConfig';
import OtherConfig from './OtherConfig'; import OtherConfig from './OtherConfig';
@ -13,6 +16,7 @@ import HelpText from './HelpText';
const endpointComponents = { const endpointComponents = {
[EModelEndpoint.google]: GoogleConfig, [EModelEndpoint.google]: GoogleConfig,
[EModelEndpoint.openAI]: OpenAIConfig, [EModelEndpoint.openAI]: OpenAIConfig,
[EModelEndpoint.custom]: CustomConfig,
[EModelEndpoint.azureOpenAI]: OpenAIConfig, [EModelEndpoint.azureOpenAI]: OpenAIConfig,
[EModelEndpoint.gptPlugins]: OpenAIConfig, [EModelEndpoint.gptPlugins]: OpenAIConfig,
default: OtherConfig, default: OtherConfig,
@ -31,12 +35,28 @@ const SetKeyDialog = ({
open, open,
onOpenChange, onOpenChange,
endpoint, endpoint,
endpointType,
userProvideURL,
}: Pick<TDialogProps, 'open' | 'onOpenChange'> & { }: Pick<TDialogProps, 'open' | 'onOpenChange'> & {
endpoint: string; endpoint: EModelEndpoint | string;
endpointType?: EModelEndpoint;
userProvideURL?: boolean | null;
}) => { }) => {
const methods = useForm({
defaultValues: {
apiKey: '',
baseURL: '',
// TODO: allow endpoint definitions from user
// name: '',
// TODO: add custom endpoint models defined by user
// models: '',
},
});
const [userKey, setUserKey] = useState(''); const [userKey, setUserKey] = useState('');
const [expiresAtLabel, setExpiresAtLabel] = useState(EXPIRY.TWELVE_HOURS.display); const [expiresAtLabel, setExpiresAtLabel] = useState(EXPIRY.TWELVE_HOURS.display);
const { getExpiry, saveUserKey } = useUserKey(endpoint); const { getExpiry, saveUserKey } = useUserKey(endpoint);
const { showToast } = useToastContext();
const localize = useLocalize(); const localize = useLocalize();
const expirationOptions = Object.values(EXPIRY); const expirationOptions = Object.values(EXPIRY);
@ -48,12 +68,42 @@ const SetKeyDialog = ({
const submit = () => { const submit = () => {
const selectedOption = expirationOptions.find((option) => option.display === expiresAtLabel); const selectedOption = expirationOptions.find((option) => option.display === expiresAtLabel);
const expiresAt = Date.now() + (selectedOption ? selectedOption.value : 0); const expiresAt = Date.now() + (selectedOption ? selectedOption.value : 0);
saveUserKey(userKey, expiresAt);
onOpenChange(false); const saveKey = (key: string) => {
saveUserKey(key, expiresAt);
onOpenChange(false);
};
if (endpoint === EModelEndpoint.custom || endpointType === EModelEndpoint.custom) {
// TODO: handle other user provided options besides baseURL and apiKey
methods.handleSubmit((data) => {
const emptyValues = Object.keys(data).filter((key) => {
if (key === 'baseURL' && !userProvideURL) {
return false;
}
return data[key] === '';
});
if (emptyValues.length > 0) {
showToast({
message: 'The following fields are required: ' + emptyValues.join(', '),
status: 'error',
});
onOpenChange(true);
} else {
saveKey(JSON.stringify(data));
methods.reset();
}
})();
return;
}
saveKey(userKey);
setUserKey(''); setUserKey('');
}; };
const EndpointComponent = endpointComponents[endpoint] ?? endpointComponents['default']; const EndpointComponent =
endpointComponents[endpointType ?? endpoint] ?? endpointComponents['default'];
const expiryTime = getExpiry(); const expiryTime = getExpiry();
return ( return (
@ -77,7 +127,14 @@ const SetKeyDialog = ({
options={expirationOptions.map((option) => option.display)} options={expirationOptions.map((option) => option.display)}
width={185} width={185}
/> />
<EndpointComponent userKey={userKey} setUserKey={setUserKey} endpoint={endpoint} /> <FormProvider {...methods}>
<EndpointComponent
userKey={userKey}
setUserKey={setUserKey}
endpoint={endpoint}
userProvideURL={userProvideURL}
/>
</FormProvider>
<HelpText endpoint={endpoint} /> <HelpText endpoint={endpoint} />
</div> </div>
} }
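
For custom endpoints the dialog therefore persists a composite value rather than a bare key. With illustrative input, saveKey receives:

// JSON.stringify({ apiKey: 'sk-xxxx', baseURL: 'https://api.example.com/v1' })
// -> '{"apiKey":"sk-xxxx","baseURL":"https://api.example.com/v1"}'
// baseURL is only validated as required when the endpoint config sets userProvideURL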


@ -0,0 +1,30 @@
import { cn } from '~/utils';
export default function CustomMinimalIcon({
size = 25,
className = '',
}: {
size?: number;
className?: string;
}) {
return (
<svg
xmlns="http://www.w3.org/2000/svg"
width={size}
height={size}
viewBox="0 0 24 24"
fill="none"
stroke="currentColor"
strokeWidth="2"
strokeLinecap="round"
strokeLinejoin="round"
className={cn('lucide lucide-bot', className)}
>
<path d="M12 8V4H8" />
<rect width="16" height="12" x="4" y="8" rx="2" />
<path d="M2 14h2" />
<path d="M20 14h2" />
<path d="M15 13v2" />
<path d="M9 13v2" />
</svg>
);
}


@ -8,6 +8,7 @@ export { default as Clipboard } from './Clipboard';
export { default as CheckMark } from './CheckMark'; export { default as CheckMark } from './CheckMark';
export { default as CrossIcon } from './CrossIcon'; export { default as CrossIcon } from './CrossIcon';
export { default as LogOutIcon } from './LogOutIcon'; export { default as LogOutIcon } from './LogOutIcon';
export { default as CustomMinimalIcon } from './CustomMinimalIcon';
export { default as LightningIcon } from './LightningIcon'; export { default as LightningIcon } from './LightningIcon';
export { default as AttachmentIcon } from './AttachmentIcon'; export { default as AttachmentIcon } from './AttachmentIcon';
export { default as MessagesSquared } from './MessagesSquared'; export { default as MessagesSquared } from './MessagesSquared';


@ -1 +1,2 @@
export { default as usePresets } from './usePresets'; export { default as usePresets } from './usePresets';
export { default as useGetSender } from './useGetSender';


@ -0,0 +1,15 @@
import { useCallback } from 'react';
import { getResponseSender } from 'librechat-data-provider';
import { useGetEndpointsQuery } from 'librechat-data-provider/react-query';
import type { TEndpointOption, TEndpointsConfig } from 'librechat-data-provider';
export default function useGetSender() {
const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery();
return useCallback(
(endpointOption: TEndpointOption) => {
const { modelDisplayLabel } = endpointsConfig[endpointOption.endpoint ?? ''] ?? {};
return getResponseSender({ ...endpointOption, modelDisplayLabel });
},
[endpointsConfig],
);
}
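
Usage sketch (endpoint and model names are illustrative):

const getSender = useGetSender();
// resolves modelDisplayLabel from the endpoints config before delegating to getResponseSender:
const sender = getSender({ endpoint: 'openrouter', model: 'mistral-7b' } as TEndpointOption);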


@ -1,7 +1,8 @@
import { useEffect, useRef } from 'react';
import debounce from 'lodash/debounce'; import debounce from 'lodash/debounce';
import { TEndpointOption, getResponseSender } from 'librechat-data-provider'; import { useEffect, useRef } from 'react';
import { TEndpointOption } from 'librechat-data-provider';
import type { KeyboardEvent } from 'react'; import type { KeyboardEvent } from 'react';
import useGetSender from '~/hooks/Conversations/useGetSender';
import { useChatContext } from '~/Providers/ChatContext'; import { useChatContext } from '~/Providers/ChatContext';
import useFileHandling from '~/hooks/useFileHandling'; import useFileHandling from '~/hooks/useFileHandling';
import useLocalize from '~/hooks/useLocalize'; import useLocalize from '~/hooks/useLocalize';
@ -14,6 +15,7 @@ export default function useTextarea({ setText, submitMessage, disabled = false }
const isComposing = useRef(false); const isComposing = useRef(false);
const inputRef = useRef<HTMLTextAreaElement | null>(null); const inputRef = useRef<HTMLTextAreaElement | null>(null);
const { handleFiles } = useFileHandling(); const { handleFiles } = useFileHandling();
const getSender = useGetSender();
const localize = useLocalize(); const localize = useLocalize();
const { conversationId, jailbreak } = conversation || {}; const { conversationId, jailbreak } = conversation || {};
@ -59,7 +61,7 @@ export default function useTextarea({ setText, submitMessage, disabled = false }
return localize('com_endpoint_message_not_appendable'); return localize('com_endpoint_message_not_appendable');
} }
const sender = getResponseSender(conversation as TEndpointOption); const sender = getSender(conversation as TEndpointOption);
return `${localize('com_endpoint_message')} ${sender ? sender : 'ChatGPT'}`; return `${localize('com_endpoint_message')} ${sender ? sender : 'ChatGPT'}`;
}; };
@ -82,7 +84,7 @@ export default function useTextarea({ setText, submitMessage, disabled = false }
debouncedSetPlaceholder(); debouncedSetPlaceholder();
return () => debouncedSetPlaceholder.cancel(); return () => debouncedSetPlaceholder.cancel();
}, [conversation, disabled, latestMessage, isNotAppendable, localize]); }, [conversation, disabled, latestMessage, isNotAppendable, localize, getSender]);
const handleKeyDown = (e: KeyEvent) => { const handleKeyDown = (e: KeyEvent) => {
if (e.key === 'Enter' && isSubmitting) { if (e.key === 'Enter' && isSubmitting) {


@ -1,5 +1,6 @@
import { useEffect, useRef } from 'react';
import copy from 'copy-to-clipboard'; import copy from 'copy-to-clipboard';
import { useEffect, useRef } from 'react';
import { useGetEndpointsQuery } from 'librechat-data-provider/react-query';
import type { TMessage } from 'librechat-data-provider'; import type { TMessage } from 'librechat-data-provider';
import type { TMessageProps } from '~/common'; import type { TMessageProps } from '~/common';
import Icon from '~/components/Endpoints/Icon'; import Icon from '~/components/Endpoints/Icon';
@ -7,6 +8,7 @@ import { useChatContext } from '~/Providers';
export default function useMessageHelpers(props: TMessageProps) { export default function useMessageHelpers(props: TMessageProps) {
const latestText = useRef(''); const latestText = useRef('');
const { data: endpointsConfig } = useGetEndpointsQuery();
const { message, currentEditId, setCurrentEditId } = props; const { message, currentEditId, setCurrentEditId } = props;
const { const {
@ -51,6 +53,7 @@ export default function useMessageHelpers(props: TMessageProps) {
const icon = Icon({ const icon = Icon({
...conversation, ...conversation,
...(message as TMessage), ...(message as TMessage),
iconURL: endpointsConfig?.[conversation?.endpoint ?? '']?.iconURL,
model: message?.model ?? conversation?.model, model: message?.model ?? conversation?.model,
size: 28.8, size: 28.8,
}); });


@ -1,18 +1,20 @@
import { v4 } from 'uuid'; import { v4 } from 'uuid';
import { useCallback, useState } from 'react'; import { useCallback, useState } from 'react';
import { useQueryClient } from '@tanstack/react-query'; import { useQueryClient } from '@tanstack/react-query';
import { QueryKeys, parseCompactConvo } from 'librechat-data-provider';
import { useRecoilState, useResetRecoilState, useSetRecoilState } from 'recoil'; import { useRecoilState, useResetRecoilState, useSetRecoilState } from 'recoil';
import { QueryKeys, parseCompactConvo, getResponseSender } from 'librechat-data-provider'; import { useGetMessagesByConvoId, useGetEndpointsQuery } from 'librechat-data-provider/react-query';
import { useGetMessagesByConvoId } from 'librechat-data-provider/react-query';
import type { import type {
TMessage, TMessage,
TSubmission, TSubmission,
TEndpointOption, TEndpointOption,
TConversation, TConversation,
TEndpointsConfig,
TGetConversationsResponse, TGetConversationsResponse,
} from 'librechat-data-provider'; } from 'librechat-data-provider';
import type { TAskFunction } from '~/common'; import type { TAskFunction } from '~/common';
import useSetFilesToDelete from './useSetFilesToDelete'; import useSetFilesToDelete from './useSetFilesToDelete';
import useGetSender from './Conversations/useGetSender';
import { useAuthContext } from './AuthContext'; import { useAuthContext } from './AuthContext';
import useUserKey from './Input/useUserKey'; import useUserKey from './Input/useUserKey';
import useNewConvo from './useNewConvo'; import useNewConvo from './useNewConvo';
@ -20,10 +22,12 @@ import store from '~/store';
// this to be set somewhere else // this to be set somewhere else
export default function useChatHelpers(index = 0, paramId: string | undefined) { export default function useChatHelpers(index = 0, paramId: string | undefined) {
const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery();
const [files, setFiles] = useRecoilState(store.filesByIndex(index)); const [files, setFiles] = useRecoilState(store.filesByIndex(index));
const [showStopButton, setShowStopButton] = useState(true); const [showStopButton, setShowStopButton] = useState(true);
const [filesLoading, setFilesLoading] = useState(false); const [filesLoading, setFilesLoading] = useState(false);
const setFilesToDelete = useSetFilesToDelete(); const setFilesToDelete = useSetFilesToDelete();
const getSender = useGetSender();
const queryClient = useQueryClient(); const queryClient = useQueryClient();
const { isAuthenticated } = useAuthContext(); const { isAuthenticated } = useAuthContext();
@ -31,7 +35,7 @@ export default function useChatHelpers(index = 0, paramId: string | undefined) {
const { newConversation } = useNewConvo(index); const { newConversation } = useNewConvo(index);
const { useCreateConversationAtom } = store; const { useCreateConversationAtom } = store;
const { conversation, setConversation } = useCreateConversationAtom(index); const { conversation, setConversation } = useCreateConversationAtom(index);
const { conversationId, endpoint } = conversation ?? {}; const { conversationId, endpoint, endpointType } = conversation ?? {};
const queryParam = paramId === 'new' ? paramId : conversationId ?? paramId ?? ''; const queryParam = paramId === 'new' ? paramId : conversationId ?? paramId ?? '';
@ -151,13 +155,21 @@ export default function useChatHelpers(index = 0, paramId: string | undefined) {
const isEditOrContinue = isEdited || isContinued; const isEditOrContinue = isEdited || isContinued;
// set the endpoint option // set the endpoint option
const convo = parseCompactConvo(endpoint, conversation ?? {}); const convo = parseCompactConvo({
endpoint,
endpointType,
conversation: conversation ?? {},
});
const { modelDisplayLabel } = endpointsConfig[endpoint ?? ''] ?? {};
const endpointOption = { const endpointOption = {
...convo, ...convo,
endpoint, endpoint,
endpointType,
modelDisplayLabel,
key: getExpiry(), key: getExpiry(),
} as TEndpointOption; } as TEndpointOption;
const responseSender = getResponseSender({ model: conversation?.model, ...endpointOption }); const responseSender = getSender({ model: conversation?.model, ...endpointOption });
let currentMessages: TMessage[] | null = getMessages() ?? []; let currentMessages: TMessage[] | null = getMessages() ?? [];
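
Put together, a submission against a custom endpoint yields an endpointOption along these lines (values are illustrative):

// {
//   ...convo,                       // parsed, endpoint-compatible conversation fields
//   endpoint: 'openrouter',         // user-defined endpoint name
//   endpointType: 'custom',         // implementation the server should use
//   modelDisplayLabel: 'OpenRouter',
//   key: '2024-01-01T00:00:00.000Z' // expiry returned by getExpiry()
// }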


@@ -7,6 +7,7 @@ import type {
   TSubmission,
   TPreset,
   TModelsConfig,
+  TEndpointsConfig,
 } from 'librechat-data-provider';
 import { buildDefaultConvo, getDefaultEndpoint } from '~/utils';
 import useOriginNavigate from './useOriginNavigate';
@@ -18,7 +19,7 @@ const useConversation = () => {
   const setMessages = useSetRecoilState<TMessagesAtom>(store.messages);
   const setSubmission = useSetRecoilState<TSubmission | null>(store.submission);
   const resetLatestMessage = useResetRecoilState(store.latestMessage);
-  const { data: endpointsConfig = {} } = useGetEndpointsQuery();
+  const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery();
   const switchToConversation = useRecoilCallback(
     ({ snapshot }) =>
@@ -37,6 +38,10 @@
       endpointsConfig,
     });
+    if (!conversation.endpointType && endpointsConfig[defaultEndpoint]?.type) {
+      conversation.endpointType = endpointsConfig[defaultEndpoint]?.type;
+    }
     const models = modelsConfig?.[defaultEndpoint] ?? [];
     conversation = buildDefaultConvo({
       conversation,


@@ -1,13 +1,13 @@
 import { useRecoilValue } from 'recoil';
 import { useGetEndpointsQuery } from 'librechat-data-provider/react-query';
-import type { TConversation, TPreset } from 'librechat-data-provider';
+import type { TConversation, TPreset, TEndpointsConfig } from 'librechat-data-provider';
 import { getDefaultEndpoint, buildDefaultConvo } from '~/utils';
 import store from '~/store';
 type TDefaultConvo = { conversation: Partial<TConversation>; preset?: Partial<TPreset> | null };
 const useDefaultConvo = () => {
-  const { data: endpointsConfig = {} } = useGetEndpointsQuery();
+  const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery();
   const modelsConfig = useRecoilValue(store.modelsConfig);
   const getDefaultConversation = ({ conversation, preset }: TDefaultConvo) => {


@@ -19,6 +19,7 @@ export default function useGenerationsByLatest({
   const { error, messageId, searchResult, finish_reason, isCreatedByUser } = message ?? {};
   const isEditableEndpoint = !![
     EModelEndpoint.openAI,
+    EModelEndpoint.custom,
     EModelEndpoint.google,
     EModelEndpoint.assistant,
     EModelEndpoint.anthropic,
@@ -39,6 +40,7 @@
     !![
       EModelEndpoint.azureOpenAI,
       EModelEndpoint.openAI,
+      EModelEndpoint.custom,
       EModelEndpoint.chatGPTBrowser,
       EModelEndpoint.google,
       EModelEndpoint.bingAI,


@@ -7,7 +7,13 @@ import {
   useRecoilState,
   useRecoilValue,
 } from 'recoil';
-import type { TConversation, TSubmission, TPreset, TModelsConfig } from 'librechat-data-provider';
+import type {
+  TConversation,
+  TSubmission,
+  TPreset,
+  TModelsConfig,
+  TEndpointsConfig,
+} from 'librechat-data-provider';
 import { buildDefaultConvo, getDefaultEndpoint } from '~/utils';
 import { useDeleteFilesMutation } from '~/data-provider';
 import useOriginNavigate from './useOriginNavigate';
@@ -22,7 +28,7 @@ const useNewConvo = (index = 0) => {
   const [files, setFiles] = useRecoilState(store.filesByIndex(index));
   const setSubmission = useSetRecoilState<TSubmission | null>(store.submissionByIndex(index));
   const resetLatestMessage = useResetRecoilState(store.latestMessageFamily(index));
-  const { data: endpointsConfig = {} } = useGetEndpointsQuery();
+  const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery();
   const { mutateAsync } = useDeleteFilesMutation({
     onSuccess: () => {
@@ -62,6 +68,10 @@
       endpointsConfig,
     });
+    if (!conversation.endpointType && endpointsConfig[defaultEndpoint]?.type) {
+      conversation.endpointType = endpointsConfig[defaultEndpoint]?.type;
+    }
     const models = modelsConfig?.[defaultEndpoint] ?? [];
     conversation = buildDefaultConvo({
       conversation,


@@ -1,3 +1,4 @@
+import { v4 } from 'uuid';
 import { useEffect } from 'react';
 import { useParams } from 'react-router-dom';
 import {
@@ -5,7 +6,7 @@ import {
   SSE,
   createPayload,
   tMessageSchema,
-  tConversationSchema,
+  tConvoUpdateSchema,
   EModelEndpoint,
   removeNullishValues,
 } from 'librechat-data-provider';
@@ -152,10 +153,10 @@ export default function useSSE(submission: TSubmission | null, index = 0) {
   let update = {} as TConversation;
   setConversation((prevState) => {
-    update = tConversationSchema.parse({
+    update = tConvoUpdateSchema.parse({
       ...prevState,
       conversationId,
-    });
+    }) as TConversation;
     setStorage(update);
     return update;
@@ -207,10 +208,37 @@ export default function useSSE(submission: TSubmission | null, index = 0) {
     setIsSubmitting(false);
   };
-  const errorHandler = (data: TResData, submission: TSubmission) => {
+  const errorHandler = ({ data, submission }: { data?: TResData; submission: TSubmission }) => {
     const { messages, message } = submission;
-    if (!data.conversationId) {
+    const conversationId = message?.conversationId ?? submission?.conversationId;
+    const parseErrorResponse = (data: TResData | Partial<TMessage>) => {
+      const metadata = data['responseMessage'] ?? data;
+      return tMessageSchema.parse({
+        ...metadata,
+        error: true,
+        parentMessageId: message?.messageId,
+      });
+    };
+    if (!data) {
+      const convoId = conversationId ?? v4();
+      const errorResponse = parseErrorResponse({
+        text: 'Error connecting to server',
+        ...submission,
+        conversationId: convoId,
+      });
+      setMessages([...messages, message, errorResponse]);
+      newConversation({ template: { conversationId: convoId } });
+      setIsSubmitting(false);
+      return;
+    }
+    if (!conversationId && !data.conversationId) {
+      const convoId = v4();
+      const errorResponse = parseErrorResponse(data);
+      setMessages([...messages, message, errorResponse]);
+      newConversation({ template: { conversationId: convoId } });
       setIsSubmitting(false);
       return;
     }
@@ -318,19 +346,20 @@
   abortConversation(message?.conversationId ?? submission?.conversationId, submission);
   events.onerror = function (e: MessageEvent) {
-    console.log('error in opening conn.');
+    console.log('error in server stream.');
     startupConfig?.checkBalance && balanceQuery.refetch();
     events.close();
-    let data = {} as TResData;
+    let data: TResData | undefined = undefined;
     try {
-      data = JSON.parse(e.data);
+      data = JSON.parse(e.data) as TResData;
     } catch (error) {
       console.error(error);
       console.log(e);
     }
-    errorHandler(data, { ...submission, message });
+    errorHandler({ data, submission: { ...submission, message } });
+    events.oncancel();
   };
   setIsSubmitting(true);


@@ -1,5 +1,11 @@
 import { useRecoilValue, useSetRecoilState } from 'recoil';
-import { TPreset, TPlugin, tConversationSchema, EModelEndpoint } from 'librechat-data-provider';
+import {
+  TPreset,
+  TPlugin,
+  tConvoUpdateSchema,
+  EModelEndpoint,
+  TConversation,
+} from 'librechat-data-provider';
 import type { TSetExample, TSetOption, TSetOptionsPayload } from '~/common';
 import usePresetIndexOptions from './usePresetIndexOptions';
 import { useChatContext } from '~/Providers/ChatContext';
@@ -36,11 +42,12 @@ const useSetOptions: TUseSetOptions = (preset = false) => {
       setLastBingSettings({ ...lastBingSettings, jailbreak: newValue });
     }
-    setConversation((prevState) =>
-      tConversationSchema.parse({
-        ...prevState,
-        ...update,
-      }),
+    setConversation(
+      (prevState) =>
+        tConvoUpdateSchema.parse({
+          ...prevState,
+          ...update,
+        }) as TConversation,
     );
   };
@@ -51,11 +58,12 @@
     currentExample[type] = { content: newValue };
     current[i] = currentExample;
     update['examples'] = current;
-    setConversation((prevState) =>
-      tConversationSchema.parse({
-        ...prevState,
-        ...update,
-      }),
+    setConversation(
+      (prevState) =>
+        tConvoUpdateSchema.parse({
+          ...prevState,
+          ...update,
+        }) as TConversation,
    );
   };
@@ -64,11 +72,12 @@
     const current = conversation?.examples?.slice() || [];
     current.push({ input: { content: '' }, output: { content: '' } });
     update['examples'] = current;
-    setConversation((prevState) =>
-      tConversationSchema.parse({
-        ...prevState,
-        ...update,
-      }),
+    setConversation(
+      (prevState) =>
+        tConvoUpdateSchema.parse({
+          ...prevState,
+          ...update,
+        }) as TConversation,
     );
   };
@@ -77,21 +86,23 @@
     const current = conversation?.examples?.slice() || [];
     if (current.length <= 1) {
       update['examples'] = [{ input: { content: '' }, output: { content: '' } }];
-      setConversation((prevState) =>
-        tConversationSchema.parse({
-          ...prevState,
-          ...update,
-        }),
+      setConversation(
+        (prevState) =>
+          tConvoUpdateSchema.parse({
+            ...prevState,
+            ...update,
+          }) as TConversation,
       );
       return;
     }
     current.pop();
     update['examples'] = current;
-    setConversation((prevState) =>
-      tConversationSchema.parse({
-        ...prevState,
-        ...update,
-      }),
+    setConversation(
+      (prevState) =>
+        tConvoUpdateSchema.parse({
+          ...prevState,
+          ...update,
+        }) as TConversation,
     );
   };
@@ -113,11 +124,12 @@
       lastModelUpdate.secondaryModel = newValue;
       setLastModel(lastModelUpdate);
     }
-    setConversation((prevState) =>
-      tConversationSchema.parse({
-        ...prevState,
-        agentOptions,
-      }),
+    setConversation(
+      (prevState) =>
+        tConvoUpdateSchema.parse({
+          ...prevState,
+          agentOptions,
+        }) as TConversation,
     );
   };
@@ -139,11 +151,12 @@
     }
     localStorage.setItem('lastSelectedTools', JSON.stringify(update['tools']));
-    setConversation((prevState) =>
-      tConversationSchema.parse({
-        ...prevState,
-        ...update,
-      }),
+    setConversation(
+      (prevState) =>
+        tConvoUpdateSchema.parse({
+          ...prevState,
+          ...update,
+        }) as TConversation,
     );
   };


@@ -9,6 +9,7 @@ import {
 import { useNewConvo, useConfigOverride } from '~/hooks';
 import ChatView from '~/components/Chat/ChatView';
 import useAuthRedirect from './useAuthRedirect';
+import { Spinner } from '~/components/svg';
 import store from '~/store';
 export default function ChatRoute() {
@@ -51,6 +52,10 @@ export default function ChatRoute() {
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [initialConvoQuery.data, modelsQuery.data, endpointsQuery.data]);
+  if (endpointsQuery.isLoading || modelsQuery.isLoading) {
+    return <Spinner className="m-auto dark:text-white" />;
+  }
   if (!isAuthenticated) {
     return null;
   }


@@ -11,6 +11,7 @@ const defaultConfig: TEndpointsConfig = {
   [EModelEndpoint.gptPlugins]: null,
   [EModelEndpoint.google]: null,
   [EModelEndpoint.anthropic]: null,
+  [EModelEndpoint.custom]: null,
 };
 const endpointsConfig = atom<TEndpointsConfig>({
@@ -55,6 +56,7 @@ const availableEndpoints = selector({
     'bingAI',
     'google',
     'anthropic',
+    'custom',
   ];
   const f = get(endpointsFilter);
   return endpoints.filter((endpoint) => f[endpoint]);


@@ -15,10 +15,12 @@ const buildDefaultConvo = ({
 }) => {
   const { lastSelectedModel, lastSelectedTools, lastBingSettings } = getLocalStorageItems();
   const { jailbreak, toneStyle } = lastBingSettings;
+  const { endpointType } = conversation;
   if (!endpoint) {
     return {
       ...conversation,
+      endpointType,
       endpoint,
     };
   }
@@ -44,13 +46,20 @@
     secondaryModels = [...availableModels];
   }
-  const convo = parseConvo(endpoint, lastConversationSetup, {
-    models: possibleModels,
-    secondaryModels,
+  const convo = parseConvo({
+    endpoint,
+    endpointType,
+    conversation: lastConversationSetup,
+    possibleValues: {
+      models: possibleModels,
+      secondaryModels,
+    },
   });
   const defaultConvo = {
     ...conversation,
     ...convo,
+    endpointType,
     endpoint,
   };


@@ -6,7 +6,7 @@ type TCleanupPreset = {
 };
 const cleanupPreset = ({ preset: _preset }: TCleanupPreset): TPreset => {
-  const { endpoint } = _preset;
+  const { endpoint, endpointType } = _preset;
   if (!endpoint) {
     console.error(`Unknown endpoint ${endpoint}`, _preset);
     return {
@@ -16,12 +16,13 @@ const cleanupPreset = ({ preset: _preset }: TCleanupPreset): TPreset => {
     };
   }
-  const parsedPreset = parseConvo(endpoint, _preset);
+  const parsedPreset = parseConvo({ endpoint, endpointType, conversation: _preset });
   return {
     presetId: _preset?.presetId ?? null,
     ...parsedPreset,
     endpoint,
+    endpointType,
     title: _preset?.title ?? 'New Preset',
   } as TPreset;
 };


@@ -1,4 +1,9 @@
-import type { TConversation, TPreset, TEndpointsConfig } from 'librechat-data-provider';
+import type {
+  TConversation,
+  TPreset,
+  TEndpointsConfig,
+  EModelEndpoint,
+} from 'librechat-data-provider';
 import getLocalStorageItems from './getLocalStorageItems';
 import mapEndpoints from './mapEndpoints';
@@ -42,7 +47,7 @@ const getDefinedEndpoint = (endpointsConfig: TEndpointsConfig) => {
   return endpoints.find((e) => Object.hasOwn(endpointsConfig ?? {}, e));
 };
-const getDefaultEndpoint = ({ convoSetup, endpointsConfig }: TDefaultEndpoint) => {
+const getDefaultEndpoint = ({ convoSetup, endpointsConfig }: TDefaultEndpoint): EModelEndpoint => {
   return (
     getEndpointFromSetup(convoSetup, endpointsConfig) ||
     getEndpointFromLocalStorage(endpointsConfig) ||


@@ -1,20 +1,37 @@
 import { defaultEndpoints } from 'librechat-data-provider';
-import type { TEndpointsConfig } from 'librechat-data-provider';
+import type { EModelEndpoint, TEndpointsConfig } from 'librechat-data-provider';
-const getEndpointsFilter = (config: TEndpointsConfig) => {
+const getEndpointsFilter = (endpointsConfig: TEndpointsConfig) => {
   const filter: Record<string, boolean> = {};
-  for (const key of Object.keys(config)) {
-    filter[key] = !!config[key];
+  for (const key of Object.keys(endpointsConfig)) {
+    filter[key] = !!endpointsConfig[key];
   }
   return filter;
 };
-const getAvailableEndpoints = (filter: Record<string, boolean>) => {
-  const endpoints = defaultEndpoints;
-  return endpoints.filter((endpoint) => filter[endpoint]);
+const getAvailableEndpoints = (
+  filter: Record<string, boolean>,
+  endpointsConfig: TEndpointsConfig,
+) => {
+  const defaultSet = new Set(defaultEndpoints);
+  const availableEndpoints: EModelEndpoint[] = [];
+  for (const endpoint in endpointsConfig) {
+    // Check if endpoint is in the filter or its type is in defaultEndpoints
+    if (
+      filter[endpoint] ||
+      (endpointsConfig[endpoint]?.type && defaultSet.has(endpointsConfig[endpoint].type))
+    ) {
+      availableEndpoints.push(endpoint as EModelEndpoint);
+    }
+  }
+  return availableEndpoints;
 };
-export default function mapEndpoints(config: TEndpointsConfig) {
-  const filter = getEndpointsFilter(config);
-  return getAvailableEndpoints(filter).sort((a, b) => config[a].order - config[b].order);
+export default function mapEndpoints(endpointsConfig: TEndpointsConfig) {
+  const filter = getEndpointsFilter(endpointsConfig);
+  return getAvailableEndpoints(filter, endpointsConfig).sort(
+    (a, b) => (endpointsConfig[a]?.order ?? 0) - (endpointsConfig[b]?.order ?? 0),
+  );
 }
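A quick sketch of the effect (the config shape is simplified and cast for illustration, and the import path is assumed): a user-defined endpoint entry now survives the filter alongside the defaults, and a missing `order` no longer breaks the sort.

```ts
import type { TEndpointsConfig } from 'librechat-data-provider';
import mapEndpoints from '~/utils/mapEndpoints'; // path assumed for illustration

// 'Mistral' is a user-defined endpoint from librechat.yaml; `type` marks it as custom.
const endpointsConfig = {
  openAI: { order: 1 },
  Mistral: { type: 'custom', order: 0 },
} as unknown as TEndpointsConfig;

mapEndpoints(endpointsConfig); // => ['Mistral', 'openAI'], sorted by `order`
```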


@@ -26,7 +26,10 @@ export const getPresetTitle = (preset: TPreset) => {
   let modelInfo = model || '';
   let label = '';
-  if (endpoint && [EModelEndpoint.azureOpenAI, EModelEndpoint.openAI].includes(endpoint)) {
+  if (
+    endpoint &&
+    [EModelEndpoint.azureOpenAI, EModelEndpoint.openAI, EModelEndpoint.custom].includes(endpoint)
+  ) {
     label = chatGptLabel || '';
   } else if (endpoint && [EModelEndpoint.google, EModelEndpoint.anthropic].includes(endpoint)) {
     label = modelLabel || '';


@@ -330,15 +330,21 @@ To use Azure with the Plugins endpoint, make sure the following environment vari
 > See their available models and pricing here: **[Supported Models](https://openrouter.ai/docs#models)**
-OpenRouter is so great, I decided to integrate it to the project as a standalone feature.
+OpenRouter is integrated into LibreChat by overriding the OpenAI endpoint.
-**Setup:**
+**Important**: As of v0.6.6, you can use OpenRouter as its own standalone endpoint:
+![image](https://github.com/danny-avila/LibreChat/assets/110412045/4955bfa3-7b6b-4602-933f-daef89c9eab3)
+### [Review the Custom Config Guide (click here)](./custom_config.md) to add an `OpenRouter` Endpoint
+**Setup (legacy):**
 - Sign up at **[OpenRouter](https://openrouter.ai/)** and create a key. You should name it and set a limit as well.
 - Set the environment variable `OPENROUTER_API_KEY` in your .env file to the key you just created.
 - Set something in `OPENAI_API_KEY`; it can be anything, but **do not** leave it blank or set it to `user_provided`.
 - Restart your LibreChat server and use the OpenAI or Plugins endpoints.
 **Notes:**
 - [TODO] **In the future, you will be able to set up OpenRouter from the frontend as well.**
 - This will override the official OpenAI API or your reverse proxy settings for both Plugins and OpenAI.
 - On initial setup, you may need to refresh your page twice to see all their supported models populate automatically.
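For reference, a minimal standalone `OpenRouter` entry for `librechat.yaml` (a condensed sketch of the example config added in this PR) looks like:

```yaml
endpoints:
  custom:
    - name: "OpenRouter"
      # Use OPENROUTER_KEY, not OPENROUTER_API_KEY, so the openAI endpoint is not overridden.
      apiKey: "${OPENROUTER_KEY}"
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default: ["gpt-3.5-turbo"]
        fetch: true
      modelDisplayLabel: "OpenRouter"
```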


@@ -0,0 +1,221 @@
# LibreChat Configuration Guide
This document provides detailed instructions for configuring the `librechat.yaml` file used by LibreChat.
In future updates, some of the configurations from [your `.env` file](./dotenv.md) will migrate here.
Further customization of the current configurations is also planned.
# Table of Contents
1. [Intro](#librechat-configuration-guide)
- [Configuration Overview](#configuration-overview)
- [1. Version](#1-version)
- [2. Cache Settings](#2-cache-settings)
- [3. Endpoints](#3-endpoints)
- [Endpoint Object Structure](#endpoint-object-structure)
- [Additional Notes](#additional-notes)
- [Default Parameters](#default-parameters)
- [Breakdown of Default Params](#breakdown-of-default-params)
- [Example Config](#example-config)
## Configuration Overview
The `librechat.yaml` file contains several key sections.
**Note:** Fields not specifically mentioned as required are optional.
### 1. Version
- **Key**: `version`
- **Type**: String
- **Description**: Specifies the version of the configuration file.
- **Example**: `version: 1.0.0`
- **Required**
### 2. Cache Settings
- **Key**: `cache`
- **Type**: Boolean
- **Description**: Toggles caching on or off. Set to `true` to enable caching.
- **Example**: `cache: true`
### 3. Endpoints
- **Key**: `endpoints`
- **Type**: Object
- **Description**: Defines custom API endpoints for the application.
- **Sub-Key**: `custom`
- **Type**: Array of Objects
- **Description**: Each object in the array represents a unique endpoint configuration.
- **Required**
#### Endpoint Object Structure
Each endpoint in the `custom` array should have the following structure:
- **name**: A unique name for the endpoint.
- Type: String
- Example: `name: "Mistral"`
- **Required**
- **Note**: Will be used as the "title" in the Endpoints Selector
- **apiKey**: Your API key for the service. Can reference an environment variable, or allow the user to provide the value.
  - Type: String (apiKey | `"user_provided"`)
  - **Example**: `apiKey: "${MISTRAL_API_KEY}"` | `apiKey: "your_api_key"` | `apiKey: "user_provided"`
  - **Required**
  - **Note**: It's highly recommended to use the environment variable reference for this field, i.e. `${YOUR_VARIABLE}` (see the sketch after this list).
- **baseURL**: Base URL for the API. Can reference an environment variable, or allow the user to provide the value.
  - Type: String (baseURL | `"user_provided"`)
  - **Example**: `baseURL: "https://api.mistral.ai/v1"` | `baseURL: "${MISTRAL_BASE_URL}"` | `baseURL: "user_provided"`
  - **Required**
  - **Note**: It's highly recommended to use the environment variable reference for this field, i.e. `${YOUR_VARIABLE}`
- **iconURL**: The URL to use as the Endpoint Icon.
  - Type: String
  - Example: `iconURL: https://github.com/danny-avila/LibreChat/raw/main/docs/assets/LibreChat.svg`
  - **Note**: The following are "known endpoints" (case-insensitive), which have icons provided for them. If your endpoint `name` matches one of these, you should omit this field:
- "Mistral"
- "OpenRouter"
- **models**: Configuration for models.
- **Required**
- **default**: An array of strings indicating the default models to use. At least one value is required.
- Type: Array of Strings
- Example: `default: ["mistral-tiny", "mistral-small", "mistral-medium"]`
- **Note**: If fetching models fails, these defaults are used as a fallback.
- **fetch**: When set to `true`, attempts to fetch a list of models from the API.
- Type: Boolean
- Example: `fetch: true`
- **Note**: May cause slowdowns during initial use of the app if the response is delayed. Defaults to `false`.
- **titleConvo**: Enables title conversation when set to `true`.
- Type: Boolean
- Example: `titleConvo: true`
- **titleMethod**: Chooses between "completion" or "functions" for title method.
- Type: String (`"completion"` | `"functions"`)
- Example: `titleMethod: "completion"`
- **Note**: Defaults to "completion" if omitted.
- **titleModel**: Specifies the model to use for titles.
- Type: String
- Example: `titleModel: "mistral-tiny"`
- **Note**: Defaults to "gpt-3.5-turbo" if omitted. May cause issues if "gpt-3.5-turbo" is not available.
- **summarize**: Enables summarization when set to `true`.
- Type: Boolean
- Example: `summarize: false`
- **Note**: This feature requires an OpenAI Functions compatible API
- **summaryModel**: Specifies the model to use if summarization is enabled.
- Type: String
- Example: `summaryModel: "mistral-tiny"`
- **Note**: Defaults to "gpt-3.5-turbo" if omitted. May cause issues if "gpt-3.5-turbo" is not available.
- **forcePrompt**: If `true`, sends a `prompt` parameter instead of `messages`.
- Type: Boolean
- Example: `forcePrompt: false`
- **Note**: This combines all messages into a single text payload, [following OpenAI format](https://github.com/pvicente/openai-python/blob/main/chatml.md), and uses the `/completions` endpoint of your baseURL rather than `/chat/completions`.
- **modelDisplayLabel**: The label displayed in messages next to the Icon for the current AI model.
- Type: String
- Example: `modelDisplayLabel: "Mistral"`
- **Note**: The display order is:
  1. Custom name set via preset (if available)
  2. Label derived from the model name (if applicable)
  3. This value, `modelDisplayLabel`, is used if the above are not specified. Defaults to "AI".
- **addParams**: Adds additional parameters to requests.
- Type: Object/Dictionary
- **Description**: Adds/Overrides parameters. Useful for specifying API-specific options.
- **Example**:
```yaml
addParams:
safe_mode: true
```
- **dropParams**: Removes default parameters from requests.
- Type: Array/List of Strings
- **Description**: Excludes specified default parameters. Useful for APIs that do not accept or recognize certain parameters.
- **Example**: `dropParams: ["stop", "temperature", "top_p"]`
- **Note**: For a list of default parameters sent with every request, see the "Default Parameters" Section below.
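As a concrete pairing of the `${...}` reference style recommended above, a minimal sketch (the key value is a placeholder, not a real credential):

```yaml
# In your .env file (placeholder value):
#   MISTRAL_API_KEY=sk-your-key-here
apiKey: "${MISTRAL_API_KEY}"
baseURL: "https://api.mistral.ai/v1"
```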
## Additional Notes
- Ensure that all URLs and keys are correctly specified to avoid connectivity issues.
## Default Parameters
Custom endpoints share logic with the OpenAI endpoint, and thus have default parameters tailored to the OpenAI API.
```json
{
  "model": "your-selected-model",
  "temperature": 1,
  "top_p": 1,
  "presence_penalty": 0,
  "frequency_penalty": 0,
  "stop": [
    "||>",
    "\nUser:",
    "<|diff_marker|>"
  ],
  "user": "LibreChat_User_ID",
  "stream": true,
  "messages": [
    {
      "role": "user",
      "content": "hi how are you"
    }
  ]
}
```
### Breakdown of Default Params
- `model`: The selected model from the list of models.
- `temperature`: Defaults to `1` if not provided via preset.
- `top_p`: Defaults to `1` if not provided via preset.
- `presence_penalty`: Defaults to `0` if not provided via preset.
- `frequency_penalty`: Defaults to `0` if not provided via preset.
- `stop`: Sequences where the AI will stop generating further tokens. By default, uses the start token (`||>`), the user label (`\nUser:`), and the end token (`<|diff_marker|>`). Up to 4 sequences can be provided to the [OpenAI API](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop).
- `user`: A unique identifier representing your end-user, which can help OpenAI [monitor and detect abuse](https://platform.openai.com/docs/api-reference/chat/create#chat-create-user).
- `stream`: If set, partial message deltas will be sent, like in ChatGPT. Otherwise, the generation is only available once completed.
- `messages`: [OpenAI format for messages](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages); the `name` field is added to messages with `system` and `assistant` roles when a custom name is specified via preset.
**Note:** The `max_tokens` field is not sent, in order to use the maximum number of tokens available, which is the default OpenAI API behavior. Some alternate APIs require this field, or may default it to a very low value, causing responses to appear cut off; in that case, you should add it via the `addParams` field as shown in the [Endpoint Object Structure](#endpoint-object-structure).
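For example, a sketch of restoring an explicit limit through `addParams` (the value is illustrative, not a recommended setting):

```yaml
addParams:
  max_tokens: 2000 # illustrative; sent with every request to this endpoint
```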
## Example Config
```yaml
version: 1.0.0
cache: true
endpoints:
custom:
# Mistral AI API
- name: "Mistral"
apiKey: "your_api_key"
baseURL: "https://api.mistral.ai/v1"
models:
default: ["mistral-tiny", "mistral-small", "mistral-medium"]
titleConvo: true
titleModel: "mistral-tiny"
summarize: false
summaryModel: "mistral-tiny"
forcePrompt: false
modelDisplayLabel: "Mistral"
addParams:
safe_mode: true
dropParams: ["stop", "temperature", "top_p"]
# OpenRouter.ai API
- name: "OpenRouter"
# Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
apiKey: "${OPENROUTER_KEY}"
baseURL: "https://openrouter.ai/api/v1"
models:
default: ["gpt-3.5-turbo"]
fetch: true
titleConvo: true
titleModel: "gpt-3.5-turbo"
summarize: false
summaryModel: "gpt-3.5-turbo"
forcePrompt: false
modelDisplayLabel: "OpenRouter"
```


@@ -302,12 +302,14 @@ OPENAI_TITLE_MODEL=gpt-3.5-turbo
 OPENAI_SUMMARIZE=true
 ```
-> **Not yet implemented**: this will be a conversation option enabled by default to save users on tokens. We are using the ConversationSummaryBufferMemory method to summarize messages. To learn more about this, see this article: [https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/](https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/)
+> **Experimental**: We are using the ConversationSummaryBufferMemory method to summarize messages. To learn more about this, see this article: [https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/](https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/)
 - Reverse proxy settings for OpenAI:
   - see: [LiteLLM](./litellm.md)
   - see also: [Free AI APIs](./free_ai_apis.md#nagaai)
+**Important**: As of v0.6.6, it's recommended you use the `librechat.yaml` [Configuration file (guide here)](./custom_config.md) to add Reverse Proxies as separate endpoints.
 ```bash
 OPENAI_REVERSE_PROXY=
 ```


@@ -34,6 +34,8 @@ OPENAI_REVERSE_PROXY=https://api.naga.ac/v1/chat/completions
 # OPENAI_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,text-davinci-003,gpt-4,gpt-4-0314,gpt-4-0613
 ```
+**Important**: As of v0.6.6, it's recommended you use the `librechat.yaml` [Configuration file (guide here)](./custom_config.md) to add Reverse Proxies as separate endpoints.
 **Note:** The `OPENAI_MODELS` variable is commented out so that the server can fetch nagaai/api/v1/models for all available models. Uncomment and adjust it if you wish to specify which exact models you want to use.
 It's worth noting that not all models listed by their API will work, with or without this project. The exact URL may also change; just make sure you include `/v1/chat/completions` in the reverse proxy URL if it ever changes.


@@ -7,6 +7,7 @@ weight: 2
 # Configuration
 * ⚙️ [Environment Variables](./dotenv.md)
+* 🖥️ [Custom Config & Endpoints](./configuration/custom_config.md)
 * 🐋 [Docker Compose Override](./docker_override.md)
 ---
 * 🤖 [AI Setup](./ai_setup.md)


@@ -62,6 +62,8 @@ git clone https://github.com/danny-avila/LibreChat.git
 OPENAI_REVERSE_PROXY=http://host.docker.internal:8000/v1/chat/completions
 ```
+**Important**: As of v0.6.6, it's recommended you use the `librechat.yaml` [Configuration file (guide here)](./custom_config.md) to add Reverse Proxies as separate endpoints.
 #### 3. Save fake OpenAI key in LibreChat's `.env`
 Copy LibreChat's `.env.example` to `.env` and overwrite the default OPENAI_API_KEY (by default it requires the user to pass a key).


@@ -17,6 +17,7 @@ weight: 1
 ## **[Configuration](./configuration/index.md)**
 * ⚙️ [Environment Variables](./configuration/dotenv.md)
+* 🖥️ [Custom Config & Endpoints](./configuration/custom_config.md)
 * 🐋 [Docker Compose Override](./configuration/docker_override.md)
 * 🤖 [AI Setup](./configuration/ai_setup.md)
 * 🚅 [LiteLLM](./configuration/litellm.md)
* 🚅 [LiteLLM](./configuration/litellm.md) * 🚅 [LiteLLM](./configuration/litellm.md)

librechat.example.yaml (new file)

@@ -0,0 +1,76 @@
# Configuration version (required)
version: 1.0.0
# Cache settings: Set to true to enable caching
cache: true
# Definition of custom endpoints
endpoints:
custom:
# Mistral AI API
- name: "Mistral" # Unique name for the endpoint
# For `apiKey` and `baseURL`, you can use environment variables that you define.
# recommended environment variables:
apiKey: "${MISTRAL_API_KEY}"
baseURL: "https://api.mistral.ai/v1"
# Models configuration
models:
# List of default models to use. At least one value is required.
default: ["mistral-tiny", "mistral-small", "mistral-medium"]
# Fetch option: Set to true to fetch models from API.
fetch: true # Defaults to false.
# Optional configurations
# Title Conversation setting
titleConvo: true # Set to true to enable title conversation
# Title Method: Choose between "completion" or "functions".
titleMethod: "completion" # Defaults to "completion" if omitted.
# Title Model: Specify the model to use for titles.
titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
# Summarize setting: Set to true to enable summarization.
summarize: false
# Summary Model: Specify the model to use if summarization is enabled.
summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
# Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
forcePrompt: false
# The label displayed for the AI model in messages.
modelDisplayLabel: "Mistral" # Default is "AI" when not set.
# Add additional parameters to the request. Default params will be overwritten.
addParams:
safe_mode: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
      # Drop default parameters from the request. See the default params in the guide linked below.
      dropParams: ["stop", "temperature", "top_p"]
      # - `stop` is dropped since it's not recognized by the Mistral AI API
      # - `temperature` and `top_p` are removed to allow the Mistral AI API defaults to be used
# OpenRouter.ai Example
- name: "OpenRouter"
# For `apiKey` and `baseURL`, you can use environment variables that you define.
# recommended environment variables:
# Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
apiKey: "${OPENROUTER_KEY}"
baseURL: "https://openrouter.ai/api/v1"
models:
default: ["gpt-3.5-turbo"]
fetch: true
titleConvo: true
titleModel: "gpt-3.5-turbo"
summarize: false
summaryModel: "gpt-3.5-turbo"
forcePrompt: false
modelDisplayLabel: "OpenRouter"
# See the Custom Configuration Guide for more information:
# https://docs.librechat.ai/install/configuration/custom_config.html


@@ -0,0 +1,186 @@
import { z } from 'zod';
import { EModelEndpoint, eModelEndpointSchema } from './schemas';
export const endpointSchema = z.object({
name: z.string().refine((value) => !eModelEndpointSchema.safeParse(value).success, {
message: `Value cannot be one of the default endpoint (EModelEndpoint) values: ${Object.values(
EModelEndpoint,
).join(', ')}`,
}),
apiKey: z.string(),
baseURL: z.string(),
models: z.object({
default: z.array(z.string()).min(1),
fetch: z.boolean().optional(),
}),
titleConvo: z.boolean().optional(),
titleMethod: z.union([z.literal('completion'), z.literal('functions')]).optional(),
titleModel: z.string().optional(),
summarize: z.boolean().optional(),
summaryModel: z.string().optional(),
forcePrompt: z.boolean().optional(),
modelDisplayLabel: z.string().optional(),
});
export const configSchema = z.object({
version: z.string(),
cache: z.boolean(),
endpoints: z
.object({
custom: z.array(endpointSchema.partial()),
})
.strict(),
});
export enum KnownEndpoints {
mistral = 'mistral',
openrouter = 'openrouter',
}
export const defaultEndpoints: EModelEndpoint[] = [
EModelEndpoint.openAI,
EModelEndpoint.assistant,
EModelEndpoint.azureOpenAI,
EModelEndpoint.bingAI,
EModelEndpoint.chatGPTBrowser,
EModelEndpoint.gptPlugins,
EModelEndpoint.google,
EModelEndpoint.anthropic,
EModelEndpoint.custom,
];
export const alternateName = {
[EModelEndpoint.openAI]: 'OpenAI',
[EModelEndpoint.assistant]: 'Assistants',
[EModelEndpoint.azureOpenAI]: 'Azure OpenAI',
[EModelEndpoint.bingAI]: 'Bing',
[EModelEndpoint.chatGPTBrowser]: 'ChatGPT',
[EModelEndpoint.gptPlugins]: 'Plugins',
[EModelEndpoint.google]: 'Google',
[EModelEndpoint.anthropic]: 'Anthropic',
[EModelEndpoint.custom]: 'Custom',
};
export const defaultModels = {
[EModelEndpoint.google]: [
'gemini-pro',
'gemini-pro-vision',
'chat-bison',
'chat-bison-32k',
'codechat-bison',
'codechat-bison-32k',
'text-bison',
'text-bison-32k',
'text-unicorn',
'code-gecko',
'code-bison',
'code-bison-32k',
],
[EModelEndpoint.anthropic]: [
'claude-2.1',
'claude-2',
'claude-1.2',
'claude-1',
'claude-1-100k',
'claude-instant-1',
'claude-instant-1-100k',
],
[EModelEndpoint.openAI]: [
'gpt-3.5-turbo-16k-0613',
'gpt-3.5-turbo-16k',
'gpt-4-1106-preview',
'gpt-3.5-turbo',
'gpt-3.5-turbo-1106',
'gpt-4-vision-preview',
'gpt-4',
'gpt-3.5-turbo-instruct-0914',
'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-0301',
'gpt-3.5-turbo-instruct',
'gpt-4-0613',
'text-davinci-003',
'gpt-4-0314',
],
};
export const EndpointURLs: { [key in EModelEndpoint]: string } = {
[EModelEndpoint.openAI]: `/api/ask/${EModelEndpoint.openAI}`,
[EModelEndpoint.bingAI]: `/api/ask/${EModelEndpoint.bingAI}`,
[EModelEndpoint.google]: `/api/ask/${EModelEndpoint.google}`,
[EModelEndpoint.custom]: `/api/ask/${EModelEndpoint.custom}`,
[EModelEndpoint.anthropic]: `/api/ask/${EModelEndpoint.anthropic}`,
[EModelEndpoint.gptPlugins]: `/api/ask/${EModelEndpoint.gptPlugins}`,
[EModelEndpoint.azureOpenAI]: `/api/ask/${EModelEndpoint.azureOpenAI}`,
[EModelEndpoint.chatGPTBrowser]: `/api/ask/${EModelEndpoint.chatGPTBrowser}`,
[EModelEndpoint.assistant]: '/api/assistants/chat',
};
export const modularEndpoints = new Set<EModelEndpoint | string>([
EModelEndpoint.gptPlugins,
EModelEndpoint.anthropic,
EModelEndpoint.google,
EModelEndpoint.openAI,
EModelEndpoint.azureOpenAI,
EModelEndpoint.custom,
]);
export const supportsFiles = {
[EModelEndpoint.openAI]: true,
[EModelEndpoint.google]: true,
[EModelEndpoint.assistant]: true,
[EModelEndpoint.azureOpenAI]: true,
[EModelEndpoint.custom]: true,
};
export const supportsBalanceCheck = {
[EModelEndpoint.openAI]: true,
[EModelEndpoint.azureOpenAI]: true,
[EModelEndpoint.gptPlugins]: true,
[EModelEndpoint.custom]: true,
};
export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision'];
/**
* Enum for cache keys.
*/
export enum CacheKeys {
/**
* Key for the config store namespace.
*/
CONFIG_STORE = 'configStore',
/**
* Key for the plugins cache.
*/
PLUGINS = 'plugins',
/**
* Key for the model config cache.
*/
MODELS_CONFIG = 'modelsConfig',
/**
* Key for the default endpoint config cache.
*/
ENDPOINT_CONFIG = 'endpointsConfig',
/**
* Key for the custom config cache.
*/
CUSTOM_CONFIG = 'customConfig',
/**
* Key for the override config cache.
*/
OVERRIDE_CONFIG = 'overrideConfig',
}
/**
* Enum for authentication keys.
*/
export enum AuthKeys {
/**
* Key for the Service Account to use Vertex AI.
*/
GOOGLE_SERVICE_KEY = 'GOOGLE_SERVICE_KEY',
/**
* API key to use Google Generative AI.
*/
GOOGLE_API_KEY = 'GOOGLE_API_KEY',
}
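As a usage note, here is a minimal sketch of validating a loaded YAML config against `configSchema` (assuming `js-yaml` for parsing; the path and helper name are illustrative, not the server's actual implementation):

```ts
import fs from 'fs';
import yaml from 'js-yaml';
import { configSchema } from 'librechat-data-provider';

// Illustrative helper: read librechat.yaml and validate it with the zod schema.
function loadCustomConfig(configPath = './librechat.yaml') {
  const raw = yaml.load(fs.readFileSync(configPath, 'utf8'));
  const result = configSchema.safeParse(raw);
  if (!result.success) {
    console.error(result.error.format());
    return null;
  }
  return result.data; // typed { version, cache, endpoints } object
}
```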


@@ -1,13 +1,16 @@
-import { tConversationSchema } from './schemas';
 import type { TSubmission, TMessage, TEndpointOption } from './types';
-import { EModelEndpoint, EndpointURLs } from './types';
+import { tConvoUpdateSchema, EModelEndpoint } from './schemas';
+import { EndpointURLs } from './config';
 export default function createPayload(submission: TSubmission) {
   const { conversation, message, messages, endpointOption, isEdited, isContinued } = submission;
-  const { conversationId } = tConversationSchema.parse(conversation);
+  const { conversationId } = tConvoUpdateSchema.parse(conversation);
-  const { endpoint } = endpointOption as { endpoint: EModelEndpoint };
+  const { endpoint, endpointType } = endpointOption as {
+    endpoint: EModelEndpoint;
+    endpointType?: EModelEndpoint;
+  };
-  let server = EndpointURLs[endpoint];
+  let server = EndpointURLs[endpointType ?? endpoint];
   if (isEdited && endpoint === EModelEndpoint.assistant) {
     server += '/modify';
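In effect, the ask route is now chosen by `endpointType` when present, falling back to `endpoint`. A small sketch (the names are illustrative):

```ts
import { EModelEndpoint, EndpointURLs } from 'librechat-data-provider';

const endpoint = 'Mistral' as EModelEndpoint; // user-defined custom endpoint name
const endpointType: EModelEndpoint | undefined = EModelEndpoint.custom;

// Custom endpoints submit to the shared custom ask route:
const server = EndpointURLs[endpointType ?? endpoint]; // '/api/ask/custom'
```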


@@ -1,8 +1,13 @@
-/* types/schemas/schema helpers */
+/* config */
+export * from './config';
+/* schema helpers */
+export * from './parsers';
+/* types (exports schemas from `./types`, as they are needed in other defs) */
 export * from './types';
 export * from './types/assistants';
 export * from './types/files';
 export * from './types/mutations';
+/* query/mutation keys */
 export * from './keys';
 /* api call helpers */
 export * from './headers-helpers';


@@ -0,0 +1,225 @@
import type { TConversation, TPreset } from './schemas';
import type { TEndpointOption } from './types';
import {
EModelEndpoint,
openAISchema,
googleSchema,
bingAISchema,
anthropicSchema,
chatGPTBrowserSchema,
gptPluginsSchema,
assistantSchema,
compactOpenAISchema,
compactGoogleSchema,
compactAnthropicSchema,
compactChatGPTSchema,
compactPluginsSchema,
} from './schemas';
import { alternateName } from './config';
type EndpointSchema =
| typeof openAISchema
| typeof googleSchema
| typeof bingAISchema
| typeof anthropicSchema
| typeof chatGPTBrowserSchema
| typeof gptPluginsSchema
| typeof assistantSchema;
const endpointSchemas: Record<EModelEndpoint, EndpointSchema> = {
[EModelEndpoint.openAI]: openAISchema,
[EModelEndpoint.azureOpenAI]: openAISchema,
[EModelEndpoint.custom]: openAISchema,
[EModelEndpoint.google]: googleSchema,
[EModelEndpoint.bingAI]: bingAISchema,
[EModelEndpoint.anthropic]: anthropicSchema,
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowserSchema,
[EModelEndpoint.gptPlugins]: gptPluginsSchema,
[EModelEndpoint.assistant]: assistantSchema,
};
// const schemaCreators: Record<EModelEndpoint, (customSchema: DefaultSchemaValues) => EndpointSchema> = {
// [EModelEndpoint.google]: createGoogleSchema,
// };
export function getFirstDefinedValue(possibleValues: string[]) {
let returnValue;
for (const value of possibleValues) {
if (value) {
returnValue = value;
break;
}
}
return returnValue;
}
export type TPossibleValues = {
models: string[];
secondaryModels?: string[];
};
export const parseConvo = ({
endpoint,
endpointType,
conversation,
possibleValues,
}: {
endpoint: EModelEndpoint;
endpointType?: EModelEndpoint;
conversation: Partial<TConversation | TPreset>;
possibleValues?: TPossibleValues;
// TODO: POC for default schema
// defaultSchema?: Partial<EndpointSchema>,
}) => {
let schema = endpointSchemas[endpoint];
if (!schema && !endpointType) {
throw new Error(`Unknown endpoint: ${endpoint}`);
} else if (!schema && endpointType) {
schema = endpointSchemas[endpointType];
}
// if (defaultSchema && schemaCreators[endpoint]) {
// schema = schemaCreators[endpoint](defaultSchema);
// }
const convo = schema.parse(conversation) as TConversation;
const { models, secondaryModels } = possibleValues ?? {};
if (models && convo) {
convo.model = getFirstDefinedValue(models) ?? convo.model;
}
if (secondaryModels && convo.agentOptions) {
convo.agentOptions.model = getFirstDefinedValue(secondaryModels) ?? convo.agentOptions.model;
}
return convo;
};
export const getResponseSender = (endpointOption: TEndpointOption): string => {
const { model, endpoint, endpointType, modelDisplayLabel, chatGptLabel, modelLabel, jailbreak } =
endpointOption;
if (
[
EModelEndpoint.openAI,
EModelEndpoint.azureOpenAI,
EModelEndpoint.gptPlugins,
EModelEndpoint.chatGPTBrowser,
].includes(endpoint)
) {
if (chatGptLabel) {
return chatGptLabel;
} else if (model && model.includes('gpt-3')) {
return 'GPT-3.5';
} else if (model && model.includes('gpt-4')) {
return 'GPT-4';
} else if (model && model.includes('mistral')) {
return 'Mistral';
}
return alternateName[endpoint] ?? 'ChatGPT';
}
if (endpoint === EModelEndpoint.bingAI) {
return jailbreak ? 'Sydney' : 'BingAI';
}
if (endpoint === EModelEndpoint.anthropic) {
return modelLabel ?? 'Claude';
}
if (endpoint === EModelEndpoint.google) {
if (modelLabel) {
return modelLabel;
} else if (model && model.includes('gemini')) {
return 'Gemini';
} else if (model && model.includes('code')) {
return 'Codey';
}
return 'PaLM2';
}
if (endpoint === EModelEndpoint.custom || endpointType === EModelEndpoint.custom) {
if (modelLabel) {
return modelLabel;
} else if (chatGptLabel) {
return chatGptLabel;
} else if (model && model.includes('mistral')) {
return 'Mistral';
} else if (model && model.includes('gpt-3')) {
return 'GPT-3.5';
} else if (model && model.includes('gpt-4')) {
return 'GPT-4';
} else if (modelDisplayLabel) {
return modelDisplayLabel;
}
return 'AI';
}
return '';
};
type CompactEndpointSchema =
| typeof compactOpenAISchema
| typeof assistantSchema
| typeof compactGoogleSchema
| typeof bingAISchema
| typeof compactAnthropicSchema
| typeof compactChatGPTSchema
| typeof compactPluginsSchema;
const compactEndpointSchemas: Record<string, CompactEndpointSchema> = {
openAI: compactOpenAISchema,
azureOpenAI: compactOpenAISchema,
custom: compactOpenAISchema,
assistant: assistantSchema,
google: compactGoogleSchema,
/* BingAI needs all fields */
bingAI: bingAISchema,
anthropic: compactAnthropicSchema,
chatGPTBrowser: compactChatGPTSchema,
gptPlugins: compactPluginsSchema,
};
export const parseCompactConvo = ({
endpoint,
endpointType,
conversation,
possibleValues,
}: {
endpoint?: EModelEndpoint;
endpointType?: EModelEndpoint;
conversation: Partial<TConversation | TPreset>;
possibleValues?: TPossibleValues;
// TODO: POC for default schema
// defaultSchema?: Partial<EndpointSchema>,
}) => {
if (!endpoint) {
throw new Error(`undefined endpoint: ${endpoint}`);
}
let schema = compactEndpointSchemas[endpoint];
if (!schema && !endpointType) {
throw new Error(`Unknown endpoint: ${endpoint}`);
} else if (!schema && endpointType) {
schema = compactEndpointSchemas[endpointType];
}
const convo = schema.parse(conversation) as TConversation;
// const { models, secondaryModels } = possibleValues ?? {};
const { models } = possibleValues ?? {};
if (models && convo) {
convo.model = getFirstDefinedValue(models) ?? convo.model;
}
// if (secondaryModels && convo.agentOptions) {
// convo.agentOptionmodel = getFirstDefinedValue(secondaryModels) ?? convo.agentOptionmodel;
// }
return convo;
};
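To round out the parser changes, a brief sketch of the schema fallback: an unknown `endpoint` no longer throws as long as `endpointType` maps to a known schema (the values are illustrative):

```ts
import { EModelEndpoint, parseConvo } from 'librechat-data-provider';

// 'Mistral' has no schema of its own, so the parser falls back to the
// schema registered for `endpointType` (custom -> OpenAI-compatible).
const convo = parseConvo({
  endpoint: 'Mistral' as EModelEndpoint,
  endpointType: EModelEndpoint.custom,
  conversation: { model: 'mistral-tiny' },
  possibleValues: { models: ['mistral-tiny', 'mistral-small'] },
});
// convo.model === 'mistral-tiny'
```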


@@ -9,75 +9,7 @@ export enum EModelEndpoint {
   gptPlugins = 'gptPlugins',
   anthropic = 'anthropic',
   assistant = 'assistant',
+  custom = 'custom',
 }
-export const defaultEndpoints: EModelEndpoint[] = [
-  EModelEndpoint.openAI,
-  EModelEndpoint.assistant,
-  EModelEndpoint.azureOpenAI,
-  EModelEndpoint.bingAI,
-  EModelEndpoint.chatGPTBrowser,
-  EModelEndpoint.gptPlugins,
-  EModelEndpoint.google,
-  EModelEndpoint.anthropic,
-];
-export const defaultModels = {
-  [EModelEndpoint.google]: [
-    'gemini-pro',
-    'gemini-pro-vision',
-    'chat-bison',
-    'chat-bison-32k',
-    'codechat-bison',
-    'codechat-bison-32k',
-    'text-bison',
-    'text-bison-32k',
-    'text-unicorn',
-    'code-gecko',
-    'code-bison',
-    'code-bison-32k',
-  ],
-  [EModelEndpoint.anthropic]: [
-    'claude-2.1',
-    'claude-2',
-    'claude-1.2',
-    'claude-1',
-    'claude-1-100k',
-    'claude-instant-1',
-    'claude-instant-1-100k',
-  ],
-  [EModelEndpoint.openAI]: [
-    'gpt-3.5-turbo-16k-0613',
-    'gpt-3.5-turbo-16k',
-    'gpt-4-1106-preview',
-    'gpt-3.5-turbo',
-    'gpt-3.5-turbo-1106',
-    'gpt-4-vision-preview',
-    'gpt-4',
-    'gpt-3.5-turbo-instruct-0914',
-    'gpt-3.5-turbo-0613',
-    'gpt-3.5-turbo-0301',
-    'gpt-3.5-turbo-instruct',
-    'gpt-4-0613',
-    'text-davinci-003',
-    'gpt-4-0314',
-  ],
-};
-export const alternateName = {
-  [EModelEndpoint.openAI]: 'OpenAI',
-  [EModelEndpoint.assistant]: 'Assistants',
-  [EModelEndpoint.azureOpenAI]: 'Azure OpenAI',
-  [EModelEndpoint.bingAI]: 'Bing',
-  [EModelEndpoint.chatGPTBrowser]: 'ChatGPT',
-  [EModelEndpoint.gptPlugins]: 'Plugins',
-  [EModelEndpoint.google]: 'Google',
-  [EModelEndpoint.anthropic]: 'Anthropic',
-};
-export enum AuthKeys {
-  GOOGLE_SERVICE_KEY = 'GOOGLE_SERVICE_KEY',
-  GOOGLE_API_KEY = 'GOOGLE_API_KEY',
-}
 export const endpointSettings = {
@@ -116,41 +48,10 @@ export const endpointSettings = {
 const google = endpointSettings[EModelEndpoint.google];
-export const EndpointURLs: { [key in EModelEndpoint]: string } = {
-  [EModelEndpoint.azureOpenAI]: '/api/ask/azureOpenAI',
-  [EModelEndpoint.openAI]: '/api/ask/openAI',
-  [EModelEndpoint.bingAI]: '/api/ask/bingAI',
-  [EModelEndpoint.chatGPTBrowser]: '/api/ask/chatGPTBrowser',
-  [EModelEndpoint.google]: '/api/ask/google',
-  [EModelEndpoint.gptPlugins]: '/api/ask/gptPlugins',
-  [EModelEndpoint.anthropic]: '/api/ask/anthropic',
-  [EModelEndpoint.assistant]: '/api/assistants/chat',
-};
-export const modularEndpoints = new Set<EModelEndpoint | string>([
-  EModelEndpoint.gptPlugins,
-  EModelEndpoint.anthropic,
-  EModelEndpoint.google,
-  EModelEndpoint.openAI,
-]);
-export const supportsFiles = {
-  [EModelEndpoint.openAI]: true,
-  [EModelEndpoint.google]: true,
-  [EModelEndpoint.assistant]: true,
-  [EModelEndpoint.azureOpenAI]: true,
-};
-export const supportsBalanceCheck = {
-  [EModelEndpoint.openAI]: true,
-  [EModelEndpoint.azureOpenAI]: true,
-  [EModelEndpoint.gptPlugins]: true,
-};
-export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision'];
 export const eModelEndpointSchema = z.nativeEnum(EModelEndpoint);
+export const extendedModelEndpointSchema = z.union([eModelEndpointSchema, z.string()]);
 export const tPluginAuthConfigSchema = z.object({
   authField: z.string(),
   label: z.string(),
@@ -253,6 +154,7 @@ export const tConversationSchema = z.object({
   title: z.string().nullable().or(z.literal('New Chat')).default('New Chat'),
   user: z.string().optional(),
   endpoint: eModelEndpointSchema.nullable(),
+  endpointType: eModelEndpointSchema.optional(),
   suggestions: z.array(z.string()).optional(),
   messages: z.array(z.string()).optional(),
   tools: z.array(tPluginSchema).optional(),
@@ -305,8 +207,22 @@ export const tPresetSchema = tConversationSchema
   }),
 );
+export const tConvoUpdateSchema = tConversationSchema.merge(
+  z.object({
+    endpoint: extendedModelEndpointSchema.nullable(),
+  }),
+);
+export const tPresetUpdateSchema = tConversationSchema.merge(
+  z.object({
+    endpoint: extendedModelEndpointSchema.nullable(),
+  }),
+);
 export type TPreset = z.infer<typeof tPresetSchema>;
+// type DefaultSchemaValues = Partial<typeof google>;
 export const openAISchema = tConversationSchema
   .pick({
     model: true,
@@ -528,122 +444,6 @@ export const assistantSchema = tConversationSchema
   .transform(removeNullishValues)
   .catch(() => ({}));
type EndpointSchema =
| typeof openAISchema
| typeof googleSchema
| typeof bingAISchema
| typeof anthropicSchema
| typeof chatGPTBrowserSchema
| typeof gptPluginsSchema
| typeof assistantSchema;
const endpointSchemas: Record<EModelEndpoint, EndpointSchema> = {
[EModelEndpoint.openAI]: openAISchema,
[EModelEndpoint.azureOpenAI]: openAISchema,
[EModelEndpoint.google]: googleSchema,
[EModelEndpoint.bingAI]: bingAISchema,
[EModelEndpoint.anthropic]: anthropicSchema,
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowserSchema,
[EModelEndpoint.gptPlugins]: gptPluginsSchema,
[EModelEndpoint.assistant]: assistantSchema,
};
export function getFirstDefinedValue(possibleValues: string[]) {
let returnValue;
for (const value of possibleValues) {
if (value) {
returnValue = value;
break;
}
}
return returnValue;
}
export type TPossibleValues = {
models: string[];
secondaryModels?: string[];
};
export const parseConvo = (
endpoint: EModelEndpoint,
conversation: Partial<TConversation | TPreset>,
possibleValues?: TPossibleValues,
) => {
const schema = endpointSchemas[endpoint];
if (!schema) {
throw new Error(`Unknown endpoint: ${endpoint}`);
}
const convo = schema.parse(conversation) as TConversation;
const { models, secondaryModels } = possibleValues ?? {};
if (models && convo) {
convo.model = getFirstDefinedValue(models) ?? convo.model;
}
if (secondaryModels && convo.agentOptions) {
convo.agentOptions.model = getFirstDefinedValue(secondaryModels) ?? convo.agentOptions.model;
}
return convo;
};
export type TEndpointOption = {
endpoint: EModelEndpoint;
model?: string | null;
promptPrefix?: string;
temperature?: number;
chatGptLabel?: string | null;
modelLabel?: string | null;
jailbreak?: boolean;
key?: string | null;
};
export const getResponseSender = (endpointOption: TEndpointOption): string => {
const { model, endpoint, chatGptLabel, modelLabel, jailbreak } = endpointOption;
if (
[
EModelEndpoint.openAI,
EModelEndpoint.azureOpenAI,
EModelEndpoint.gptPlugins,
EModelEndpoint.chatGPTBrowser,
].includes(endpoint)
) {
if (chatGptLabel) {
return chatGptLabel;
} else if (model && model.includes('gpt-3')) {
return 'GPT-3.5';
} else if (model && model.includes('gpt-4')) {
return 'GPT-4';
}
return alternateName[endpoint] ?? 'ChatGPT';
}
if (endpoint === EModelEndpoint.bingAI) {
return jailbreak ? 'Sydney' : 'BingAI';
}
if (endpoint === EModelEndpoint.anthropic) {
return modelLabel ?? 'Claude';
}
if (endpoint === EModelEndpoint.google) {
if (modelLabel) {
return modelLabel;
} else if (model && model.includes('gemini')) {
return 'Gemini';
} else if (model && model.includes('code')) {
return 'Codey';
}
return 'PaLM2';
}
return '';
};
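
Behavior sketch for `getResponseSender`, derived directly from the branches above:

getResponseSender({ endpoint: EModelEndpoint.openAI, model: 'gpt-4-0613' }); // 'GPT-4'
getResponseSender({ endpoint: EModelEndpoint.openAI, chatGptLabel: 'My Bot' }); // 'My Bot'
getResponseSender({ endpoint: EModelEndpoint.bingAI, jailbreak: true }); // 'Sydney'
getResponseSender({ endpoint: EModelEndpoint.google, model: 'gemini-pro' }); // 'Gemini'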
export const compactOpenAISchema = tConversationSchema
  .pick({
    model: true,
@@ -809,53 +609,52 @@ export const compactPluginsSchema = tConversationSchema
  })
  .catch(() => ({}));
type CompactEndpointSchema =
  | typeof compactOpenAISchema
  | typeof assistantSchema
  | typeof compactGoogleSchema
  | typeof bingAISchema
  | typeof compactAnthropicSchema
  | typeof compactChatGPTSchema
  | typeof compactPluginsSchema;

const compactEndpointSchemas: Record<string, CompactEndpointSchema> = {
  openAI: compactOpenAISchema,
  azureOpenAI: compactOpenAISchema,
  assistant: assistantSchema,
  google: compactGoogleSchema,
  /* BingAI needs all fields */
  bingAI: bingAISchema,
  anthropic: compactAnthropicSchema,
  chatGPTBrowser: compactChatGPTSchema,
  gptPlugins: compactPluginsSchema,
};

export const parseCompactConvo = (
  endpoint: EModelEndpoint | undefined,
  conversation: Partial<TConversation | TPreset>,
  possibleValues?: TPossibleValues,
) => {
  if (!endpoint) {
    throw new Error(`undefined endpoint: ${endpoint}`);
  }

  const schema = compactEndpointSchemas[endpoint];

  if (!schema) {
    throw new Error(`Unknown endpoint: ${endpoint}`);
  }

  const convo = schema.parse(conversation) as TConversation;
  // const { models, secondaryModels } = possibleValues ?? {};
  const { models } = possibleValues ?? {};

  if (models && convo) {
    convo.model = getFirstDefinedValue(models) ?? convo.model;
  }

  // if (secondaryModels && convo.agentOptions) {
  //   convo.agentOptions.model = getFirstDefinedValue(secondaryModels) ?? convo.agentOptions.model;
  // }

  return convo;
};
// const createGoogleSchema = (customGoogle: DefaultSchemaValues) => {
//   const defaults = { ...google, ...customGoogle };
//   return tConversationSchema
//     .pick({
//       model: true,
//       modelLabel: true,
//       promptPrefix: true,
//       examples: true,
//       temperature: true,
//       maxOutputTokens: true,
//       topP: true,
//       topK: true,
//     })
//     .transform((obj) => {
//       const isGeminiPro = obj?.model?.toLowerCase()?.includes('gemini-pro');

//       const maxOutputTokensMax = isGeminiPro
//         ? defaults.maxOutputTokens.maxGeminiPro
//         : defaults.maxOutputTokens.max;
//       const maxOutputTokensDefault = isGeminiPro
//         ? defaults.maxOutputTokens.defaultGeminiPro
//         : defaults.maxOutputTokens.default;

//       let maxOutputTokens = obj.maxOutputTokens ?? maxOutputTokensDefault;
//       maxOutputTokens = Math.min(maxOutputTokens, maxOutputTokensMax);

//       return {
//         ...obj,
//         model: obj.model ?? defaults.model.default,
//         modelLabel: obj.modelLabel ?? null,
//         promptPrefix: obj.promptPrefix ?? null,
//         examples: obj.examples ?? [{ input: { content: '' }, output: { content: '' } }],
//         temperature: obj.temperature ?? defaults.temperature.default,
//         maxOutputTokens,
//         topP: obj.topP ?? defaults.topP.default,
//         topK: obj.topK ?? defaults.topK.default,
//       };
//     })
//     .catch(() => ({
//       model: defaults.model.default,
//       modelLabel: null,
//       promptPrefix: null,
//       examples: [{ input: { content: '' }, output: { content: '' } }],
//       temperature: defaults.temperature.default,
//       maxOutputTokens: defaults.maxOutputTokens.default,
//       topP: defaults.topP.default,
//       topK: defaults.topK.default,
//     }));
// };
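
A usage sketch for `parseCompactConvo` from the block above; `conversation` and the model list are assumed illustrative values:

const compact = parseCompactConvo(EModelEndpoint.anthropic, conversation, {
  models: ['claude-2.1'],
});
// compact.model === 'claude-2.1'; an undefined or unrecognized endpoint throws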


@@ -1,5 +1,5 @@
import OpenAI from 'openai';
import type { TResPlugin, TMessage, TConversation, TEndpointOption } from './schemas';
import type { TResPlugin, TMessage, TConversation, EModelEndpoint } from './schemas';

export type TOpenAIMessage = OpenAI.Chat.ChatCompletionMessageParam;
export type TOpenAIFunction = OpenAI.Chat.ChatCompletionCreateParams.Function;
@@ -11,6 +11,19 @@ export type TMessages = TMessage[];
export type TMessagesAtom = TMessages | null;
export type TEndpointOption = {
endpoint: EModelEndpoint;
endpointType?: EModelEndpoint;
modelDisplayLabel?: string;
model?: string | null;
promptPrefix?: string;
temperature?: number;
chatGptLabel?: string | null;
modelLabel?: string | null;
jailbreak?: boolean;
key?: string | null;
};
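
An illustrative value for the relocated `TEndpointOption`; the custom endpoint name, label, and model id are assumptions, and the `as` assertion mirrors the type-assertion approach used elsewhere for non-enum endpoint values:

const option: TEndpointOption = {
  endpoint: 'openrouter' as EModelEndpoint, // custom endpoint name, asserted past the enum
  endpointType: EModelEndpoint.openAI, // assumed OpenAI-compatible type
  modelDisplayLabel: 'OpenRouter',
  model: 'mistral-7b-instruct', // illustrative model id
};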
export type TSubmission = {
  plugin?: TResPlugin;
  plugins?: TResPlugin[];
@@ -114,17 +127,21 @@ export type TSearchResults = {
};
export type TConfig = {
  availableModels?: [];
  userProvide?: boolean | null;
  availableTools?: [];
  plugins?: Record<string, string>;
  azure?: boolean;
  order: number;
};
export type TConfig = {
  order: number;
  type?: EModelEndpoint;
  azure?: boolean;
  availableTools?: [];
  plugins?: Record<string, string>;
  name?: string;
  iconURL?: string;
  modelDisplayLabel?: string;
  userProvide?: boolean | null;
  userProvideURL?: boolean | null;
};

export type TModelsConfig = Record<string, string[]>;

export type TEndpointsConfig = Record<string, TConfig | null>;
export type TEndpointsConfig = Record<EModelEndpoint, TConfig | null>;
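
For illustration, a `TConfig` value describing a custom endpoint under the updated shape; every field value here is an assumption, not taken from the PR:

const mistralConfig: TConfig = {
  order: 1,
  type: EModelEndpoint.openAI, // assumed: custom endpoints reuse an OpenAI-compatible type
  name: 'Mistral',
  iconURL: 'https://example.com/mistral.png',
  modelDisplayLabel: 'Mistral',
  userProvide: true, // user supplies the API key
  userProvideURL: false, // base URL comes from the config file
};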
export type TUpdateTokenCountResponse = {
  count: number;