Mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-01-24 03:06:12 +01:00)
💫 feat: Config File & Custom Endpoints (#1474)
* WIP(backend/api): custom endpoint
* WIP(frontend/client): custom endpoint
* chore: adjust typedefs for configs
* refactor: use data-provider for cache keys and rename enums and custom endpoint for better clarity and compatibility
* feat: loadYaml utility
* refactor: rename back to from and proof-of-concept for creating schemas from user-defined defaults
* refactor: remove custom endpoint from default endpointsConfig as it will be exclusively managed by yaml config
* refactor(EndpointController): rename variables for clarity
* feat: initial load custom config
* feat(server/utils): add simple `isUserProvided` helper
* chore(types): update TConfig type
* refactor: remove custom endpoint handling from model services as it will be handled by config; modularize fetching of models
* feat: loadCustomConfig, loadConfigEndpoints, loadConfigModels
* chore: reorganize server init imports, invoke loadCustomConfig
* refactor(loadConfigEndpoints/Models): return each custom endpoint as a standalone endpoint
* refactor(Endpoint/ModelController): spread config values after default (temporary)
* chore(client): fix type issues
* WIP: first pass for multiple custom endpoints
  - add endpointType to Conversation schema
  - update zod schemas for both convo/presets to allow a non-EModelEndpoint value as endpoint (also using type assertion)
  - use `endpointType` value as `endpoint` where mapping to type is necessary using this field
  - use the custom-defined `endpoint` value, not the type, for mapping to modelsConfig
  - misc: add return type to `getDefaultEndpoint`
  - in `useNewConvo`, add the endpointType if it wasn't already added to the conversation
  - EndpointsMenu: use the user-defined endpoint name as the title in the menu
  - TODO: custom icon via custom config, change unknown to robot icon
* refactor(parseConvo): pass args as an object and change where used accordingly; chore: comment out 'create schema' code
* chore: remove unused availableModels field in TConfig type
* refactor(parseCompactConvo): pass args as an object and change where used accordingly
* feat: chat through custom endpoint
* chore(message/convoSchemas): avoid saving empty arrays
* fix(BaseClient/saveMessageToDatabase): save endpointType
* refactor(ChatRoute): show Spinner if endpointsQuery or modelsQuery are still loading, which is apparent with slow fetching of models/remote config on first serve
* fix(useConversation): assign endpointType if it's missing
* fix(SaveAsPreset): pass real endpoint and endpointType when saving a preset
* chore: reorganize types order for TConfig, add `iconURL`
* feat: custom endpoint icon support:
  - use UnknownIcon in all icon contexts
  - add mistral and openrouter as known endpoints, and add their icons
  - iconURL support
* fix(presetSchema): move endpointType to default schema definitions shared between convoSchema and defaults
* refactor(Settings/OpenAI): remove legacy `isOpenAI` flag
* fix(OpenAIClient): do not invoke abortCompletion on completion error
* feat: add responseSender/label support for custom endpoints:
  - use defaultModelLabel field in endpointOption
  - add model defaults for custom endpoints in `getResponseSender`
  - add `useGetSender` hook which uses EndpointsQuery to determine `defaultModelLabel`
  - include defaultModelLabel from endpointConfig in custom endpoint client options
  - pass `endpointType` to `getResponseSender`
* feat(OpenAIClient): use custom options from config file
* refactor: rename `defaultModelLabel` to `modelDisplayLabel`
* refactor(data-provider): separate concerns from `schemas` into `parsers` and `config`, and fix imports elsewhere
* feat: `iconURL` and extract environment variables from custom endpoint config values
* feat: custom config validation via zod schema, rename and move to `./projectRoot/librechat.yaml`
* docs: custom config docs and examples
* fix(OpenAIClient/mistral): Mistral does not allow a singular system message; also add `useChatCompletion` flag to use openai-node for title completions
* fix(custom/initializeClient): extract env var and use `isUserProvided` function
* Update librechat.example.yaml
* feat(InputWithLabel): add className props and forwardRef
* fix(streamResponse): handle error edge case where either messages or convos query throws an error
* fix(useSSE): handle errorHandler edge cases where the error response is or is not properly formatted from the API, especially when a conversationId is not yet provided, which ensures the stream is properly closed on error
* feat: user_provided keys for custom endpoints
* fix(config/endpointSchema): do not allow default endpoint values in custom endpoint `name`
* feat(loadConfigModels): extract env variables and optimize fetching models
* feat: support custom endpoint iconURL for messages and Nav
* feat(OpenAIClient): add/dropParams support
* docs: update docs with default params, add/dropParams, and notes to use config file instead of `OPENAI_REVERSE_PROXY`
* docs: update docs with additional notes
* feat(maxTokensMap): add mistral models (32k context)
* docs: update openrouter notes
* Update ai_setup.md
* docs(custom_config): add table of contents and fix note about custom name
* docs(custom_config): reorder ToC
* Update custom_config.md
* Add note about `max_tokens` field in custom_config.md
Parent: 3f98f92d4c · Commit: 29473a72db
100 changed files with 2146 additions and 627 deletions
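For orientation, here is a minimal sketch of what a custom endpoint entry in the new `librechat.yaml` might look like, assembled from the fields this commit mentions (`name`, `apiKey`, `baseURL`, model fetching, `titleMethod`/`titleModel`, `modelDisplayLabel`, `iconURL`, `addParams`/`dropParams`). The values are illustrative only; `librechat.example.yaml` in the repo is the authoritative template:

```yaml
# Illustrative sketch only; see librechat.example.yaml for the real template.
version: 1.0.0
cache: true
endpoints:
  custom:
    # `name` may not reuse a default endpoint name (rejected by the zod config schema)
    - name: 'Mistral'
      # env vars are extracted from config values; 'user_provided' defers the key to each user
      apiKey: '${MISTRAL_API_KEY}'
      baseURL: 'https://api.mistral.ai/v1'
      models:
        default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
        fetch: true # fetch the live model list from the endpoint
      titleConvo: true
      titleMethod: 'completion' # use the chat-completion title path instead of LangChain
      titleModel: 'mistral-tiny'
      modelDisplayLabel: 'Mistral' # fed to getResponseSender for the response label
      iconURL: 'https://example.com/mistral-icon.png' # hypothetical icon URL
      dropParams: ['stop'] # strip params the upstream API rejects
```

The selected hunks below are the server-side half of this change: per their `@@` context lines, the first is from `BaseClient` and the remainder are from `OpenAIClient`.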
```diff
@@ -520,6 +520,7 @@ class BaseClient {
       await saveConvo(user, {
         conversationId: message.conversationId,
         endpoint: this.options.endpoint,
+        endpointType: this.options.endpointType,
         ...endpointOptions,
       });
     }
```
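For custom endpoints the two fields diverge: `endpoint` holds the user-defined name from the config file, while `endpointType` records the built-in type it maps to. A hypothetical saved shape (values are illustrative, not taken from the diff):

```js
// Illustrative only: what saveConvo might persist for a custom endpoint named 'Mistral'.
await saveConvo(user, {
  conversationId: message.conversationId,
  endpoint: 'Mistral', // user-defined name, not an EModelEndpoint value
  endpointType: 'custom', // used wherever a known endpoint type is required
});
```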
```diff
@@ -1,6 +1,6 @@
 const OpenAI = require('openai');
 const { HttpsProxyAgent } = require('https-proxy-agent');
-const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
+const { getResponseSender } = require('librechat-data-provider');
 const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
 const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
 const { getModelMaxTokens, genAzureChatCompletion, extractBaseURL } = require('~/utils');
```
```diff
@@ -94,10 +94,23 @@ class OpenAIClient extends BaseClient {
     }

+    const { reverseProxyUrl: reverseProxy } = this.options;
+
+    if (
+      !this.useOpenRouter &&
+      reverseProxy &&
+      reverseProxy.includes('https://openrouter.ai/api/v1')
+    ) {
+      this.useOpenRouter = true;
+    }
+
+    this.FORCE_PROMPT =
+      isEnabled(OPENAI_FORCE_PROMPT) ||
+      (reverseProxy && reverseProxy.includes('completions') && !reverseProxy.includes('chat'));
+
     if (typeof this.options.forcePrompt === 'boolean') {
       this.FORCE_PROMPT = this.options.forcePrompt;
     }

     if (this.azure && process.env.AZURE_OPENAI_DEFAULT_MODEL) {
       this.azureEndpoint = genAzureChatCompletion(this.azure, this.modelOptions.model);
       this.modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
```
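Two heuristics are at work here: OpenRouter is auto-detected from the reverse-proxy URL, and `FORCE_PROMPT` switches to the legacy text-completions payload when the proxy URL targets a `completions` route that is not `chat/completions`. A quick restatement of that predicate (URLs are examples only):

```js
// The same check the diff applies to `reverseProxy`, extracted for illustration.
const forcesPrompt = (url) => url.includes('completions') && !url.includes('chat');

forcesPrompt('https://proxy.example/v1/completions');      // true  -> legacy prompt payload
forcesPrompt('https://proxy.example/v1/chat/completions'); // false -> chat messages payload
```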
```diff
@@ -146,8 +159,10 @@ class OpenAIClient extends BaseClient {
       this.options.sender ??
       getResponseSender({
         model: this.modelOptions.model,
-        endpoint: EModelEndpoint.openAI,
+        endpoint: this.options.endpoint,
+        endpointType: this.options.endpointType,
         chatGptLabel: this.options.chatGptLabel,
+        modelDisplayLabel: this.options.modelDisplayLabel,
       });

     this.userLabel = this.options.userLabel || 'User';
```
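Per the commit notes, `getResponseSender` gained model defaults for custom endpoints, with `modelDisplayLabel` as the label source. A plausible resolution order, sketched here as an assumption rather than the library's exact code:

```js
// Assumed precedence for the response sender of a custom endpoint; illustrative only.
function resolveSender({ chatGptLabel, modelDisplayLabel, model }) {
  return chatGptLabel ?? modelDisplayLabel ?? model ?? 'AI';
}
```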
```diff
@@ -434,7 +449,7 @@ class OpenAIClient extends BaseClient {
         },
         opts.abortController || new AbortController(),
       );
-    } else if (typeof opts.onProgress === 'function') {
+    } else if (typeof opts.onProgress === 'function' || this.options.useChatCompletion) {
       reply = await this.chatCompletion({
         payload,
         clientOptions: opts,
```
```diff
@@ -530,6 +545,19 @@ class OpenAIClient extends BaseClient {
     return llm;
   }

+  /**
+   * Generates a concise title for a conversation based on the user's input text and response.
+   * Uses either specified method or starts with the OpenAI `functions` method (using LangChain).
+   * If the `functions` method fails, it falls back to the `completion` method,
+   * which involves sending a chat completion request with specific instructions for title generation.
+   *
+   * @param {Object} params - The parameters for the conversation title generation.
+   * @param {string} params.text - The user's input.
+   * @param {string} [params.responseText=''] - The AI's immediate response to the user.
+   *
+   * @returns {Promise<string | 'New Chat'>} A promise that resolves to the generated conversation title.
+   * In case of failure, it will return the default title, "New Chat".
+   */
   async titleConvo({ text, responseText = '' }) {
     let title = 'New Chat';
     const convo = `||>User:
```
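Given the documented signature, a caller's usage would look like the following (client construction elided):

```js
// Resolves to a short title, or 'New Chat' if both methods fail, per the JSDoc above.
const title = await client.titleConvo({ text: userMessage, responseText: aiReply });
```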
```diff
@@ -539,32 +567,25 @@ class OpenAIClient extends BaseClient {

     const { OPENAI_TITLE_MODEL } = process.env ?? {};

+    const model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
+
     const modelOptions = {
-      model: OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo',
+      // TODO: remove the gpt fallback and make it specific to endpoint
+      model,
       temperature: 0.2,
       presence_penalty: 0,
       frequency_penalty: 0,
       max_tokens: 16,
     };

-    try {
-      this.abortController = new AbortController();
-      const llm = this.initializeLLM({ ...modelOptions, context: 'title', tokenBuffer: 150 });
-      title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
-    } catch (e) {
-      if (e?.message?.toLowerCase()?.includes('abort')) {
-        logger.debug('[OpenAIClient] Aborted title generation');
-        return;
-      }
-      logger.error(
-        '[OpenAIClient] There was an issue generating title with LangChain, trying the old method...',
-        e,
-      );
-      modelOptions.model = OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
+    const titleChatCompletion = async () => {
+      modelOptions.model = model;
+
       if (this.azure) {
         modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL ?? modelOptions.model;
         this.azureEndpoint = genAzureChatCompletion(this.azure, modelOptions.model);
       }

       const instructionsPayload = [
         {
           role: 'system',
```
```diff
@@ -578,10 +599,38 @@ ${convo}
       ];

       try {
-        title = (await this.sendPayload(instructionsPayload, { modelOptions })).replaceAll('"', '');
+        title = (
+          await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion: true })
+        ).replaceAll('"', '');
       } catch (e) {
-        logger.error('[OpenAIClient] There was another issue generating the title', e);
+        logger.error(
+          '[OpenAIClient] There was an issue generating the title with the completion method',
+          e,
+        );
       }
+    };
+
+    if (this.options.titleMethod === 'completion') {
+      await titleChatCompletion();
+      logger.debug('[OpenAIClient] Convo Title: ' + title);
+      return title;
+    }
+
+    try {
+      this.abortController = new AbortController();
+      const llm = this.initializeLLM({ ...modelOptions, context: 'title', tokenBuffer: 150 });
+      title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
+    } catch (e) {
+      if (e?.message?.toLowerCase()?.includes('abort')) {
+        logger.debug('[OpenAIClient] Aborted title generation');
+        return;
+      }
+      logger.error(
+        '[OpenAIClient] There was an issue generating title with LangChain, trying completion method...',
+        e,
+      );
+
+      await titleChatCompletion();
+    }

     logger.debug('[OpenAIClient] Convo Title: ' + title);
```
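Net effect: `titleMethod: 'completion'` short-circuits straight to `titleChatCompletion`, while the default path tries the LangChain `runTitleChain` first and falls back on failure. In the config file this is set per endpoint; an illustrative fragment, with values that are assumptions rather than defaults:

```yaml
titleConvo: true
titleMethod: 'completion' # skip LangChain; go straight to the chat-completion path
titleModel: 'mistral-tiny' # resolved before OPENAI_TITLE_MODEL and the gpt-3.5-turbo fallback
```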
```diff
@@ -593,8 +642,11 @@ ${convo}
     let context = messagesToRefine;
     let prompt;

+    // TODO: remove the gpt fallback and make it specific to endpoint
     const { OPENAI_SUMMARY_MODEL = 'gpt-3.5-turbo' } = process.env ?? {};
-    const maxContextTokens = getModelMaxTokens(OPENAI_SUMMARY_MODEL) ?? 4095;
+    const model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
+    const maxContextTokens = getModelMaxTokens(model) ?? 4095;

     // 3 tokens for the assistant label, and 98 for the summarizer prompt (101)
     let promptBuffer = 101;
```
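The same resolution pattern now governs summarization, and it matters because the commit also added mistral models to `maxTokensMap` with 32k context. A hypothetical illustration of the lookup (return values assumed from the commit's 32k note, not verified):

```js
// Assumed behavior: known models resolve their real window; unknown ones fall back to 4095.
getModelMaxTokens('mistral-medium'); // ~32k per the commit's maxTokensMap note
getModelMaxTokens('some-unknown-model'); // undefined, so the `?? 4095` fallback applies
```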
```diff
@@ -644,7 +696,7 @@ ${convo}
     logger.debug('[OpenAIClient] initialPromptTokens', initialPromptTokens);

     const llm = this.initializeLLM({
-      model: OPENAI_SUMMARY_MODEL,
+      model,
       temperature: 0.2,
       context: 'summary',
       tokenBuffer: initialPromptTokens,
```
```diff
@@ -719,7 +771,9 @@ ${convo}
     if (!abortController) {
       abortController = new AbortController();
     }
-    const modelOptions = { ...this.modelOptions };
+
+    let modelOptions = { ...this.modelOptions };
+
     if (typeof onProgress === 'function') {
       modelOptions.stream = true;
     }
```
```diff
@@ -779,6 +833,27 @@ ${convo}
       ...opts,
     });

+    /* hacky fix for Mistral AI API not allowing a singular system message in payload */
+    if (opts.baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
+      const { messages } = modelOptions;
+      if (messages.length === 1 && messages[0].role === 'system') {
+        modelOptions.messages[0].role = 'user';
+      }
+    }
+
+    if (this.options.addParams && typeof this.options.addParams === 'object') {
+      modelOptions = {
+        ...modelOptions,
+        ...this.options.addParams,
+      };
+    }
+
+    if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
+      this.options.dropParams.forEach((param) => {
+        delete modelOptions[param];
+      });
+    }
+
     let UnexpectedRoleError = false;
     if (modelOptions.stream) {
       const stream = await openai.beta.chat.completions
```
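Taken together: `addParams` merges config-defined defaults over the computed options (which is why `modelOptions` became `let` in the earlier hunk), and `dropParams` then deletes anything the upstream API rejects. A worked example with hypothetical config values:

```js
// Hypothetical config: addParams: { safe_prompt: true }, dropParams: ['stop']
let modelOptions = { model: 'mistral-medium', stop: ['||>'], stream: true };

modelOptions = { ...modelOptions, safe_prompt: true }; // addParams merge
['stop'].forEach((param) => delete modelOptions[param]); // dropParams removal

console.log(modelOptions); // { model: 'mistral-medium', stream: true, safe_prompt: true }
```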