mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 17:00:15 +01:00
* WIP: first pass ModelSpecs * refactor(onSelectEndpoint): use `getConvoSwitchLogic` * feat: introduce iconURL, greeting, frontend fields for conversations/presets/messages * feat: conversation.iconURL & greeting in Landing * feat: conversation.iconURL & greeting in New Chat button * feat: message.iconURL * refactor: ConversationIcon -> ConvoIconURL * WIP: add spec as a conversation field * refactor: useAppStartup, set spec on initial load for new chat, allow undefined spec, add localStorage keys enum, additional type fields for spec * feat: handle `showIconInMenu`, `showIconInHeader`, undefined `iconURL` and no specs on initial load * chore: handle undefined or empty modelSpecs * WIP: first pass, modelSpec schema for custom config * refactor: move default filtered tools definition to ToolService * feat: pass modelSpecs from backend via startupConfig * refactor: modelSpecs config, return and define list * fix: react error and include iconURL in responseMessage * refactor: add iconURL to responseMessage only * refactor: getIconEndpoint * refactor: pass TSpecsConfig * fix(assistants): differentiate compactAssistantSchema, correctly resets shared conversation state with other endpoints * refactor: assistant id prefix localStorage key * refactor: add more LocalStorageKeys and replace hardcoded values * feat: prioritize spec on new chat behavior: last selected modelSpec behavior (localStorage) * feat: first pass, interface config * chore: WIP, todo: add warnings based on config.modelSpecs settings. 
* feat: enforce modelSpecs if configured * feat: show config file yaml errors * chore: delete unused legacy Plugins component * refactor: set tools to localStorage from recoil store * chore: add stable recoil setter to useEffect deps * refactor: save tools to conversation documents * style(MultiSelectPop): dynamic height, remove unused import * refactor(react-query): use localstorage keys and pass config to useAvailablePluginsQuery * feat(utils): add mapPlugins * refactor(Convo): use conversation.tools if defined, lastSelectedTools if not * refactor: remove unused legacy code using `useSetOptions`, remove conditional flag `isMultiChat` for using legacy settings * refactor(PluginStoreDialog): add exhaustive-deps which are stable react state setters * fix(HeaderOptions): pass `popover` as true * refactor(useSetStorage): use project enums * refactor: use LocalStorageKeys enum * fix: prevent setConversation from setting falsy values in lastSelectedTools * refactor: use map for availableTools state and available Plugins query * refactor(updateLastSelectedModel): organize logic better and add note on purpose * fix(setAgentOption): prevent reseting last model to secondary model for gptPlugins * refactor(buildDefaultConvo): use enum * refactor: remove `useSetStorage` and consolidate areas where conversation state is saved to localStorage * fix: conversations retain tools on refresh * fix(gptPlugins): prevent nullish tools from being saved * chore: delete useServerStream * refactor: move initial plugins logic to useAppStartup * refactor(MultiSelectDropDown): add more pass-in className props * feat: use tools in presets * chore: delete unused usePresetOptions * refactor: new agentOptions default handling * chore: note * feat: add label and custom instructions to agents * chore: remove 'disabled with tools' message * style: move plugins to 2nd column in parameters * fix: TPreset type for agentOptions * fix: interface controls * refactor: add interfaceConfig, use Separator 
within Switcher * refactor: hide Assistants panel if interface.parameters are disabled * fix(Header): only modelSpecs if list is greater than 0 * refactor: separate MessageIcon logic from useMessageHelpers for better react rule-following * fix(AppService): don't use reserved keyword 'interface' * feat: set existing Icon for custom endpoints through iconURL * fix(ci): tests passing for App Service * docs: refactor custom_config.md for readability and better organization, also include missing values * docs: interface section and re-organize docs * docs: update modelSpecs info * chore: remove unused files * chore: remove unused files * chore: move useSetIndexOptions * chore: remove unused file * chore: move useConversation(s) * chore: move useDefaultConvo * chore: move useNavigateToConvo * refactor: use plugin install hook so it can be used elsewhere * chore: import order * update docs * refactor(OpenAI/Plugins): allow modelLabel as an initial value for chatGptLabel * chore: remove unused EndpointOptionsPopover and hide 'Save as Preset' button if preset UI visibility disabled * feat(loadDefaultInterface): issue warnings based on values * feat: changelog for custom config file * docs: add additional changelog note * fix: prevent unavailable tool selection from preset and update availableTools on Plugin installations * feat: add `filteredTools` option in custom config * chore: changelog * fix(MessageIcon): always overwrite conversation.iconURL in messageSettings * fix(ModelSpecsMenu): icon edge cases * fix(NewChat): dynamic icon * fix(PluginsClient): always include endpoint in responseMessage * fix: always include endpoint and iconURL in responseMessage across different response methods * feat: interchangeable keys for modelSpec enforcing
173 lines
5 KiB
JavaScript
173 lines
5 KiB
JavaScript
const { EModelEndpoint } = require('librechat-data-provider');
|
|
const { sendMessage, sendError, countTokens, isEnabled } = require('~/server/utils');
|
|
const { truncateText, smartTruncateText } = require('~/app/clients/prompts');
|
|
const { saveMessage, getConvo, getConvoTitle } = require('~/models');
|
|
const clearPendingReq = require('~/cache/clearPendingReq');
|
|
const abortControllers = require('./abortControllers');
|
|
const spendTokens = require('~/models/spendTokens');
|
|
const { abortRun } = require('./abortRun');
|
|
const { logger } = require('~/config');
|
|
|
|
/**
 * Aborts an in-flight message generation identified by `abortKey`
 * (falling back to `conversationId` when no explicit key was sent).
 *
 * Assistant-endpoint requests are delegated to `abortRun`, which has its
 * own abort flow. For all other endpoints, the registered AbortController
 * is invoked and the resulting final event is sent back to the client.
 *
 * @param {object} req - Express request; body carries `abortKey`, `conversationId`, `endpoint`.
 * @param {object} res - Express response.
 */
async function abortMessage(req, res) {
  let { abortKey, conversationId, endpoint } = req.body;

  // Older clients only send conversationId; use it as the lookup key.
  if (conversationId && !abortKey) {
    abortKey = conversationId;
  }

  if (endpoint === EModelEndpoint.assistants) {
    return await abortRun(req, res);
  }

  // Nothing registered under this key and we can still respond: report not found.
  if (!abortControllers.has(abortKey) && !res.headersSent) {
    return res.status(204).send({ message: 'Request not found' });
  }

  const { abortController } = abortControllers.get(abortKey);
  const completionEvent = await abortController.abortCompletion();
  logger.debug('[abortMessage] Aborted request', { abortKey });
  abortControllers.delete(abortKey);

  // If streaming already began, deliver the final event over the open stream.
  if (res.headersSent && completionEvent) {
    return sendMessage(res, completionEvent);
  }

  res.setHeader('Content-Type', 'application/json');
  res.send(JSON.stringify(completionEvent));
}
|
|
|
|
/**
 * Returns Express middleware that aborts an in-flight message request.
 *
 * When concurrent-message limiting is enabled, the user's pending-request
 * counter is cleared first so a new message can be sent immediately.
 *
 * Fix: previously a thrown error was only logged, leaving the client
 * request hanging with no response; now a 500 is sent when possible.
 *
 * @returns {function} async Express handler `(req, res)`.
 */
const handleAbort = () => {
  return async (req, res) => {
    try {
      if (isEnabled(process.env.LIMIT_CONCURRENT_MESSAGES)) {
        await clearPendingReq({ userId: req.user.id });
      }
      return await abortMessage(req, res);
    } catch (err) {
      logger.error('[abortMessage] handleAbort error', err);
      // Don't leave the client hanging; respond only if nothing was sent yet.
      if (!res.headersSent) {
        return res.status(500).send({ message: 'Error aborting message' });
      }
    }
  };
};
|
|
|
|
/**
 * Creates an AbortController for a message-generation request and an
 * `onStart` hook that registers it for later lookup by `abortMessage`.
 *
 * The controller is augmented with `abortCompletion()`, which aborts
 * generation, records token spend, persists the partial response, and
 * returns the final event payload to send to the client.
 *
 * Fixes: `saveMessage` was previously a floating promise (unhandled
 * rejection risk; the final event could be returned before the message
 * was persisted) — it is now awaited. The independent title/conversation
 * reads now run in parallel via `Promise.all`.
 *
 * @param {object} req - Express request; body carries `endpointOption`.
 * @param {object} res - Express response (used for the stream and cleanup).
 * @param {function} getAbortData - Returns the in-progress generation state.
 * @returns {{ abortController: AbortController, onStart: function }}
 */
const createAbortController = (req, res, getAbortData) => {
  const abortController = new AbortController();
  const { endpointOption } = req.body;

  /** Registers the controller under the conversation (or user) key once the user message exists. */
  const onStart = (userMessage) => {
    sendMessage(res, { message: userMessage, created: true });

    const abortKey = userMessage?.conversationId ?? req.user.id;
    abortControllers.set(abortKey, { abortController, ...endpointOption });

    // Ensure the registry entry is removed once the response completes normally.
    res.on('finish', function () {
      abortControllers.delete(abortKey);
    });
  };

  abortController.abortCompletion = async function () {
    abortController.abort();
    const { conversationId, userMessage, promptTokens, ...responseData } = getAbortData();
    const completionTokens = await countTokens(responseData?.text ?? '');
    const user = req.user.id;

    // Partial response persisted with an 'incomplete' finish reason.
    const responseMessage = {
      ...responseData,
      conversationId,
      finish_reason: 'incomplete',
      endpoint: endpointOption.endpoint,
      iconURL: endpointOption.iconURL,
      model: endpointOption.modelOptions.model,
      unfinished: false,
      error: false,
      isCreatedByUser: false,
      tokenCount: completionTokens,
    };

    await spendTokens(
      { ...responseMessage, context: 'incomplete', user },
      { promptTokens, completionTokens },
    );

    // Await persistence so the final event isn't returned before the save lands.
    await saveMessage({ ...responseMessage, user });

    // Independent reads; fetch concurrently.
    const [title, conversation] = await Promise.all([
      getConvoTitle(user, conversationId),
      getConvo(user, conversationId),
    ]);

    return {
      title,
      final: true,
      conversation,
      requestMessage: userMessage,
      responseMessage: responseMessage,
    };
  };

  return { abortController, onStart };
};
|
|
|
|
/**
 * Handles an error raised during AI response generation.
 *
 * Logs the failure (with truncated details for base64-encoding errors),
 * then either aborts the in-flight request (when meaningful partial text
 * exists) or sends an error message to the client, cleaning up any
 * registered AbortController for the conversation.
 *
 * @param {object} res - Express response.
 * @param {object} req - Express request.
 * @param {Error} error - The error that interrupted generation.
 * @param {object} data - `{ sender, conversationId, messageId, parentMessageId, partialText }`.
 */
const handleAbortError = async (res, req, error, data) => {
  if (error?.message?.includes('base64')) {
    // Base64 payloads can be enormous; truncate before logging.
    logger.error('[handleAbortError] Error in base64 encoding', {
      ...error,
      stack: smartTruncateText(error?.stack, 1000),
      message: truncateText(error.message, 350),
    });
  } else {
    logger.error('[handleAbortError] AI response error; aborting request:', error);
  }

  const { sender, conversationId, messageId, parentMessageId, partialText } = data;

  if (error.stack && error.stack.includes('google')) {
    logger.warn(
      `AI Response error for conversation ${conversationId} likely caused by Google censor/filter`,
    );
  }

  // Structured provider errors (JSON with a "type" field) are surfaced as-is.
  const errorText = error?.message?.includes('"type"')
    ? error.message
    : 'An error occurred while processing your request. Please contact the Admin.';

  /** Sends an error event to the client, preserving partial text when available. */
  const respondWithError = async (partialText) => {
    const options = {
      sender,
      messageId,
      conversationId,
      parentMessageId,
      text: errorText,
      shouldSaveMessage: true,
      user: req.user.id,
      // Partial output is delivered as an unfinished (non-error) message.
      ...(partialText ? { error: false, unfinished: true, text: partialText } : {}),
    };

    // Abort and deregister any controller still tracked for this conversation.
    const cleanup = async () => {
      const entry = abortControllers.get(conversationId);
      if (entry) {
        entry.abortController.abort();
        abortControllers.delete(conversationId);
      }
    };

    await sendError(res, options, cleanup);
  };

  // Without meaningful partial text, just report the error.
  if (!(partialText && partialText.length > 5)) {
    return respondWithError();
  }

  try {
    return await abortMessage(req, res);
  } catch (err) {
    logger.error('[handleAbortError] error while trying to abort message', err);
    return respondWithError(partialText);
  }
};
|
|
|
|
/** Abort middleware, controller factory, and error handler for message generation. */
module.exports = { handleAbort, createAbortController, handleAbortError };
|