🎉 feat: Optimizations and Anthropic Title Generation (#2184)

* feat: add claude-3-haiku-20240307 to default anthropic list

* refactor: optimize `saveMessage` calls mid-stream via throttling

* chore: remove addMetadata operations and consolidate in BaseClient

* fix(listAssistantsForAzure): attempt to map assistants to their correct models as accurately as possible (#2177)

* refactor(client): update last conversation setup with the current assistant model; call newConvo again when assistants load, allowing a fast initial load while ensuring the assistant model is always the default rather than the last selected model

* refactor(cache): explicitly add TTL of 2 minutes when setting titleCache and add default TTL of 10 minutes to abortKeys cache

* feat(AnthropicClient): conversation titling using Anthropic Function Calling

* chore: remove extraneous token usage logging

* fix(convos): unhandled edge case for conversation grouping (undefined conversation)

* style: improve Search Bar styling after recent UI update

* chore: remove unused code, content part helpers

* feat: always show code option
Danny Avila 2024-03-23 20:21:40 -04:00 committed by GitHub
parent 8e7816468d
commit 1f0fb497f8
31 changed files with 426 additions and 188 deletions

View file

@ -6,7 +6,13 @@ const {
validateVisionModel,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { formatMessage, createContextHandlers } = require('./prompts');
const {
titleFunctionPrompt,
parseTitleFromPrompt,
truncateText,
formatMessage,
createContextHandlers,
} = require('./prompts');
const spendTokens = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const BaseClient = require('./BaseClient');
@ -108,7 +114,12 @@ class AnthropicClient extends BaseClient {
return this;
}
/**
* Get the initialized Anthropic client.
* @returns {Anthropic} The Anthropic client instance.
*/
getClient() {
/** @type {Anthropic.default.RequestOptions} */
const options = {
apiKey: this.apiKey,
};
@ -176,14 +187,13 @@ class AnthropicClient extends BaseClient {
return files;
}
async recordTokenUsage({ promptTokens, completionTokens }) {
logger.debug('[AnthropicClient] recordTokenUsage:', { promptTokens, completionTokens });
async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) {
await spendTokens(
{
context,
user: this.user,
model: this.modelOptions.model,
context: 'message',
conversationId: this.conversationId,
model: model ?? this.modelOptions.model,
endpointTokenConfig: this.options.endpointTokenConfig,
},
{ promptTokens, completionTokens },
@ -512,8 +522,15 @@ class AnthropicClient extends BaseClient {
logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
}
async createResponse(client, options) {
return this.useMessages
/**
* Creates a message or completion response using the Anthropic client.
* @param {Anthropic} client - The Anthropic client instance.
* @param {Anthropic.default.MessageCreateParams | Anthropic.default.CompletionCreateParams} options - The options for the message or completion.
* @param {boolean} useMessages - Whether to use messages or completions. Defaults to `this.useMessages`.
* @returns {Promise<Anthropic.default.Message | Anthropic.default.Completion>} The response from the Anthropic client.
*/
async createResponse(client, options, useMessages) {
return useMessages ?? this.useMessages
? await client.messages.create(options)
: await client.completions.create(options);
}
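Note the nullish coalescing in the new signature: unlike `||`, an explicit `false` is respected, so callers can force either API — `titleConvo` later passes `true` to always use the Messages API. A minimal illustration:

```js
// Nullish coalescing: only null/undefined defer to the instance default.
console.log(undefined ?? true); // true  -> falls back to this.useMessages
console.log(false ?? true);     // false -> explicitly forces completions.create
```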
@ -663,6 +680,78 @@ class AnthropicClient extends BaseClient {
getTokenCount(text) {
return this.gptEncoder.encode(text, 'all').length;
}
/**
* Generates a concise title for a conversation based on the user's input text and response.
* Involves sending a chat completion request with specific instructions for title generation.
*
* This function capitalizes on [Anthropic's function calling training](https://docs.anthropic.com/claude/docs/functions-external-tools).
*
* @param {Object} params - The parameters for the conversation title generation.
* @param {string} params.text - The user's input.
* @param {string} [params.responseText=''] - The AI's immediate response to the user.
*
* @returns {Promise<string | 'New Chat'>} A promise that resolves to the generated conversation title.
* In case of failure, it will return the default title, "New Chat".
*/
async titleConvo({ text, responseText = '' }) {
let title = 'New Chat';
const convo = `<initial_message>
${truncateText(text)}
</initial_message>
<response>
${JSON.stringify(truncateText(responseText))}
</response>`;
const { ANTHROPIC_TITLE_MODEL } = process.env ?? {};
const model = this.options.titleModel ?? ANTHROPIC_TITLE_MODEL ?? 'claude-3-haiku-20240307';
const system = titleFunctionPrompt;
const titleChatCompletion = async () => {
const content = `<conversation_context>
${convo}
</conversation_context>
Please generate a title for this conversation.`;
const titleMessage = { role: 'user', content };
const requestOptions = {
model,
temperature: 0.3,
max_tokens: 1024,
system,
stop_sequences: ['\n\nHuman:', '\n\nAssistant', '</function_calls>'],
messages: [titleMessage],
};
try {
const response = await this.createResponse(this.getClient(), requestOptions, true);
let promptTokens = response?.usage?.input_tokens;
let completionTokens = response?.usage?.output_tokens;
if (!promptTokens) {
promptTokens = this.getTokenCountForMessage(titleMessage);
promptTokens += this.getTokenCountForMessage({ role: 'system', content: system });
}
if (!completionTokens) {
completionTokens = this.getTokenCountForMessage(response.content[0]);
}
await this.recordTokenUsage({
model,
promptTokens,
completionTokens,
context: 'title',
});
const text = response.content[0].text;
title = parseTitleFromPrompt(text);
} catch (e) {
logger.error('[AnthropicClient] There was an issue generating the title', e);
}
};
await titleChatCompletion();
logger.debug('[AnthropicClient] Convo Title: ' + title);
return title;
}
}
module.exports = AnthropicClient;

View file

@ -456,6 +456,7 @@ class BaseClient {
sender: this.sender,
text: addSpaceIfNeeded(generation) + completion,
promptTokens,
...(this.metadata ?? {}),
};
if (

View file

@ -47,6 +47,7 @@ class OpenAIClient extends BaseClient {
/** @type {AzureOptions} */
this.azure = options.azure || false;
this.setOptions(options);
this.metadata = {};
}
// TODO: PluginsClient calls this 3x, unneeded
@ -574,7 +575,6 @@ class OpenAIClient extends BaseClient {
} else if (typeof opts.onProgress === 'function' || this.options.useChatCompletion) {
reply = await this.chatCompletion({
payload,
clientOptions: opts,
onProgress: opts.onProgress,
abortController: opts.abortController,
});
@ -594,9 +594,9 @@ class OpenAIClient extends BaseClient {
}
}
if (streamResult && typeof opts.addMetadata === 'function') {
if (streamResult) {
const { finish_reason } = streamResult.choices[0];
opts.addMetadata({ finish_reason });
this.metadata = { finish_reason };
}
return (reply ?? '').trim();
}
@ -921,7 +921,6 @@ ${convo}
}
async recordTokenUsage({ promptTokens, completionTokens }) {
logger.debug('[OpenAIClient] recordTokenUsage:', { promptTokens, completionTokens });
await spendTokens(
{
user: this.user,
@ -941,7 +940,7 @@ ${convo}
});
}
async chatCompletion({ payload, onProgress, clientOptions, abortController = null }) {
async chatCompletion({ payload, onProgress, abortController = null }) {
let error = null;
const errorCallback = (err) => (error = err);
let intermediateReply = '';
@ -962,15 +961,6 @@ ${convo}
}
const baseURL = extractBaseURL(this.completionsUrl);
// let { messages: _msgsToLog, ...modelOptionsToLog } = modelOptions;
// if (modelOptionsToLog.messages) {
// _msgsToLog = modelOptionsToLog.messages.map((msg) => {
// let { content, ...rest } = msg;
// if (content)
// return { ...rest, content: truncateText(content) };
// });
// }
logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions });
const opts = {
baseURL,
@ -1163,8 +1153,8 @@ ${convo}
}
const { message, finish_reason } = chatCompletion.choices[0];
if (chatCompletion && typeof clientOptions.addMetadata === 'function') {
clientOptions.addMetadata({ finish_reason });
if (chatCompletion) {
this.metadata = { finish_reason };
}
logger.debug('[OpenAIClient] chatCompletion response', chatCompletion);

View file

@ -27,7 +27,60 @@ ${convo}`,
return titlePrompt;
};
const titleFunctionPrompt = `In this environment you have access to a set of tools you can use to generate the conversation title.
You may call them like this:
<function_calls>
<invoke>
<tool_name>$TOOL_NAME</tool_name>
<parameters>
<$PARAMETER_NAME>$PARAMETER_VALUE</$PARAMETER_NAME>
...
</parameters>
</invoke>
</function_calls>
Here are the tools available:
<tools>
<tool_description>
<tool_name>submit_title</tool_name>
<description>
Submit a brief title in the conversation's language, following the parameter description closely.
</description>
<parameters>
<parameter>
<name>title</name>
<type>string</type>
<description>A concise, 5-word-or-less title for the conversation, using its same language, with no punctuation. Apply title case conventions appropriate for the language. For English, use AP Stylebook Title Case. Never directly mention the language name or the word "title"</description>
</parameter>
</parameters>
</tool_description>
</tools>`;
/**
* Parses titles from title functions based on the provided prompt.
* @param {string} prompt - The prompt containing the title function.
* @returns {string} The parsed title. "New Chat" if no title is found.
*/
function parseTitleFromPrompt(prompt) {
const titleRegex = /<title>(.+?)<\/title>/;
const titleMatch = prompt.match(titleRegex);
if (titleMatch && titleMatch[1]) {
const title = titleMatch[1].trim();
// // Capitalize the first letter of each word; Note: unnecessary due to title case prompting
// const capitalizedTitle = title.replace(/\b\w/g, (char) => char.toUpperCase());
return title;
}
return 'New Chat';
}
module.exports = {
langPrompt,
createTitlePrompt,
titleFunctionPrompt,
parseTitleFromPrompt,
};
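A usage sketch of `parseTitleFromPrompt` on the kind of function-call text the prompt elicits; the sample completion and require path are illustrative, not captured output:

```js
const { parseTitleFromPrompt } = require('./prompts'); // path assumed

// Illustrative model output following the titleFunctionPrompt format:
const completion = `<function_calls>
<invoke>
<tool_name>submit_title</tool_name>
<parameters>
<title>Planning a Trip to Japan</title>
</parameters>
</invoke>
</function_calls>`;

console.log(parseTitleFromPrompt(completion)); // 'Planning a Trip to Japan'
console.log(parseTitleFromPrompt('no tags here')); // 'New Chat'
```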

View file

@ -37,7 +37,7 @@ const modelQueries = isEnabled(process.env.USE_REDIS)
const abortKeys = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ABORT_KEYS });
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: 600000 });
const namespaces = {
[CacheKeys.CONFIG_STORE]: config,
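For context, Keyv's constructor-level `ttl` sets a default expiry for every entry, which is what gives abort keys their new 10-minute lifetime — a standalone sketch, not LibreChat code:

```js
const Keyv = require('keyv');

// Default TTL of 10 minutes applies to every set() on this instance.
const abortKeys = new Keyv({ namespace: 'abortKeys', ttl: 600000 });

async function demo() {
  await abortKeys.set('user-123:convo-456', 'abort-token');
  console.log(await abortKeys.get('user-123:convo-456')); // 'abort-token'
  // Ten minutes later the entry expires and get() resolves to undefined,
  // so stale abort keys no longer accumulate in memory or Redis.
}

demo();
```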

View file

@ -21,10 +21,15 @@ const { logger } = require('~/config');
*/
const spendTokens = async (txData, tokenUsage) => {
const { promptTokens, completionTokens } = tokenUsage;
logger.debug(`[spendTokens] conversationId: ${txData.conversationId} | Token usage: `, {
logger.debug(
`[spendTokens] conversationId: ${txData.conversationId}${
txData?.context ? ` | Context: ${txData?.context}` : ''
} | Token usage: `,
{
promptTokens,
completionTokens,
});
},
);
let prompt, completion;
try {
if (promptTokens >= 0) {

View file

@ -1,3 +1,4 @@
const throttle = require('lodash/throttle');
const { getResponseSender, Constants } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
@ -16,13 +17,10 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
logger.debug('[AskController]', { text, conversationId, ...endpointOption });
let metadata;
let userMessage;
let promptTokens;
let userMessageId;
let responseMessageId;
let lastSavedTimestamp = 0;
let saveDelay = 100;
const sender = getResponseSender({
...endpointOption,
model: endpointOption.modelOptions.model,
@ -31,8 +29,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
const newConvo = !conversationId;
const user = req.user.id;
const addMetadata = (data) => (metadata = data);
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
@ -54,11 +50,8 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
const { client } = await initializeClient({ req, res, endpointOption });
const { onProgress: progressCallback, getPartialText } = createOnProgress({
onProgress: ({ text: partialText }) => {
const currentTimestamp = Date.now();
if (currentTimestamp - lastSavedTimestamp > saveDelay) {
lastSavedTimestamp = currentTimestamp;
onProgress: throttle(
({ text: partialText }) => {
saveMessage({
messageId: responseMessageId,
sender,
@ -70,12 +63,10 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
error: false,
user,
});
}
if (saveDelay < 500) {
saveDelay = 500;
}
},
3000,
{ trailing: false },
),
});
getText = getPartialText;
@ -113,7 +104,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
overrideParentMessageId,
getReqData,
onStart,
addMetadata,
abortController,
onProgress: progressCallback.call(null, {
res,
@ -128,10 +118,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
response.parentMessageId = overrideParentMessageId;
}
if (metadata) {
response = { ...response, ...metadata };
}
response.endpoint = endpointOption.endpoint;
const conversation = await getConvo(user, conversationId);
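The controllers now lean on `lodash/throttle` instead of hand-rolled timestamp checks. With `{ trailing: false }`, the wrapped `saveMessage` fires on the leading edge and then ignores calls for the next 3 seconds — a standalone sketch of that cadence:

```js
const throttle = require('lodash/throttle');

const save = throttle(
  (text) => console.log('saved partial text of length', text.length),
  3000,
  { trailing: false },
);

// Token deltas arrive far more often than once per 3 seconds:
save('h');   // runs immediately (leading edge)
save('he');  // ignored
save('hel'); // ignored
// ...3 seconds later, the next call fires again. With { trailing: false },
// no deferred call runs at the end of the window; the complete response is
// saved separately once the stream finishes, so nothing is lost.
```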

View file

@ -1,3 +1,4 @@
const throttle = require('lodash/throttle');
const { getResponseSender } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
@ -25,11 +26,8 @@ const EditController = async (req, res, next, initializeClient) => {
...endpointOption,
});
let metadata;
let userMessage;
let promptTokens;
let lastSavedTimestamp = 0;
let saveDelay = 100;
const sender = getResponseSender({
...endpointOption,
model: endpointOption.modelOptions.model,
@ -38,7 +36,6 @@ const EditController = async (req, res, next, initializeClient) => {
const userMessageId = parentMessageId;
const user = req.user.id;
const addMetadata = (data) => (metadata = data);
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
@ -53,11 +50,8 @@ const EditController = async (req, res, next, initializeClient) => {
const { onProgress: progressCallback, getPartialText } = createOnProgress({
generation,
onProgress: ({ text: partialText }) => {
const currentTimestamp = Date.now();
if (currentTimestamp - lastSavedTimestamp > saveDelay) {
lastSavedTimestamp = currentTimestamp;
onProgress: throttle(
({ text: partialText }) => {
saveMessage({
messageId: responseMessageId,
sender,
@ -70,12 +64,10 @@ const EditController = async (req, res, next, initializeClient) => {
error: false,
user,
});
}
if (saveDelay < 500) {
saveDelay = 500;
}
},
3000,
{ trailing: false },
),
});
const getAbortData = () => ({
@ -118,7 +110,6 @@ const EditController = async (req, res, next, initializeClient) => {
overrideParentMessageId,
getReqData,
onStart,
addMetadata,
abortController,
onProgress: progressCallback.call(null, {
res,
@ -127,10 +118,6 @@ const EditController = async (req, res, next, initializeClient) => {
}),
});
if (metadata) {
response = { ...response, ...metadata };
}
const conversation = await getConvo(user, conversationId);
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';

View file

@ -1,6 +1,6 @@
const express = require('express');
const AskController = require('~/server/controllers/AskController');
const { initializeClient } = require('~/server/services/Endpoints/anthropic');
const { addTitle, initializeClient } = require('~/server/services/Endpoints/anthropic');
const {
setHeaders,
handleAbort,
@ -20,7 +20,7 @@ router.post(
buildEndpointOption,
setHeaders,
async (req, res, next) => {
await AskController(req, res, next, initializeClient);
await AskController(req, res, next, initializeClient, addTitle);
},
);

View file

@ -1,11 +1,10 @@
const express = require('express');
const router = express.Router();
const throttle = require('lodash/throttle');
const { getResponseSender, Constants } = require('librechat-data-provider');
const { validateTools } = require('~/app');
const { addTitle } = require('~/server/services/Endpoints/openAI');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { addTitle } = require('~/server/services/Endpoints/openAI');
const {
handleAbort,
createAbortController,
@ -16,8 +15,11 @@ const {
buildEndpointOption,
moderateText,
} = require('~/server/middleware');
const { validateTools } = require('~/app');
const { logger } = require('~/config');
const router = express.Router();
router.use(moderateText);
router.post('/abort', handleAbort());
@ -35,14 +37,13 @@ router.post(
parentMessageId = null,
overrideParentMessageId = null,
} = req.body;
logger.debug('[/ask/gptPlugins]', { text, conversationId, ...endpointOption });
let metadata;
let userMessage;
let promptTokens;
let userMessageId;
let responseMessageId;
let lastSavedTimestamp = 0;
let saveDelay = 100;
const sender = getResponseSender({
...endpointOption,
model: endpointOption.modelOptions.model,
@ -52,7 +53,6 @@ router.post(
const plugins = [];
const addMetadata = (data) => (metadata = data);
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
@ -68,6 +68,7 @@ router.post(
}
};
const throttledSaveMessage = throttle(saveMessage, 3000, { trailing: false });
let streaming = null;
let timer = null;
@ -77,15 +78,11 @@ router.post(
getPartialText,
} = createOnProgress({
onProgress: ({ text: partialText }) => {
const currentTimestamp = Date.now();
if (timer) {
clearTimeout(timer);
}
if (currentTimestamp - lastSavedTimestamp > saveDelay) {
lastSavedTimestamp = currentTimestamp;
saveMessage({
throttledSaveMessage({
messageId: responseMessageId,
sender,
conversationId,
@ -97,11 +94,6 @@ router.post(
plugins,
user,
});
}
if (saveDelay < 500) {
saveDelay = 500;
}
streaming = new Promise((resolve) => {
timer = setTimeout(() => {
@ -180,7 +172,6 @@ router.post(
onToolStart,
onToolEnd,
onStart,
addMetadata,
getPartialText,
...endpointOption,
onProgress: progressCallback.call(null, {
@ -196,10 +187,6 @@ router.post(
response.parentMessageId = overrideParentMessageId;
}
if (metadata) {
response = { ...response, ...metadata };
}
logger.debug('[/ask/gptPlugins]', response);
response.plugins = plugins.map((p) => ({ ...p, loading: false }));

View file

@ -1,10 +1,6 @@
const express = require('express');
const router = express.Router();
const { validateTools } = require('~/app');
const throttle = require('lodash/throttle');
const { getResponseSender } = require('librechat-data-provider');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
const { sendMessage, createOnProgress, formatSteps, formatAction } = require('~/server/utils');
const {
handleAbort,
createAbortController,
@ -15,8 +11,14 @@ const {
buildEndpointOption,
moderateText,
} = require('~/server/middleware');
const { sendMessage, createOnProgress, formatSteps, formatAction } = require('~/server/utils');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { validateTools } = require('~/app');
const { logger } = require('~/config');
const router = express.Router();
router.use(moderateText);
router.post('/abort', handleAbort());
@ -45,11 +47,9 @@ router.post(
conversationId,
...endpointOption,
});
let metadata;
let userMessage;
let promptTokens;
let lastSavedTimestamp = 0;
let saveDelay = 100;
const sender = getResponseSender({
...endpointOption,
model: endpointOption.modelOptions.model,
@ -64,7 +64,6 @@ router.post(
outputs: null,
};
const addMetadata = (data) => (metadata = data);
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
@ -77,6 +76,7 @@ router.post(
}
};
const throttledSaveMessage = throttle(saveMessage, 3000, { trailing: false });
const {
onProgress: progressCallback,
sendIntermediateMessage,
@ -84,15 +84,11 @@ router.post(
} = createOnProgress({
generation,
onProgress: ({ text: partialText }) => {
const currentTimestamp = Date.now();
if (plugin.loading === true) {
plugin.loading = false;
}
if (currentTimestamp - lastSavedTimestamp > saveDelay) {
lastSavedTimestamp = currentTimestamp;
saveMessage({
throttledSaveMessage({
messageId: responseMessageId,
sender,
conversationId,
@ -104,11 +100,6 @@ router.post(
error: false,
user,
});
}
if (saveDelay < 500) {
saveDelay = 500;
}
},
});
@ -161,7 +152,6 @@ router.post(
onAgentAction,
onChainEnd,
onStart,
addMetadata,
...endpointOption,
onProgress: progressCallback.call(null, {
res,
@ -176,10 +166,6 @@ router.post(
response.parentMessageId = overrideParentMessageId;
}
if (metadata) {
response = { ...response, ...metadata };
}
logger.debug('[/edit/gptPlugins] CLIENT RESPONSE', response);
response.plugin = { ...plugin, loading: false };
await saveMessage({ ...response, user });

View file

@ -0,0 +1,32 @@
const { CacheKeys } = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const { isEnabled } = require('~/server/utils');
const { saveConvo } = require('~/models');
const addTitle = async (req, { text, response, client }) => {
const { TITLE_CONVO = 'true' } = process.env ?? {};
if (!isEnabled(TITLE_CONVO)) {
return;
}
if (client.options.titleConvo === false) {
return;
}
// If the request was aborted, don't generate the title.
if (client.abortController.signal.aborted) {
return;
}
const titleCache = getLogStores(CacheKeys.GEN_TITLE);
const key = `${req.user.id}-${response.conversationId}`;
const title = await client.titleConvo({ text, responseText: response?.text });
await titleCache.set(key, title, 120000);
await saveConvo(req.user.id, {
conversationId: response.conversationId,
title,
});
};
module.exports = addTitle;

View file

@ -1,8 +1,9 @@
const addTitle = require('./addTitle');
const buildOptions = require('./buildOptions');
const initializeClient = require('./initializeClient');
module.exports = {
// addTitle, // todo
addTitle,
buildOptions,
initializeClient,
};

View file

@ -17,7 +17,7 @@ const addTitle = async (req, { text, responseText, conversationId, client }) =>
const key = `${req.user.id}-${conversationId}`;
const title = await client.titleConvo({ text, conversationId, responseText });
await titleCache.set(key, title);
await titleCache.set(key, title, 120000);
await saveConvo(req.user.id, {
conversationId,

View file

@ -35,23 +35,46 @@ const listAssistants = async ({ req, res, query }) => {
* @returns {Promise<AssistantListResponse>} A promise that resolves to an array of assistant data merged with their respective model information.
*/
const listAssistantsForAzure = async ({ req, res, azureConfig = {}, query }) => {
/** @type {Array<[string, TAzureModelConfig]>} */
const groupModelTuples = [];
const promises = [];
const models = [];
/** @type {Array<TAzureGroup>} */
const groups = [];
const { groupMap, assistantGroups } = azureConfig;
for (const groupName of assistantGroups) {
const group = groupMap[groupName];
req.body.model = Object.keys(group?.models)[0];
models.push(req.body.model);
groups.push(group);
const currentModelTuples = Object.entries(group?.models);
groupModelTuples.push(currentModelTuples);
/* The specified model is only necessary to
fetch assistants for the shared instance */
req.body.model = currentModelTuples[0][0];
promises.push(listAssistants({ req, res, query }));
}
const resolvedQueries = await Promise.all(promises);
const data = resolvedQueries.flatMap((res, i) =>
res.data.map((assistant) => {
const model = models[i];
return { ...assistant, model } ?? {};
const deploymentName = assistant.model;
const currentGroup = groups[i];
const currentModelTuples = groupModelTuples[i];
const firstModel = currentModelTuples[0][0];
if (currentGroup.deploymentName === deploymentName) {
return { ...assistant, model: firstModel };
}
for (const [model, modelConfig] of currentModelTuples) {
if (modelConfig.deploymentName === deploymentName) {
return { ...assistant, model };
}
}
return { ...assistant, model: firstModel };
}),
);
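To make the mapping concrete: assistants fetched from Azure report their *deployment* name as `model`, which must be translated back to a model key from the group config. A minimal sketch with hypothetical group data (not from the repo):

```js
// Hypothetical [modelKey, modelConfig] tuples for one group:
const currentModelTuples = [
  ['gpt-4-turbo', { deploymentName: 'gpt-4-turbo-deploy' }],
  ['gpt-3.5-turbo', { deploymentName: 'gpt-35-deploy' }],
];

function resolveModel(deploymentName) {
  for (const [model, modelConfig] of currentModelTuples) {
    if (modelConfig.deploymentName === deploymentName) {
      return model;
    }
  }
  // Fall back to the group's first model when no deployment matches.
  return currentModelTuples[0][0];
}

console.log(resolveModel('gpt-35-deploy')); // 'gpt-3.5-turbo'
console.log(resolveModel('unknown-deploy')); // 'gpt-4-turbo'
```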

View file

@ -22,7 +22,7 @@ const addTitle = async (req, { text, response, client }) => {
const key = `${req.user.id}-${response.conversationId}`;
const title = await client.titleConvo({ text, responseText: response?.text });
await titleCache.set(key, title);
await titleCache.set(key, title, 120000);
await saveConvo(req.user.id, {
conversationId: response.conversationId,
title,

View file

@ -8,6 +8,12 @@
* @memberof typedefs
*/
/**
* @exports Anthropic
* @typedef {import('@anthropic-ai/sdk').default} Anthropic
* @memberof typedefs
*/
/**
* @exports AssistantStreamEvent
* @typedef {import('openai').default.Beta.AssistantStreamEvent} AssistantStreamEvent
@ -206,6 +212,18 @@
* @memberof typedefs
*/
/**
* @exports TAzureModelConfig
* @typedef {import('librechat-data-provider').TAzureModelConfig} TAzureModelConfig
* @memberof typedefs
*/
/**
* @exports TAzureGroup
* @typedef {import('librechat-data-provider').TAzureGroup} TAzureGroup
* @memberof typedefs
*/
/**
* @exports TAzureGroups
* @typedef {import('librechat-data-provider').TAzureGroups} TAzureGroups

View file

@ -1,9 +1,11 @@
import { useState } from 'react';
import { useRecoilValue } from 'recoil';
import ProgressCircle from './ProgressCircle';
import ProgressText from './ProgressText';
import FinishedIcon from './FinishedIcon';
import MarkdownLite from './MarkdownLite';
import { useProgress } from '~/hooks';
import store from '~/store';
export default function CodeAnalyze({
initialProgress = 0.1,
@ -14,7 +16,8 @@ export default function CodeAnalyze({
code: string;
outputs: Record<string, unknown>[];
}) {
const [showCode, setShowCode] = useState(false);
const showCodeDefault = useRecoilValue(store.showCode);
const [showCode, setShowCode] = useState(showCodeDefault);
const progress = useProgress(initialProgress);
const radius = 56.08695652173913;
const circumference = 2 * Math.PI * radius;

View file

@ -20,6 +20,7 @@ export default function Settings({
setOption,
readonly,
}: Omit<TModelSelectProps, 'models'>) {
/* This is an unfinished component for future update */
const localize = useLocalize();
const {
endpoint,

View file

@ -44,7 +44,7 @@ const SearchBar = forwardRef((props: SearchBarProps, ref: Ref<HTMLDivElement>) =
return (
<div
ref={ref}
className="relative mt-1 flex flex h-10 cursor-pointer items-center gap-3 rounded-lg border-white bg-gray-50 px-2 px-3 py-2 text-black transition-colors duration-200 hover:bg-gray-200 focus:bg-gray-800 dark:bg-gray-900 dark:text-white dark:hover:bg-gray-800"
className="relative mt-1 flex flex h-10 cursor-pointer items-center gap-3 rounded-lg border-white bg-gray-50 px-2 px-3 py-2 text-black transition-colors duration-200 focus-within:bg-gray-200 hover:bg-gray-200 dark:bg-gray-750 dark:text-white dark:focus-within:bg-gray-800 dark:hover:bg-gray-800"
>
{<Search className="absolute left-3 h-4 w-4" />}
<input

View file

@ -14,6 +14,7 @@ import {
import type { TDangerButtonProps } from '~/common';
import HideSidePanelSwitch from './HideSidePanelSwitch';
import AutoScrollSwitch from './AutoScrollSwitch';
import ShowCodeSwitch from './ShowCodeSwitch';
import { Dropdown } from '~/components/ui';
import DangerButton from '../DangerButton';
import store from '~/store';
@ -181,6 +182,17 @@ function General() {
<div className="border-b pb-3 last-of-type:border-b-0 dark:border-gray-700">
<LangSelector langcode={selectedLang} onChange={changeLang} />
</div>
<div className="border-b pb-3 last-of-type:border-b-0 dark:border-gray-700">
<AutoScrollSwitch />
</div>
<div className="border-b pb-3 last-of-type:border-b-0 dark:border-gray-700">
<ShowCodeSwitch />
</div>
<div className="border-b pb-3 last-of-type:border-b-0 dark:border-gray-700">
<HideSidePanelSwitch />
</div>
{/* Clear Chats should be last */}
<div className="border-b pb-3 last-of-type:border-b-0 dark:border-gray-700">
<ClearChatsButton
confirmClear={confirmClear}
@ -189,12 +201,6 @@ function General() {
mutation={clearConvosMutation}
/>
</div>
<div className="border-b pb-3 last-of-type:border-b-0 dark:border-gray-700">
<AutoScrollSwitch />
</div>
<div className="border-b pb-3 last-of-type:border-b-0 dark:border-gray-700">
<HideSidePanelSwitch />
</div>
</div>
</Tabs.Content>
);

View file

@ -0,0 +1,33 @@
import { useRecoilState } from 'recoil';
import { Switch } from '~/components/ui';
import { useLocalize } from '~/hooks';
import store from '~/store';
export default function ShowCodeSwitch({
onCheckedChange,
}: {
onCheckedChange?: (value: boolean) => void;
}) {
const [showCode, setShowCode] = useRecoilState<boolean>(store.showCode);
const localize = useLocalize();
const handleCheckedChange = (value: boolean) => {
setShowCode(value);
if (onCheckedChange) {
onCheckedChange(value);
}
};
return (
<div className="flex items-center justify-between">
<div> {localize('com_nav_show_code')} </div>
<Switch
id="showCode"
checked={showCode}
onCheckedChange={handleCheckedChange}
className="ml-4 mt-2"
data-testid="showCode"
/>
</div>
);
}

View file

@ -396,6 +396,7 @@ export default {
com_nav_theme_dark: 'Dark',
com_nav_theme_light: 'Light',
com_nav_user_name_display: 'Display username in messages',
com_nav_show_code: 'Always show code when using code interpreter',
com_nav_clear_all_chats: 'Clear all chats',
com_nav_confirm_clear: 'Confirm Clear',
com_nav_close_sidebar: 'Close sidebar',

View file

@ -6,9 +6,10 @@ import {
useGetStartupConfig,
useGetEndpointsQuery,
} from 'librechat-data-provider/react-query';
import { defaultOrderQuery } from 'librechat-data-provider';
import type { TPreset } from 'librechat-data-provider';
import { useGetConvoIdQuery, useListAssistantsQuery } from '~/data-provider';
import { useNewConvo, useConfigOverride } from '~/hooks';
import { useGetConvoIdQuery } from '~/data-provider';
import ChatView from '~/components/Chat/ChatView';
import useAuthRedirect from './useAuthRedirect';
import { Spinner } from '~/components/svg';
@ -32,6 +33,10 @@ export default function ChatRoute() {
enabled: isAuthenticated && conversationId !== 'new',
});
const endpointsQuery = useGetEndpointsQuery({ enabled: isAuthenticated && modelsQueryEnabled });
const { data: assistants = null } = useListAssistantsQuery(defaultOrderQuery, {
select: (res) =>
res.data.map(({ id, name, metadata, model }) => ({ id, name, metadata, model })),
});
useEffect(() => {
if (startupConfig?.appTitle) {
@ -48,7 +53,7 @@ export default function ChatRoute() {
!hasSetConversation.current
) {
newConversation({ modelsData: modelsQuery.data });
hasSetConversation.current = true;
hasSetConversation.current = !!assistants;
} else if (
initialConvoQuery.data &&
endpointsQuery.data &&
@ -61,10 +66,20 @@ export default function ChatRoute() {
preset: initialConvoQuery.data as TPreset,
modelsData: modelsQuery.data,
});
hasSetConversation.current = !!assistants;
} else if (!hasSetConversation.current && conversationId === 'new' && assistants) {
newConversation({ modelsData: modelsQuery.data });
hasSetConversation.current = true;
} else if (!hasSetConversation.current && assistants) {
newConversation({
template: initialConvoQuery.data,
preset: initialConvoQuery.data as TPreset,
modelsData: modelsQuery.data,
});
hasSetConversation.current = true;
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [initialConvoQuery.data, modelsQuery.data, endpointsQuery.data]);
}, [initialConvoQuery.data, modelsQuery.data, endpointsQuery.data, assistants]);
if (endpointsQuery.isLoading || modelsQuery.isLoading) {
return <Spinner className="m-auto text-black dark:text-white" />;

View file

@ -61,6 +61,25 @@ const autoScroll = atom<boolean>({
] as const,
});
const showCode = atom<boolean>({
key: 'showCode',
default: localStorage.getItem('showCode') === 'true',
effects: [
({ setSelf, onSet }) => {
const savedValue = localStorage.getItem('showCode');
if (savedValue != null) {
setSelf(savedValue === 'true');
}
onSet((newValue: unknown) => {
if (typeof newValue === 'boolean') {
localStorage.setItem('showCode', newValue.toString());
}
});
},
] as const,
});
const hideSidePanel = atom<boolean>({
key: 'hideSidePanel',
default: localStorage.getItem('hideSidePanel') === 'true',
@ -147,6 +166,7 @@ export default {
showBingToneSetting,
showPopover,
autoScroll,
showCode,
hideSidePanel,
modularChat,
LaTeXParsing,
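The new `showCode` atom repeats the localStorage-sync pattern already used by `autoScroll` and `hideSidePanel`. A sketch of that pattern factored into a helper — the `atomWithLocalStorage` name is illustrative, not in the repo:

```js
import { atom } from 'recoil';

// Hypothetical helper: a boolean Recoil atom persisted to localStorage.
function atomWithLocalStorage(key) {
  return atom({
    key,
    default: localStorage.getItem(key) === 'true',
    effects: [
      ({ setSelf, onSet }) => {
        const savedValue = localStorage.getItem(key);
        if (savedValue != null) {
          setSelf(savedValue === 'true'); // hydrate from storage on init
        }
        onSet((newValue) => {
          if (typeof newValue === 'boolean') {
            localStorage.setItem(key, newValue.toString()); // persist updates
          }
        });
      },
    ],
  });
}

const showCode = atomWithLocalStorage('showCode');
```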

View file

@ -1,20 +0,0 @@
import { ToolCallTypes } from 'librechat-data-provider';
import type {
ContentPart,
CodeToolCall,
ImageFile,
Text,
PartMetadata,
} from 'librechat-data-provider';
export function isText(part: ContentPart): part is Text & PartMetadata {
return (part as Text).value !== undefined;
}
export function isCodeToolCall(part: ContentPart): part is CodeToolCall & PartMetadata {
return (part as CodeToolCall).type === ToolCallTypes.CODE_INTERPRETER;
}
export function isImageFile(part: ContentPart): part is ImageFile & PartMetadata {
return (part as ImageFile).file_id !== undefined;
}

View file

@ -42,6 +42,10 @@ export const groupConversationsByDate = (conversations: TConversation[]): Groupe
const seenConversationIds = new Set();
const groups = conversations.reduce((acc, conversation) => {
if (!conversation) {
return acc;
}
if (seenConversationIds.has(conversation.conversationId)) {
return acc;
}

View file

@ -67,7 +67,12 @@ export function updateLastSelectedModel({
if (!model) {
return;
}
const lastConversationSetup = JSON.parse(localStorage.getItem('lastConversationSetup') || '{}');
const lastSelectedModels = JSON.parse(localStorage.getItem('lastSelectedModel') || '{}');
if (lastConversationSetup.endpoint === endpoint) {
lastConversationSetup.model = model;
localStorage.setItem('lastConversationSetup', JSON.stringify(lastConversationSetup));
}
lastSelectedModels[endpoint] = model;
localStorage.setItem('lastSelectedModel', JSON.stringify(lastSelectedModels));
}
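A worked example of the new sync, with hypothetical stored values — updating the model for the endpoint of the last conversation now keeps both localStorage keys consistent:

```js
// Hypothetical localStorage contents before the call:
//   lastConversationSetup: {"endpoint":"anthropic","model":"claude-2.1"}
//   lastSelectedModel:     {"anthropic":"claude-2.1"}

updateLastSelectedModel({ endpoint: 'anthropic', model: 'claude-3-haiku-20240307' });

// After: both entries now reference claude-3-haiku-20240307, so reloading
// the page restores the newly selected model instead of a stale one.
```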

View file

@ -4,7 +4,6 @@ export * from './files';
export * from './latex';
export * from './convos';
export * from './presets';
export * from './content';
export * from './languages';
export * from './endpoints';
export { default as cn } from './cn';

View file

@ -129,6 +129,15 @@ ENDPOINTS=openAI,assistants,azureOpenAI,bingAI,chatGPTBrowser,google,gptPlugins,
PROXY=
```
- Titling is enabled by default for all Endpoints when initiating a conversation (following the first AI response).
- Set to `false` to disable this feature.
- Not all endpoints support titling.
- You can configure this feature on an Endpoint-level using [the `librechat.yaml` config file](./custom_config.md)
```bash
TITLE_CONVO=true
```
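For endpoint-level control, a sketch of the relevant `librechat.yaml` keys — assuming the `titleConvo`/`titleModel` fields described in the custom config guide:

```yaml
# librechat.yaml (sketch; see custom_config.md for the full schema)
endpoints:
  custom:
    - name: 'mistral'
      titleConvo: true
      titleModel: 'mistral-tiny'
```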
### Known Endpoints - librechat.yaml
- see: [AI Endpoints](./ai_endpoints.md)
- see also: [Custom Configuration](./custom_config.md)
@ -158,6 +167,15 @@ ANTHROPIC_MODELS=claude-3-opus-20240229,claude-3-sonnet-20240229,claude-2.1,clau
ANTHROPIC_REVERSE_PROXY=
```
- Titling is enabled by default and is controlled by the `TITLE_CONVO` environment variable for all Endpoints. The default model used for Anthropic titling is `claude-3-haiku-20240307`. You can change it by uncommenting the following and setting the desired model. **(Optional)**
> **Note:** Must be compatible with the Anthropic Endpoint. Also, Claude 2 and Claude 3 models perform best at this task, with `claude-3-haiku` models being the cheapest.
```bash
ANTHROPIC_TITLE_MODEL=claude-3-haiku-20240307
```
### Azure
**Important:** See [the complete Azure OpenAI setup guide](./ai_setup.md#azure-openai) for thorough instructions on enabling Azure OpenAI
@ -325,14 +343,8 @@ DEBUG_OPENAI=false
OPENAI_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
```
- Titling is enabled by default when initiating a conversation.
- Set to false to disable this feature.
```bash
TITLE_CONVO=true
```
- The default model used for titling is gpt-3.5-turbo. You can change it by uncommenting the following and setting the desired model. **(Optional)**
- Titling is enabled by default and is controlled by the `TITLE_CONVO` environment variable for all Endpoints. The default model used for OpenAI titling is gpt-3.5-turbo. You can change it by uncommenting the following and setting the desired model. **(Optional)**
> **Note:** Must be compatible with the OpenAI Endpoint.

View file

@ -60,8 +60,8 @@ export const azureGroupSchema = z
.and(azureBaseSchema);
export const azureGroupConfigsSchema = z.array(azureGroupSchema).min(1);
export type TAzureGroup = z.infer<typeof azureGroupSchema>;
export type TAzureGroups = z.infer<typeof azureGroupConfigsSchema>;
export type TAzureModelMapSchema = {
// deploymentName?: string;
// version?: string;
@ -297,6 +297,7 @@ export const defaultModels = {
[EModelEndpoint.anthropic]: [
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307',
'claude-2.1',
'claude-2',
'claude-1.2',