Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 17:00:15 +01:00)
* chore: rename dir from `assistant` to plural
* feat: `assistants` field for azure config, spread options in AppService
* refactor: rename constructAzureURL param for azure as `azureOptions`
* chore: bump openai and bun
* chore(loadDefaultModels): change naming of assistant -> assistants
* feat: load azure settings with correct baseURL for assistants' initializeClient
* refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig
* feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled
* feat(AppService): determine assistant models on startup, throw Error if none
* refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations
* feat: support listing and deleting assistants with azure
* feat: add model query to assistant avatar upload
* feat: add azure support for retrieveRun method
* refactor: update OpenAIClient initialization
* chore: update README
* fix(ci): tests passing
* refactor(uploadOpenAIFile): improve logging and use more efficient REST API method
* refactor(useFileHandling): add model to metadata to target Azure region compatible with current model
* chore(files): add azure naming pattern for valid file id recognition
* fix(assistants): initialize openai with first available assistant model if none provided
* refactor(uploadOpenAIFile): add content type for azure, initialize formdata before azure options
* refactor(sleep): move sleep function out of Runs and into `~/server/utils`
* fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled
* refactor(uploadOpenAIFile): revert to old method
* chore(uploadOpenAIFile): use enum for file purpose
* docs: azureOpenAI update guide with more info, examples
* feat: enable/disable assistant capabilities and specify retrieval models
* refactor: optional chain conditional statement in loadConfigModels.js
* docs: add assistants examples
* chore: update librechat.example.yaml
* docs(azure): update note of file upload behavior in Azure OpenAI Assistants
* chore: update docs and add descriptive message about assistant errors
* fix: prevent message submission with invalid assistant or if files loading
* style: update Landing icon & text when assistant is not selected
* chore: bump librechat-data-provider to 0.4.8
* fix(assistants/azure): assign req.body.model for proper azure init to abort runs
102 lines
2.9 KiB
JavaScript
const express = require('express');
const { CacheKeys } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistants');
const { getConvosByPage, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation');
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
const getLogStores = require('~/cache/getLogStores');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');

const router = express.Router();
router.use(requireJwtAuth);

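/**
 * GET /
 * Lists the authenticated user's conversations one page at a time.
 * Accepts an optional `pageNumber` query parameter (defaults to 1) and
 * responds with 400 when it cannot be parsed as a number of at least 1.
 */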
router.get('/', async (req, res) => {
  let pageNumber = req.query.pageNumber || 1;
  pageNumber = parseInt(pageNumber, 10);

  if (isNaN(pageNumber) || pageNumber < 1) {
    return res.status(400).json({ error: 'Invalid page number' });
  }

  res.status(200).send(await getConvosByPage(req.user.id, pageNumber));
});

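/**
 * GET /:conversationId
 * Fetches a single conversation belonging to the authenticated user,
 * or responds with 404 if none is found.
 */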
router.get('/:conversationId', async (req, res) => {
  const { conversationId } = req.params;
  const convo = await getConvo(req.user.id, conversationId);

  if (convo) {
    res.status(200).json(convo);
  } else {
    res.status(404).end();
  }
});

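/**
 * POST /gen_title
 * Looks up the generated title for a conversation in the title cache,
 * keyed by user id and conversation id. If the title is not cached yet,
 * waits 2.5 seconds and retries once before responding with 404.
 */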
router.post('/gen_title', async (req, res) => {
  const { conversationId } = req.body;
  const titleCache = getLogStores(CacheKeys.GEN_TITLE);
  const key = `${req.user.id}-${conversationId}`;
  let title = await titleCache.get(key);

  if (!title) {
    await sleep(2500);
    title = await titleCache.get(key);
  }

  if (title) {
    await titleCache.delete(key);
    res.status(200).json({ title });
  } else {
    res.status(404).json({
      message: 'Title not found or method not implemented for the conversation\'s endpoint',
    });
  }
});

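/**
 * POST /clear
 * Deletes conversations for the authenticated user: a single conversation
 * when `conversationId` is provided, otherwise all of them. Requests from
 * the delete button without a `conversationId` are ignored. When a
 * `thread_id` is included, the corresponding OpenAI thread is deleted first.
 */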
router.post('/clear', async (req, res) => {
  let filter = {};
  const { conversationId, source, thread_id } = req.body.arg;
  if (conversationId) {
    filter = { conversationId };
  }

  if (source === 'button' && !conversationId) {
    return res.status(200).send('No conversationId provided');
  }

  if (thread_id) {
    /** @type {{ openai: OpenAI }} */
    const { openai } = await initializeClient({ req, res });
    try {
      const response = await openai.beta.threads.del(thread_id);
      logger.debug('Deleted OpenAI thread:', response);
    } catch (error) {
      logger.error('Error deleting OpenAI thread:', error);
    }
  }

  // for debugging deletion source
  // logger.debug('source:', source);

  try {
    const dbResponse = await deleteConvos(req.user.id, filter);
    res.status(201).json(dbResponse);
  } catch (error) {
    logger.error('Error clearing conversations', error);
    res.status(500).send('Error clearing conversations');
  }
});

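/**
 * POST /update
 * Saves the conversation update provided in `req.body.arg` for the
 * authenticated user.
 */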
router.post('/update', async (req, res) => {
  const update = req.body.arg;

  try {
    const dbResponse = await saveConvo(req.user.id, update);
    res.status(201).json(dbResponse);
  } catch (error) {
    logger.error('Error updating conversation', error);
    res.status(500).send('Error updating conversation');
  }
});

module.exports = router;