mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-09-21 21:50:49 +02:00

* chore: rename dir from `assistant` to plural * feat: `assistants` field for azure config, spread options in AppService * refactor: rename constructAzureURL param for azure as `azureOptions` * chore: bump openai and bun * chore(loadDefaultModels): change naming of assistant -> assistants * feat: load azure settings with correct baseURL for assistants' initializeClient * refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig * feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled * feat(AppService): determine assistant models on startup, throw Error if none * refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations * feat: support listing and deleting assistants with azure * feat: add model query to assistant avatar upload * feat: add azure support for retrieveRun method * refactor: update OpenAIClient initialization * chore: update README * fix(ci): tests passing * refactor(uploadOpenAIFile): improve logging and use more efficient REST API method * refactor(useFileHandling): add model to metadata to target Azure region compatible with current model * chore(files): add azure naming pattern for valid file id recognition * fix(assistants): initialize openai with first available assistant model if none provided * refactor(uploadOpenAIFile): add content type for azure, initialize formdata before azure options * refactor(sleep): move sleep function out of Runs and into `~/server/utils` * fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled * refactor(uploadOpenAIFile): revert to old method * chore(uploadOpenAIFile): use enum for file purpose * docs: azureOpenAI update guide with more info, examples * feat: enable/disable assistant capabilities and specify retrieval models * refactor: optional chain conditional statement in loadConfigModels.js * docs: add assistants examples * chore: update 
librechat.example.yaml * docs(azure): update note of file upload behavior in Azure OpenAI Assistants * chore: update docs and add descriptive message about assistant errors * fix: prevent message submission with invalid assistant or if files loading * style: update Landing icon & text when assistant is not selected * chore: bump librechat-data-provider to 0.4.8 * fix(assistants/azure): assign req.body.model for proper azure init to abort runs
92 lines
2.8 KiB
JavaScript
92 lines
2.8 KiB
JavaScript
const { CacheKeys, RunStatus, isUUID } = require('librechat-data-provider');
|
|
const { initializeClient } = require('~/server/services/Endpoints/assistants');
|
|
const { checkMessageGaps, recordUsage } = require('~/server/services/Threads');
|
|
const { getConvo } = require('~/models/Conversation');
|
|
const getLogStores = require('~/cache/getLogStores');
|
|
const { sendMessage } = require('~/server/utils');
|
|
// const spendTokens = require('~/models/spendTokens');
|
|
const { logger } = require('~/config');
|
|
|
|
/**
 * Aborts an in-progress Assistants run for a conversation: marks it cancelled
 * in the abort-keys cache, cancels the run via the OpenAI API, records token
 * usage for the partial run (best-effort), backfills any message gaps, and
 * responds with the final conversation state.
 *
 * @param {object} req - Express request; expects `req.body.abortKey` in the form
 *   `"<conversationId>:<latestMessageId>"` and an authenticated `req.user.id`.
 * @param {object} res - Express response; receives JSON, or the final SSE event
 *   if headers were already sent by an active stream.
 * @returns {Promise<void>}
 */
async function abortRun(req, res) {
  res.setHeader('Content-Type', 'application/json');
  const { abortKey } = req.body;
  // Guard against a missing abortKey; `undefined.split` would throw a TypeError.
  const [conversationId, latestMessageId] = (abortKey ?? '').split(':');

  // Validate before doing any work with the id (the original looked up the
  // conversation in the DB before validating).
  if (!isUUID.safeParse(conversationId).success) {
    logger.error('[abortRun] Invalid conversationId', { conversationId });
    return res.status(400).send({ message: 'Invalid conversationId' });
  }

  const conversation = await getConvo(req.user.id, conversationId);

  // Ensure client initialization below targets the conversation's model
  // (required for proper Azure OpenAI initialization when aborting runs).
  if (conversation?.model) {
    req.body.model = conversation.model;
  }

  const cacheKey = `${req.user.id}:${conversationId}`;
  const cache = getLogStores(CacheKeys.ABORT_KEYS);
  const runValues = await cache.get(cacheKey);
  // Cache entry may be absent (expired or never set); fall back to an empty
  // tuple so the `!run_id` branch below handles it instead of crashing.
  const [thread_id, run_id] = runValues?.split(':') ?? [];

  if (!run_id) {
    logger.warn('[abortRun] Couldn\'t find run for cancel request', { thread_id });
    return res.status(204).send({ message: 'Run not found' });
  } else if (run_id === 'cancelled') {
    logger.warn('[abortRun] Run already cancelled', { thread_id });
    return res.status(204).send({ message: 'Run already cancelled' });
  }

  let runMessages = [];
  /** @type {{ openai: OpenAI }} */
  const { openai } = await initializeClient({ req, res });

  try {
    // Mark as cancelled first so concurrent abort requests short-circuit above.
    await cache.set(cacheKey, 'cancelled');
    const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
    logger.debug('[abortRun] Cancelled run:', cancelledRun);
  } catch (error) {
    logger.error('[abortRun] Error cancelling run', error);
    // If the run is already cancelled/cancelling upstream, nothing more to do.
    if (
      error?.message?.includes(RunStatus.CANCELLED) ||
      error?.message?.includes(RunStatus.CANCELLING)
    ) {
      return res.end();
    }
  }

  try {
    const run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
    await recordUsage({
      ...run.usage,
      model: run.model,
      user: req.user.id,
      conversationId,
    });
  } catch (error) {
    // Best-effort: a usage-recording failure must not block the abort response.
    logger.error('[abortRun] Error fetching or processing run', error);
  }

  runMessages = await checkMessageGaps({
    openai,
    latestMessageId,
    thread_id,
    run_id,
    conversationId,
  });

  const finalEvent = {
    title: 'New Chat',
    final: true,
    conversation,
    runMessages,
  };

  // If streaming already started, deliver the final event over the open stream.
  // (`finalEvent` is always truthy, so the original `&& finalEvent` was dead.)
  if (res.headersSent) {
    return sendMessage(res, finalEvent);
  }

  res.json(finalEvent);
}
|
|
|
|
// Expose the abort handler so the abort route can register it.
module.exports = { abortRun };
|