🅰️ feat: Azure OpenAI Assistants API Support (#1992)

* chore: rename dir from `assistant` to plural

* feat: `assistants` field for azure config, spread options in AppService

* refactor: rename constructAzureURL param for azure as `azureOptions`

* chore: bump openai and bun

* chore(loadDefaultModels): change naming of assistant -> assistants

* feat: load azure settings with correct baseURL for assistants' initializeClient

* refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig

* feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled

* feat(AppService): determine assistant models on startup, throw Error if none

* refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations

* feat: support listing and deleting assistants with azure

* feat: add model query to assistant avatar upload

* feat: add azure support for retrieveRun method

* refactor: update OpenAIClient initialization

* chore: update README

* fix(ci): tests passing

* refactor(uploadOpenAIFile): improve logging and use more efficient REST API method

* refactor(useFileHandling): add model to metadata to target Azure region compatible with current model

* chore(files): add azure naming pattern for valid file id recognition

* fix(assistants): initialize openai with first available assistant model if none provided

* refactor(uploadOpenAIFile): add content type for azure, initialize formdata before azure options

* refactor(sleep): move sleep function out of Runs and into `~/server/utils`

* fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled

* refactor(uploadOpenAIFile): revert to old method

* chore(uploadOpenAIFile): use enum for file purpose

* docs: azureOpenAI update guide with more info, examples

* feat: enable/disable assistant capabilities and specify retrieval models

* refactor: optional chain conditional statement in loadConfigModels.js

* docs: add assistants examples

* chore: update librechat.example.yaml

* docs(azure): update note of file upload behavior in Azure OpenAI Assistants

* chore: update docs and add descriptive message about assistant errors

* fix: prevent message submission with an invalid assistant or while files are loading

* style: update Landing icon & text when assistant is not selected

* chore: bump librechat-data-provider to 0.4.8

* fix(assistants/azure): assign req.body.model for proper azure init to abort runs
Danny Avila, 2024-03-14 17:21:42 -04:00 (committed by GitHub)
parent 1b243c6f8c
commit 5cd5c3bef8
60 changed files with 1044 additions and 300 deletions
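
Before the diffs, for orientation: a minimal sketch of the Azure endpoint configuration the new code consumes, written as the parsed JS object rather than the `librechat.yaml` source. The group, instance, and deployment names are hypothetical; only the structure and the `assistants` flags are taken from the code below (AppService, loadConfigModels).

// Hypothetical parsed shape of config.endpoints.azureOpenAI as read by AppService.
// Values are illustrative; `assistants` may be set on the endpoint, on a group,
// or on an individual model, per the flag checks in the diffs below.
const azureOpenAIEndpointConfig = {
  assistants: true, // opt the Azure config into the assistants endpoint
  groups: [
    {
      group: 'us-east', // hypothetical group name
      apiKey: '${EASTUS_API_KEY}',
      instanceName: 'my-eastus-instance',
      version: '2024-02-15-preview',
      assistants: true, // every model in this group supports assistants
      models: {
        'gpt-4-turbo': { deploymentName: 'gpt-4-turbo-deployment' },
      },
    },
  ],
};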


@@ -16,8 +16,14 @@ async function endpointController(req, res) {
   /** @type {TEndpointsConfig} */
   const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
   if (mergedConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
-    mergedConfig[EModelEndpoint.assistants].disableBuilder =
-      req.app.locals[EModelEndpoint.assistants].disableBuilder;
+    const { disableBuilder, retrievalModels, capabilities, ..._rest } =
+      req.app.locals[EModelEndpoint.assistants];
+    mergedConfig[EModelEndpoint.assistants] = {
+      ...mergedConfig[EModelEndpoint.assistants],
+      retrievalModels,
+      disableBuilder,
+      capabilities,
+    };
   }

   const endpointsConfig = orderEndpointsConfig(mergedConfig);


@@ -1,5 +1,5 @@
 const { CacheKeys, RunStatus, isUUID } = require('librechat-data-provider');
-const { initializeClient } = require('~/server/services/Endpoints/assistant');
+const { initializeClient } = require('~/server/services/Endpoints/assistants');
 const { checkMessageGaps, recordUsage } = require('~/server/services/Threads');
 const { getConvo } = require('~/models/Conversation');
 const getLogStores = require('~/cache/getLogStores');
@@ -11,6 +11,11 @@ async function abortRun(req, res) {
   res.setHeader('Content-Type', 'application/json');
   const { abortKey } = req.body;
   const [conversationId, latestMessageId] = abortKey.split(':');
+  const conversation = await getConvo(req.user.id, conversationId);
+
+  if (conversation?.model) {
+    req.body.model = conversation.model;
+  }

   if (!isUUID.safeParse(conversationId).success) {
     logger.error('[abortRun] Invalid conversationId', { conversationId });
@@ -71,7 +76,7 @@ async function abortRun(req, res) {
   const finalEvent = {
     title: 'New Chat',
     final: true,
-    conversation: await getConvo(req.user.id, conversationId),
+    conversation,
     runMessages,
   };


@@ -1,9 +1,9 @@
 const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
 const { getModelsConfig } = require('~/server/controllers/ModelController');
-const { processFiles } = require('~/server/services/Files/process');
+const assistants = require('~/server/services/Endpoints/assistants');
 const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
+const { processFiles } = require('~/server/services/Files/process');
 const anthropic = require('~/server/services/Endpoints/anthropic');
-const assistant = require('~/server/services/Endpoints/assistant');
 const openAI = require('~/server/services/Endpoints/openAI');
 const custom = require('~/server/services/Endpoints/custom');
 const google = require('~/server/services/Endpoints/google');
@@ -15,7 +15,7 @@ const buildFunction = {
   [EModelEndpoint.azureOpenAI]: openAI.buildOptions,
   [EModelEndpoint.anthropic]: anthropic.buildOptions,
   [EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
-  [EModelEndpoint.assistants]: assistant.buildOptions,
+  [EModelEndpoint.assistants]: assistants.buildOptions,
 };

 async function buildEndpointOption(req, res, next) {


@@ -1,7 +1,7 @@
 const { v4 } = require('uuid');
 const express = require('express');
 const { actionDelimiter } = require('librechat-data-provider');
-const { initializeClient } = require('~/server/services/Endpoints/assistant');
+const { initializeClient } = require('~/server/services/Endpoints/assistants');
 const { updateAction, getActions, deleteAction } = require('~/models/Action');
 const { updateAssistant, getAssistant } = require('~/models/Assistant');
 const { encryptMetadata } = require('~/server/services/ActionService');


@@ -1,10 +1,14 @@
 const multer = require('multer');
 const express = require('express');
 const { FileContext, EModelEndpoint } = require('librechat-data-provider');
-const { updateAssistant, getAssistants } = require('~/models/Assistant');
-const { initializeClient } = require('~/server/services/Endpoints/assistant');
+const {
+  initializeClient,
+  listAssistantsForAzure,
+  listAssistants,
+} = require('~/server/services/Endpoints/assistants');
 const { getStrategyFunctions } = require('~/server/services/Files/strategies');
 const { uploadImageBuffer } = require('~/server/services/Files/process');
+const { updateAssistant, getAssistants } = require('~/models/Assistant');
 const { deleteFileByFilter } = require('~/models/File');
 const { logger } = require('~/config');
 const actions = require('./actions');
@@ -48,6 +52,10 @@ router.post('/', async (req, res) => {
       })
       .filter((tool) => tool);

+    if (openai.locals?.azureOptions) {
+      assistantData.model = openai.locals.azureOptions.azureOpenAIApiDeploymentName;
+    }
+
     const assistant = await openai.beta.assistants.create(assistantData);
     logger.debug('/assistants/', assistant);
     res.status(201).json(assistant);
@@ -101,6 +109,10 @@ router.patch('/:id', async (req, res) => {
       })
       .filter((tool) => tool);

+    if (openai.locals?.azureOptions && updateData.model) {
+      updateData.model = openai.locals.azureOptions.azureOpenAIApiDeploymentName;
+    }
+
     const updatedAssistant = await openai.beta.assistants.update(assistant_id, updateData);
     res.json(updatedAssistant);
   } catch (error) {
@@ -137,19 +149,18 @@ router.delete('/:id', async (req, res) => {
  */
 router.get('/', async (req, res) => {
   try {
-    /** @type {{ openai: OpenAI }} */
-    const { openai } = await initializeClient({ req, res });
     const { limit, order, after, before } = req.query;
-    const response = await openai.beta.assistants.list({
-      limit,
-      order,
-      after,
-      before,
-    });
+    const query = { limit, order, after, before };
+
+    const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];

     /** @type {AssistantListResponse} */
-    let body = response.body;
+    let body;
+
+    if (azureConfig?.assistants) {
+      body = await listAssistantsForAzure({ req, res, azureConfig, query });
+    } else {
+      ({ body } = await listAssistants({ req, res, query }));
+    }

     if (req.app.locals?.[EModelEndpoint.assistants]) {
       /** @type {Partial<TAssistantEndpoint>} */
@@ -165,7 +176,7 @@ router.get('/', async (req, res) => {
     res.json(body);
   } catch (error) {
     logger.error('[/assistants] Error listing assistants', error);
-    res.status(500).json({ error: error.message });
+    res.status(500).json({ message: 'Error listing assistants' });
   }
 });


@@ -10,9 +10,9 @@ const {
   saveAssistantMessage,
 } = require('~/server/services/Threads');
 const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
-const { addTitle, initializeClient } = require('~/server/services/Endpoints/assistant');
-const { sendResponse, sendMessage } = require('~/server/utils');
-const { createRun, sleep } = require('~/server/services/Runs');
+const { addTitle, initializeClient } = require('~/server/services/Endpoints/assistants');
+const { sendResponse, sendMessage, sleep } = require('~/server/utils');
+const { createRun } = require('~/server/services/Runs');
 const { getConvo } = require('~/models/Conversation');
 const getLogStores = require('~/cache/getLogStores');
 const { logger } = require('~/config');
@@ -101,6 +101,8 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
   let completedRun;

   const handleError = async (error) => {
+    const defaultErrorMessage =
+      'The Assistant run failed to initialize. Try sending a message in a new conversation.';
     const messageData = {
       thread_id,
       assistant_id,
@@ -119,12 +121,19 @@
       return;
     } else if (error.message === 'Request closed') {
       logger.debug('[/assistants/chat/] Request aborted on close');
+    } else if (/Files.*are invalid/.test(error.message)) {
+      const errorMessage = `Files are invalid, or may not have uploaded yet.${
+        req.app.locals?.[EModelEndpoint.azureOpenAI].assistants
+          ? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
+          : ''
+      }`;
+      return sendResponse(res, messageData, errorMessage);
     } else {
       logger.error('[/assistants/chat/]', error);
     }

     if (!openai || !thread_id || !run_id) {
-      return sendResponse(res, messageData, 'The Assistant run failed to initialize');
+      return sendResponse(res, messageData, defaultErrorMessage);
     }

     await sleep(3000);


@@ -1,10 +1,10 @@
 const express = require('express');
 const { CacheKeys } = require('librechat-data-provider');
-const { initializeClient } = require('~/server/services/Endpoints/assistant');
+const { initializeClient } = require('~/server/services/Endpoints/assistants');
 const { getConvosByPage, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation');
 const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
-const { sleep } = require('~/server/services/Runs/handle');
 const getLogStores = require('~/cache/getLogStores');
+const { sleep } = require('~/server/utils');
 const { logger } = require('~/config');
 const router = express.Router();


@@ -44,7 +44,7 @@ router.delete('/', async (req, res) => {
       return false;
     }

-    if (/^file-/.test(file.file_id)) {
+    if (/^(file|assistant)-/.test(file.file_id)) {
       return true;
     }
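
The widened pattern above exists because Azure can return file ids with an `assistant-` prefix (see the commit note on the azure naming pattern). A quick illustration with fabricated ids:

// Sketch: the old /^file-/ check rejected Azure-style ids.
const isValidFileId = (id) => /^(file|assistant)-/.test(id);

isValidFileId('file-abc123');      // true
isValidFileId('assistant-xyz789'); // true — previously rejected
isValidFileId('upload-000');       // false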


@@ -5,6 +5,7 @@ const {
   defaultSocialLogins,
   validateAzureGroups,
   mapModelToAzureConfig,
+  assistantEndpointSchema,
   deprecatedAzureVariables,
   conflictingAzureVariables,
 } = require('librechat-data-provider');
@@ -68,8 +69,7 @@ const AppService = async (app) => {
   const endpointLocals = {};

   if (config?.endpoints?.[EModelEndpoint.azureOpenAI]) {
-    const { groups, titleModel, titleConvo, titleMethod, plugins } =
-      config.endpoints[EModelEndpoint.azureOpenAI];
+    const { groups, ...azureConfiguration } = config.endpoints[EModelEndpoint.azureOpenAI];
     const { isValid, modelNames, modelGroupMap, groupMap, errors } = validateAzureGroups(groups);
     if (!isValid) {
@@ -79,18 +79,32 @@ const AppService = async (app) => {
       throw new Error(errorMessage);
     }

+    const assistantModels = [];
+    const assistantGroups = new Set();
     for (const modelName of modelNames) {
       mapModelToAzureConfig({ modelName, modelGroupMap, groupMap });
+      const groupName = modelGroupMap?.[modelName]?.group;
+      const modelGroup = groupMap?.[groupName];
+      let supportsAssistants = modelGroup?.assistants || modelGroup?.[modelName]?.assistants;
+      if (supportsAssistants) {
+        assistantModels.push(modelName);
+        !assistantGroups.has(groupName) && assistantGroups.add(groupName);
+      }
     }

+    if (azureConfiguration.assistants && assistantModels.length === 0) {
+      throw new Error(
+        'No Azure models are configured to support assistants. Please remove the `assistants` field or configure at least one model to support assistants.',
+      );
+    }
+
     endpointLocals[EModelEndpoint.azureOpenAI] = {
       modelNames,
       modelGroupMap,
       groupMap,
-      titleConvo,
-      titleMethod,
-      titleModel,
-      plugins,
+      assistantModels,
+      assistantGroups: Array.from(assistantGroups),
+      ...azureConfiguration,
     };

     deprecatedAzureVariables.forEach(({ key, description }) => {
@@ -111,10 +125,9 @@ const AppService = async (app) => {
   }

   if (config?.endpoints?.[EModelEndpoint.assistants]) {
-    const { disableBuilder, pollIntervalMs, timeoutMs, supportedIds, excludedIds } =
-      config.endpoints[EModelEndpoint.assistants];
-    if (supportedIds?.length && excludedIds?.length) {
+    const assistantsConfig = config.endpoints[EModelEndpoint.assistants];
+    const parsedConfig = assistantEndpointSchema.parse(assistantsConfig);
+    if (assistantsConfig.supportedIds?.length && assistantsConfig.excludedIds?.length) {
       logger.warn(
         `Both \`supportedIds\` and \`excludedIds\` are defined for the ${EModelEndpoint.assistants} endpoint; \`excludedIds\` field will be ignored.`,
       );
@@ -122,11 +135,13 @@ const AppService = async (app) => {
     /** @type {Partial<TAssistantEndpoint>} */
     endpointLocals[EModelEndpoint.assistants] = {
-      disableBuilder,
-      pollIntervalMs,
-      timeoutMs,
-      supportedIds,
-      excludedIds,
+      retrievalModels: parsedConfig.retrievalModels,
+      disableBuilder: parsedConfig.disableBuilder,
+      pollIntervalMs: parsedConfig.pollIntervalMs,
+      supportedIds: parsedConfig.supportedIds,
+      capabilities: parsedConfig.capabilities,
+      excludedIds: parsedConfig.excludedIds,
+      timeoutMs: parsedConfig.timeoutMs,
     };
   }
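
Continuing the hypothetical config sketched before the diffs, this is roughly what the block above leaves on `app.locals[EModelEndpoint.azureOpenAI]`. A sketch only; values are illustrative, and `modelGroupMap`/`groupMap` come from `validateAzureGroups`:

// Illustrative only: given the one-group config above, the azureOpenAI locals
// would look roughly like this after AppService runs.
const azureLocals = {
  modelNames: ['gpt-4-turbo'],
  modelGroupMap, // assumed in scope, from validateAzureGroups
  groupMap,      // assumed in scope, from validateAzureGroups
  assistantModels: ['gpt-4-turbo'], // models whose group or model config sets `assistants`
  assistantGroups: ['us-east'],     // groups contributing at least one assistant model
  assistants: true,                 // spread in via ...azureConfiguration (minus `groups`)
};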


@@ -13,9 +13,9 @@ const {
   defaultOrderQuery,
 } = require('librechat-data-provider');
 const { retrieveAndProcessFile } = require('~/server/services/Files/process');
-const { RunManager, waitForRun, sleep } = require('~/server/services/Runs');
+const { RunManager, waitForRun } = require('~/server/services/Runs');
 const { processRequiredActions } = require('~/server/services/ToolService');
-const { createOnProgress, sendMessage } = require('~/server/utils');
+const { createOnProgress, sendMessage, sleep } = require('~/server/utils');
 const { TextStream } = require('~/app/clients');
 const { logger } = require('~/config');


@@ -51,6 +51,13 @@ async function loadConfigEndpoints(req) {
     };
   }

+  if (req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
+    /** @type {Omit<TConfig, 'order'>} */
+    endpointsConfig[EModelEndpoint.assistants] = {
+      userProvide: false,
+    };
+  }
+
   return endpointsConfig;
 }


@@ -17,15 +17,20 @@ async function loadConfigModels(req) {
   const { endpoints = {} } = customConfig ?? {};
   const modelsConfig = {};

-  const azureModels = req.app.locals[EModelEndpoint.azureOpenAI]?.modelNames;
   const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
+  const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
+  const { modelNames } = azureConfig ?? {};

-  if (azureModels && azureEndpoint) {
-    modelsConfig[EModelEndpoint.azureOpenAI] = azureModels;
+  if (modelNames && azureEndpoint) {
+    modelsConfig[EModelEndpoint.azureOpenAI] = modelNames;
   }

-  if (azureModels && azureEndpoint && azureEndpoint.plugins) {
-    modelsConfig[EModelEndpoint.gptPlugins] = azureModels;
+  if (modelNames && azureEndpoint && azureEndpoint.plugins) {
+    modelsConfig[EModelEndpoint.gptPlugins] = modelNames;
   }

+  if (azureEndpoint?.assistants && azureConfig.assistantModels) {
+    modelsConfig[EModelEndpoint.assistants] = azureConfig.assistantModels;
+  }
+
   if (!Array.isArray(endpoints[EModelEndpoint.custom])) {


@@ -24,7 +24,7 @@ async function loadDefaultModels(req) {
     azure: useAzurePlugins,
     plugins: true,
   });
-  const assistant = await getOpenAIModels({ assistants: true });
+  const assistants = await getOpenAIModels({ assistants: true });

   return {
     [EModelEndpoint.openAI]: openAI,
@@ -34,7 +34,7 @@ async function loadDefaultModels(req) {
     [EModelEndpoint.azureOpenAI]: azureOpenAI,
     [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
     [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
-    [EModelEndpoint.assistants]: assistant,
+    [EModelEndpoint.assistants]: assistants,
   };
 }


@@ -1,9 +0,0 @@
-const addTitle = require('./addTitle');
-const buildOptions = require('./buildOptions');
-const initializeClient = require('./initializeClient');
-
-module.exports = {
-  addTitle,
-  buildOptions,
-  initializeClient,
-};


@@ -0,0 +1,73 @@
+const addTitle = require('./addTitle');
+const buildOptions = require('./buildOptions');
+const initializeClient = require('./initializeClient');
+
+/**
+ * Asynchronously lists assistants based on provided query parameters.
+ *
+ * Initializes the client with the current request and response objects and lists assistants
+ * according to the query parameters. This function abstracts the logic for non-Azure paths.
+ *
+ * @async
+ * @param {object} params - The parameters object.
+ * @param {object} params.req - The request object, used for initializing the client.
+ * @param {object} params.res - The response object, used for initializing the client.
+ * @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
+ * @returns {Promise<object>} A promise that resolves to the response from the `openai.beta.assistants.list` method call.
+ */
+const listAssistants = async ({ req, res, query }) => {
+  const { openai } = await initializeClient({ req, res });
+  return openai.beta.assistants.list(query);
+};
+
+/**
+ * Asynchronously lists assistants for Azure configured groups.
+ *
+ * Iterates through Azure configured assistant groups, initializes the client with the current request and response objects,
+ * lists assistants based on the provided query parameters, and merges their data alongside the model information into a single array.
+ *
+ * @async
+ * @param {object} params - The parameters object.
+ * @param {object} params.req - The request object, used for initializing the client and manipulating the request body.
+ * @param {object} params.res - The response object, used for initializing the client.
+ * @param {TAzureConfig} params.azureConfig - The Azure configuration object containing assistantGroups and groupMap.
+ * @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
+ * @returns {Promise<AssistantListResponse>} A promise that resolves to an array of assistant data merged with their respective model information.
+ */
+const listAssistantsForAzure = async ({ req, res, azureConfig = {}, query }) => {
+  const promises = [];
+  const models = [];
+  const { groupMap, assistantGroups } = azureConfig;
+
+  for (const groupName of assistantGroups) {
+    const group = groupMap[groupName];
+    req.body.model = Object.keys(group?.models)[0];
+    models.push(req.body.model);
+    promises.push(listAssistants({ req, res, query }));
+  }
+
+  const resolvedQueries = await Promise.all(promises);
+  const data = resolvedQueries.flatMap((res, i) =>
+    res.data.map((assistant) => {
+      const model = models[i];
+      return { ...assistant, model } ?? {};
+    }),
+  );
+
+  return {
+    first_id: data[0]?.id,
+    last_id: data[data.length - 1]?.id,
+    object: 'list',
+    has_more: false,
+    data,
+  };
+};
+
+module.exports = {
+  addTitle,
+  buildOptions,
+  initializeClient,
+  listAssistants,
+  listAssistantsForAzure,
+};
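
A hedged usage sketch of the Azure-aware listing above, as it might be called from an Express handler (the query values are arbitrary):

// Sketch, assuming req/res from an Express handler and EModelEndpoint in scope:
const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
const body = await listAssistantsForAzure({
  req,
  res,
  azureConfig,
  query: { limit: 100, order: 'desc' },
});
// body.data holds assistants from every group flagged `assistants`, each stamped
// with the model that was used to reach its region's deployment.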


@@ -1,6 +1,10 @@
 const OpenAI = require('openai');
 const { HttpsProxyAgent } = require('https-proxy-agent');
-const { EModelEndpoint } = require('librechat-data-provider');
+const {
+  EModelEndpoint,
+  resolveHeaders,
+  mapModelToAzureConfig,
+} = require('librechat-data-provider');
 const {
   getUserKey,
   getUserKeyExpiry,
@@ -8,6 +12,7 @@ const {
 } = require('~/server/services/UserService');
 const OpenAIClient = require('~/app/clients/OpenAIClient');
 const { isUserProvided } = require('~/server/utils');
+const { constructAzureURL } = require('~/utils');

 const initializeClient = async ({ req, res, endpointOption, initAppClient = false }) => {
   const { PROXY, OPENAI_ORGANIZATION, ASSISTANTS_API_KEY, ASSISTANTS_BASE_URL } = process.env;
@@ -38,12 +43,68 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
   let apiKey = userProvidesKey ? userValues.apiKey : ASSISTANTS_API_KEY;
   let baseURL = userProvidesURL ? userValues.baseURL : ASSISTANTS_BASE_URL;

+  const opts = {};
+
+  const clientOptions = {
+    reverseProxyUrl: baseURL ?? null,
+    proxy: PROXY ?? null,
+    req,
+    res,
+    ...endpointOption,
+  };
+
+  /** @type {TAzureConfig | undefined} */
+  const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
+
+  /** @type {AzureOptions | undefined} */
+  let azureOptions;
+
+  if (azureConfig && azureConfig.assistants) {
+    const { modelGroupMap, groupMap, assistantModels } = azureConfig;
+    const modelName = req.body.model ?? req.query.model ?? assistantModels[0];
+    const {
+      azureOptions: currentOptions,
+      baseURL: azureBaseURL,
+      headers = {},
+      serverless,
+    } = mapModelToAzureConfig({
+      modelName,
+      modelGroupMap,
+      groupMap,
+    });
+
+    azureOptions = currentOptions;
+
+    baseURL = constructAzureURL({
+      baseURL: azureBaseURL ?? 'https://${INSTANCE_NAME}.openai.azure.com/openai',
+      azureOptions,
+    });
+
+    apiKey = azureOptions.azureOpenAIApiKey;
+    opts.defaultQuery = { 'api-version': azureOptions.azureOpenAIApiVersion };
+    opts.defaultHeaders = resolveHeaders({ ...headers, 'api-key': apiKey });
+    opts.model = azureOptions.azureOpenAIApiDeploymentName;
+
+    if (initAppClient) {
+      clientOptions.titleConvo = azureConfig.titleConvo;
+      clientOptions.titleModel = azureConfig.titleModel;
+      clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
+
+      const groupName = modelGroupMap[modelName].group;
+      clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
+      clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
+      clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
+
+      clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
+      clientOptions.headers = opts.defaultHeaders;
+      clientOptions.azure = !serverless && azureOptions;
+    }
+  }
+
   if (!apiKey) {
     throw new Error('Assistants API key not provided. Please provide it again.');
   }

-  const opts = {};
   if (baseURL) {
     opts.baseURL = baseURL;
   }
@@ -61,18 +122,15 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
     apiKey,
     ...opts,
   });

   openai.req = req;
   openai.res = res;

-  if (endpointOption && initAppClient) {
-    const clientOptions = {
-      reverseProxyUrl: baseURL,
-      proxy: PROXY ?? null,
-      req,
-      res,
-      ...endpointOption,
-    };
+  if (azureOptions) {
+    openai.locals = { ...(openai.locals ?? {}), azureOptions };
+  }
+
+  if (endpointOption && initAppClient) {
     const client = new OpenAIClient(apiKey, clientOptions);
     return {
       client,
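
One detail worth calling out from the Azure branch above: the base URL is built by `constructAzureURL`. A minimal sketch of the expected behavior, assuming it substitutes `${INSTANCE_NAME}` and `${DEPLOYMENT_NAME}` placeholders from `azureOptions` (instance and deployment values are made up):

const { constructAzureURL } = require('~/utils');

// Assumption: placeholders in baseURL are replaced from azureOptions.
const url = constructAzureURL({
  baseURL: 'https://${INSTANCE_NAME}.openai.azure.com/openai',
  azureOptions: {
    azureOpenAIApiInstanceName: 'my-eastus-instance',
    azureOpenAIApiDeploymentName: 'gpt-4-turbo-deployment',
  },
});
// → 'https://my-eastus-instance.openai.azure.com/openai'
// The 'api-version' query and 'api-key' header are then attached through
// opts.defaultQuery / opts.defaultHeaders, as shown in the diff above.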


@@ -57,7 +57,7 @@ describe('initializeClient', () => {
     );
     getUserKeyExpiry.mockResolvedValue(isoString);

-    const req = { user: { id: 'user123' } };
+    const req = { user: { id: 'user123' }, app };
     const res = {};

     const { openai, openAIApiKey } = await initializeClient({ req, res });
@@ -80,7 +80,7 @@ describe('initializeClient', () => {
   test('throws error if API key is not provided', async () => {
     delete process.env.ASSISTANTS_API_KEY; // Simulate missing API key

-    const req = { user: { id: 'user123' } };
+    const req = { user: { id: 'user123' }, app };
     const res = {};

     await expect(initializeClient({ req, res })).rejects.toThrow(/Assistants API key not/);


@@ -1,7 +1,7 @@
 const {
   EModelEndpoint,
-  mapModelToAzureConfig,
   resolveHeaders,
+  mapModelToAzureConfig,
 } = require('librechat-data-provider');
 const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
 const { isEnabled, isUserProvided } = require('~/server/utils');


@@ -1,4 +1,7 @@
 const fs = require('fs');
+const { FilePurpose } = require('librechat-data-provider');
+const { sleep } = require('~/server/utils');
+const { logger } = require('~/config');

 /**
  * Uploads a file that can be used across various OpenAI services.
@@ -6,23 +9,31 @@ const fs = require('fs');
  * @param {Express.Request} req - The request object from Express. It should have a `user` property with an `id`
  * representing the user, and an `app.locals.paths` object with an `imageOutput` path.
  * @param {Express.Multer.File} file - The file uploaded to the server via multer.
- * @param {OpenAI} openai - The initialized OpenAI client.
+ * @param {OpenAIClient} openai - The initialized OpenAI client.
  * @returns {Promise<OpenAIFile>}
  */
 async function uploadOpenAIFile(req, file, openai) {
-  try {
-    const uploadedFile = await openai.files.create({
-      file: fs.createReadStream(file.path),
-      purpose: 'assistants',
-    });
+  const uploadedFile = await openai.files.create({
+    file: fs.createReadStream(file.path),
+    purpose: FilePurpose.Assistants,
+  });

-    console.log('File uploaded successfully to OpenAI');
+  logger.debug(
+    `[uploadOpenAIFile] User ${req.user.id} successfully uploaded file to OpenAI`,
+    uploadedFile,
+  );

-    return uploadedFile;
-  } catch (error) {
-    console.error('Error uploading file to OpenAI:', error.message);
-    throw error;
+  if (uploadedFile.status !== 'processed') {
+    const sleepTime = 2500;
+    logger.debug(
+      `[uploadOpenAIFile] File ${
+        uploadedFile.id
+      } is not yet processed. Waiting for it to be processed (${sleepTime / 1000}s)...`,
+    );
+    await sleep(sleepTime);
   }
+
+  return uploadedFile;
 }

 /**
@@ -39,9 +50,11 @@ async function deleteOpenAIFile(req, file, openai) {
     if (!res.deleted) {
       throw new Error('OpenAI returned `false` for deleted status');
     }
-    console.log('File deleted successfully from OpenAI');
+    logger.debug(
+      `[deleteOpenAIFile] User ${req.user.id} successfully deleted ${file.file_id} from OpenAI`,
+    );
   } catch (error) {
-    console.error('Error deleting file from OpenAI:', error.message);
+    logger.error('[deleteOpenAIFile] Error deleting file from OpenAI: ' + error.message);
     throw error;
   }
 }


@@ -11,7 +11,7 @@ const {
   mergeFileConfig,
 } = require('librechat-data-provider');
 const { convertToWebP, resizeAndConvert } = require('~/server/services/Files/images');
-const { initializeClient } = require('~/server/services/Endpoints/assistant');
+const { initializeClient } = require('~/server/services/Endpoints/assistants');
 const { createFile, updateFileUsage, deleteFiles } = require('~/models/File');
 const { isEnabled, determineFileType } = require('~/server/utils');
 const { LB_QueueAsyncCall } = require('~/server/utils/queue');
@@ -286,7 +286,7 @@ const processFileUpload = async ({ req, res, file, metadata }) => {
     file_id: id ?? file_id,
     temp_file_id,
     bytes,
-    filepath: isAssistantUpload ? `https://api.openai.com/v1/files/${id}` : filepath,
+    filepath: isAssistantUpload ? `${openai.baseURL}/files/${id}` : filepath,
     filename: filename ?? file.originalname,
     context: isAssistantUpload ? FileContext.assistants : FileContext.message_attachment,
     source,


@@ -1,6 +1,7 @@
 const { RunStatus, defaultOrderQuery, CacheKeys } = require('librechat-data-provider');
 const getLogStores = require('~/cache/getLogStores');
 const { retrieveRun } = require('./methods');
+const { sleep } = require('~/server/utils');
 const RunManager = require('./RunManager');
 const { logger } = require('~/config');
@@ -46,16 +47,6 @@ async function createRun({ openai, thread_id, body }) {
   return await openai.beta.threads.runs.create(thread_id, body);
 }

-/**
- * Delays the execution for a specified number of milliseconds.
- *
- * @param {number} ms - The number of milliseconds to delay.
- * @return {Promise<void>} A promise that resolves after the specified delay.
- */
-function sleep(ms) {
-  return new Promise((resolve) => setTimeout(resolve, ms));
-}
-
 /**
  * Waits for a run to complete by repeatedly checking its status. It uses a RunManager instance to fetch and manage run steps based on the run status.
  *


@@ -1,4 +1,5 @@
 const axios = require('axios');
+const { EModelEndpoint } = require('librechat-data-provider');
 const { logger } = require('~/config');

 /**
@@ -18,9 +19,9 @@ const { logger } = require('~/config');
  */
 async function retrieveRun({ thread_id, run_id, timeout, openai }) {
   const { apiKey, baseURL, httpAgent, organization } = openai;
-  const url = `${baseURL}/threads/${thread_id}/runs/${run_id}`;
+  let url = `${baseURL}/threads/${thread_id}/runs/${run_id}`;

-  const headers = {
+  let headers = {
     Authorization: `Bearer ${apiKey}`,
     'OpenAI-Beta': 'assistants=v1',
   };
@@ -29,6 +30,16 @@ async function retrieveRun({ thread_id, run_id, timeout, openai }) {
     headers['OpenAI-Organization'] = organization;
   }

+  /** @type {TAzureConfig | undefined} */
+  const azureConfig = openai.req.app.locals[EModelEndpoint.azureOpenAI];
+
+  if (azureConfig && azureConfig.assistants) {
+    delete headers.Authorization;
+    headers = { ...headers, ...openai._options.defaultHeaders };
+    const queryParams = new URLSearchParams(openai._options.defaultQuery).toString();
+    url = `${url}?${queryParams}`;
+  }
+
   try {
     const axiosConfig = {
       headers: headers,


@@ -14,7 +14,7 @@ const { loadActionSets, createActionTool } = require('./ActionService');
 const { processFileURL } = require('~/server/services/Files/process');
 const { loadTools } = require('~/app/clients/tools/util');
 const { redactMessage } = require('~/config/parsers');
-const { sleep } = require('./Runs/handle');
+const { sleep } = require('~/server/utils');
 const { logger } = require('~/config');

 /**


@@ -5,6 +5,7 @@ const handleText = require('./handleText');
 const cryptoUtils = require('./crypto');
 const citations = require('./citations');
 const sendEmail = require('./sendEmail');
+const queue = require('./queue');
 const files = require('./files');
 const math = require('./math');
@@ -17,5 +18,6 @@ module.exports = {
   removePorts,
   sendEmail,
   ...files,
+  ...queue,
   math,
 };


@@ -53,6 +53,17 @@ function LB_QueueAsyncCall(asyncFunc, args, callback) {
   }
 }

+/**
+ * Delays the execution for a specified number of milliseconds.
+ *
+ * @param {number} ms - The number of milliseconds to delay.
+ * @return {Promise<void>} A promise that resolves after the specified delay.
+ */
+function sleep(ms) {
+  return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
 module.exports = {
+  sleep,
   LB_QueueAsyncCall,
 };