LibreChat/api/server/utils/queue.js
Danny Avila 5cd5c3bef8
🅰️ feat: Azure OpenAI Assistants API Support (#1992)
* chore: rename dir from `assistant` to plural

* feat: `assistants` field for azure config, spread options in AppService

* refactor: rename constructAzureURL param for azure as `azureOptions`

* chore: bump openai and bun

* chore(loadDefaultModels): change naming of assistant -> assistants

* feat: load azure settings with correct baseURL for assistants' initializeClient

* refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig

* feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled

* feat(AppService): determine assistant models on startup, throw Error if none

* refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations

* feat: support listing and deleting assistants with azure

* feat: add model query to assistant avatar upload

* feat: add azure support for retrieveRun method

* refactor: update OpenAIClient initialization

* chore: update README

* fix(ci): tests passing

* refactor(uploadOpenAIFile): improve logging and use more efficient REST API method

* refactor(useFileHandling): add model to metadata to target Azure region compatible with current model

* chore(files): add azure naming pattern for valid file id recognition

* fix(assistants): initialize openai with first available assistant model if none provided

* refactor(uploadOpenAIFile): add content type for azure, initialize FormData before azure options

* refactor(sleep): move sleep function out of Runs and into `~/server/utils`

* fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled

* refactor(uploadOpenAIFile): revert to old method

* chore(uploadOpenAIFile): use enum for file purpose

* docs: azureOpenAI update guide with more info, examples

* feat: enable/disable assistant capabilities and specify retrieval models

* refactor: optional chain conditional statement in loadConfigModels.js

* docs: add assistants examples

* chore: update librechat.example.yaml

* docs(azure): update note of file upload behavior in Azure OpenAI Assistants

* chore: update docs and add descriptive message about assistant errors

* fix: prevent message submission with invalid assistant or if files loading

* style: update Landing icon & text when assistant is not selected

* chore: bump librechat-data-provider to 0.4.8

* fix(assistants/azure): assign req.body.model for proper azure init to abort runs
2024-03-14 17:21:42 -04:00

/**
 * A leaky bucket queue structure to manage API requests.
 * @type {{queue: Array, interval: NodeJS.Timer | null}}
 */
const _LB = {
  queue: [],
  interval: null,
};

/**
 * Interval in milliseconds to control the rate of API requests.
 * Adjust the interval according to your rate limit needs.
 */
const _LB_INTERVAL_MS = Math.ceil(1000 / 60); // 60 req/s

/**
 * Executes the next function in the leaky bucket queue.
 * This function is called at regular intervals defined by _LB_INTERVAL_MS.
 */
const _LB_EXEC_NEXT = async () => {
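  // Stop ticking once the queue drains; the interval is restarted by the next
  // call to LB_QueueAsyncCall.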
  if (_LB.queue.length === 0) {
    clearInterval(_LB.interval);
    _LB.interval = null;
    return;
  }

  const next = _LB.queue.shift();
  if (!next) {
    return;
  }

  const { asyncFunc, args, callback } = next;
  try {
    const data = await asyncFunc(...args);
    callback(null, data);
  } catch (e) {
    callback(e);
  }
};

/**
 * Adds an async function call to the leaky bucket queue.
 * @param {Function} asyncFunc - The async function to be executed.
 * @param {Array} args - Arguments to pass to the async function.
 * @param {Function} callback - Callback function for handling the result or error.
 */
function LB_QueueAsyncCall(asyncFunc, args, callback) {
  _LB.queue.push({ asyncFunc, args, callback });
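
  // Start the drain loop if it is not already running.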
  if (_LB.interval === null) {
    _LB.interval = setInterval(_LB_EXEC_NEXT, _LB_INTERVAL_MS);
  }
}
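
// Illustrative usage (a sketch, not part of the module: `listMessages` and its
// arguments are hypothetical). The call is enqueued, executes on the next tick
// of the leaky bucket interval, and reports its result or error via the callback.
//
//   LB_QueueAsyncCall(listMessages, [{ threadId: 'thread_abc123' }], (err, data) => {
//     if (err) {
//       return console.error(err);
//     }
//     console.log(data);
//   });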

/**
 * Delays the execution for a specified number of milliseconds.
 *
 * @param {number} ms - The number of milliseconds to delay.
 * @return {Promise<void>} A promise that resolves after the specified delay.
 */
function sleep(ms) {
  return new Promise((resolve) => setTimeout(resolve, ms));
}
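
// Illustrative usage (a sketch; `retrieveRun` is a hypothetical async status
// check, and the snippet assumes an enclosing async function): pause between
// polls without blocking the event loop.
//
//   while ((await retrieveRun()).status === 'in_progress') {
//     await sleep(500);
//   }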

module.exports = {
  sleep,
  LB_QueueAsyncCall,
};