mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-09-22 06:00:56 +02:00

* chore: rename dir from `assistant` to plural * feat: `assistants` field for azure config, spread options in AppService * refactor: rename constructAzureURL param for azure as `azureOptions` * chore: bump openai and bun * chore(loadDefaultModels): change naming of assistant -> assistants * feat: load azure settings with currect baseURL for assistants' initializeClient * refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig * feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled * feat(AppService): determine assistant models on startup, throw Error if none * refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations * feat: support listing and deleting assistants with azure * feat: add model query to assistant avatar upload * feat: add azure support for retrieveRun method * refactor: update OpenAIClient initialization * chore: update README * fix(ci): tests passing * refactor(uploadOpenAIFile): improve logging and use more efficient REST API method * refactor(useFileHandling): add model to metadata to target Azure region compatible with current model * chore(files): add azure naming pattern for valid file id recognition * fix(assistants): initialize openai with first available assistant model if none provided * refactor(uploadOpenAIFile): add content type for azure, initialize formdata before azure options * refactor(sleep): move sleep function out of Runs and into `~/server/utils` * fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled * refactor(uploadOpenAIFile): revert to old method * chore(uploadOpenAIFile): use enum for file purpose * docs: azureOpenAI update guide with more info, examples * feat: enable/disable assistant capabilities and specify retrieval models * refactor: optional chain conditional statement in loadConfigModels.js * docs: add assistants examples * chore: update 
librechat.example.yaml * docs(azure): update note of file upload behavior in Azure OpenAI Assistants * chore: update docs and add descriptive message about assistant errors * fix: prevent message submission with invalid assistant or if files loading * style: update Landing icon & text when assistant is not selected * chore: bump librechat-data-provider to 0.4.8 * fix(assistants/azure): assign req.body.model for proper azure init to abort runs
88 lines
2.9 KiB
JavaScript
const axios = require('axios');
|
|
const { EModelEndpoint } = require('librechat-data-provider');
|
|
const { logger } = require('~/config');
|
|
|
|
/**
|
|
* @typedef {Object} RetrieveOptions
|
|
* @property {string} thread_id - The ID of the thread to retrieve.
|
|
* @property {string} run_id - The ID of the run to retrieve.
|
|
* @property {number} [timeout] - Optional timeout for the API call.
|
|
* @property {number} [maxRetries] - TODO: not yet implemented; Optional maximum number of retries for the API call.
|
|
* @property {OpenAIClient} openai - Configuration and credentials for OpenAI API access.
|
|
*/
|
|
|
|
/**
|
|
* Asynchronously retrieves data from an API endpoint based on provided thread and run IDs.
|
|
*
|
|
* @param {RetrieveOptions} options - The options for the retrieve operation.
|
|
* @returns {Promise<Object>} The data retrieved from the API.
|
|
*/
|
|
async function retrieveRun({ thread_id, run_id, timeout, openai }) {
|
|
const { apiKey, baseURL, httpAgent, organization } = openai;
|
|
let url = `${baseURL}/threads/${thread_id}/runs/${run_id}`;
|
|
|
|
let headers = {
|
|
Authorization: `Bearer ${apiKey}`,
|
|
'OpenAI-Beta': 'assistants=v1',
|
|
};
|
|
|
|
if (organization) {
|
|
headers['OpenAI-Organization'] = organization;
|
|
}
|
|
|
|
/** @type {TAzureConfig | undefined} */
|
|
const azureConfig = openai.req.app.locals[EModelEndpoint.azureOpenAI];
|
|
|
|
if (azureConfig && azureConfig.assistants) {
|
|
delete headers.Authorization;
|
|
headers = { ...headers, ...openai._options.defaultHeaders };
|
|
const queryParams = new URLSearchParams(openai._options.defaultQuery).toString();
|
|
url = `${url}?${queryParams}`;
|
|
}
|
|
|
|
try {
|
|
const axiosConfig = {
|
|
headers: headers,
|
|
timeout: timeout,
|
|
};
|
|
|
|
if (httpAgent) {
|
|
axiosConfig.httpAgent = httpAgent;
|
|
axiosConfig.httpsAgent = httpAgent;
|
|
}
|
|
|
|
const response = await axios.get(url, axiosConfig);
|
|
return response.data;
|
|
} catch (error) {
|
|
const logMessage = '[retrieveRun] Failed to retrieve run data:';
|
|
const timedOutMessage = 'Cannot read properties of undefined (reading \'status\')';
|
|
if (error?.response && error?.response?.status) {
|
|
logger.error(
|
|
`${logMessage} The request was made and the server responded with a status code that falls out of the range of 2xx: ${
|
|
error.message ? error.message : ''
|
|
}`,
|
|
{
|
|
headers: error.response.headers,
|
|
status: error.response.status,
|
|
data: error.response.data,
|
|
},
|
|
);
|
|
} else if (error.request) {
|
|
logger.error(
|
|
`${logMessage} The request was made but no response was received: ${
|
|
error.message ? error.message : ''
|
|
}`,
|
|
{
|
|
request: error.request,
|
|
},
|
|
);
|
|
} else if (error?.message && !error?.message?.includes(timedOutMessage)) {
|
|
logger.error(`${logMessage} Something happened in setting up the request`, {
|
|
message: error.message,
|
|
});
|
|
}
|
|
throw error;
|
|
}
|
|
}
|
|
|
|
// Exported for use by the assistants run-polling services.
module.exports = { retrieveRun };
|