🔧 fix: Assistants API SDK calls to match Updated Arguments (#8818)
* chore: remove comments in agents endpoint error handler
* chore: improve openai sdk typing
* chore: improve typing for azure asst init
* 🔧 fix: Assistants API SDK calls to match Updated Arguments
parent 33c8b87edd · commit 863401bcdf
15 changed files with 41 additions and 40 deletions
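Nearly every call-site change in this commit follows one pattern: for thread-scoped resources (runs, messages, run steps), the resource id moves to the first positional argument and `thread_id` moves into the params object, matching the updated openai SDK method signatures. A minimal before/after sketch, taken directly from the hunks below:

// before: parent thread id first, resource id second
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
// after: resource id first, thread scope in the params object
const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });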
@@ -105,8 +105,6 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
       return res.end();
     }
     await cache.delete(cacheKey);
-    // const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
-    // logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
   } catch (error) {
     logger.error(`[${originPath}] Error cancelling run`, error);
   }
@@ -115,7 +113,6 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
   let run;
   try {
-    // run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
     await recordUsage({
       ...run.usage,
       model: run.model,

@@ -128,18 +125,9 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
   let finalEvent;
   try {
-    // const errorContentPart = {
-    //   text: {
-    //     value:
-    //       error?.message ?? 'There was an error processing your request. Please try again later.',
-    //   },
-    //   type: ContentTypes.ERROR,
-    // };
-
     finalEvent = {
       final: true,
       conversation: await getConvo(req.user.id, conversationId),
-      // runMessages,
     };
   } catch (error) {
     logger.error(`[${originPath}] Error finalizing error process`, error);

@@ -152,7 +152,7 @@ const chatV1 = async (req, res) => {
       return res.end();
     }
     await cache.delete(cacheKey);
-    const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
+    const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
     logger.debug('[/assistants/chat/] Cancelled run:', cancelledRun);
   } catch (error) {
     logger.error('[/assistants/chat/] Error cancelling run', error);
@@ -162,7 +162,7 @@ const chatV1 = async (req, res) => {
   let run;
   try {
-    run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
+    run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
     await recordUsage({
       ...run.usage,
       model: run.model,

@@ -623,7 +623,7 @@ const chatV1 = async (req, res) => {
     if (!response.run.usage) {
       await sleep(3000);
-      completedRun = await openai.beta.threads.runs.retrieve(thread_id, response.run.id);
+      completedRun = await openai.beta.threads.runs.retrieve(response.run.id, { thread_id });
       if (completedRun.usage) {
         await recordUsage({
           ...completedRun.usage,

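The `-623,7` hunk above is the usage-polling fallback: when the finished response lacks usage data, the run is re-fetched once after a short delay. Condensed with the updated signature (the hunk truncates after the usage spread; the `model` field below is shown by analogy with the earlier recordUsage calls):

if (!response.run.usage) {
  await sleep(3000); // brief wait for the API to finalize usage accounting
  completedRun = await openai.beta.threads.runs.retrieve(response.run.id, { thread_id });
  if (completedRun.usage) {
    await recordUsage({
      ...completedRun.usage,
      model: completedRun.model, // remaining recordUsage fields elided in the hunk
    });
  }
}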
@@ -467,7 +467,7 @@ const chatV2 = async (req, res) => {
     if (!response.run.usage) {
       await sleep(3000);
-      completedRun = await openai.beta.threads.runs.retrieve(thread_id, response.run.id);
+      completedRun = await openai.beta.threads.runs.retrieve(response.run.id, { thread_id });
       if (completedRun.usage) {
         await recordUsage({
           ...completedRun.usage,

@@ -108,7 +108,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
       return res.end();
     }
     await cache.delete(cacheKey);
-    const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
+    const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
     logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
   } catch (error) {
     logger.error(`[${originPath}] Error cancelling run`, error);
@@ -118,7 +118,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
   let run;
   try {
-    run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
+    run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
     await recordUsage({
       ...run.usage,
       model: run.model,

@@ -173,6 +173,16 @@ const listAssistantsForAzure = async ({ req, res, version, azureConfig = {}, que
   };
 };

+/**
+ * Initializes the OpenAI client.
+ * @param {object} params - The parameters object.
+ * @param {ServerRequest} params.req - The request object.
+ * @param {ServerResponse} params.res - The response object.
+ * @param {TEndpointOption} params.endpointOption - The endpoint options.
+ * @param {boolean} params.initAppClient - Whether to initialize the app client.
+ * @param {string} params.overrideEndpoint - The endpoint to override.
+ * @returns {Promise<{ openai: OpenAIClient, openAIApiKey: string; client: import('~/app/clients/OpenAIClient') }>} - The initialized OpenAI client.
+ */
 async function getOpenAIClient({ req, res, endpointOption, initAppClient, overrideEndpoint }) {
   let endpoint = overrideEndpoint ?? req.body.endpoint ?? req.query.endpoint;
   const version = await getCurrentVersion(req, endpoint);
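Given the new JSDoc, a hypothetical call site would look like the following (the argument values are illustrative, not taken from this commit):

const { openai, openAIApiKey, client } = await getOpenAIClient({
  req,
  res,
  endpointOption,
  initAppClient: true, // also constructs the app-level OpenAIClient wrapper
  overrideEndpoint: undefined, // falls back to req.body.endpoint ?? req.query.endpoint
});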
@@ -197,7 +197,7 @@ const deleteAssistant = async (req, res) => {
   await validateAuthor({ req, openai });

   const assistant_id = req.params.id;
-  const deletionStatus = await openai.beta.assistants.del(assistant_id);
+  const deletionStatus = await openai.beta.assistants.delete(assistant_id);
   if (deletionStatus?.deleted) {
     await deleteAssistantActions({ req, assistant_id });
   }
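This hunk also picks up the SDK's rename of the abbreviated `.del()` helper to `.delete()`; the deletion flow is otherwise unchanged:

const deletionStatus = await openai.beta.assistants.delete(assistant_id);
if (deletionStatus?.deleted) {
  await deleteAssistantActions({ req, assistant_id }); // clean up linked actions only after a confirmed delete
}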
@@ -365,7 +365,7 @@ const uploadAssistantAvatar = async (req, res) => {
     try {
       await fs.unlink(req.file.path);
       logger.debug('[/:agent_id/avatar] Temp. image upload file deleted');
-    } catch (error) {
+    } catch {
       logger.debug('[/:agent_id/avatar] Temp. image upload file already deleted');
     }
   }
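Dropping the unused `(error)` binding here relies on ES2019's optional catch binding, so there is no behavior change, just one less unused identifier:

try {
  await fs.unlink(req.file.path);
} catch {
  // optional catch binding: no identifier needed when the error object is never read
}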
@@ -47,7 +47,7 @@ async function abortRun(req, res) {
   try {
     await cache.set(cacheKey, 'cancelled', three_minutes);
-    const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
+    const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
     logger.debug('[abortRun] Cancelled run:', cancelledRun);
   } catch (error) {
     logger.error('[abortRun] Error cancelling run', error);

@@ -60,7 +60,7 @@ async function abortRun(req, res) {
   }

   try {
-    const run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
+    const run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
     await recordUsage({
       ...run.usage,
       model: run.model,
@@ -111,7 +111,7 @@ router.delete('/', async (req, res) => {
   /** @type {{ openai: OpenAI }} */
   const { openai } = await assistantClients[endpoint].initializeClient({ req, res });
   try {
-    const response = await openai.beta.threads.del(thread_id);
+    const response = await openai.beta.threads.delete(thread_id);
     logger.debug('Deleted OpenAI thread:', response);
   } catch (error) {
     logger.error('Error deleting OpenAI thread:', error);
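The same `.del()` to `.delete()` rename applies to threads. The call stays wrapped in try/catch, so a failed remote deletion is logged rather than rethrown:

try {
  const response = await openai.beta.threads.delete(thread_id);
  logger.debug('Deleted OpenAI thread:', response);
} catch (error) {
  logger.error('Error deleting OpenAI thread:', error); // logged, not rethrown
}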
@@ -281,7 +281,7 @@ function createInProgressHandler(openai, thread_id, messages) {
     openai.seenCompletedMessages.add(message_id);

-    const message = await openai.beta.threads.messages.retrieve(thread_id, message_id);
+    const message = await openai.beta.threads.messages.retrieve(message_id, { thread_id });
     if (!message?.content?.length) {
       return;
     }

@@ -435,9 +435,11 @@ async function runAssistant({
       };
     });

-  const outputs = await processRequiredActions(openai, actions);
-  const toolRun = await openai.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, outputs);
+  const tool_outputs = await processRequiredActions(openai, actions);
+  const toolRun = await openai.beta.threads.runs.submitToolOutputs(run.id, {
+    thread_id: run.thread_id,
+    tool_outputs,
+  });

   // Recursive call with accumulated steps and messages
   return await runAssistant({
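submitToolOutputs gets the same id-first reshuffle, and the outputs array now travels inside the params object; renaming `outputs` to `tool_outputs` lets it be passed as a shorthand property (each entry is a `{ tool_call_id, output }` pair per the Assistants API):

const tool_outputs = await processRequiredActions(openai, actions);
const toolRun = await openai.beta.threads.runs.submitToolOutputs(run.id, {
  thread_id: run.thread_id,
  tool_outputs, // shorthand for tool_outputs: tool_outputs
});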
@@ -6,7 +6,7 @@ const {
   getUserKeyExpiry,
   checkUserKeyExpiry,
 } = require('~/server/services/UserService');
-const OpenAIClient = require('~/app/clients/OpenAIClient');
+const OAIClient = require('~/app/clients/OpenAIClient');
 const { isUserProvided } = require('~/server/utils');

 const initializeClient = async ({ req, res, endpointOption, version, initAppClient = false }) => {

@@ -79,7 +79,7 @@ const initializeClient = async ({ req, res, endpointOption, version, initAppClie
   openai.res = res;

   if (endpointOption && initAppClient) {
-    const client = new OpenAIClient(apiKey, clientOptions);
+    const client = new OAIClient(apiKey, clientOptions);
     return {
       client,
       openai,
@@ -3,11 +3,11 @@ const { ProxyAgent } = require('undici');
 const { constructAzureURL, isUserProvided, resolveHeaders } = require('@librechat/api');
 const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
 const {
+  checkUserKeyExpiry,
   getUserKeyValues,
   getUserKeyExpiry,
-  checkUserKeyExpiry,
 } = require('~/server/services/UserService');
-const OpenAIClient = require('~/app/clients/OpenAIClient');
+const OAIClient = require('~/app/clients/OpenAIClient');

 class Files {
   constructor(client) {

@@ -184,7 +184,7 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClie
   }

   if (endpointOption && initAppClient) {
-    const client = new OpenAIClient(apiKey, clientOptions);
+    const client = new OAIClient(apiKey, clientOptions);
     return {
       client,
       openai,
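The `OpenAIClient` to `OAIClient` require rename (here and in the v1 initializer above) is presumably to keep the app's client class from shadowing the `OpenAIClient` name now used by the JSDoc typedefs for the SDK client (see getOpenAIClient's @returns); the module path is unchanged:

const OAIClient = require('~/app/clients/OpenAIClient'); // same class, non-clashing local name
const client = new OAIClient(apiKey, clientOptions);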
@@ -91,11 +91,10 @@ class RunManager {
   * @param {boolean} [params.final] - The end of the run polling loop, due to `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired` statuses.
   */
  async fetchRunSteps({ openai, thread_id, run_id, runStatus, final = false }) {
-    // const { data: steps, first_id, last_id, has_more } = await openai.beta.threads.runs.steps.list(thread_id, run_id);
+    // const { data: steps, first_id, last_id, has_more } = await openai.beta.threads.runs.steps.list(run_id, { thread_id });
     const { data: _steps } = await openai.beta.threads.runs.steps.list(
-      thread_id,
       run_id,
-      {},
+      { thread_id },
       {
         timeout: 3000,
         maxRetries: 5,
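Worth noting in this hunk: `runs.steps.list` takes the query/params object second and per-request options third, so `{ thread_id }` fills the slot that was previously an empty `{}` while the timeout/retry options stay where they were:

const { data: _steps } = await openai.beta.threads.runs.steps.list(
  run_id,
  { thread_id }, // params: thread scope (list query fields would also go here)
  { timeout: 3000, maxRetries: 5 }, // per-request options, unchanged by this commit
);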
@@ -573,9 +573,9 @@ class StreamRunManager {
     let toolRun;
     try {
       toolRun = this.openai.beta.threads.runs.submitToolOutputsStream(
-        run.thread_id,
         run.id,
         {
+          thread_id: run.thread_id,
           tool_outputs,
           stream: true,
         },
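The streaming variant mirrors the non-streaming change, with `thread_id` joining `tool_outputs` and `stream` in the single params object:

toolRun = this.openai.beta.threads.runs.submitToolOutputsStream(run.id, {
  thread_id: run.thread_id,
  tool_outputs,
  stream: true,
});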
@@ -179,7 +179,7 @@ async function waitForRun({
  * @return {Promise<RunStep[]>} A promise that resolves to an array of RunStep objects.
  */
 async function _retrieveRunSteps({ openai, thread_id, run_id }) {
-  const runSteps = await openai.beta.threads.runs.steps.list(thread_id, run_id);
+  const runSteps = await openai.beta.threads.runs.steps.list(run_id, { thread_id });
   return runSteps;
 }

@@ -192,7 +192,8 @@ async function addThreadMetadata({ openai, thread_id, messageId, messages }) {
   const promises = [];
   for (const message of messages) {
     promises.push(
-      openai.beta.threads.messages.update(thread_id, message.id, {
+      openai.beta.threads.messages.update(message.id, {
+        thread_id,
         metadata: {
           messageId,
         },

@@ -263,7 +264,8 @@ async function syncMessages({
   }

   modifyPromises.push(
-    openai.beta.threads.messages.update(thread_id, apiMessage.id, {
+    openai.beta.threads.messages.update(apiMessage.id, {
+      thread_id,
       metadata: {
         messageId: dbMessage.messageId,
       },

@@ -413,7 +415,7 @@ async function checkMessageGaps({
 }) {
   const promises = [];
   promises.push(openai.beta.threads.messages.list(thread_id, defaultOrderQuery));
-  promises.push(openai.beta.threads.runs.steps.list(thread_id, run_id));
+  promises.push(openai.beta.threads.runs.steps.list(run_id, { thread_id }));
   /** @type {[{ data: ThreadMessage[] }, { data: RunStep[] }]} */
   const [response, stepsResponse] = await Promise.all(promises);
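For `messages.update`, `thread_id` now sits in the same body object as `metadata`, since the method takes the message id plus a single params object. Note that `messages.list(thread_id, ...)` in the last hunk is untouched: there the thread is the parent resource, so its id stays positional:

await openai.beta.threads.messages.update(message.id, {
  thread_id,
  metadata: { messageId }, // LibreChat's message id, persisted as thread-message metadata
});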