🚀 feat: Assistants Streaming (#2159)

* chore: bump openai to 4.29.0 and npm audit fix

* chore: remove unnecessary stream field from ContentData

* feat: new enum and types for AssistantStreamEvent
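
  A sketch of what such an enum could look like; the string values mirror the OpenAI Assistants streaming event names, but the actual enum in `librechat-data-provider` may differ in shape and coverage:

  ```js
  // Hypothetical enum sketch; values mirror OpenAI Assistants stream events.
  const AssistantStreamEvents = Object.freeze({
    ThreadCreated: 'thread.created',
    ThreadRunCreated: 'thread.run.created',
    ThreadRunQueued: 'thread.run.queued',
    ThreadRunInProgress: 'thread.run.in_progress',
    ThreadRunRequiresAction: 'thread.run.requires_action',
    ThreadRunCompleted: 'thread.run.completed',
    ThreadRunFailed: 'thread.run.failed',
    ThreadRunCancelled: 'thread.run.cancelled',
    ThreadRunStepCreated: 'thread.run.step.created',
    ThreadRunStepInProgress: 'thread.run.step.in_progress',
    ThreadRunStepDelta: 'thread.run.step.delta',
    ThreadRunStepCompleted: 'thread.run.step.completed',
    ThreadMessageCreated: 'thread.message.created',
    ThreadMessageDelta: 'thread.message.delta',
    ThreadMessageCompleted: 'thread.message.completed',
    ErrorEvent: 'error',
  });
  ```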

* refactor(AssistantService): remove stream field and add conversationId to text ContentData
> - return `finalMessage` and `text` on run completion
> - move `processMessages` to services/Threads to avoid circular dependencies with new stream handling
> - refactor(processMessages/retrieveAndProcessFile): add new `client` field to differentiate the new RunClient type (a sketch of this shared surface follows below)
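
  A hedged sketch of the members such a `client` would need to expose, inferred from how it is used in the diff further down (both `OpenAIClient` and the new `StreamRunManager` satisfy it); the real typedef may differ:

  ```js
  /**
   * Illustrative typedef only; inferred from usage, not the actual definition.
   * @typedef {Object} RunClient
   * @property {Object} req - Express request (user, body, app.locals).
   * @property {string} apiKey - API key used for tool calls.
   * @property {Map<string, number>} mappedOrder - tool_call id -> content index.
   * @property {Map<string, Object>} [seenToolCalls] - optional; only some clients track this.
   * @property {(data: Object) => void} addContentData - streams content parts to the UI.
   */
  ```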

* WIP: new assistants stream handling

* chore: store messages in StreamRunManager

* chore: add additional typedefs

* fix: pass req and openai to StreamRunManager

* fix(AssistantService): pass openai as client to `retrieveAndProcessFile`

* WIP: streaming tool i/o, handle in_progress and completed run steps
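
  Roughly, the idea is to route run-step events from the stream to in-progress and completed handlers; a sketch (handler names here are illustrative, not the actual `StreamRunManager` API):

  ```js
  // Illustrative dispatcher for run step events arriving on the stream.
  function dispatchRunStepEvent(manager, event) {
    switch (event.event) {
      case 'thread.run.step.created':
      case 'thread.run.step.in_progress':
        // stream partial tool call input to the client as it is generated
        manager.onRunStepInProgress(event.data);
        break;
      case 'thread.run.step.completed':
        // attach the final tool output and mark the step finished in the UI
        manager.onRunStepCompleted(event.data);
        break;
      default:
        break;
    }
  }
  ```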

* feat(assistants): process required actions with streaming enabled

* chore: condense early return check for useSSE useEffect
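
  Roughly the shape of a condensed guard (a sketch, not the hook's actual code):

  ```js
  import { useEffect } from 'react';

  // Sketch: bail out of the effect early when there is no active submission.
  function useSSE(submission) {
    useEffect(() => {
      if (submission == null || Object.keys(submission).length === 0) {
        return; // nothing to stream yet
      }
      // ...open the SSE connection and register event handlers here...
    }, [submission]);
  }
  ```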

* chore: remove unnecessary comments; only handle completed tool calls when they are not function calls

* feat: add TTL for assistants run abort cacheKey
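
  A minimal sketch, assuming a Keyv-style cache (the key shape and TTL value are illustrative):

  ```js
  const Keyv = require('keyv');

  // In-memory cache of abortable runs; entries expire on their own via TTL,
  // so completed or orphaned runs don't accumulate.
  const abortRunCache = new Keyv({ namespace: 'abortRuns' });
  const ABORT_TTL_MS = 10 * 60 * 1000; // illustrative TTL value

  async function registerAbortableRun(userId, conversationId, runInfo) {
    const cacheKey = `${userId}:${conversationId}`; // illustrative key shape
    await abortRunCache.set(cacheKey, runInfo, ABORT_TTL_MS);
  }
  ```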

* feat: abort stream runs
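
  With that cache in place, aborting is a lookup plus a server-side cancel through the openai v4 SDK; a sketch building on the snippet above:

  ```js
  // Sketch: cancel a streaming run server-side, then drop its cache entry.
  async function abortRun(openai, cacheKey) {
    const runInfo = await abortRunCache.get(cacheKey);
    if (!runInfo) {
      return; // no run in flight, or the TTL already expired the entry
    }
    const { thread_id, run_id } = runInfo;
    await openai.beta.threads.runs.cancel(thread_id, run_id);
    await abortRunCache.delete(cacheKey);
  }
  ```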

* fix(assistants): render streaming cursor

* fix(assistants): hide edit icon as functionality is not supported

* fix(textArea): handle pasting edge cases: first, when onChange events wouldn't fire; second, when the textarea wouldn't resize
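
  One plausible shape for such a handler (a sketch; the real `useTextarea` logic differs):

  ```js
  // Sketch: insert clipboard text manually so state always updates, then
  // re-measure the textarea so it grows to fit the pasted content.
  function handlePaste(e, textarea, setValue) {
    e.preventDefault();
    const pasted = e.clipboardData.getData('text');
    const { selectionStart, selectionEnd, value } = textarea;
    setValue(value.slice(0, selectionStart) + pasted + value.slice(selectionEnd));
    requestAnimationFrame(() => {
      // runs after the DOM updates; auto-resize to the new scrollHeight
      textarea.style.height = 'auto';
      textarea.style.height = `${textarea.scrollHeight}px`;
    });
  }
  ```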

* chore: memoize Conversations

* chore(useTextarea): reverse args order

* fix: load default capabilities when Azure is configured to support assistants but the `assistants` endpoint is not configured

* fix(AssistantSelect): update the form's assistant model when an assistant is selected in the form

* fix(actions): handle Azure's strict validation of function names, fixing CRUD for actions
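
  The gist of the workaround, as a sketch: Azure validates function names strictly (letters, digits, underscores, and dashes only), so dots in an action's domain have to be encoded before submission and decoded when matching back to the action set. The separator token and Azure-detection flag below are illustrative; see `domainParser` in the diff further down for where this is applied.

  ```js
  // Sketch of the encode/decode idea behind domainParser; the actual
  // separator constant and Azure check in ActionService may differ.
  const AZURE_DOMAIN_SEP = '_dot_'; // illustrative stand-in for '.'

  function domainParser(req, domain, inverse = false) {
    if (!req.app.locals.azureOpenAI) {
      return domain; // only Azure needs the strict-name workaround
    }
    return inverse
      ? domain.replaceAll('.', AZURE_DOMAIN_SEP) // encode for Azure function names
      : domain.replaceAll(AZURE_DOMAIN_SEP, '.'); // decode back to the real domain
  }
  ```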

* chore: remove content data debug log as it fires in rapid succession

* feat: improve UX for assistant errors mid-request

* feat: add tool call localizations and replace any domain separators in Azure action names

* refactor(chat): error out tool calls without outputs during handleError

* fix(ToolService): handle domain separators, allowing actions to be used with Azure

* refactor(StreamRunManager): add types and throw an Error if tool submission fails
Danny Avila · 2024-03-21 22:42:25 -04:00 · committed by GitHub
parent ed64c76053 · commit f427ad792a
39 changed files with 1503 additions and 330 deletions


```diff
@@ -10,7 +10,7 @@ const {
   validateAndParseOpenAPISpec,
   actionDelimiter,
 } = require('librechat-data-provider');
-const { loadActionSets, createActionTool } = require('./ActionService');
+const { loadActionSets, createActionTool, domainParser } = require('./ActionService');
 const { processFileURL } = require('~/server/services/Files/process');
 const { loadTools } = require('~/app/clients/tools/util');
 const { redactMessage } = require('~/config/parsers');
@@ -112,26 +112,26 @@ function formatToOpenAIAssistantTool(tool) {
 /**
  * Processes return required actions from run.
  *
- * @param {OpenAIClient} openai - OpenAI Client.
+ * @param {OpenAIClient} client - OpenAI or StreamRunManager Client.
  * @param {RequiredAction[]} requiredActions - The required actions to submit outputs for.
  * @returns {Promise<ToolOutputs>} The outputs of the tools.
  *
  */
-async function processRequiredActions(openai, requiredActions) {
+async function processRequiredActions(client, requiredActions) {
   logger.debug(
-    `[required actions] user: ${openai.req.user.id} | thread_id: ${requiredActions[0].thread_id} | run_id: ${requiredActions[0].run_id}`,
+    `[required actions] user: ${client.req.user.id} | thread_id: ${requiredActions[0].thread_id} | run_id: ${requiredActions[0].run_id}`,
     requiredActions,
   );

   const tools = requiredActions.map((action) => action.tool);

   const loadedTools = await loadTools({
-    user: openai.req.user.id,
-    model: openai.req.body.model ?? 'gpt-3.5-turbo-1106',
+    user: client.req.user.id,
+    model: client.req.body.model ?? 'gpt-3.5-turbo-1106',
     tools,
     functions: true,
     options: {
       processFileURL,
-      openAIApiKey: openai.apiKey,
-      fileStrategy: openai.req.app.locals.fileStrategy,
+      openAIApiKey: client.apiKey,
+      fileStrategy: client.req.app.locals.fileStrategy,
       returnMetadata: true,
     },
     skipSpecs: true,
@@ -170,14 +170,14 @@ async function processRequiredActions(openai, requiredActions) {
       action: isActionTool,
     };
-    const toolCallIndex = openai.mappedOrder.get(toolCall.id);
+    const toolCallIndex = client.mappedOrder.get(toolCall.id);

     if (imageGenTools.has(currentAction.tool)) {
       const imageOutput = output;
       toolCall.function.output = `${currentAction.tool} displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.`;

       // Streams the "Finished" state of the tool call in the UI
-      openai.addContentData({
+      client.addContentData({
         [ContentTypes.TOOL_CALL]: toolCall,
         index: toolCallIndex,
         type: ContentTypes.TOOL_CALL,
@@ -198,10 +198,10 @@ async function processRequiredActions(openai, requiredActions) {
         index: toolCallIndex,
       };

-      openai.addContentData(image_file);
+      client.addContentData(image_file);

       // Update the stored tool call
-      openai.seenToolCalls.set(toolCall.id, toolCall);
+      client.seenToolCalls && client.seenToolCalls.set(toolCall.id, toolCall);

       return {
         tool_call_id: currentAction.toolCallId,
@@ -209,8 +209,8 @@ async function processRequiredActions(openai, requiredActions) {
       };
     }

-    openai.seenToolCalls.set(toolCall.id, toolCall);
-    openai.addContentData({
+    client.seenToolCalls && client.seenToolCalls.set(toolCall.id, toolCall);
+    client.addContentData({
       [ContentTypes.TOOL_CALL]: toolCall,
       index: toolCallIndex,
       type: ContentTypes.TOOL_CALL,
@@ -230,13 +230,13 @@ async function processRequiredActions(openai, requiredActions) {
       if (!actionSets.length) {
         actionSets =
           (await loadActionSets({
-            user: openai.req.user.id,
-            assistant_id: openai.req.body.assistant_id,
+            user: client.req.user.id,
+            assistant_id: client.req.body.assistant_id,
           })) ?? [];
       }

       const actionSet = actionSets.find((action) =>
-        currentAction.tool.includes(action.metadata.domain),
+        currentAction.tool.includes(domainParser(client.req, action.metadata.domain, true)),
       );

       if (!actionSet) {
@@ -251,7 +251,7 @@ async function processRequiredActions(openai, requiredActions) {
       const validationResult = validateAndParseOpenAPISpec(actionSet.metadata.raw_spec);
       if (!validationResult.spec) {
         throw new Error(
-          `Invalid spec: user: ${openai.req.user.id} | thread_id: ${requiredActions[0].thread_id} | run_id: ${requiredActions[0].run_id}`,
+          `Invalid spec: user: ${client.req.user.id} | thread_id: ${requiredActions[0].thread_id} | run_id: ${requiredActions[0].run_id}`,
         );
       }
       const { requestBuilders } = openapiToFunction(validationResult.spec);
@@ -260,7 +260,7 @@ async function processRequiredActions(openai, requiredActions) {
       }
       const functionName = currentAction.tool.replace(
-        `${actionDelimiter}${actionSet.metadata.domain}`,
+        `${actionDelimiter}${domainParser(client.req, actionSet.metadata.domain, true)}`,
         '',
       );
       const requestBuilder = builders[functionName];
```