Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 08:50:15 +01:00)
🚀 feat: Assistants Streaming (#2159)
* chore: bump openai to 4.29.0 and npm audit fix
* chore: remove unnecessary stream field from ContentData
* feat: new enum and types for AssistantStreamEvent
* refactor(AssistantService): remove stream field and add conversationId to text ContentData
  - return `finalMessage` and `text` on run completion
  - move `processMessages` to services/Threads to avoid circular dependencies with new stream handling
  - refactor(processMessages/retrieveAndProcessFile): add new `client` field to differentiate the new RunClient type (see the sketch after this list)
* WIP: new assistants stream handling
* chore: store messages in StreamRunManager
* chore: add additional typedefs
* fix: pass req and openai to StreamRunManager
* fix(AssistantService): pass openai as client to `retrieveAndProcessFile`
* WIP: streaming tool I/O, handle in_progress and completed run steps
* feat(assistants): process required actions with streaming enabled
* chore: condense early return check for useSSE useEffect
* chore: remove unnecessary comments and only handle completed tool calls when not a function
* feat: add TTL for assistants run abort cacheKey
* feat: abort stream runs
* fix(assistants): render streaming cursor
* fix(assistants): hide edit icon as the functionality is not supported
* fix(textArea): handle pasting edge cases: first, when onChange events wouldn't fire; second, when the textarea wouldn't resize
* chore: memoize Conversations
* chore(useTextarea): reverse args order
* fix: load default capabilities when Azure is configured to support assistants but the `assistants` endpoint is not configured
* fix(AssistantSelect): update the form's assistant model on assistant form select
* fix(actions): handle Azure strict validation for function names to fix CRUD for actions
* chore: remove content data debug log as it fires in rapid succession
* feat: improve UX for assistant errors mid-request
* feat: add tool call localizations and replace any domain separators from Azure action names
* refactor(chat): error out tool calls without outputs during handleError
* fix(ToolService): handle domain separators, allowing Azure use of actions
* refactor(StreamRunManager): types and throw Error if tool submission fails
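The new `client` field is the piece that ties these changes together: callers pass whichever object owns the request and attached-file state for the current run. Below is a minimal sketch of a call site, not LibreChat's actual code; it assumes `retrieveAndProcessFile`, `openai`, and `streamRunManager` are already in scope, and the wrapper `processRunFile` and its `streaming` flag are hypothetical. The parameter names themselves come from the diff further down.

// Minimal sketch, assuming `retrieveAndProcessFile`, `openai`, and
// `streamRunManager` are in scope; `processRunFile` and `streaming` are
// hypothetical names used only for illustration.
async function processRunFile({ openai, streamRunManager, file_id, streaming }) {
  // The diff below reads `client.req` and `client.attachedFileIds`, so the
  // RunClient is whichever object carries those: the StreamRunManager when
  // streaming, otherwise the OpenAI client itself.
  const client = streaming ? streamRunManager : openai;

  return retrieveAndProcessFile({
    openai, // still the OpenAI SDK client, used for the actual file retrieval
    client, // RunClient: either `openai` or `streamRunManager`
    file_id,
    basename: 'image.jpg', // only meaningful for image files
  });
}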
This commit is contained in:
parent ed64c76053
commit f427ad792a

39 changed files with 1503 additions and 330 deletions
@@ -338,19 +338,26 @@ const processFileUpload = async ({ req, res, file, metadata }) => {
  * Retrieves and processes an OpenAI file based on its type.
  *
  * @param {Object} params - The params passed to the function.
- * @param {OpenAIClient} params.openai - The params passed to the function.
+ * @param {OpenAIClient} params.openai - The OpenAI client instance.
+ * @param {RunClient} params.client - The LibreChat client instance: either refers to `openai` or `streamRunManager`.
  * @param {string} params.file_id - The ID of the file to retrieve.
  * @param {string} params.basename - The basename of the file (if image); e.g., 'image.jpg'.
  * @param {boolean} [params.unknownType] - Whether the file type is unknown.
  * @returns {Promise<{file_id: string, filepath: string, source: string, bytes?: number, width?: number, height?: number} | null>}
  *  - Returns null if `file_id` is not defined; else, the file metadata if successfully retrieved and processed.
  */
-async function retrieveAndProcessFile({ openai, file_id, basename: _basename, unknownType }) {
+async function retrieveAndProcessFile({
+  openai,
+  client,
+  file_id,
+  basename: _basename,
+  unknownType,
+}) {
   if (!file_id) {
     return null;
   }

-  if (openai.attachedFileIds?.has(file_id)) {
+  if (client.attachedFileIds?.has(file_id)) {
     return {
       file_id,
       // filepath: TODO: local source filepath?,

@@ -416,7 +423,7 @@ async function retrieveAndProcessFile({ openai, file_id, basename: _basename, unknownType }) {
    */
   const processAsImage = async (dataBuffer, fileExt) => {
     // Logic to process image files, convert to webp, etc.
-    const _file = await convertToWebP(openai.req, dataBuffer, 'high', `${file_id}${fileExt}`);
+    const _file = await convertToWebP(client.req, dataBuffer, 'high', `${file_id}${fileExt}`);
     const file = {
       ..._file,
       type: 'image/webp',
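Nothing in the hunks above requires `client` to be a full OpenAI client: the function only touches `client.req` (forwarded to `convertToWebP`) and `client.attachedFileIds` (to short-circuit files already attached to the run). The typedef below is illustrative only, inferred from those two accesses; it is not the typedef actually added in this commit.

/**
 * Illustrative sketch of the contract implied by the hunks above;
 * not the RunClient typedef added in this commit.
 *
 * @typedef {Object} RunClientLike
 * @property {Object} req - The incoming request object, forwarded to convertToWebP.
 * @property {Set<string>} [attachedFileIds] - IDs of files already attached to the run.
 */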