feat: Vision Support + New UI (#1203)

* feat: add timer duration to showToast, show toast for preset selection

* refactor: replace old /chat/ route with /c/. e2e tests will fail here

* refactor: move typedefs to root of /api/ and add a few to assistant types in TS

* refactor: reorganize data-provider imports, fix dependency cycle, strategize new plan to separate react dependent packages

* feat: add dataService for uploading images

* feat(data-provider): add mutation keys

* feat: file resizing and upload

* WIP: initial API image handling

* fix: catch JSON.parse of localStorage tools

* chore: experimental: use module-alias for absolute imports

* refactor: change temp_file_id strategy

* fix: updating files state by using Map and defining react query callbacks in a way that keeps them during component unmount, initial delete handling

* feat: properly handle file deletion

* refactor: unexpose complete filepath and resize from server for higher fidelity

* fix: make sure resized height, width is saved, catch bad requests

* refactor: use absolute imports

* fix: prevent setOptions from being called more than once for OpenAIClient, made note to fix for PluginsClient

* refactor: import supportsFiles and models vars from schemas

* fix: correctly replace temp file id

* refactor(BaseClient): use absolute imports, pass message 'opts' to buildMessages method, count tokens for nested objects/arrays

* feat: add validateVisionModel to determine if model has vision capabilities

* chore(checkBalance): update jsdoc

* feat: formatVisionMessage: change message content format dependent on role and image_urls passed

* refactor: add usage to File schema, make create and updateFile, correctly set and remove TTL

* feat: working vision support
TODO: file size, type, amount validations, making sure they are styled right, and making sure you can add images from the clipboard/dragging

* feat: clipboard support for uploading images

* feat: handle files on drop to screen, refactor top-level view code to Presentation component so the useDragHelpers hook has ChatContext

* fix(Images): replace uploaded images in place

* feat: add filepath validation to protect sensitive files

* fix: ensure correct file_ids are pushed and not the Map key values

* fix(ToastContext): type issue

* feat: add basic file validation

* fix(useDragHelpers): correct context issue with `files` dependency

* refactor: consolidate setErrors logic to setError

* feat: add dialog Image overlay on image click

* fix: close endpoints menu on click

* chore: set detail to auto, make note for configuration

* fix: react warning (button desc. of button)

* refactor: optimize filepath handling, pass file_ids to images for easier re-use

* refactor: optimize image file handling, allow re-using files in regen, pass more file metadata in messages

* feat: lazy loading images including use of upload preview

* fix: SetKeyDialog closing, stopPropagation on Dialog content click

* style(EndpointMenuItem): tighten up the style, fix dark theme showing in light mode, make menu more UX-friendly

* style: change maxheight of all settings textareas to 138px from 300px

* style: better styling for textarea and enclosing buttons

* refactor(PresetItems): swap back edit and delete icons

* feat: make textarea placeholder dynamic to endpoint

* style: show user hover buttons only on hover when message is streaming

* fix: ordered list not going past 9, fix css

* feat: add User/AI labels; style: hide loading spinner

* feat: add back custom footer, change original footer text

* feat: dynamic landing icons based on endpoint

* chore: comment out assistants route

* fix: autoScroll to newest on /c/ view

* fix: Export Conversation on new UI

* style: match message style of official more closely

* ci: fix api jest unit tests, comment out e2e tests for now as they will fail until addressed

* feat: more file validation and use blob in preview field, not filepath, to fix temp deletion

* feat: filefilter for multer

* feat: better AI labels based on custom name, model, and endpoint instead of `ChatGPT`

Danny Avila, 2023-11-21 20:12:48 -05:00 (committed by GitHub)
parent 345f4b2e85
commit 317cdd3f77
113 changed files with 2680 additions and 675 deletions


@ -1,8 +1,8 @@
const crypto = require('crypto');
const TextStream = require('./TextStream');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('../../models');
const { addSpaceIfNeeded, isEnabled } = require('../../server/utils');
const checkBalance = require('../../models/checkBalance');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
class BaseClient {
constructor(apiKey, options = {}) {
@ -62,7 +62,7 @@ class BaseClient {
}
async setMessageOptions(opts = {}) {
if (opts && typeof opts === 'object') {
if (opts && opts.replaceOptions) {
this.setOptions(opts);
}
@ -417,6 +417,7 @@ class BaseClient {
// this only matters when buildMessages is utilizing the parentMessageId, and may vary on implementation
isEdited ? head : userMessage.messageId,
this.getBuildMessagesOptions(opts),
opts,
);
if (tokenCountMap) {
@ -636,14 +637,27 @@ class BaseClient {
tokensPerName = -1;
}
const processValue = (value) => {
if (typeof value === 'object' && value !== null) {
for (let [nestedKey, nestedValue] of Object.entries(value)) {
if (nestedKey === 'image_url' || nestedValue === 'image_url') {
continue;
}
processValue(nestedValue);
}
} else {
numTokens += this.getTokenCount(value);
}
};
let numTokens = tokensPerMessage;
for (let [key, value] of Object.entries(message)) {
numTokens += this.getTokenCount(value);
processValue(value);
if (key === 'name') {
numTokens += tokensPerName;
}
}
return numTokens;
}


@ -1,12 +1,14 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { getModelMaxTokens, genAzureChatCompletion, extractBaseURL } = require('../../utils');
const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
const { getModelMaxTokens, genAzureChatCompletion, extractBaseURL } = require('~/utils');
const { truncateText, formatMessage, CUT_OFF_PROMPT } = require('./prompts');
const spendTokens = require('../../models/spendTokens');
const { getResponseSender, EModelEndpoint } = require('~/server/routes/endpoints/schemas');
const { handleOpenAIErrors } = require('./tools/util');
const { isEnabled } = require('../../server/utils');
const spendTokens = require('~/models/spendTokens');
const { createLLM, RunManager } = require('./llm');
const { isEnabled } = require('~/server/utils');
const ChatGPTClient = require('./ChatGPTClient');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
@ -24,7 +26,6 @@ class OpenAIClient extends BaseClient {
this.ChatGPTClient = new ChatGPTClient();
this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this);
this.getCompletion = this.ChatGPTClient.getCompletion.bind(this);
this.sender = options.sender ?? 'ChatGPT';
this.contextStrategy = options.contextStrategy
? options.contextStrategy.toLowerCase()
: 'discard';
@ -33,6 +34,7 @@ class OpenAIClient extends BaseClient {
this.setOptions(options);
}
// TODO: PluginsClient calls this 3x, unneeded
setOptions(options) {
if (this.options && !this.options.replaceOptions) {
this.options.modelOptions = {
@ -53,6 +55,7 @@ class OpenAIClient extends BaseClient {
}
const modelOptions = this.options.modelOptions || {};
if (!this.modelOptions) {
this.modelOptions = {
...modelOptions,
@ -72,6 +75,14 @@ class OpenAIClient extends BaseClient {
};
}
if (this.options.attachments && !validateVisionModel(this.modelOptions.model)) {
this.modelOptions.model = 'gpt-4-vision-preview';
}
if (validateVisionModel(this.modelOptions.model)) {
delete this.modelOptions.stop;
}
const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
if (OPENROUTER_API_KEY && !this.azure) {
this.apiKey = OPENROUTER_API_KEY;
@ -127,12 +138,20 @@ class OpenAIClient extends BaseClient {
);
}
this.sender =
this.options.sender ??
getResponseSender({
model: this.modelOptions.model,
endpoint: EModelEndpoint.openAI,
chatGptLabel: this.options.chatGptLabel,
});
this.userLabel = this.options.userLabel || 'User';
this.chatGptLabel = this.options.chatGptLabel || 'Assistant';
this.setupTokens();
if (!this.modelOptions.stop) {
if (!this.modelOptions.stop && !validateVisionModel(this.modelOptions.model)) {
const stopTokens = [this.startToken];
if (this.endToken && this.endToken !== this.startToken) {
stopTokens.push(this.endToken);
@ -284,6 +303,7 @@ class OpenAIClient extends BaseClient {
messages,
parentMessageId,
{ isChatCompletion = false, promptPrefix = null },
opts,
) {
let orderedMessages = this.constructor.getMessagesForConversation({
messages,
@ -316,6 +336,17 @@ class OpenAIClient extends BaseClient {
}
}
if (this.options.attachments) {
const attachments = await this.options.attachments;
const { files, image_urls } = await encodeAndFormat(
this.options.req,
attachments.filter((file) => file.type.includes('image')),
);
orderedMessages[orderedMessages.length - 1].image_urls = image_urls;
this.options.attachments = files;
}
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = formatMessage({
message,
@ -350,8 +381,8 @@ class OpenAIClient extends BaseClient {
result.tokenCountMap = tokenCountMap;
}
if (promptTokens >= 0 && typeof this.options.getReqData === 'function') {
this.options.getReqData({ promptTokens });
if (promptTokens >= 0 && typeof opts?.getReqData === 'function') {
opts.getReqData({ promptTokens });
}
return result;
@ -730,6 +761,10 @@ ${convo}
opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
}
if (validateVisionModel(modelOptions.model)) {
modelOptions.max_tokens = 4000;
}
let chatCompletion;
const openai = new OpenAI({
apiKey: this.apiKey,


@ -1,5 +1,21 @@
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
/**
* Formats a message to OpenAI Vision API payload format.
*
* @param {Object} params - The parameters for formatting.
* @param {Object} params.message - The message object to format.
* @param {string} [params.message.role] - The role of the message sender (must be 'user').
* @param {string} [params.message.content] - The text content of the message.
* @param {Array<string>} [params.image_urls] - The image_urls to attach to the message.
* @returns {(Object)} - The formatted message.
*/
const formatVisionMessage = ({ message, image_urls }) => {
message.content = [{ type: 'text', text: message.content }, ...image_urls];
return message;
};
/**
* Formats a message to OpenAI payload format based on the provided options.
*
@ -10,6 +26,7 @@ const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
* @param {string} [params.message.sender] - The sender of the message.
* @param {string} [params.message.text] - The text content of the message.
* @param {string} [params.message.content] - The content of the message.
* @param {Array<string>} [params.message.image_urls] - The image_urls attached to the message for Vision API.
* @param {string} [params.userName] - The name of the user.
* @param {string} [params.assistantName] - The name of the assistant.
* @param {boolean} [params.langChain=false] - Whether to return a LangChain message object.
@ -32,6 +49,11 @@ const formatMessage = ({ message, userName, assistantName, langChain = false })
content,
};
const { image_urls } = message;
if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
return formatVisionMessage({ message: formattedMessage, image_urls: message.image_urls });
}
if (_name) {
formattedMessage.name = _name;
}
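
For reference, a minimal sketch of what the vision path above produces for a user message with one attached image; the text and base64 data are placeholders, and the image_urls entry mirrors the shape built by encodeAndFormat later in this diff:

const visionMessage = formatVisionMessage({
  message: { role: 'user', content: 'What is in this image?' },
  image_urls: [{ type: 'image_url', image_url: { url: 'data:image/webp;base64,<...>', detail: 'auto' } }],
});
// => { role: 'user', content: [
//      { type: 'text', text: 'What is in this image?' },
//      { type: 'image_url', image_url: { url: 'data:image/webp;base64,<...>', detail: 'auto' } },
//    ] }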


@ -529,9 +529,9 @@ describe('BaseClient', () => {
);
});
test('setOptions is called with the correct arguments', async () => {
test('setOptions is called with the correct arguments only when replaceOptions is set to true', async () => {
TestClient.setOptions = jest.fn();
const opts = { conversationId: '123', parentMessageId: '456' };
const opts = { conversationId: '123', parentMessageId: '456', replaceOptions: true };
await TestClient.sendMessage('Hello, world!', opts);
expect(TestClient.setOptions).toHaveBeenCalledWith(opts);
TestClient.setOptions.mockClear();

api/config.js (new file, 6 lines)

@ -0,0 +1,6 @@
const path = require('path');
module.exports = {
publicPath: path.resolve(__dirname, '..', 'client', 'public'),
imageOutput: path.resolve(__dirname, '..', 'client', 'public', 'images'),
};


@ -4,4 +4,7 @@ module.exports = {
roots: ['<rootDir>'],
coverageDirectory: 'coverage',
setupFiles: ['./test/jestSetup.js', './test/__mocks__/KeyvMongo.js'],
moduleNameMapper: {
'~/(.*)': '<rootDir>/$1',
},
};
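
This Jest mapping pairs with the `_moduleAliases`/`imports` entries in the package.json hunk and the `module-alias` registration in the server entrypoint later in this diff, so `~/` resolves to the api root both at runtime and under test. A hedged sketch of the effect (the equivalent relative path depends on the calling file's depth):

// With '~' registered as the api/ root, both of these resolve to the same module:
const { saveMessage } = require('~/models');
// const { saveMessage } = require('../../models'); // pre-refactor form from a file two levels deep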

api/models/File.js (new file, 96 lines)

@ -0,0 +1,96 @@
const mongoose = require('mongoose');
const fileSchema = require('./schema/fileSchema');
const File = mongoose.model('File', fileSchema);
/**
* Finds a file by its file_id with additional query options.
* @param {string} file_id - The unique identifier of the file.
* @param {object} options - Query options for filtering, projection, etc.
* @returns {Promise<MongoFile>} A promise that resolves to the file document or null.
*/
const findFileById = async (file_id, options = {}) => {
return await File.findOne({ file_id, ...options }).lean();
};
/**
* Retrieves files matching a given filter.
* @param {Object} filter - The filter criteria to apply.
* @returns {Promise<Array<MongoFile>>} A promise that resolves to an array of file documents.
*/
const getFiles = async (filter) => {
return await File.find(filter).lean();
};
/**
* Creates a new file with a TTL of 1 hour.
* @param {Object} data - The file data to be created, must contain file_id.
* @returns {Promise<MongoFile>} A promise that resolves to the created file document.
*/
const createFile = async (data) => {
const fileData = {
...data,
expiresAt: new Date(Date.now() + 3600 * 1000),
};
return await File.findOneAndUpdate({ file_id: data.file_id }, fileData, {
new: true,
upsert: true,
}).lean();
};
/**
* Updates a file identified by file_id with new data and removes the TTL.
* @param {Object} data - The data to update, must contain file_id.
* @returns {Promise<MongoFile>} A promise that resolves to the updated file document.
*/
const updateFile = async (data) => {
const { file_id, ...update } = data;
const updateOperation = {
$set: update,
$unset: { expiresAt: '' }, // Remove the expiresAt field to prevent TTL
};
return await File.findOneAndUpdate({ file_id }, updateOperation, { new: true }).lean();
};
/**
* Increments the usage of a file identified by file_id.
* @param {Object} data - The data to update, must contain file_id and the increment value for usage.
* @returns {Promise<MongoFile>} A promise that resolves to the updated file document.
*/
const updateFileUsage = async (data) => {
const { file_id, inc = 1 } = data;
const updateOperation = {
$inc: { usage: inc },
$unset: { expiresAt: '' },
};
return await File.findOneAndUpdate({ file_id }, updateOperation, { new: true }).lean();
};
/**
* Deletes a file identified by file_id.
* @param {string} file_id - The unique identifier of the file to delete.
* @returns {Promise<MongoFile>} A promise that resolves to the deleted file document or null.
*/
const deleteFile = async (file_id) => {
return await File.findOneAndDelete({ file_id }).lean();
};
/**
* Deletes multiple files identified by an array of file_ids.
* @param {Array<string>} file_ids - The unique identifiers of the files to delete.
* @returns {Promise<Object>} A promise that resolves to the result of the deletion operation.
*/
const deleteFiles = async (file_ids) => {
return await File.deleteMany({ file_id: { $in: file_ids } });
};
module.exports = {
File,
findFileById,
getFiles,
createFile,
updateFile,
updateFileUsage,
deleteFile,
deleteFiles,
};
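
A brief sketch of the lifecycle these helpers implement, with placeholder values for the id and metadata: the record is created with a one-hour expiresAt at upload time, and the TTL is cleared the first time the file is actually used:

const file_id = 'a1b2c3d4-0000-4000-8000-000000000000'; // placeholder UUID; user/bytes/filepath/filename assumed in scope
await createFile({ file_id, user, bytes, filepath, filename, type: 'image/webp' }); // sets expiresAt = now + 1h
await updateFileUsage({ file_id }); // increments usage and removes expiresAt, so the record no longer expires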


@ -18,6 +18,7 @@ module.exports = {
error,
unfinished,
cancelled,
files,
isEdited = false,
finish_reason = null,
tokenCount = null,
@ -30,29 +31,31 @@ module.exports = {
if (!validConvoId.success) {
return;
}
const update = {
user,
messageId: newMessageId || messageId,
conversationId,
parentMessageId,
sender,
text,
isCreatedByUser,
isEdited,
finish_reason,
error,
unfinished,
cancelled,
tokenCount,
plugin,
plugins,
model,
};
if (files) {
update.files = files;
}
// may also need to update the conversation here
await Message.findOneAndUpdate(
{ messageId },
{
user,
messageId: newMessageId || messageId,
conversationId,
parentMessageId,
sender,
text,
isCreatedByUser,
isEdited,
finish_reason,
error,
unfinished,
cancelled,
tokenCount,
plugin,
plugins,
model,
},
{ upsert: true, new: true },
);
await Message.findOneAndUpdate({ messageId }, update, { upsert: true, new: true });
return {
messageId,


@ -7,8 +7,8 @@ const { logViolation } = require('../cache');
* @async
* @function
* @param {Object} params - The function parameters.
* @param {Object} params.req - The Express request object.
* @param {Object} params.res - The Express response object.
* @param {Express.Request} params.req - The Express request object.
* @param {Express.Response} params.res - The Express response object.
* @param {Object} params.txData - The transaction data.
* @param {string} params.txData.user - The user ID or identifier.
* @param {('prompt' | 'completion')} params.txData.tokenType - The type of token.


@ -7,6 +7,15 @@ const {
} = require('./Message');
const { getConvoTitle, getConvo, saveConvo, deleteConvos } = require('./Conversation');
const { getPreset, getPresets, savePreset, deletePresets } = require('./Preset');
const {
findFileById,
createFile,
updateFile,
deleteFile,
deleteFiles,
getFiles,
updateFileUsage,
} = require('./File');
const Key = require('./Key');
const User = require('./User');
const Session = require('./Session');
@ -35,4 +44,12 @@ module.exports = {
getPresets,
savePreset,
deletePresets,
findFileById,
createFile,
updateFile,
deleteFile,
deleteFiles,
getFiles,
updateFileUsage,
};


@ -0,0 +1,79 @@
const mongoose = require('mongoose');
/**
* @typedef {Object} MongoFile
* @property {mongoose.Schema.Types.ObjectId} user - User ID
* @property {string} [conversationId] - Optional conversation ID
* @property {string} file_id - File identifier
* @property {string} [temp_file_id] - Temporary File identifier
* @property {number} bytes - Size of the file in bytes
* @property {string} filename - Name of the file
* @property {string} filepath - Location of the file
* @property {'file'} object - Type of object, always 'file'
* @property {string} type - Type of file
* @property {number} usage - Number of uses of the file
* @property {number} [width] - Optional width of the file
* @property {number} [height] - Optional height of the file
* @property {Date} [expiresAt] - Optional expiration date of the file (used for the TTL index)
*/
const fileSchema = mongoose.Schema(
{
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
index: true,
required: true,
},
conversationId: {
type: String,
ref: 'Conversation',
index: true,
},
file_id: {
type: String,
// required: true,
index: true,
},
temp_file_id: {
type: String,
// required: true,
},
bytes: {
type: Number,
required: true,
},
usage: {
type: Number,
required: true,
default: 0,
},
filename: {
type: String,
required: true,
},
filepath: {
type: String,
required: true,
},
object: {
type: String,
required: true,
default: 'file',
},
type: {
type: String,
required: true,
},
width: Number,
height: Number,
expiresAt: {
type: Date,
expires: 3600,
},
},
{
timestamps: true,
},
);
module.exports = fileSchema;


@ -85,6 +85,7 @@ const messageSchema = mongoose.Schema(
select: false,
default: false,
},
files: [{ type: mongoose.Schema.Types.Mixed }],
plugin: {
latest: {
type: String,


@ -16,6 +16,12 @@
"keywords": [],
"author": "",
"license": "ISC",
"_moduleAliases": {
"~": "."
},
"imports": {
"~/*": "./*"
},
"bugs": {
"url": "https://github.com/danny-avila/LibreChat/issues"
},
@ -48,7 +54,9 @@
"langchain": "^0.0.186",
"lodash": "^4.17.21",
"meilisearch": "^0.33.0",
"module-alias": "^2.2.3",
"mongoose": "^7.1.1",
"multer": "^1.4.5-lts.1",
"nodejs-gpt": "^1.37.4",
"nodemailer": "^6.9.4",
"openai": "^4.16.1",


@ -8,7 +8,7 @@ const {
userProvidedOpenAI,
palmKey,
openAI,
assistant,
// assistant,
azureOpenAI,
bingAI,
chatGPTBrowser,
@ -57,7 +57,7 @@ async function endpointController(req, res) {
res.send(
JSON.stringify({
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.assistant]: assistant,
// [EModelEndpoint.assistant]: assistant,
[EModelEndpoint.azureOpenAI]: azureOpenAI,
[EModelEndpoint.google]: google,
[EModelEndpoint.bingAI]: bingAI,


@ -1,12 +1,15 @@
const express = require('express');
const mongoSanitize = require('express-mongo-sanitize');
const { connectDb, indexSync } = require('../lib/db');
const path = require('path');
require('module-alias')({ base: path.resolve(__dirname, '..') });
const cors = require('cors');
const routes = require('./routes');
const errorController = require('./controllers/ErrorController');
const express = require('express');
const passport = require('passport');
const mongoSanitize = require('express-mongo-sanitize');
const errorController = require('./controllers/ErrorController');
const configureSocialLogins = require('./socialLogins');
const { connectDb, indexSync } = require('../lib/db');
const config = require('../config');
const routes = require('./routes');
const { PORT, HOST, ALLOW_SOCIAL_LOGIN } = process.env ?? {};
const port = Number(PORT) || 3080;
@ -20,6 +23,7 @@ const startServer = async () => {
await indexSync();
const app = express();
app.locals.config = config;
// Middleware
app.use(errorController);
@ -65,6 +69,7 @@ const startServer = async () => {
app.use('/api/plugins', routes.plugins);
app.use('/api/config', routes.config);
app.use('/api/assistants', routes.assistants);
app.use('/api/files', routes.files);
// Static files
app.get('/*', function (req, res) {


@ -1,19 +1,24 @@
const openAI = require('../routes/endpoints/openAI');
const gptPlugins = require('../routes/endpoints/gptPlugins');
const anthropic = require('../routes/endpoints/anthropic');
const { parseConvo } = require('../routes/endpoints/schemas');
const openAI = require('~/server/routes/endpoints/openAI');
const gptPlugins = require('~/server/routes/endpoints/gptPlugins');
const anthropic = require('~/server/routes/endpoints/anthropic');
const { parseConvo, EModelEndpoint } = require('~/server/routes/endpoints/schemas');
const { processFiles } = require('~/server/services/Files');
const buildFunction = {
openAI: openAI.buildOptions,
azureOpenAI: openAI.buildOptions,
gptPlugins: gptPlugins.buildOptions,
anthropic: anthropic.buildOptions,
[EModelEndpoint.openAI]: openAI.buildOptions,
[EModelEndpoint.azureOpenAI]: openAI.buildOptions,
[EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
[EModelEndpoint.anthropic]: anthropic.buildOptions,
};
function buildEndpointOption(req, res, next) {
const { endpoint } = req.body;
const parsedBody = parseConvo(endpoint, req.body);
req.body.endpointOption = buildFunction[endpoint](endpoint, parsedBody);
if (req.body.files) {
// hold the promise
req.body.endpointOption.attachments = processFiles(req.body.files);
}
next();
}


@ -1,9 +1,9 @@
const express = require('express');
const router = express.Router();
const { getResponseSender } = require('../endpoints/schemas');
const { sendMessage, createOnProgress } = require('../../utils');
const { addTitle, initializeClient } = require('../endpoints/openAI');
const { saveMessage, getConvoTitle, getConvo } = require('../../../models');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { getResponseSender } = require('~/server/routes/endpoints/schemas');
const { addTitle, initializeClient } = require('~/server/routes/endpoints/openAI');
const {
handleAbort,
createAbortController,
@ -11,7 +11,7 @@ const {
setHeaders,
validateEndpoint,
buildEndpointOption,
} = require('../../middleware');
} = require('~/server/middleware');
router.post('/abort', handleAbort());
@ -93,8 +93,7 @@ router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req,
try {
const { client } = await initializeClient({ req, res, endpointOption });
let response = await client.sendMessage(text, {
const messageOptions = {
user,
parentMessageId,
conversationId,
@ -108,7 +107,9 @@ router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req,
text,
parentMessageId: overrideParentMessageId || userMessageId,
}),
});
};
let response = await client.sendMessage(text, messageOptions);
if (overrideParentMessageId) {
response.parentMessageId = overrideParentMessageId;
@ -118,7 +119,10 @@ router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req,
response = { ...response, ...metadata };
}
await saveMessage({ ...response, user });
if (client.options.attachments) {
userMessage.files = client.options.attachments;
delete userMessage.image_urls;
}
sendMessage(res, {
title: await getConvoTitle(user, conversationId),
@ -129,6 +133,9 @@ router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req,
});
res.end();
await saveMessage({ ...response, user });
await saveMessage(userMessage);
if (parentMessageId === '00000000-0000-0000-0000-000000000000' && newConvo) {
addTitle(req, {
text,


@ -1,7 +1,7 @@
const { OpenAIClient } = require('../../../../app');
const { isEnabled } = require('../../../utils');
const { getAzureCredentials } = require('../../../../utils');
const { getUserKey, checkUserKeyExpiry } = require('../../../services/UserService');
const { OpenAIClient } = require('~/app');
const { isEnabled } = require('~/server/utils');
const { getAzureCredentials } = require('~/utils');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const initializeClient = async ({ req, res, endpointOption }) => {
const {


@ -11,6 +11,41 @@ const EModelEndpoint = {
assistant: 'assistant',
};
const alternateName = {
[EModelEndpoint.openAI]: 'OpenAI',
[EModelEndpoint.assistant]: 'Assistants',
[EModelEndpoint.azureOpenAI]: 'Azure OpenAI',
[EModelEndpoint.bingAI]: 'Bing',
[EModelEndpoint.chatGPTBrowser]: 'ChatGPT',
[EModelEndpoint.gptPlugins]: 'Plugins',
[EModelEndpoint.google]: 'PaLM',
[EModelEndpoint.anthropic]: 'Anthropic',
};
const supportsFiles = {
[EModelEndpoint.openAI]: true,
[EModelEndpoint.assistant]: true,
};
const openAIModels = [
'gpt-3.5-turbo-16k-0613',
'gpt-3.5-turbo-16k',
'gpt-4-1106-preview',
'gpt-3.5-turbo',
'gpt-3.5-turbo-1106',
'gpt-4-vision-preview',
'gpt-4',
'gpt-3.5-turbo-instruct-0914',
'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-0301',
'gpt-3.5-turbo-instruct',
'gpt-4-0613',
'text-davinci-003',
'gpt-4-0314',
];
const visionModels = ['gpt-4-vision', 'llava-13b'];
const eModelEndpointSchema = z.nativeEnum(EModelEndpoint);
const tPluginAuthConfigSchema = z.object({
@ -321,7 +356,7 @@ const parseConvo = (endpoint, conversation, possibleValues) => {
};
const getResponseSender = (endpointOption) => {
const { endpoint, chatGptLabel, modelLabel, jailbreak } = endpointOption;
const { model, endpoint, chatGptLabel, modelLabel, jailbreak } = endpointOption;
if (
[
@ -331,7 +366,14 @@ const getResponseSender = (endpointOption) => {
EModelEndpoint.chatGPTBrowser,
].includes(endpoint)
) {
return chatGptLabel ?? 'ChatGPT';
if (chatGptLabel) {
return chatGptLabel;
} else if (model && model.includes('gpt-3')) {
return 'GPT-3.5';
} else if (model && model.includes('gpt-4')) {
return 'GPT-4';
}
return alternateName[endpoint] ?? 'ChatGPT';
}
if (endpoint === EModelEndpoint.bingAI) {
@ -353,4 +395,8 @@ module.exports = {
parseConvo,
getResponseSender,
EModelEndpoint,
supportsFiles,
openAIModels,
visionModels,
alternateName,
};


@ -0,0 +1,58 @@
const { z } = require('zod');
const fs = require('fs').promises;
const express = require('express');
const { deleteFiles } = require('~/models');
const path = require('path');
const router = express.Router();
const isUUID = z.string().uuid();
const isValidPath = (base, subfolder, filepath) => {
const normalizedBase = path.resolve(base, subfolder, 'temp');
const normalizedFilepath = path.resolve(filepath);
return normalizedFilepath.startsWith(normalizedBase);
};
const deleteFile = async (req, file) => {
const { publicPath } = req.app.locals.config;
const parts = file.filepath.split(path.sep);
const subfolder = parts[1];
const filepath = path.join(publicPath, file.filepath);
if (!isValidPath(publicPath, subfolder, filepath)) {
throw new Error('Invalid file path');
}
await fs.unlink(filepath);
};
router.delete('/', async (req, res) => {
try {
const { files: _files } = req.body;
const files = _files.filter((file) => {
if (!file.file_id) {
return false;
}
if (!file.filepath) {
return false;
}
return isUUID.safeParse(file.file_id).success;
});
const file_ids = files.map((file) => file.file_id);
const promises = [];
promises.push(await deleteFiles(file_ids));
for (const file of files) {
promises.push(deleteFile(req, file));
}
await Promise.all(promises);
res.status(200).json({ message: 'Files deleted successfully' });
} catch (error) {
console.error('Error deleting files:', error);
res.status(400).json({ message: 'Error in request', error: error.message });
}
});
module.exports = router;
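
A hedged example of the request body this handler expects, assuming this router is the one mounted at /api/files in the route index later in this diff; the UUID and path are illustrative and must satisfy the isUUID and isValidPath checks above:

// DELETE /api/files
// {
//   "files": [
//     {
//       "file_id": "a1b2c3d4-0000-4000-8000-000000000000",
//       "filepath": "/images/temp/img-a1b2c3d4-0000-4000-8000-000000000000.webp"
//     }
//   ]
// }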


@ -0,0 +1,58 @@
const { z } = require('zod');
const fs = require('fs').promises;
const express = require('express');
const upload = require('./multer');
const { localStrategy } = require('~/server/services/Files');
const router = express.Router();
router.post('/', upload.single('file'), async (req, res) => {
const file = req.file;
const metadata = req.body;
// TODO: add file size/type validation
const uuidSchema = z.string().uuid();
try {
if (!file) {
throw new Error('No file provided');
}
if (!metadata.file_id) {
throw new Error('No file_id provided');
}
if (!metadata.width) {
throw new Error('No width provided');
}
if (!metadata.height) {
throw new Error('No height provided');
}
/* parse to validate api call */
uuidSchema.parse(metadata.file_id);
metadata.temp_file_id = metadata.file_id;
metadata.file_id = req.file_id;
await localStrategy({ res, file, metadata });
} catch (error) {
console.error('Error processing file:', error);
try {
await fs.unlink(file.path);
} catch (error) {
console.error('Error deleting file:', error);
}
res.status(500).json({ message: 'Error processing file' });
}
// do this if strategy is not local
// finally {
// try {
// // await fs.unlink(file.path);
// } catch (error) {
// console.error('Error deleting file:', error);
// }
// }
});
module.exports = router;
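
A minimal client-side sketch of the multipart payload this handler expects; the field names come from the checks above, while the endpoint path is an assumption based on how the route index later in this diff mounts the image router, and imageBlob, width, and height are placeholders:

const body = new FormData();
body.append('file', imageBlob);               // jpeg/jpg/png/webp, enforced by the multer fileFilter
body.append('file_id', crypto.randomUUID());  // client-generated id, re-keyed server-side as temp_file_id
body.append('width', String(width));          // client-measured dimensions
body.append('height', String(height));
await fetch('/api/files/images', { method: 'POST', body }); // assumed mount path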


@ -0,0 +1,22 @@
const express = require('express');
const router = express.Router();
const {
uaParser,
checkBan,
requireJwtAuth,
// concurrentLimiter,
// messageIpLimiter,
// messageUserLimiter,
} = require('../../middleware');
const files = require('./files');
const images = require('./images');
router.use(requireJwtAuth);
router.use(checkBan);
router.use(uaParser);
router.use('/', files);
router.use('/images', images);
module.exports = router;


@ -0,0 +1,41 @@
const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const multer = require('multer');
const supportedTypes = ['image/jpeg', 'image/jpg', 'image/png', 'image/webp'];
const sizeLimit = 20 * 1024 * 1024; // 20 MB
const storage = multer.diskStorage({
destination: function (req, file, cb) {
const outputPath = path.join(req.app.locals.config.imageOutput, 'temp');
if (!fs.existsSync(outputPath)) {
fs.mkdirSync(outputPath, { recursive: true });
}
cb(null, outputPath);
},
filename: function (req, file, cb) {
req.file_id = crypto.randomUUID();
const fileExt = path.extname(file.originalname);
cb(null, `img-${req.file_id}${fileExt}`);
},
});
const fileFilter = (req, file, cb) => {
if (!supportedTypes.includes(file.mimetype)) {
return cb(
new Error('Unsupported file type. Only JPEG, JPG, PNG, and WEBP files are allowed.'),
false,
);
}
if (file.size > sizeLimit) {
return cb(new Error(`File size exceeds ${sizeLimit / 1024 / 1024} MB.`), false);
}
cb(null, true);
};
const upload = multer({ storage, fileFilter });
module.exports = upload;


@ -16,6 +16,7 @@ const plugins = require('./plugins');
const user = require('./user');
const config = require('./config');
const assistants = require('./assistants');
const files = require('./files');
module.exports = {
search,
@ -36,4 +37,5 @@ module.exports = {
plugins,
config,
assistants,
files,
};


@ -1,21 +1,5 @@
const RunManager = require('./Runs/RunMananger');
/**
* @typedef {import('openai').OpenAI} OpenAI
* @typedef {import('openai').OpenAI.Beta.Threads.ThreadMessage} ThreadMessage
* @typedef {import('openai').OpenAI.Beta.Threads.RequiredActionFunctionToolCall} RequiredActionFunctionToolCall
* @typedef {import('./Runs/RunManager').RunManager} RunManager
*/
/**
* @typedef {Object} Thread
* @property {string} id - The identifier of the thread.
* @property {string} object - The object type, always 'thread'.
* @property {number} created_at - The Unix timestamp (in seconds) for when the thread was created.
* @property {Object} [metadata] - Optional metadata associated with the thread.
* @property {Message[]} [messages] - An array of messages associated with the thread.
*/
/**
* @typedef {Object} Message
* @property {string} id - The identifier of the message.
@ -247,27 +231,6 @@ async function waitForRun({ openai, run_id, thread_id, runManager, pollIntervalM
return run;
}
/**
* @typedef {Object} AgentAction
* @property {string} tool - The name of the tool used.
* @property {string} toolInput - The input provided to the tool.
* @property {string} log - A log or message associated with the action.
*/
/**
* @typedef {Object} AgentFinish
* @property {Record<string, any>} returnValues - The return values of the agent's execution.
* @property {string} log - A log or message associated with the finish.
*/
/**
* @typedef {AgentFinish & { run_id: string; thread_id: string; }} OpenAIAssistantFinish
*/
/**
* @typedef {AgentAction & { toolCallId: string; run_id: string; thread_id: string; }} OpenAIAssistantAction
*/
/**
* Retrieves the response from an OpenAI run.
*


@ -0,0 +1,17 @@
const path = require('path');
const sharp = require('sharp');
const fs = require('fs').promises;
const { resizeImage } = require('./resize');
async function convertToWebP(inputFilePath, resolution = 'high') {
const { buffer: resizedBuffer, width, height } = await resizeImage(inputFilePath, resolution);
const outputFilePath = inputFilePath.replace(/\.[^/.]+$/, '') + '.webp';
const data = await sharp(resizedBuffer).toFormat('webp').toBuffer();
await fs.writeFile(outputFilePath, data);
const bytes = Buffer.byteLength(data);
const filepath = path.posix.join('/', 'images', 'temp', path.basename(outputFilePath));
await fs.unlink(inputFilePath);
return { filepath, bytes, width, height };
}
module.exports = { convertToWebP };


@ -0,0 +1,80 @@
const fs = require('fs');
const path = require('path');
const { updateFile } = require('~/models');
function encodeImage(imagePath) {
return new Promise((resolve, reject) => {
fs.readFile(imagePath, (err, data) => {
if (err) {
reject(err);
} else {
resolve(data.toString('base64'));
}
});
});
}
async function encodeAndMove(req, file) {
const { publicPath, imageOutput } = req.app.locals.config;
const userPath = path.join(imageOutput, req.user.id);
if (!fs.existsSync(userPath)) {
fs.mkdirSync(userPath, { recursive: true });
}
const filepath = path.join(publicPath, file.filepath);
if (!filepath.includes('temp')) {
const base64 = await encodeImage(filepath);
return [file, base64];
}
const newPath = path.join(userPath, path.basename(file.filepath));
await fs.promises.rename(filepath, newPath);
const newFilePath = path.posix.join('/', 'images', req.user.id, path.basename(file.filepath));
const promises = [];
promises.push(updateFile({ file_id: file.file_id, filepath: newFilePath }));
promises.push(encodeImage(newPath));
return await Promise.all(promises);
}
async function encodeAndFormat(req, files) {
const promises = [];
for (let file of files) {
promises.push(encodeAndMove(req, file));
}
// TODO: make detail configurable, as of now resizing is done
// to prefer "high" but "low" may be used if the image is small enough
const detail = req.body.detail ?? 'auto';
const encodedImages = await Promise.all(promises);
const result = {
files: [],
image_urls: [],
};
for (const [file, base64] of encodedImages) {
result.image_urls.push({
type: 'image_url',
image_url: {
url: `data:image/webp;base64,${base64}`,
detail,
},
});
result.files.push({
file_id: file.file_id,
filepath: file.filepath,
filename: file.filename,
type: file.type,
height: file.height,
width: file.width,
});
}
return result;
}
module.exports = {
encodeImage,
encodeAndFormat,
};


@ -0,0 +1,11 @@
const convert = require('./convert');
const encode = require('./encode');
const resize = require('./resize');
const validate = require('./validate');
module.exports = {
...convert,
...encode,
...resize,
...validate,
};


@ -0,0 +1,52 @@
const sharp = require('sharp');
async function resizeImage(inputFilePath, resolution) {
const maxLowRes = 512;
const maxShortSideHighRes = 768;
const maxLongSideHighRes = 2000;
let newWidth, newHeight;
let resizeOptions = { fit: 'inside', withoutEnlargement: true };
if (resolution === 'low') {
resizeOptions.width = maxLowRes;
resizeOptions.height = maxLowRes;
} else if (resolution === 'high') {
const metadata = await sharp(inputFilePath).metadata();
const isWidthShorter = metadata.width < metadata.height;
if (isWidthShorter) {
// Width is the shorter side
newWidth = Math.min(metadata.width, maxShortSideHighRes);
// Calculate new height to maintain aspect ratio
newHeight = Math.round((metadata.height / metadata.width) * newWidth);
// Ensure the long side does not exceed the maximum allowed
if (newHeight > maxLongSideHighRes) {
newHeight = maxLongSideHighRes;
newWidth = Math.round((metadata.width / metadata.height) * newHeight);
}
} else {
// Height is the shorter side
newHeight = Math.min(metadata.height, maxShortSideHighRes);
// Calculate new width to maintain aspect ratio
newWidth = Math.round((metadata.width / metadata.height) * newHeight);
// Ensure the long side does not exceed the maximum allowed
if (newWidth > maxLongSideHighRes) {
newWidth = maxLongSideHighRes;
newHeight = Math.round((metadata.height / metadata.width) * newWidth);
}
}
resizeOptions.width = newWidth;
resizeOptions.height = newHeight;
} else {
throw new Error('Invalid resolution parameter');
}
const resizedBuffer = await sharp(inputFilePath).resize(resizeOptions).toBuffer();
const resizedMetadata = await sharp(resizedBuffer).metadata();
return { buffer: resizedBuffer, width: resizedMetadata.width, height: resizedMetadata.height };
}
module.exports = { resizeImage };
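
A worked example of the 'high' branch above with an illustrative 3000x1500 source: height is the shorter side, so it is capped at 768 and the width scales proportionally to 1536, which stays under the 2000px long-side limit:

// const { width, height } = await resizeImage('photo.jpg', 'high'); // hypothetical 3000x1500 input
// newHeight = Math.min(1500, 768) = 768
// newWidth  = Math.round((3000 / 1500) * 768) = 1536  => resized to roughly 1536x768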


@ -0,0 +1,13 @@
const { visionModels } = require('~/server/routes/endpoints/schemas');
function validateVisionModel(model) {
if (!model) {
return false;
}
return visionModels.some((visionModel) => model.includes(visionModel));
}
module.exports = {
validateVisionModel,
};
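
For reference, how the substring check behaves against the visionModels list ('gpt-4-vision', 'llava-13b') defined in the schemas module earlier in this diff:

validateVisionModel('gpt-4-vision-preview'); // true  ('gpt-4-vision' is a substring)
validateVisionModel('llava-13b');            // true
validateVisionModel('gpt-4-0613');           // false (no vision model substring)
validateVisionModel(undefined);              // false (no model provided)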


@ -0,0 +1,9 @@
const localStrategy = require('./localStrategy');
const process = require('./process');
const save = require('./save');
module.exports = {
...save,
...process,
localStrategy,
};


@ -0,0 +1,34 @@
const { createFile } = require('~/models');
const { convertToWebP } = require('./images/convert');
/**
* Applies the local strategy for image uploads.
* Saves file metadata to the database with an expiry TTL.
* Files must be deleted from the server filesystem manually.
*
* @param {Object} params - The parameters object.
* @param {Express.Response} params.res - The Express response object.
* @param {Express.Multer.File} params.file - The uploaded file.
* @param {ImageMetadata} params.metadata - Additional metadata for the file.
* @returns {Promise<void>}
*/
const localStrategy = async ({ res, file, metadata }) => {
const { file_id, temp_file_id } = metadata;
const { filepath, bytes, width, height } = await convertToWebP(file.path);
const result = await createFile(
{
file_id,
temp_file_id,
bytes,
filepath,
filename: file.originalname,
type: 'image/webp',
width,
height,
},
true,
);
res.status(200).json({ message: 'File uploaded and processed successfully', ...result });
};
module.exports = localStrategy;


@ -0,0 +1,29 @@
const { updateFileUsage } = require('~/models');
// const mapImageUrls = (files, detail) => {
// return files
// .filter((file) => file.type.includes('image'))
// .map((file) => ({
// type: 'image_url',
// image_url: {
// /* Temporarily set to path to encode later */
// url: file.filepath,
// detail,
// },
// }));
// };
const processFiles = async (files) => {
const promises = [];
for (let file of files) {
const { file_id } = file;
promises.push(updateFileUsage({ file_id }));
}
// TODO: calculate token cost when image is first uploaded
return await Promise.all(promises);
};
module.exports = {
processFiles,
};


@ -0,0 +1,47 @@
const fs = require('fs');
const path = require('path');
/**
* Saves a file to a specified output path with a new filename.
*
* @param {Express.Multer.File} file - The file object to be saved. Should contain properties like 'originalname' and 'path'.
* @param {string} outputPath - The path where the file should be saved.
* @param {string} outputFilename - The new filename for the saved file (without extension).
* @returns {Promise<string>} The full path of the saved file.
* @throws Will throw an error if the file saving process fails.
*/
async function saveFile(file, outputPath, outputFilename) {
try {
if (!fs.existsSync(outputPath)) {
fs.mkdirSync(outputPath, { recursive: true });
}
const fileExtension = path.extname(file.originalname);
const filenameWithExt = outputFilename + fileExtension;
const outputFilePath = path.join(outputPath, filenameWithExt);
fs.copyFileSync(file.path, outputFilePath);
fs.unlinkSync(file.path);
return outputFilePath;
} catch (error) {
console.error('Error while saving the file:', error);
throw error;
}
}
/**
* Saves an uploaded image file to a specified directory based on the user's ID and a filename.
*
* @param {Express.Request} req - The Express request object, containing the user's information and app configuration.
* @param {Express.Multer.File} file - The uploaded file object.
* @param {string} filename - The new filename to assign to the saved image (without extension).
* @returns {Promise<void>}
* @throws Will throw an error if the image saving process fails.
*/
const saveLocalImage = async (req, file, filename) => {
const imagePath = req.app.locals.config.imageOutput;
const outputPath = path.join(imagePath, req.user.id ?? '');
await saveFile(file, outputPath, filename);
};
module.exports = { saveFile, saveLocalImage };

api/typedefs.js (new file, 241 lines)

@ -0,0 +1,241 @@
/**
* @namespace typedefs
*/
/**
* @exports OpenAI
* @typedef {import('openai').OpenAI} OpenAI
* @memberof typedefs
*/
/**
* @exports Assistant
* @typedef {import('librechat-data-provider').Assistant} Assistant
* @memberof typedefs
*/
/**
* @exports OpenAIFile
* @typedef {import('librechat-data-provider').File} OpenAIFile
* @memberof typedefs
*/
/**
* @exports ImageMetadata
* @typedef {Object} ImageMetadata
* @property {string} file_id - The identifier of the file.
* @property {string} [temp_file_id] - The temporary identifier of the file.
* @property {number} width - The width of the image.
* @property {number} height - The height of the image.
* @memberof typedefs
*/
/**
* @exports MongoFile
* @typedef {import('~/models/schema/fileSchema.js').MongoFile} MongoFile
* @memberof typedefs
*/
/**
* @exports AssistantCreateParams
* @typedef {import('librechat-data-provider').AssistantCreateParams} AssistantCreateParams
* @memberof typedefs
*/
/**
* @exports AssistantUpdateParams
* @typedef {import('librechat-data-provider').AssistantUpdateParams} AssistantUpdateParams
* @memberof typedefs
*/
/**
* @exports AssistantListParams
* @typedef {import('librechat-data-provider').AssistantListParams} AssistantListParams
* @memberof typedefs
*/
/**
* @exports AssistantListResponse
* @typedef {import('librechat-data-provider').AssistantListResponse} AssistantListResponse
* @memberof typedefs
*/
/**
* @exports ThreadMessage
* @typedef {import('openai').OpenAI.Beta.Threads.ThreadMessage} ThreadMessage
* @memberof typedefs
*/
/**
* @exports RequiredActionFunctionToolCall
* @typedef {import('openai').OpenAI.Beta.Threads.RequiredActionFunctionToolCall} RequiredActionFunctionToolCall
* @memberof typedefs
*/
/**
* @exports RunManager
* @typedef {import('./server/services/Runs/RunMananger.js').RunManager} RunManager
* @memberof typedefs
*/
/**
* @exports Thread
* @typedef {Object} Thread
* @property {string} id - The identifier of the thread.
* @property {string} object - The object type, always 'thread'.
* @property {number} created_at - The Unix timestamp (in seconds) for when the thread was created.
* @property {Object} [metadata] - Optional metadata associated with the thread.
* @property {Message[]} [messages] - An array of messages associated with the thread.
* @memberof typedefs
*/
/**
* @exports Message
* @typedef {Object} Message
* @property {string} id - The identifier of the message.
* @property {string} object - The object type, always 'thread.message'.
* @property {number} created_at - The Unix timestamp (in seconds) for when the message was created.
* @property {string} thread_id - The thread ID that this message belongs to.
* @property {string} role - The entity that produced the message. One of 'user' or 'assistant'.
* @property {Object[]} content - The content of the message in an array of text and/or images.
* @property {string} content[].type - The type of content, either 'text' or 'image_file'.
* @property {Object} [content[].text] - The text content, present if type is 'text'.
* @property {string} content[].text.value - The data that makes up the text.
* @property {Object[]} [content[].text.annotations] - Annotations for the text content.
* @property {Object} [content[].image_file] - The image file content, present if type is 'image_file'.
* @property {string} content[].image_file.file_id - The File ID of the image in the message content.
* @property {string[]} [file_ids] - Optional list of File IDs for the message.
* @property {string|null} [assistant_id] - If applicable, the ID of the assistant that authored this message.
* @property {string|null} [run_id] - If applicable, the ID of the run associated with the authoring of this message.
* @property {Object} [metadata] - Optional metadata for the message, a map of key-value pairs.
* @memberof typedefs
*/
/**
* @exports FunctionTool
* @typedef {Object} FunctionTool
* @property {string} type - The type of tool, 'function'.
* @property {Object} function - The function definition.
* @property {string} function.description - A description of what the function does.
* @property {string} function.name - The name of the function to be called.
* @property {Object} function.parameters - The parameters the function accepts, described as a JSON Schema object.
* @memberof typedefs
*/
/**
* @exports Tool
* @typedef {Object} Tool
* @property {string} type - The type of tool, can be 'code_interpreter', 'retrieval', or 'function'.
* @property {FunctionTool} [function] - The function tool, present if type is 'function'.
* @memberof typedefs
*/
/**
* @exports Run
* @typedef {Object} Run
* @property {string} id - The identifier of the run.
* @property {string} object - The object type, always 'thread.run'.
* @property {number} created_at - The Unix timestamp (in seconds) for when the run was created.
* @property {string} thread_id - The ID of the thread that was executed on as a part of this run.
* @property {string} assistant_id - The ID of the assistant used for execution of this run.
* @property {string} status - The status of the run (e.g., 'queued', 'completed').
* @property {Object} [required_action] - Details on the action required to continue the run.
* @property {string} required_action.type - The type of required action, always 'submit_tool_outputs'.
* @property {Object} required_action.submit_tool_outputs - Details on the tool outputs needed for the run to continue.
* @property {Object[]} required_action.submit_tool_outputs.tool_calls - A list of the relevant tool calls.
* @property {string} required_action.submit_tool_outputs.tool_calls[].id - The ID of the tool call.
* @property {string} required_action.submit_tool_outputs.tool_calls[].type - The type of tool call the output is required for, always 'function'.
* @property {Object} required_action.submit_tool_outputs.tool_calls[].function - The function definition.
* @property {string} required_action.submit_tool_outputs.tool_calls[].function.name - The name of the function.
* @property {string} required_action.submit_tool_outputs.tool_calls[].function.arguments - The arguments that the model expects you to pass to the function.
* @property {Object} [last_error] - The last error associated with this run.
* @property {string} last_error.code - One of 'server_error' or 'rate_limit_exceeded'.
* @property {string} last_error.message - A human-readable description of the error.
* @property {number} [expires_at] - The Unix timestamp (in seconds) for when the run will expire.
* @property {number} [started_at] - The Unix timestamp (in seconds) for when the run was started.
* @property {number} [cancelled_at] - The Unix timestamp (in seconds) for when the run was cancelled.
* @property {number} [failed_at] - The Unix timestamp (in seconds) for when the run failed.
* @property {number} [completed_at] - The Unix timestamp (in seconds) for when the run was completed.
* @property {string} [model] - The model that the assistant used for this run.
* @property {string} [instructions] - The instructions that the assistant used for this run.
* @property {Tool[]} [tools] - The list of tools used for this run.
* @property {string[]} [file_ids] - The list of File IDs used for this run.
* @property {Object} [metadata] - Metadata associated with this run.
* @memberof typedefs
*/
/**
* @exports RunStep
* @typedef {Object} RunStep
* @property {string} id - The identifier of the run step.
* @property {string} object - The object type, always 'thread.run.step'.
* @property {number} created_at - The Unix timestamp (in seconds) for when the run step was created.
* @property {string} assistant_id - The ID of the assistant associated with the run step.
* @property {string} thread_id - The ID of the thread that was run.
* @property {string} run_id - The ID of the run that this run step is a part of.
* @property {string} type - The type of run step, either 'message_creation' or 'tool_calls'.
* @property {string} status - The status of the run step, can be 'in_progress', 'cancelled', 'failed', 'completed', or 'expired'.
* @property {Object} step_details - The details of the run step.
* @property {Object} [last_error] - The last error associated with this run step.
* @property {string} last_error.code - One of 'server_error' or 'rate_limit_exceeded'.
* @property {string} last_error.message - A human-readable description of the error.
* @property {number} [expired_at] - The Unix timestamp (in seconds) for when the run step expired.
* @property {number} [cancelled_at] - The Unix timestamp (in seconds) for when the run step was cancelled.
* @property {number} [failed_at] - The Unix timestamp (in seconds) for when the run step failed.
* @property {number} [completed_at] - The Unix timestamp (in seconds) for when the run step completed.
* @property {Object} [metadata] - Metadata associated with this run step, a map of up to 16 key-value pairs.
* @memberof typedefs
*/
/**
* @exports StepMessage
* @typedef {Object} StepMessage
* @property {Message} message - The complete message object created by the step.
* @property {string} id - The identifier of the run step.
* @property {string} object - The object type, always 'thread.run.step'.
* @property {number} created_at - The Unix timestamp (in seconds) for when the run step was created.
* @property {string} assistant_id - The ID of the assistant associated with the run step.
* @property {string} thread_id - The ID of the thread that was run.
* @property {string} run_id - The ID of the run that this run step is a part of.
* @property {string} type - The type of run step, either 'message_creation' or 'tool_calls'.
* @property {string} status - The status of the run step, can be 'in_progress', 'cancelled', 'failed', 'completed', or 'expired'.
* @property {Object} step_details - The details of the run step.
* @property {Object} [last_error] - The last error associated with this run step.
* @property {string} last_error.code - One of 'server_error' or 'rate_limit_exceeded'.
* @property {string} last_error.message - A human-readable description of the error.
* @property {number} [expired_at] - The Unix timestamp (in seconds) for when the run step expired.
* @property {number} [cancelled_at] - The Unix timestamp (in seconds) for when the run step was cancelled.
* @property {number} [failed_at] - The Unix timestamp (in seconds) for when the run step failed.
* @property {number} [completed_at] - The Unix timestamp (in seconds) for when the run step completed.
* @property {Object} [metadata] - Metadata associated with this run step, a map of up to 16 key-value pairs.
* @memberof typedefs
*/
/**
* @exports AgentAction
* @typedef {Object} AgentAction
* @property {string} tool - The name of the tool used.
* @property {string} toolInput - The input provided to the tool.
* @property {string} log - A log or message associated with the action.
* @memberof typedefs
*/
/**
* @exports AgentFinish
* @typedef {Object} AgentFinish
* @property {Record<string, any>} returnValues - The return values of the agent's execution.
* @property {string} log - A log or message associated with the finish.
* @memberof typedefs
*/
/**
* @exports OpenAIAssistantFinish
* @typedef {AgentFinish & { run_id: string; thread_id: string; }} OpenAIAssistantFinish
* @memberof typedefs
*/
/**
* @exports OpenAIAssistantAction
* @typedef {AgentAction & { toolCallId: string; run_id: string; thread_id: string; }} OpenAIAssistantAction
* @memberof typedefs
*/