diff --git a/.gitignore b/.gitignore index 52ce79baa..a6d89a977 100644 --- a/.gitignore +++ b/.gitignore @@ -40,7 +40,7 @@ meili_data/ api/node_modules/ client/node_modules/ bower_components/ -types/ +*.d.ts # Floobits .floo diff --git a/api/models/schema/convoSchema.js b/api/models/schema/convoSchema.js index 1ea928f25..46555ba35 100644 --- a/api/models/schema/convoSchema.js +++ b/api/models/schema/convoSchema.js @@ -18,36 +18,36 @@ const convoSchema = mongoose.Schema( user: { type: String, index: true, - default: null, + // default: null, }, messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }], // google only examples: [{ type: mongoose.Schema.Types.Mixed }], agentOptions: { type: mongoose.Schema.Types.Mixed, - default: null, + // default: null, }, ...conversationPreset, // for bingAI only bingConversationId: { type: String, - default: null, + // default: null, }, jailbreakConversationId: { type: String, - default: null, + // default: null, }, conversationSignature: { type: String, - default: null, + // default: null, }, clientId: { type: String, - default: null, + // default: null, }, invocationId: { type: Number, - default: 1, + // default: 1, }, }, { timestamps: true }, diff --git a/api/models/schema/defaults.js b/api/models/schema/defaults.js index 92e064480..338ee1208 100644 --- a/api/models/schema/defaults.js +++ b/api/models/schema/defaults.js @@ -8,147 +8,147 @@ const conversationPreset = { // for azureOpenAI, openAI, chatGPTBrowser only model: { type: String, - default: null, + // default: null, required: false, }, // for azureOpenAI, openAI only chatGptLabel: { type: String, - default: null, + // default: null, required: false, }, // for google only modelLabel: { type: String, - default: null, + // default: null, required: false, }, promptPrefix: { type: String, - default: null, + // default: null, required: false, }, temperature: { type: Number, - default: 1, + // default: 1, required: false, }, top_p: { type: Number, - default: 1, + // default: 1, required: false, }, // for google only topP: { type: Number, - default: 0.95, + // default: 0.95, required: false, }, topK: { type: Number, - default: 40, + // default: 40, required: false, }, maxOutputTokens: { type: Number, - default: 1024, + // default: 1024, required: false, }, presence_penalty: { type: Number, - default: 0, + // default: 0, required: false, }, frequency_penalty: { type: Number, - default: 0, + // default: 0, required: false, }, // for bingai only jailbreak: { type: Boolean, - default: false, + // default: false, }, context: { type: String, - default: null, + // default: null, }, systemMessage: { type: String, - default: null, + // default: null, }, toneStyle: { type: String, - default: null, + // default: null, }, }; const agentOptions = { model: { type: String, - default: null, + // default: null, required: false, }, // for azureOpenAI, openAI only chatGptLabel: { type: String, - default: null, + // default: null, required: false, }, // for google only modelLabel: { type: String, - default: null, + // default: null, required: false, }, promptPrefix: { type: String, - default: null, + // default: null, required: false, }, temperature: { type: Number, - default: 1, + // default: 1, required: false, }, top_p: { type: Number, - default: 1, + // default: 1, required: false, }, // for google only topP: { type: Number, - default: 0.95, + // default: 0.95, required: false, }, topK: { type: Number, - default: 40, + // default: 40, required: false, }, maxOutputTokens: { type: Number, - default: 1024, + 
// default: 1024,
required: false,
},
presence_penalty: {
type: Number,
- default: 0,
+ // default: 0,
required: false,
},
frequency_penalty: {
type: Number,
- default: 0,
+ // default: 0,
required: false,
},
context: {
type: String,
- default: null,
+ // default: null,
},
systemMessage: {
type: String,
- default: null,
+ // default: null,
},
};
diff --git a/api/server/controllers/EndpointController.js b/api/server/controllers/EndpointController.js
index ff4c8c978..09e7bfaf3 100644
--- a/api/server/controllers/EndpointController.js
+++ b/api/server/controllers/EndpointController.js
@@ -1,3 +1,4 @@
+const { EModelEndpoint } = require('../routes/endpoints/schemas');
const { availableTools } = require('../../app/clients/tools');
const { addOpenAPISpecs } = require('../../app/clients/tools/util/addOpenAPISpecs');
const {
@@ -7,6 +8,7 @@ const {
userProvidedOpenAI,
palmKey,
openAI,
+ assistant,
azureOpenAI,
bingAI,
chatGPTBrowser,
@@ -53,7 +55,16 @@ async function endpointController(req, res) {
: false;
res.send(
- JSON.stringify({ azureOpenAI, openAI, google, bingAI, chatGPTBrowser, gptPlugins, anthropic }),
+ JSON.stringify({
+ [EModelEndpoint.openAI]: openAI,
+ [EModelEndpoint.assistant]: assistant,
+ [EModelEndpoint.azureOpenAI]: azureOpenAI,
+ [EModelEndpoint.google]: google,
+ [EModelEndpoint.bingAI]: bingAI,
+ [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
+ [EModelEndpoint.gptPlugins]: gptPlugins,
+ [EModelEndpoint.anthropic]: anthropic,
+ }),
);
}
diff --git a/api/server/controllers/ModelController.js b/api/server/controllers/ModelController.js
index 1c1b9b9e8..2b683a6e5 100644
--- a/api/server/controllers/ModelController.js
+++ b/api/server/controllers/ModelController.js
@@ -1,3 +1,4 @@
+const { EModelEndpoint } = require('../routes/endpoints/schemas');
const {
getOpenAIModels,
getChatGPTBrowserModels,
@@ -6,17 +7,28 @@ const {
const { useAzurePlugins } = require('../services/EndpointService').config;

+const filterAssistantModels = (str) => {
+ return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
+};
+
async function modelController(req, res) {
- const google = ['chat-bison', 'text-bison', 'codechat-bison'];
const openAI = await getOpenAIModels();
const azureOpenAI = await getOpenAIModels({ azure: true });
const gptPlugins = await getOpenAIModels({ azure: useAzurePlugins, plugins: true });
- const bingAI = ['BingAI', 'Sydney'];
const chatGPTBrowser = getChatGPTBrowserModels();
const anthropic = getAnthropicModels();
res.send(
- JSON.stringify({ azureOpenAI, openAI, google, bingAI, chatGPTBrowser, gptPlugins, anthropic }),
+ JSON.stringify({
+ [EModelEndpoint.openAI]: openAI,
+ [EModelEndpoint.azureOpenAI]: azureOpenAI,
+ [EModelEndpoint.assistant]: openAI.filter(filterAssistantModels),
+ [EModelEndpoint.google]: ['chat-bison', 'text-bison', 'codechat-bison'],
+ [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
+ [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
+ [EModelEndpoint.gptPlugins]: gptPlugins,
+ [EModelEndpoint.anthropic]: anthropic,
+ }),
);
}
diff --git a/api/server/index.js b/api/server/index.js
index 7975f406b..ea581663f 100644
--- a/api/server/index.js
+++ b/api/server/index.js
@@ -64,6 +64,7 @@ const startServer = async () => {
app.use('/api/models', routes.models);
app.use('/api/plugins', routes.plugins);
app.use('/api/config', routes.config);
+ app.use('/api/assistants', routes.assistants);
// Static files
app.get('/*', function (req, res) {
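Worth noting: the assistant model list is not fetched separately; it reuses the OpenAI model list, filtered down to non-vision, non-instruct GPT models. A quick sketch of what the corrected `filterAssistantModels` admits — the sample model IDs here are illustrative, not an actual API response:

```js
// Same predicate as in ModelController.js above.
const filterAssistantModels = (str) => {
  return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
};

// Illustrative model IDs, not a real /api/models payload.
const sample = [
  'gpt-4-1106-preview',
  'gpt-3.5-turbo-1106',
  'gpt-4-vision-preview',
  'gpt-3.5-turbo-instruct',
  'text-embedding-ada-002',
];
console.log(sample.filter(filterAssistantModels));
// => [ 'gpt-4-1106-preview', 'gpt-3.5-turbo-1106' ]
```

diff --git a/api/server/routes/ask/index.js b/api/server/routes/ask/index.js
index d87daa6a8..e13f20195 100644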
--- a/api/server/routes/ask/index.js +++ b/api/server/routes/ask/index.js @@ -15,6 +15,7 @@ const { messageUserLimiter, } = require('../../middleware'); const { isEnabled } = require('../../utils'); +const { EModelEndpoint } = require('../endpoints/schemas'); const { LIMIT_CONCURRENT_MESSAGES, LIMIT_MESSAGE_IP, LIMIT_MESSAGE_USER } = process.env ?? {}; @@ -34,11 +35,11 @@ if (isEnabled(LIMIT_MESSAGE_USER)) { router.use(messageUserLimiter); } -router.use(['/azureOpenAI', '/openAI'], openAI); -router.use('/google', google); -router.use('/bingAI', bingAI); -router.use('/chatGPTBrowser', askChatGPTBrowser); -router.use('/gptPlugins', gptPlugins); -router.use('/anthropic', anthropic); +router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], openAI); +router.use(`/${EModelEndpoint.google}`, google); +router.use(`/${EModelEndpoint.bingAI}`, bingAI); +router.use(`/${EModelEndpoint.chatGPTBrowser}`, askChatGPTBrowser); +router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins); +router.use(`/${EModelEndpoint.anthropic}`, anthropic); module.exports = router; diff --git a/api/server/routes/assistants/assistants.js b/api/server/routes/assistants/assistants.js new file mode 100644 index 000000000..a33729b2b --- /dev/null +++ b/api/server/routes/assistants/assistants.js @@ -0,0 +1,98 @@ +const OpenAI = require('openai'); +const express = require('express'); +const router = express.Router(); + +/** + * Create an assistant. + * @route POST /assistants + * @param {AssistantCreateParams} req.body - The assistant creation parameters. + * @returns {Assistant} 201 - success response - application/json + */ +router.post('/', async (req, res) => { + try { + const openai = new OpenAI(process.env.OPENAI_API_KEY); + const assistantData = req.body; + const assistant = await openai.beta.assistants.create(assistantData); + console.log(assistant); + res.status(201).json(assistant); + } catch (error) { + res.status(500).json({ error: error.message }); + } +}); + +/** + * Retrieves an assistant. + * @route GET /assistants/:id + * @param {string} req.params.id - Assistant identifier. + * @returns {Assistant} 200 - success response - application/json + */ +router.get('/:id', async (req, res) => { + try { + const openai = new OpenAI(process.env.OPENAI_API_KEY); + const assistant_id = req.params.id; + const assistant = await openai.beta.assistants.retrieve(assistant_id); + res.json(assistant); + } catch (error) { + res.status(500).json({ error: error.message }); + } +}); + +/** + * Modifies an assistant. + * @route PATCH /assistants/:id + * @param {string} req.params.id - Assistant identifier. + * @param {AssistantUpdateParams} req.body - The assistant update parameters. + * @returns {Assistant} 200 - success response - application/json + */ +router.patch('/:id', async (req, res) => { + try { + const openai = new OpenAI(process.env.OPENAI_API_KEY); + const assistant_id = req.params.id; + const updateData = req.body; + const updatedAssistant = await openai.beta.assistants.update(assistant_id, updateData); + res.json(updatedAssistant); + } catch (error) { + res.status(500).json({ error: error.message }); + } +}); + +/** + * Deletes an assistant. + * @route DELETE /assistants/:id + * @param {string} req.params.id - Assistant identifier. 
+ * @returns {Assistant} 200 - success response - application/json
+ */
+router.delete('/:id', async (req, res) => {
+  try {
+    const openai = new OpenAI(process.env.OPENAI_API_KEY);
+    const assistant_id = req.params.id;
+    const deletionStatus = await openai.beta.assistants.del(assistant_id);
+    res.json(deletionStatus);
+  } catch (error) {
+    res.status(500).json({ error: error.message });
+  }
+});
+
+/**
+ * Returns a list of assistants.
+ * @route GET /assistants
+ * @param {AssistantListParams} req.query - The assistant list parameters for pagination and sorting.
+ * @returns {Array} 200 - success response - application/json
+ */
+router.get('/', async (req, res) => {
+  try {
+    const openai = new OpenAI(process.env.OPENAI_API_KEY);
+    const { limit, order, after, before } = req.query;
+    const assistants = await openai.beta.assistants.list({
+      limit,
+      order,
+      after,
+      before,
+    });
+    res.json(assistants);
+  } catch (error) {
+    res.status(500).json({ error: error.message });
+  }
+});
+
+module.exports = router;
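With the router mounted at `/api/assistants` (see `api/server/index.js` above), the CRUD surface can be exercised end to end. A hypothetical smoke test follows; the base URL, port, and bearer token are assumptions for illustration, since the route sits behind `requireJwtAuth`:

```js
// Hypothetical smoke test for the new assistants routes (Node 18+, native fetch).
const base = 'http://localhost:3080/api/assistants';
const headers = {
  'Content-Type': 'application/json',
  Authorization: `Bearer ${process.env.TOKEN}`, // JWT from a logged-in session
};

async function smokeTest() {
  // Create
  const created = await fetch(base, {
    method: 'POST',
    headers,
    body: JSON.stringify({
      model: 'gpt-3.5-turbo-1106',
      name: 'Demo Assistant',
      tools: [{ type: 'code_interpreter' }],
    }),
  }).then((res) => res.json());

  // Read, update, list, delete
  const fetched = await fetch(`${base}/${created.id}`, { headers }).then((res) => res.json());
  await fetch(`${base}/${created.id}`, {
    method: 'PATCH',
    headers,
    body: JSON.stringify({ name: 'Renamed Assistant' }),
  });
  const list = await fetch(`${base}?order=asc&limit=20`, { headers }).then((res) => res.json());
  await fetch(`${base}/${created.id}`, { method: 'DELETE', headers });
  console.log(fetched.id, list.data?.length);
}

smokeTest().catch(console.error);
```

diff --git a/api/server/routes/assistants/chat.js b/api/server/routes/assistants/chat.js
new file mode 100644
index 000000000..71cbef221
--- /dev/null
+++ b/api/server/routes/assistants/chat.js
@@ -0,0 +1,108 @@
+const crypto = require('crypto');
+const OpenAI = require('openai');
+const { sendMessage } = require('../../utils');
+const { initThread, createRun, handleRun } = require('../../services/AssistantService');
+const express = require('express');
+const router = express.Router();
+const {
+  setHeaders,
+  // handleAbort,
+  // handleAbortError,
+  // validateEndpoint,
+  // buildEndpointOption,
+  // createAbortController,
+} = require('../../middleware');
+
+// const thread = {
+//   id: 'thread_LexzJUVugYFqfslS7c7iL3Zo',
+//   "thread_nZoiCbPauU60LqY1Q0ME1elg"
+// };
+
+/**
+ * Chat with an assistant.
+ */
+router.post('/', setHeaders, async (req, res) => {
+  try {
+    console.log(req.body);
+    // test message:
+    // How many polls of 500 ms intervals are there in 18 seconds?
+
+    const { assistant_id, messages, text: userMessage, messageId } = req.body;
+    const conversationId = req.body.conversationId || crypto.randomUUID();
+    // let thread_id = req.body.thread_id ??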
'thread_nZoiCbPauU60LqY1Q0ME1elg'; // for testing + let thread_id = req.body.thread_id; + + if (!assistant_id) { + throw new Error('Missing assistant_id'); + } + + const openai = new OpenAI(process.env.OPENAI_API_KEY); + console.log(messages); + + const initThreadBody = { + messages: [ + { + role: 'user', + content: userMessage, + metadata: { + messageId, + }, + }, + ], + metadata: { + conversationId, + }, + }; + + const result = await initThread({ openai, body: initThreadBody, thread_id }); + // const { messages: _messages } = result; + thread_id = result.thread_id; + + /* NOTE: + * By default, a Run will use the model and tools configuration specified in Assistant object, + * but you can override most of these when creating the Run for added flexibility: + */ + const run = await createRun({ + openai, + thread_id, + body: { assistant_id, model: 'gpt-3.5-turbo-1106' }, + }); + const response = await handleRun({ openai, thread_id, run_id: run.id }); + + // TODO: parse responses, save to db, send to user + + sendMessage(res, { + title: 'New Chat', + final: true, + conversation: { + conversationId: 'fake-convo-id', + title: 'New Chat', + }, + requestMessage: { + messageId: 'fake-user-message-id', + parentMessageId: '00000000-0000-0000-0000-000000000000', + conversationId: 'fake-convo-id', + sender: 'User', + text: req.body.text, + isCreatedByUser: true, + }, + responseMessage: { + messageId: 'fake-response-id', + conversationId: 'fake-convo-id', + parentMessageId: 'fake-user-message-id', + isCreatedByUser: false, + isEdited: false, + model: 'gpt-3.5-turbo-1106', + sender: 'Assistant', + text: response.choices[0].text, + }, + }); + res.end(); + } catch (error) { + // res.status(500).json({ error: error.message }); + console.error(error); + res.end(); + } +}); + +module.exports = router; diff --git a/api/server/routes/assistants/index.js b/api/server/routes/assistants/index.js new file mode 100644 index 000000000..a47a768f9 --- /dev/null +++ b/api/server/routes/assistants/index.js @@ -0,0 +1,22 @@ +const express = require('express'); +const router = express.Router(); +const { + uaParser, + checkBan, + requireJwtAuth, + // concurrentLimiter, + // messageIpLimiter, + // messageUserLimiter, +} = require('../../middleware'); + +const assistants = require('./assistants'); +const chat = require('./chat'); + +router.use(requireJwtAuth); +router.use(checkBan); +router.use(uaParser); + +router.use('/', assistants); +router.use('/chat', chat); + +module.exports = router; diff --git a/api/server/routes/endpoints/schemas.js b/api/server/routes/endpoints/schemas.js index 99a603605..839692beb 100644 --- a/api/server/routes/endpoints/schemas.js +++ b/api/server/routes/endpoints/schemas.js @@ -8,6 +8,7 @@ const EModelEndpoint = { google: 'google', gptPlugins: 'gptPlugins', anthropic: 'anthropic', + assistant: 'assistant', }; const eModelEndpointSchema = z.nativeEnum(EModelEndpoint); @@ -263,14 +264,33 @@ const gptPluginsSchema = tConversationSchema }, })); +const assistantSchema = tConversationSchema + .pick({ + model: true, + assistant_id: true, + thread_id: true, + }) + .transform((obj) => { + const newObj = { ...obj }; + Object.keys(newObj).forEach((key) => { + const value = newObj[key]; + if (value === undefined || value === null) { + delete newObj[key]; + } + }); + return newObj; + }) + .catch(() => ({})); + const endpointSchemas = { - openAI: openAISchema, - azureOpenAI: openAISchema, - google: googleSchema, - bingAI: bingAISchema, - anthropic: anthropicSchema, - chatGPTBrowser: chatGPTBrowserSchema, - 
gptPlugins: gptPluginsSchema, + [EModelEndpoint.openAI]: openAISchema, + [EModelEndpoint.assistant]: assistantSchema, + [EModelEndpoint.azureOpenAI]: openAISchema, + [EModelEndpoint.google]: googleSchema, + [EModelEndpoint.bingAI]: bingAISchema, + [EModelEndpoint.anthropic]: anthropicSchema, + [EModelEndpoint.chatGPTBrowser]: chatGPTBrowserSchema, + [EModelEndpoint.gptPlugins]: gptPluginsSchema, }; function getFirstDefinedValue(possibleValues) { @@ -303,19 +323,26 @@ const parseConvo = (endpoint, conversation, possibleValues) => { const getResponseSender = (endpointOption) => { const { endpoint, chatGptLabel, modelLabel, jailbreak } = endpointOption; - if (['openAI', 'azureOpenAI', 'gptPlugins', 'chatGPTBrowser'].includes(endpoint)) { + if ( + [ + EModelEndpoint.openAI, + EModelEndpoint.azureOpenAI, + EModelEndpoint.gptPlugins, + EModelEndpoint.chatGPTBrowser, + ].includes(endpoint) + ) { return chatGptLabel ?? 'ChatGPT'; } - if (endpoint === 'bingAI') { + if (endpoint === EModelEndpoint.bingAI) { return jailbreak ? 'Sydney' : 'BingAI'; } - if (endpoint === 'anthropic') { + if (endpoint === EModelEndpoint.anthropic) { return modelLabel ?? 'Anthropic'; } - if (endpoint === 'google') { + if (endpoint === EModelEndpoint.google) { return modelLabel ?? 'PaLM2'; } @@ -325,4 +352,5 @@ const getResponseSender = (endpointOption) => { module.exports = { parseConvo, getResponseSender, + EModelEndpoint, }; diff --git a/api/server/routes/index.js b/api/server/routes/index.js index 5d98c1b51..ae531664f 100644 --- a/api/server/routes/index.js +++ b/api/server/routes/index.js @@ -15,6 +15,7 @@ const models = require('./models'); const plugins = require('./plugins'); const user = require('./user'); const config = require('./config'); +const assistants = require('./assistants'); module.exports = { search, @@ -34,4 +35,5 @@ module.exports = { models, plugins, config, + assistants, }; diff --git a/api/server/routes/types/assistants.js b/api/server/routes/types/assistants.js new file mode 100644 index 000000000..974bf587a --- /dev/null +++ b/api/server/routes/types/assistants.js @@ -0,0 +1,53 @@ +/** + * Enum for the possible tools that can be enabled on an assistant. + * @readonly + * @enum {string} + */ +// eslint-disable-next-line no-unused-vars +const Tools = { + code_interpreter: 'code_interpreter', + retrieval: 'retrieval', + function: 'function', +}; + +/** + * Represents a tool with its type. + * @typedef {Object} Tool + * @property {Tools} toolName - The name of the tool and its corresponding type from the Tools enum. + */ + +/** + * @typedef {Object} Assistant + * @property {string} id - The identifier, which can be referenced in API endpoints. + * @property {number} created_at - The Unix timestamp (in seconds) for when the assistant was created. + * @property {string|null} description - The maximum length is 512 characters. + * @property {Array} file_ids - A list of file IDs attached to this assistant. + * @property {string|null} instructions - The system instructions that the assistant uses. The maximum length is 32768 characters. + * @property {Object|null} metadata - Set of 16 key-value pairs that can be attached to an object. + * @property {string} model - ID of the model to use. + * @property {string|null} name - The name of the assistant. The maximum length is 256 characters. + * @property {string} object - The object type, which is always 'assistant'. + * @property {Tool[]} tools - A list of tools enabled on the assistant. 
+ */ + +/** + * @typedef {Object} AssistantCreateParams + * @property {string} model - ID of the model to use. + * @property {string|null} [description] - The description of the assistant. + * @property {Array} [file_ids] - A list of file IDs attached to this assistant. + * @property {string|null} [instructions] - The system instructions that the assistant uses. + * @property {Object|null} [metadata] - Set of 16 key-value pairs that can be attached to an object. + * @property {string|null} [name] - The name of the assistant. + * @property {Tool[]} tools - A list of tools enabled on the assistant. + */ + +/** + * @typedef {Object} AssistantUpdateParams + * // Similar properties to AssistantCreateParams, but all optional + */ + +/** + * @typedef {Object} AssistantListParams + * @property {string|null} [before] - A cursor for use in pagination. + * @property {'asc'|'desc'} [order] - Sort order by the created_at timestamp of the objects. + */ diff --git a/api/server/services/AssistantService.js b/api/server/services/AssistantService.js new file mode 100644 index 000000000..cc6b0a61d --- /dev/null +++ b/api/server/services/AssistantService.js @@ -0,0 +1,398 @@ +const RunManager = require('./Runs/RunMananger'); + +/** + * @typedef {import('openai').OpenAI} OpenAI + * @typedef {import('openai').OpenAI.Beta.Threads.ThreadMessage} ThreadMessage + * @typedef {import('openai').OpenAI.Beta.Threads.RequiredActionFunctionToolCall} RequiredActionFunctionToolCall + * @typedef {import('./Runs/RunManager').RunManager} RunManager + */ + +/** + * @typedef {Object} Thread + * @property {string} id - The identifier of the thread. + * @property {string} object - The object type, always 'thread'. + * @property {number} created_at - The Unix timestamp (in seconds) for when the thread was created. + * @property {Object} [metadata] - Optional metadata associated with the thread. + * @property {Message[]} [messages] - An array of messages associated with the thread. + */ + +/** + * @typedef {Object} Message + * @property {string} id - The identifier of the message. + * @property {string} object - The object type, always 'thread.message'. + * @property {number} created_at - The Unix timestamp (in seconds) for when the message was created. + * @property {string} thread_id - The thread ID that this message belongs to. + * @property {string} role - The entity that produced the message. One of 'user' or 'assistant'. + * @property {Object[]} content - The content of the message in an array of text and/or images. + * @property {string} content[].type - The type of content, either 'text' or 'image_file'. + * @property {Object} [content[].text] - The text content, present if type is 'text'. + * @property {string} content[].text.value - The data that makes up the text. + * @property {Object[]} [content[].text.annotations] - Annotations for the text content. + * @property {Object} [content[].image_file] - The image file content, present if type is 'image_file'. + * @property {string} content[].image_file.file_id - The File ID of the image in the message content. + * @property {string[]} [file_ids] - Optional list of File IDs for the message. + * @property {string|null} [assistant_id] - If applicable, the ID of the assistant that authored this message. + * @property {string|null} [run_id] - If applicable, the ID of the run associated with the authoring of this message. + * @property {Object} [metadata] - Optional metadata for the message, a map of key-value pairs. 
+ */ + +/** + * @typedef {Object} FunctionTool + * @property {string} type - The type of tool, 'function'. + * @property {Object} function - The function definition. + * @property {string} function.description - A description of what the function does. + * @property {string} function.name - The name of the function to be called. + * @property {Object} function.parameters - The parameters the function accepts, described as a JSON Schema object. + */ + +/** + * @typedef {Object} Tool + * @property {string} type - The type of tool, can be 'code_interpreter', 'retrieval', or 'function'. + * @property {FunctionTool} [function] - The function tool, present if type is 'function'. + */ + +/** + * @typedef {Object} Run + * @property {string} id - The identifier of the run. + * @property {string} object - The object type, always 'thread.run'. + * @property {number} created_at - The Unix timestamp (in seconds) for when the run was created. + * @property {string} thread_id - The ID of the thread that was executed on as a part of this run. + * @property {string} assistant_id - The ID of the assistant used for execution of this run. + * @property {string} status - The status of the run (e.g., 'queued', 'completed'). + * @property {Object} [required_action] - Details on the action required to continue the run. + * @property {string} required_action.type - The type of required action, always 'submit_tool_outputs'. + * @property {Object} required_action.submit_tool_outputs - Details on the tool outputs needed for the run to continue. + * @property {Object[]} required_action.submit_tool_outputs.tool_calls - A list of the relevant tool calls. + * @property {string} required_action.submit_tool_outputs.tool_calls[].id - The ID of the tool call. + * @property {string} required_action.submit_tool_outputs.tool_calls[].type - The type of tool call the output is required for, always 'function'. + * @property {Object} required_action.submit_tool_outputs.tool_calls[].function - The function definition. + * @property {string} required_action.submit_tool_outputs.tool_calls[].function.name - The name of the function. + * @property {string} required_action.submit_tool_outputs.tool_calls[].function.arguments - The arguments that the model expects you to pass to the function. + * @property {Object} [last_error] - The last error associated with this run. + * @property {string} last_error.code - One of 'server_error' or 'rate_limit_exceeded'. + * @property {string} last_error.message - A human-readable description of the error. + * @property {number} [expires_at] - The Unix timestamp (in seconds) for when the run will expire. + * @property {number} [started_at] - The Unix timestamp (in seconds) for when the run was started. + * @property {number} [cancelled_at] - The Unix timestamp (in seconds) for when the run was cancelled. + * @property {number} [failed_at] - The Unix timestamp (in seconds) for when the run failed. + * @property {number} [completed_at] - The Unix timestamp (in seconds) for when the run was completed. + * @property {string} [model] - The model that the assistant used for this run. + * @property {string} [instructions] - The instructions that the assistant used for this run. + * @property {Tool[]} [tools] - The list of tools used for this run. + * @property {string[]} [file_ids] - The list of File IDs used for this run. + * @property {Object} [metadata] - Metadata associated with this run. + */ + +/** + * @typedef {Object} RunStep + * @property {string} id - The identifier of the run step. 
+ * @property {string} object - The object type, always 'thread.run.step'. + * @property {number} created_at - The Unix timestamp (in seconds) for when the run step was created. + * @property {string} assistant_id - The ID of the assistant associated with the run step. + * @property {string} thread_id - The ID of the thread that was run. + * @property {string} run_id - The ID of the run that this run step is a part of. + * @property {string} type - The type of run step, either 'message_creation' or 'tool_calls'. + * @property {string} status - The status of the run step, can be 'in_progress', 'cancelled', 'failed', 'completed', or 'expired'. + * @property {Object} step_details - The details of the run step. + * @property {Object} [last_error] - The last error associated with this run step. + * @property {string} last_error.code - One of 'server_error' or 'rate_limit_exceeded'. + * @property {string} last_error.message - A human-readable description of the error. + * @property {number} [expired_at] - The Unix timestamp (in seconds) for when the run step expired. + * @property {number} [cancelled_at] - The Unix timestamp (in seconds) for when the run step was cancelled. + * @property {number} [failed_at] - The Unix timestamp (in seconds) for when the run step failed. + * @property {number} [completed_at] - The Unix timestamp (in seconds) for when the run step completed. + * @property {Object} [metadata] - Metadata associated with this run step, a map of up to 16 key-value pairs. + */ + +/** + * @typedef {Object} StepMessage + * @property {Message} message - The complete message object created by the step. + * @property {string} id - The identifier of the run step. + * @property {string} object - The object type, always 'thread.run.step'. + * @property {number} created_at - The Unix timestamp (in seconds) for when the run step was created. + * @property {string} assistant_id - The ID of the assistant associated with the run step. + * @property {string} thread_id - The ID of the thread that was run. + * @property {string} run_id - The ID of the run that this run step is a part of. + * @property {string} type - The type of run step, either 'message_creation' or 'tool_calls'. + * @property {string} status - The status of the run step, can be 'in_progress', 'cancelled', 'failed', 'completed', or 'expired'. + * @property {Object} step_details - The details of the run step. + * @property {Object} [last_error] - The last error associated with this run step. + * @property {string} last_error.code - One of 'server_error' or 'rate_limit_exceeded'. + * @property {string} last_error.message - A human-readable description of the error. + * @property {number} [expired_at] - The Unix timestamp (in seconds) for when the run step expired. + * @property {number} [cancelled_at] - The Unix timestamp (in seconds) for when the run step was cancelled. + * @property {number} [failed_at] - The Unix timestamp (in seconds) for when the run step failed. + * @property {number} [completed_at] - The Unix timestamp (in seconds) for when the run step completed. + * @property {Object} [metadata] - Metadata associated with this run step, a map of up to 16 key-value pairs. + */ + +/** + * Initializes a new thread or adds messages to an existing thread. + * + * @param {Object} params - The parameters for initializing a thread. + * @param {OpenAI} params.openai - The OpenAI client instance. + * @param {Object} params.body - The body of the request. 
+ * @param {Message[]} params.body.messages - A list of messages to start the thread with. + * @param {Object} [params.body.metadata] - Optional metadata for the thread. + * @param {string} [params.thread_id] - Optional existing thread ID. If provided, a message will be added to this thread. + * @return {Promise} A promise that resolves to the newly created thread object or the updated thread object. + */ +async function initThread({ openai, body, thread_id: _thread_id }) { + let thread = {}; + const messages = []; + if (_thread_id) { + const message = await openai.beta.threads.messages.create(_thread_id, body.messages[0]); + messages.push(message); + } else { + thread = await openai.beta.threads.create(body); + } + + const thread_id = _thread_id ?? thread.id; + return { messages, thread_id, ...thread }; +} + +/** + * Creates a run on a thread using the OpenAI API. + * + * @param {Object} params - The parameters for creating a run. + * @param {OpenAI} params.openai - The OpenAI client instance. + * @param {string} params.thread_id - The ID of the thread to run. + * @param {Object} params.body - The body of the request to create a run. + * @param {string} params.body.assistant_id - The ID of the assistant to use for this run. + * @param {string} [params.body.model] - Optional. The ID of the model to be used for this run. + * @param {string} [params.body.instructions] - Optional. Override the default system message of the assistant. + * @param {Object[]} [params.body.tools] - Optional. Override the tools the assistant can use for this run. + * @param {string[]} [params.body.file_ids] - Optional. List of File IDs the assistant can use for this run. + * @param {Object} [params.body.metadata] - Optional. Metadata for the run. + * @return {Promise} A promise that resolves to the created run object. + */ +async function createRun({ openai, thread_id, body }) { + const run = await openai.beta.threads.runs.create(thread_id, body); + return run; +} + +// /** +// * Retrieves all steps of a run. +// * +// * @param {Object} params - The parameters for the retrieveRunSteps function. +// * @param {OpenAI} params.openai - The OpenAI client instance. +// * @param {string} params.thread_id - The ID of the thread associated with the run. +// * @param {string} params.run_id - The ID of the run to retrieve steps for. +// * @return {Promise} A promise that resolves to an array of RunStep objects. +// */ +// async function retrieveRunSteps({ openai, thread_id, run_id }) { +// const runSteps = await openai.beta.threads.runs.steps.list(thread_id, run_id); +// return runSteps; +// } + +/** + * Delays the execution for a specified number of milliseconds. + * + * @param {number} ms - The number of milliseconds to delay. + * @return {Promise} A promise that resolves after the specified delay. + */ +function sleep(ms) { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +/** + * Waits for a run to complete by repeatedly checking its status. It uses a RunManager instance to fetch and manage run steps based on the run status. + * + * @param {Object} params - The parameters for the waitForRun function. + * @param {OpenAI} params.openai - The OpenAI client instance. + * @param {string} params.run_id - The ID of the run to wait for. + * @param {string} params.thread_id - The ID of the thread associated with the run. + * @param {RunManager} params.runManager - The RunManager instance to manage run steps. 
+ * @param {number} params.pollIntervalMs - The interval for polling the run status, default is 500 milliseconds. + * @return {Promise} A promise that resolves to the last fetched run object. + */ +async function waitForRun({ openai, run_id, thread_id, runManager, pollIntervalMs = 500 }) { + const timeout = 18000; // 18 seconds + let timeElapsed = 0; + let run; + + // this runManager will be passed in from the caller + // const runManager = new RunManager({ + // 'in_progress': (step) => { /* ... */ }, + // 'queued': (step) => { /* ... */ }, + // }); + + while (timeElapsed < timeout) { + run = await openai.beta.threads.runs.retrieve(thread_id, run_id); + console.log(`Run status: ${run.status}`); + + if (!['in_progress', 'queued'].includes(run.status)) { + await runManager.fetchRunSteps({ + openai, + thread_id: thread_id, + run_id: run_id, + runStatus: run.status, + final: true, + }); + break; + } + + // may use in future + // await runManager.fetchRunSteps({ + // openai, + // thread_id: thread_id, + // run_id: run_id, + // runStatus: run.status, + // }); + + await sleep(pollIntervalMs); + timeElapsed += pollIntervalMs; + } + + return run; +} + +/** + * @typedef {Object} AgentAction + * @property {string} tool - The name of the tool used. + * @property {string} toolInput - The input provided to the tool. + * @property {string} log - A log or message associated with the action. + */ + +/** + * @typedef {Object} AgentFinish + * @property {Record} returnValues - The return values of the agent's execution. + * @property {string} log - A log or message associated with the finish. + */ + +/** + * @typedef {AgentFinish & { run_id: string; thread_id: string; }} OpenAIAssistantFinish + */ + +/** + * @typedef {AgentAction & { toolCallId: string; run_id: string; thread_id: string; }} OpenAIAssistantAction + */ + +/** + * Retrieves the response from an OpenAI run. + * + * @param {Object} params - The parameters for getting the response. + * @param {OpenAI} params.openai - The OpenAI client instance. + * @param {string} params.run_id - The ID of the run to get the response for. + * @param {string} params.thread_id - The ID of the thread associated with the run. + * @return {Promise} + */ +async function getResponse({ openai, run_id, thread_id }) { + const run = await waitForRun({ openai, run_id, thread_id, pollIntervalMs: 500 }); + + if (run.status === 'completed') { + const messages = await openai.beta.threads.messages.list(thread_id, { + order: 'asc', + }); + const newMessages = messages.data.filter((msg) => msg.run_id === run_id); + + return newMessages; + } else if (run.status === 'requires_action') { + const actions = []; + run.required_action?.submit_tool_outputs.tool_calls.forEach((item) => { + const functionCall = item.function; + const args = JSON.parse(functionCall.arguments); + actions.push({ + tool: functionCall.name, + toolInput: args, + toolCallId: item.id, + log: '', + run_id, + thread_id, + }); + }); + + return actions; + } + + const runInfo = JSON.stringify(run, null, 2); + throw new Error(`Unexpected run status ${run.status}.\nFull run info:\n\n${runInfo}`); +} + +/** + * Initializes a RunManager with handlers, then invokes waitForRun to monitor and manage an OpenAI run. + * + * @param {Object} params - The parameters for managing and monitoring the run. + * @param {OpenAI} params.openai - The OpenAI client instance. + * @param {string} params.run_id - The ID of the run to manage and monitor. + * @param {string} params.thread_id - The ID of the thread associated with the run. 
+ * @return {Promise} A promise that resolves to an object containing the run and managed steps.
+ */
+async function handleRun({ openai, run_id, thread_id }) {
+  let steps;
+  let messages;
+  const runManager = new RunManager({
+    // 'in_progress': async ({ step, final, isLast }) => {
+    //   // Define logic for handling steps with 'in_progress' status
+    // },
+    // 'queued': async ({ step, final, isLast }) => {
+    //   // Define logic for handling steps with 'queued' status
+    // },
+    final: async ({ step, runStatus, stepsByStatus }) => {
+      console.log(`Final step for ${run_id} with status ${runStatus}`);
+      console.dir(step, { depth: null });
+
+      const promises = [];
+      promises.push(
+        openai.beta.threads.messages.list(thread_id, {
+          order: 'asc',
+        }),
+      );
+
+      const finalSteps = stepsByStatus[runStatus];
+
+      // loop across all statuses, may use in the future
+      // for (const [_status, stepsPromises] of Object.entries(stepsByStatus)) {
+      //   promises.push(...stepsPromises);
+      // }
+      for (const stepPromise of finalSteps) {
+        promises.push(stepPromise);
+      }
+
+      const resolved = await Promise.all(promises);
+      const res = resolved.shift();
+      messages = res.data.filter((msg) => msg.run_id === run_id);
+      resolved.push(step);
+      steps = resolved;
+    },
+  });
+
+  const run = await waitForRun({ openai, run_id, thread_id, runManager, pollIntervalMs: 500 });
+
+  return { run, steps, messages };
+}
+
+/**
+ * Maps messages to their corresponding steps. Steps with message creation will be paired with their messages,
+ * while steps without message creation will be returned as is.
+ *
+ * @param {RunStep[]} steps - An array of steps from the run.
+ * @param {Message[]} messages - An array of message objects.
+ * @returns {(StepMessage | RunStep)[]} An array where each element is either a step with its corresponding message (StepMessage) or a step without a message (RunStep).
+ */
+function mapMessagesToSteps(steps, messages) {
+  // Create a map of messages indexed by their IDs for efficient lookup
+  const messageMap = messages.reduce((acc, msg) => {
+    acc[msg.id] = msg;
+    return acc;
+  }, {});
+
+  // Map each step to its corresponding message, or return the step as is if no message ID is present
+  return steps.map((step) => {
+    const messageId = step.step_details?.message_creation?.message_id;
+
+    if (messageId && messageMap[messageId]) {
+      return { step, message: messageMap[messageId] };
+    }
+    return step;
+  });
+}
+
+module.exports = {
+  initThread,
+  createRun,
+  waitForRun,
+  getResponse,
+  handleRun,
+  mapMessagesToSteps,
+};
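Taken together, the intended lifecycle is: initialize (or reuse) a thread, create a run against it, poll via `handleRun` until a terminal status, then pair the resulting steps with the messages they created. A condensed sketch of that flow, assuming a valid `OPENAI_API_KEY`; the assistant ID is a placeholder:

```js
const OpenAI = require('openai');
const {
  initThread,
  createRun,
  handleRun,
  mapMessagesToSteps,
} = require('./AssistantService');

async function demo() {
  const openai = new OpenAI(process.env.OPENAI_API_KEY);

  // Start a fresh thread with a single user message.
  const { thread_id } = await initThread({
    openai,
    body: { messages: [{ role: 'user', content: 'Hello there' }] },
  });

  // Kick off a run; 'asst_...' stands in for a real assistant ID.
  const run = await createRun({
    openai,
    thread_id,
    body: { assistant_id: 'asst_...', model: 'gpt-3.5-turbo-1106' },
  });

  // Poll until the run settles, then pair steps with their created messages.
  const { steps, messages } = await handleRun({ openai, thread_id, run_id: run.id });
  console.dir(mapMessagesToSteps(steps, messages), { depth: null });
}

demo().catch(console.error);
```

diff --git a/api/server/services/EndpointService.js b/api/server/services/EndpointService.js
index 67c669a70..ddcc65e94 100644
--- a/api/server/services/EndpointService.js
+++ b/api/server/services/EndpointService.js
@@ -1,3 +1,5 @@
+const { EModelEndpoint } = require('../routes/endpoints/schemas');
+
const {
OPENAI_API_KEY: openAIApiKey,
AZURE_API_KEY: azureOpenAIApiKey,
@@ -18,12 +20,6 @@ function isUserProvided(key) {
return key ?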
{ userProvide: key === 'user_provided' } : false; } -const openAI = isUserProvided(openAIApiKey); -const azureOpenAI = isUserProvided(azureOpenAIApiKey); -const bingAI = isUserProvided(bingToken); -const chatGPTBrowser = isUserProvided(chatGPTToken); -const anthropic = isUserProvided(anthropicApiKey); - module.exports = { config: { openAIApiKey, @@ -31,10 +27,11 @@ module.exports = { useAzurePlugins, userProvidedOpenAI, palmKey, - openAI, - azureOpenAI, - chatGPTBrowser, - anthropic, - bingAI, + [EModelEndpoint.openAI]: isUserProvided(openAIApiKey), + [EModelEndpoint.assistant]: isUserProvided(openAIApiKey), + [EModelEndpoint.azureOpenAI]: isUserProvided(azureOpenAIApiKey), + [EModelEndpoint.chatGPTBrowser]: isUserProvided(chatGPTToken), + [EModelEndpoint.anthropic]: isUserProvided(anthropicApiKey), + [EModelEndpoint.bingAI]: isUserProvided(bingToken), }, }; diff --git a/api/server/services/Runs/RunMananger.js b/api/server/services/Runs/RunMananger.js new file mode 100644 index 000000000..adc0e1819 --- /dev/null +++ b/api/server/services/Runs/RunMananger.js @@ -0,0 +1,93 @@ +/** + * @typedef {import('openai').OpenAI} OpenAI + * @typedef {import('../AssistantService').RunStep} RunStep + * @callback StepHandler + * @param {RunStep} step - A single run step to be processed. + */ + +/** + * @typedef {Object} RunManager + * Manages the retrieval and processing of run steps based on run status. + * @property {Set} seenSteps - A set of IDs for steps that have already been seen. + * @property {Object.>} stepsByStatus - Steps organized by run status. + * @property {Object.} handlers - Handlers for different run statuses. + * @property {Object.} lastStepPromiseByStatus - Last processed step's promise by run status. + * @property {Function} fetchRunSteps - Fetches run steps based on run status. + * @property {Function} handleStep - Handles a run step based on its status. + */ + +/** + * Manages the retrieval and processing of run steps based on run status. + */ +class RunManager { + /** + * Initializes the RunManager instance. + * @param {Object.} handlers - An object containing handler functions for different run statuses. + */ + constructor(handlers = {}) { + this.seenSteps = new Set(); + this.stepsByStatus = {}; + this.handlers = handlers; + this.lastStepPromiseByStatus = {}; + } + + /** + * Fetches run steps once and filters out already seen steps. + * @param {Object} params - The parameters for fetching run steps. + * @param {OpenAI} params.openai - The OpenAI client instance. + * @param {string} params.thread_id - The ID of the thread associated with the run. + * @param {string} params.run_id - The ID of the run to retrieve steps for. + * @param {string} params.runStatus - The status of the run. + * @param {boolean} [params.final] - The end of the run polling loop, due to `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired` statuses. 
+   */
+  async fetchRunSteps({ openai, thread_id, run_id, runStatus, final = false }) {
+    // const { data: steps, first_id, last_id, has_more } = await openai.beta.threads.runs.steps.list(thread_id, run_id);
+    const { data: _steps } = await openai.beta.threads.runs.steps.list(thread_id, run_id);
+    const steps = _steps.sort((a, b) => a.created_at - b.created_at);
+    for (const [i, step] of steps.entries()) {
+      if (this.seenSteps.has(step.id)) {
+        continue;
+      }
+
+      const isLast = i === steps.length - 1;
+      this.seenSteps.add(step.id);
+      this.stepsByStatus[runStatus] = this.stepsByStatus[runStatus] || [];
+
+      const currentStepPromise = (async () => {
+        await (this.lastStepPromiseByStatus[runStatus] || Promise.resolve());
+        return this.handleStep({ step, runStatus, final, isLast });
+      })();
+
+      if (final && isLast) {
+        return await currentStepPromise;
+      }
+
+      this.lastStepPromiseByStatus[runStatus] = currentStepPromise;
+      this.stepsByStatus[runStatus].push(currentStepPromise);
+    }
+  }
+
+  /**
+   * Handles a run step based on its status.
+   * @param {Object} params - The parameters for handling a run step.
+   * @param {RunStep} params.step - The run step to handle.
+   * @param {string} params.runStatus - The status of the run step.
+   * @param {boolean} params.final - Whether this is the final fetch (no further polling will occur).
+   * @param {boolean} params.isLast - Whether the current step is the last step of the list.
+   */
+  async handleStep({ step, runStatus, final, isLast }) {
+    if (this.handlers[runStatus]) {
+      return this.handlers[runStatus]({ step, final, isLast });
+    }
+
+    if (final && isLast && this.handlers['final']) {
+      return await this.handlers['final']({ step, runStatus, stepsByStatus: this.stepsByStatus });
+    }
+
+    console.log(`Default handler for ${step.id} with status \`${runStatus}\``);
+    console.dir({ step, runStatus, final, isLast }, { depth: null });
+    return step;
+  }
+}
+
+module.exports = RunManager;
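`RunManager` is deliberately handler-driven: each run status can get its own callback, the `final` handler fires once on the last step of the terminal fetch, and anything unhandled falls through to the default logger. A sketch of registering handlers directly — hypothetical wiring, since `handleRun` above does the equivalent internally:

```js
const RunManager = require('./Runs/RunMananger'); // note: filename as committed

const runManager = new RunManager({
  // Fires only if steps are fetched while the run is still going
  // (the mid-poll fetch in waitForRun is currently commented out).
  in_progress: async ({ step, isLast }) => {
    console.log(`saw step ${step.id} (last in batch: ${isLast})`);
  },
  // Fires once, on the last step of the terminal-status fetch.
  final: async ({ step, runStatus, stepsByStatus }) => {
    console.log(`run ended as ${runStatus}; buckets:`, Object.keys(stepsByStatus));
    return step;
  },
});

// Later, typically from waitForRun once the run leaves 'queued'/'in_progress':
// await runManager.fetchRunSteps({ openai, thread_id, run_id, runStatus: run.status, final: true });
```

diff --git a/client/package.json b/client/package.json
index 9c3edc68f..933705c33 100644
--- a/client/package.json
+++ b/client/package.json
@@ -34,6 +34,8 @@
"@radix-ui/react-hover-card": "^1.0.5",
"@radix-ui/react-icons": "^1.3.0",
"@radix-ui/react-label": "^2.0.0",
+ "@radix-ui/react-popover": "^1.0.7",
+ "@radix-ui/react-separator": "^1.0.3",
"@radix-ui/react-slider": "^1.1.1",
"@radix-ui/react-switch": "^1.0.3",
"@radix-ui/react-tabs": "^1.0.3",
@@ -55,6 +57,8 @@
"lucide-react": "^0.220.0",
"rc-input-number": "^7.4.2",
"react": "^18.2.0",
+ "react-dnd": "^16.0.1",
+ "react-dnd-html5-backend": "^16.0.1",
"react-dom": "^18.2.0",
"react-hook-form": "^7.43.9",
"react-markdown": "^8.0.6",
diff --git a/client/src/App.jsx b/client/src/App.jsx
index a2bb729c6..72d07b1c9 100644
--- a/client/src/App.jsx
+++ b/client/src/App.jsx
@@ -1,10 +1,12 @@
import { RecoilRoot } from 'recoil';
-import * as RadixToast from '@radix-ui/react-toast';
+import { DndProvider } from 'react-dnd';
import { RouterProvider } from 'react-router-dom';
+import * as RadixToast from '@radix-ui/react-toast';
+import { HTML5Backend } from 'react-dnd-html5-backend';
import { ReactQueryDevtools } from '@tanstack/react-query-devtools';
import { QueryClient, QueryClientProvider, QueryCache } from '@tanstack/react-query';
import { ScreenshotProvider, ThemeProvider, useApiErrorBoundary } from './hooks';
-import { ToastProvider } from './Providers';
+import { ToastProvider, AssistantsProvider } from './Providers';
import Toast from './components/ui/Toast';
import { router } from './routes';
@@ -27,10 +29,14 @@ const App = () => {
- - -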
- + + + + + + + +
diff --git a/client/src/Providers/AssistantsContext.tsx b/client/src/Providers/AssistantsContext.tsx
new file mode 100644
index 000000000..515618879
--- /dev/null
+++ b/client/src/Providers/AssistantsContext.tsx
@@ -0,0 +1,29 @@
+import { createContext, useContext } from 'react';
+import type { UseFormReturn } from 'react-hook-form';
+import type { CreationForm } from '~/common';
+import useCreationForm from './useCreationForm';
+
+// type AssistantsContextType = {
+//   // open: boolean;
+//   // setOpen: Dispatch<SetStateAction<boolean>>;
+//   form: UseFormReturn<CreationForm>;
+// };
+type AssistantsContextType = UseFormReturn<CreationForm>;
+
+export const AssistantsContext = createContext({} as AssistantsContextType);
+
+export function useAssistantsContext() {
+  const context = useContext(AssistantsContext);
+
+  if (context === undefined) {
+    throw new Error('useAssistantsContext must be used within an AssistantsProvider');
+  }
+
+  return context;
+}
+
+export default function AssistantsProvider({ children }) {
+  const hookValues = useCreationForm();
+
+  return <AssistantsContext.Provider value={hookValues}>{children}</AssistantsContext.Provider>;
+}
diff --git a/client/src/Providers/ChatContext.tsx b/client/src/Providers/ChatContext.tsx
new file mode 100644
index 000000000..0c6880111
--- /dev/null
+++ b/client/src/Providers/ChatContext.tsx
@@ -0,0 +1,6 @@
+import { createContext, useContext } from 'react';
+import useChatHelpers from '~/hooks/useChatHelpers';
+type TChatContext = ReturnType<typeof useChatHelpers>;
+
+export const ChatContext = createContext({} as TChatContext);
+export const useChatContext = () => useContext(ChatContext);
diff --git a/client/src/Providers/index.ts b/client/src/Providers/index.ts
index 41cb62ae9..ab8b65d78 100644
--- a/client/src/Providers/index.ts
+++ b/client/src/Providers/index.ts
@@ -1,2 +1,5 @@
export { default as ToastProvider } from './ToastContext';
+export { default as AssistantsProvider } from './AssistantsContext';
+export * from './ChatContext';
export * from './ToastContext';
+export * from './AssistantsContext';
diff --git a/client/src/Providers/useCreationForm.ts b/client/src/Providers/useCreationForm.ts
new file mode 100644
index 000000000..6fadf4c94
--- /dev/null
+++ b/client/src/Providers/useCreationForm.ts
@@ -0,0 +1,19 @@
+// import { useState } from 'react';
+import { useForm } from 'react-hook-form';
+import type { CreationForm } from '~/common';
+
+export default function useCreationForm() {
+  return useForm<CreationForm>({
+    defaultValues: {
+      assistant: '',
+      id: '',
+      name: '',
+      description: '',
+      instructions: '',
+      model: 'gpt-3.5-turbo-1106',
+      function: false,
+      code_interpreter: false,
+      retrieval: false,
+    },
+  });
+}
diff --git a/client/src/common/assistants-types.ts b/client/src/common/assistants-types.ts
new file mode 100644
index 000000000..7dc6906e7
--- /dev/null
+++ b/client/src/common/assistants-types.ts
@@ -0,0 +1,19 @@
+import type { Option } from './types';
+import type { Assistant } from 'librechat-data-provider';
+
+export type TAssistantOption = string | (Option & Assistant);
+
+export type Actions = {
+  function: boolean;
+  code_interpreter: boolean;
+  retrieval: boolean;
+};
+
+export type CreationForm = {
+  assistant: TAssistantOption;
+  id: string;
+  name: string | null;
+  description: string | null;
+  instructions: string | null;
+  model: string;
+} & Actions;
diff --git a/client/src/common/index.ts b/client/src/common/index.ts
index fcb073fef..35acc738e 100644
--- a/client/src/common/index.ts
+++ b/client/src/common/index.ts
@@ -1 +1,2 @@
export * from './types';
+export * from './assistants-types';
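On the client, `AssistantsProvider` simply exposes the `react-hook-form` instance through context, so any component beneath it can read or set creation-form state without prop drilling. A minimal consumer as a sketch — `ModelBadge` is hypothetical; the field names come from the `CreationForm` type above:

```jsx
import { useAssistantsContext } from '~/Providers';

// Hypothetical consumer: reads the shared creation form provided by AssistantsProvider.
export default function ModelBadge() {
  const { watch, setValue } = useAssistantsContext();
  const model = watch('model');

  return (
    <button type="button" onClick={() => setValue('model', 'gpt-4-1106-preview')}>
      {model}
    </button>
  );
}
```

diff --git a/client/src/common/types.ts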
b/client/src/common/types.ts index 5397cc2d8..57f4bff6c 100644 --- a/client/src/common/types.ts +++ b/client/src/common/types.ts @@ -6,6 +6,7 @@ import type { TLoginUser, TUser, } from 'librechat-data-provider'; +import { EModelEndpoint } from 'librechat-data-provider'; export type TSetOption = (param: number | string) => (newValue: number | string | boolean) => void; export type TSetExample = ( @@ -14,6 +15,22 @@ export type TSetExample = ( newValue: number | string | boolean | null, ) => void; +export const alternateName = { + [EModelEndpoint.openAI]: 'OpenAI', + [EModelEndpoint.assistant]: 'Assistants', + [EModelEndpoint.azureOpenAI]: 'Azure OpenAI', + [EModelEndpoint.bingAI]: 'Bing', + [EModelEndpoint.chatGPTBrowser]: 'ChatGPT', + [EModelEndpoint.gptPlugins]: 'Plugins', + [EModelEndpoint.google]: 'PaLM', + [EModelEndpoint.anthropic]: 'Anthropic', +}; + +export const supportsFiles = { + [EModelEndpoint.openAI]: true, + [EModelEndpoint.assistant]: true, +}; + export enum ESide { Top = 'top', Right = 'right', @@ -47,6 +64,8 @@ export type TSettingsProps = TBaseSettingsProps & { export type TModels = { models: string[]; + showAbove?: boolean; + popover?: boolean; }; export type TModelSelectProps = TSettingsProps & TModels; @@ -64,7 +83,7 @@ export type TSetOptionsPayload = { addExample: () => void; removeExample: () => void; setAgentOption: TSetOption; - getConversation: () => TConversation | TPreset | null; + // getConversation: () => TConversation | TPreset | null; checkPluginSelection: (value: string) => boolean; setTools: (newValue: string) => void; }; @@ -201,3 +220,23 @@ export type IconProps = Pick & className?: string; endpoint?: string | null; }; + +export type Option = Record & { + label?: string; + value: string | number | null; +}; + +export type TOptionSettings = { + showExamples?: boolean; + isCodeChat?: boolean; +}; + +export interface ExtendedFile { + file: File; + width?: number; + height?: number; + preview: string; + progress: number; +} + +export type ContextType = { navVisible: boolean; setNavVisible: (visible: boolean) => void }; diff --git a/client/src/components/Chat/ChatView.tsx b/client/src/components/Chat/ChatView.tsx new file mode 100644 index 000000000..6b323ad46 --- /dev/null +++ b/client/src/components/Chat/ChatView.tsx @@ -0,0 +1,72 @@ +import { memo } from 'react'; +import { useRecoilValue } from 'recoil'; +import { useParams } from 'react-router-dom'; +import { useGetMessagesByConvoId } from 'librechat-data-provider'; +import { useChatHelpers, useDragHelpers, useSSE } from '~/hooks'; +// import GenerationButtons from './Input/GenerationButtons'; +import DragDropOverlay from './Input/Files/DragDropOverlay'; +import MessagesView from './Messages/MessagesView'; +// import OptionsBar from './Input/OptionsBar'; +import { ChatContext } from '~/Providers'; +import ChatForm from './Input/ChatForm'; +import { Spinner } from '~/components'; +import { buildTree } from '~/utils'; +import Landing from './Landing'; +import Header from './Header'; +import Footer from './Footer'; +import store from '~/store'; + +function ChatView({ + // messagesTree, + // isLoading, + index = 0, +}: { + // messagesTree?: TMessage[] | null; + // isLoading: boolean; + index?: number; +}) { + const { conversationId } = useParams(); + const submissionAtIndex = useRecoilValue(store.submissionByIndex(0)); + useSSE(submissionAtIndex); + + const { data: messagesTree = null, isLoading } = useGetMessagesByConvoId(conversationId ?? 
'', {
+    select: (data) => {
+      const dataTree = buildTree(data, false);
+      return dataTree?.length === 0 ? null : dataTree ?? null;
+    },
+  });
+  const chatHelpers = useChatHelpers(index, conversationId);
+  const { isOver, canDrop, drop } = useDragHelpers(chatHelpers.setFiles);
+  const isActive = canDrop && isOver;
+  return (
+    <ChatContext.Provider value={chatHelpers}>
+      <div ref={drop}>
+        <div>
+          {isLoading && conversationId !== 'new' ? (
+            <Spinner />
+          ) : messagesTree && messagesTree.length !== 0 ? (
+            <MessagesView messagesTree={messagesTree} Header={<Header />} />
+          ) : (
+            <Landing Header={<Header />} />
+          )}
+          {/* <GenerationButtons /> */}
+          {/* <OptionsBar /> */}
+          <div>
+            <ChatForm index={index} />
+            <Footer />
+          </div>
+          {isActive && <DragDropOverlay />}
+        </div>
+      </div>
+    </ChatContext.Provider>
+ ); +} + +export default memo(ChatView); diff --git a/client/src/components/Chat/CreationHeader.tsx b/client/src/components/Chat/CreationHeader.tsx new file mode 100644 index 000000000..264237e36 --- /dev/null +++ b/client/src/components/Chat/CreationHeader.tsx @@ -0,0 +1,113 @@ +// import { useState } from 'react'; +import { Plus } from 'lucide-react'; +import { useListAssistantsQuery } from 'librechat-data-provider'; +import type { Assistant } from 'librechat-data-provider'; +import type { UseFormReset, UseFormSetValue } from 'react-hook-form'; +import type { CreationForm, Actions, Option } from '~/common'; +import SelectDropDown from '~/components/ui/SelectDropDown'; +import { cn } from '~/utils/'; + +const keys = new Set(['name', 'id', 'description', 'instructions', 'model']); + +type TAssistantOption = string | (Option & Assistant); + +export default function CreationHeader({ + reset, + value, + onChange, + setValue, +}: { + reset: UseFormReset; + value: TAssistantOption; + onChange: (value: TAssistantOption) => void; + setValue: UseFormSetValue; +}) { + const assistants = useListAssistantsQuery( + { + order: 'asc', + }, + { + select: (res) => + res.data.map((assistant) => ({ + ...assistant, + label: assistant?.name ?? '', + value: assistant.id, + })), + }, + ); + + const onSelect = (value: string) => { + const assistant = assistants.data?.find((assistant) => assistant.id === value); + if (!assistant) { + reset(); + return; + } + onChange({ + ...assistant, + label: assistant?.name ?? '', + value: assistant?.id ?? '', + }); + const actions: Actions = { + function: false, + code_interpreter: false, + retrieval: false, + }; + assistant?.tools + ?.map((tool) => tool.type) + .forEach((tool) => { + actions[tool] = true; + }); + + Object.entries(assistant).forEach(([name, value]) => { + if (typeof value === 'number') { + return; + } else if (typeof value === 'object') { + return; + } + if (keys.has(name)) { + setValue(name as keyof CreationForm, value); + } + }); + + Object.entries(actions).forEach(([name, value]) => setValue(name as keyof Actions, value)); + }; + + return ( + ( + + + + + + {'Create Assistant'} + + + )} + /> + ); +} diff --git a/client/src/components/Chat/CreationPanel.tsx b/client/src/components/Chat/CreationPanel.tsx new file mode 100644 index 000000000..ee8ebeb3f --- /dev/null +++ b/client/src/components/Chat/CreationPanel.tsx @@ -0,0 +1,228 @@ +import { Controller, useWatch } from 'react-hook-form'; +import type { Tool } from 'librechat-data-provider'; +import type { CreationForm, Actions } from '~/common'; +import { useCreateAssistantMutation, Tools, EModelEndpoint } from 'librechat-data-provider'; +import { Separator } from '~/components/ui/Separator'; +import { useAssistantsContext } from '~/Providers'; +import { Switch } from '~/components/ui/Switch'; +import CreationHeader from './CreationHeader'; +import { useNewConvo } from '~/hooks'; + +export default function CreationPanel({ index = 0 }) { + const { switchToConversation } = useNewConvo(index); + const create = useCreateAssistantMutation(); + const { control, handleSubmit, reset, setValue } = useAssistantsContext(); + + const onSubmit = (data: CreationForm) => { + const tools: Tool[] = []; + console.log(data); + if (data.function) { + tools.push({ type: Tools.function }); + } + if (data.code_interpreter) { + tools.push({ type: Tools.code_interpreter }); + } + if (data.retrieval) { + tools.push({ type: Tools.retrieval }); + } + + const { + name, + description, + instructions, + model, + // file_ids, + } = data; + 
+ create.mutate({ + name, + description, + instructions, + model, + tools, + }); + }; + + const assistant_id = useWatch({ control, name: 'id' }); + + // Render function for the Switch component + const renderSwitch = (name: keyof Actions) => ( + ( + + )} + /> + ); + + return ( +
+ ( + + )} + /> +
+ {/* Name */} +
+ + ( + + )} + /> + ( +

{field.value ?? ''}

+ )} + /> +
+ {/* Description */} +
+ + ( + + )} + /> +
+ + {/* Instructions */} +
+ + ( +