mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-09-22 06:00:56 +02:00
🅰️ feat: Azure OpenAI Assistants API Support (#1992)
* chore: rename dir from `assistant` to plural * feat: `assistants` field for azure config, spread options in AppService * refactor: rename constructAzureURL param for azure as `azureOptions` * chore: bump openai and bun * chore(loadDefaultModels): change naming of assistant -> assistants * feat: load azure settings with currect baseURL for assistants' initializeClient * refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig * feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled * feat(AppService): determine assistant models on startup, throw Error if none * refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations * feat: support listing and deleting assistants with azure * feat: add model query to assistant avatar upload * feat: add azure support for retrieveRun method * refactor: update OpenAIClient initialization * chore: update README * fix(ci): tests passing * refactor(uploadOpenAIFile): improve logging and use more efficient REST API method * refactor(useFileHandling): add model to metadata to target Azure region compatible with current model * chore(files): add azure naming pattern for valid file id recognition * fix(assistants): initialize openai with first available assistant model if none provided * refactor(uploadOpenAIFile): add content type for azure, initialize formdata before azure options * refactor(sleep): move sleep function out of Runs and into `~/server/utils` * fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled * refactor(uploadOpenAIFile): revert to old method * chore(uploadOpenAIFile): use enum for file purpose * docs: azureOpenAI update guide with more info, examples * feat: enable/disable assistant capabilities and specify retrieval models * refactor: optional chain conditional statement in loadConfigModels.js * docs: add assistants examples * chore: update 
librechat.example.yaml * docs(azure): update note of file upload behavior in Azure OpenAI Assistants * chore: update docs and add descriptive message about assistant errors * fix: prevent message submission with invalid assistant or if files loading * style: update Landing icon & text when assistant is not selected * chore: bump librechat-data-provider to 0.4.8 * fix(assistants/azure): assign req.body.model for proper azure init to abort runs
This commit is contained in:
parent
1b243c6f8c
commit
5cd5c3bef8
60 changed files with 1044 additions and 300 deletions
|
@ -40,14 +40,15 @@
|
||||||
|
|
||||||
# 📃 Features
|
# 📃 Features
|
||||||
|
|
||||||
- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and 11-2023 updates
|
- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and latest updates
|
||||||
- 💬 Multimodal Chat:
|
- 💬 Multimodal Chat:
|
||||||
- Upload and analyze images with GPT-4 and Gemini Vision 📸
|
- Upload and analyze images with GPT-4 and Gemini Vision 📸
|
||||||
- More filetypes and Assistants API integration in Active Development 🚧
|
- General file support now available through the Assistants API integration. 🗃️
|
||||||
|
- Local RAG in Active Development 🚧
|
||||||
- 🌎 Multilingual UI:
|
- 🌎 Multilingual UI:
|
||||||
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
|
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
|
||||||
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
|
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
|
||||||
- 🤖 AI model selection: OpenAI API, Azure, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins
|
- 🤖 AI model selection: OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins, Assistants API (including Azure Assistants)
|
||||||
- 💾 Create, Save, & Share Custom Presets
|
- 💾 Create, Save, & Share Custom Presets
|
||||||
- 🔄 Edit, Resubmit, and Continue messages with conversation branching
|
- 🔄 Edit, Resubmit, and Continue messages with conversation branching
|
||||||
- 📤 Export conversations as screenshots, markdown, text, json.
|
- 📤 Export conversations as screenshots, markdown, text, json.
|
||||||
|
|
|
@ -234,7 +234,7 @@ class ChatGPTClient extends BaseClient {
|
||||||
baseURL = this.langchainProxy
|
baseURL = this.langchainProxy
|
||||||
? constructAzureURL({
|
? constructAzureURL({
|
||||||
baseURL: this.langchainProxy,
|
baseURL: this.langchainProxy,
|
||||||
azure: this.azure,
|
azureOptions: this.azure,
|
||||||
})
|
})
|
||||||
: this.azureEndpoint.split(/\/(chat|completion)/)[0];
|
: this.azureEndpoint.split(/\/(chat|completion)/)[0];
|
||||||
|
|
||||||
|
|
|
@ -1062,7 +1062,7 @@ ${convo}
|
||||||
opts.baseURL = this.langchainProxy
|
opts.baseURL = this.langchainProxy
|
||||||
? constructAzureURL({
|
? constructAzureURL({
|
||||||
baseURL: this.langchainProxy,
|
baseURL: this.langchainProxy,
|
||||||
azure: this.azure,
|
azureOptions: this.azure,
|
||||||
})
|
})
|
||||||
: this.azureEndpoint.split(/\/(chat|completion)/)[0];
|
: this.azureEndpoint.split(/\/(chat|completion)/)[0];
|
||||||
opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
|
opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
|
||||||
|
|
|
@ -57,7 +57,7 @@ function createLLM({
|
||||||
if (azure && configOptions.basePath) {
|
if (azure && configOptions.basePath) {
|
||||||
const azureURL = constructAzureURL({
|
const azureURL = constructAzureURL({
|
||||||
baseURL: configOptions.basePath,
|
baseURL: configOptions.basePath,
|
||||||
azure: azureOptions,
|
azureOptions,
|
||||||
});
|
});
|
||||||
azureOptions.azureOpenAIBasePath = azureURL.split(
|
azureOptions.azureOpenAIBasePath = azureURL.split(
|
||||||
`/${azureOptions.azureOpenAIApiDeploymentName}`,
|
`/${azureOptions.azureOpenAIApiDeploymentName}`,
|
||||||
|
|
|
@ -66,7 +66,7 @@
|
||||||
"multer": "^1.4.5-lts.1",
|
"multer": "^1.4.5-lts.1",
|
||||||
"nodejs-gpt": "^1.37.4",
|
"nodejs-gpt": "^1.37.4",
|
||||||
"nodemailer": "^6.9.4",
|
"nodemailer": "^6.9.4",
|
||||||
"openai": "^4.20.1",
|
"openai": "^4.28.4",
|
||||||
"openai-chat-tokens": "^0.2.8",
|
"openai-chat-tokens": "^0.2.8",
|
||||||
"openid-client": "^5.4.2",
|
"openid-client": "^5.4.2",
|
||||||
"passport": "^0.6.0",
|
"passport": "^0.6.0",
|
||||||
|
|
|
@ -16,8 +16,14 @@ async function endpointController(req, res) {
|
||||||
/** @type {TEndpointsConfig} */
|
/** @type {TEndpointsConfig} */
|
||||||
const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
|
const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
|
||||||
if (mergedConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
|
if (mergedConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
|
||||||
mergedConfig[EModelEndpoint.assistants].disableBuilder =
|
const { disableBuilder, retrievalModels, capabilities, ..._rest } =
|
||||||
req.app.locals[EModelEndpoint.assistants].disableBuilder;
|
req.app.locals[EModelEndpoint.assistants];
|
||||||
|
mergedConfig[EModelEndpoint.assistants] = {
|
||||||
|
...mergedConfig[EModelEndpoint.assistants],
|
||||||
|
retrievalModels,
|
||||||
|
disableBuilder,
|
||||||
|
capabilities,
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
const endpointsConfig = orderEndpointsConfig(mergedConfig);
|
const endpointsConfig = orderEndpointsConfig(mergedConfig);
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const { CacheKeys, RunStatus, isUUID } = require('librechat-data-provider');
|
const { CacheKeys, RunStatus, isUUID } = require('librechat-data-provider');
|
||||||
const { initializeClient } = require('~/server/services/Endpoints/assistant');
|
const { initializeClient } = require('~/server/services/Endpoints/assistants');
|
||||||
const { checkMessageGaps, recordUsage } = require('~/server/services/Threads');
|
const { checkMessageGaps, recordUsage } = require('~/server/services/Threads');
|
||||||
const { getConvo } = require('~/models/Conversation');
|
const { getConvo } = require('~/models/Conversation');
|
||||||
const getLogStores = require('~/cache/getLogStores');
|
const getLogStores = require('~/cache/getLogStores');
|
||||||
|
@ -11,6 +11,11 @@ async function abortRun(req, res) {
|
||||||
res.setHeader('Content-Type', 'application/json');
|
res.setHeader('Content-Type', 'application/json');
|
||||||
const { abortKey } = req.body;
|
const { abortKey } = req.body;
|
||||||
const [conversationId, latestMessageId] = abortKey.split(':');
|
const [conversationId, latestMessageId] = abortKey.split(':');
|
||||||
|
const conversation = await getConvo(req.user.id, conversationId);
|
||||||
|
|
||||||
|
if (conversation?.model) {
|
||||||
|
req.body.model = conversation.model;
|
||||||
|
}
|
||||||
|
|
||||||
if (!isUUID.safeParse(conversationId).success) {
|
if (!isUUID.safeParse(conversationId).success) {
|
||||||
logger.error('[abortRun] Invalid conversationId', { conversationId });
|
logger.error('[abortRun] Invalid conversationId', { conversationId });
|
||||||
|
@ -71,7 +76,7 @@ async function abortRun(req, res) {
|
||||||
const finalEvent = {
|
const finalEvent = {
|
||||||
title: 'New Chat',
|
title: 'New Chat',
|
||||||
final: true,
|
final: true,
|
||||||
conversation: await getConvo(req.user.id, conversationId),
|
conversation,
|
||||||
runMessages,
|
runMessages,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -1,9 +1,9 @@
|
||||||
const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
|
const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
|
||||||
const { getModelsConfig } = require('~/server/controllers/ModelController');
|
const { getModelsConfig } = require('~/server/controllers/ModelController');
|
||||||
const { processFiles } = require('~/server/services/Files/process');
|
const assistants = require('~/server/services/Endpoints/assistants');
|
||||||
const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
|
const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
|
||||||
|
const { processFiles } = require('~/server/services/Files/process');
|
||||||
const anthropic = require('~/server/services/Endpoints/anthropic');
|
const anthropic = require('~/server/services/Endpoints/anthropic');
|
||||||
const assistant = require('~/server/services/Endpoints/assistant');
|
|
||||||
const openAI = require('~/server/services/Endpoints/openAI');
|
const openAI = require('~/server/services/Endpoints/openAI');
|
||||||
const custom = require('~/server/services/Endpoints/custom');
|
const custom = require('~/server/services/Endpoints/custom');
|
||||||
const google = require('~/server/services/Endpoints/google');
|
const google = require('~/server/services/Endpoints/google');
|
||||||
|
@ -15,7 +15,7 @@ const buildFunction = {
|
||||||
[EModelEndpoint.azureOpenAI]: openAI.buildOptions,
|
[EModelEndpoint.azureOpenAI]: openAI.buildOptions,
|
||||||
[EModelEndpoint.anthropic]: anthropic.buildOptions,
|
[EModelEndpoint.anthropic]: anthropic.buildOptions,
|
||||||
[EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
|
[EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
|
||||||
[EModelEndpoint.assistants]: assistant.buildOptions,
|
[EModelEndpoint.assistants]: assistants.buildOptions,
|
||||||
};
|
};
|
||||||
|
|
||||||
async function buildEndpointOption(req, res, next) {
|
async function buildEndpointOption(req, res, next) {
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
const { v4 } = require('uuid');
|
const { v4 } = require('uuid');
|
||||||
const express = require('express');
|
const express = require('express');
|
||||||
const { actionDelimiter } = require('librechat-data-provider');
|
const { actionDelimiter } = require('librechat-data-provider');
|
||||||
const { initializeClient } = require('~/server/services/Endpoints/assistant');
|
const { initializeClient } = require('~/server/services/Endpoints/assistants');
|
||||||
const { updateAction, getActions, deleteAction } = require('~/models/Action');
|
const { updateAction, getActions, deleteAction } = require('~/models/Action');
|
||||||
const { updateAssistant, getAssistant } = require('~/models/Assistant');
|
const { updateAssistant, getAssistant } = require('~/models/Assistant');
|
||||||
const { encryptMetadata } = require('~/server/services/ActionService');
|
const { encryptMetadata } = require('~/server/services/ActionService');
|
||||||
|
|
|
@ -1,10 +1,14 @@
|
||||||
const multer = require('multer');
|
const multer = require('multer');
|
||||||
const express = require('express');
|
const express = require('express');
|
||||||
const { FileContext, EModelEndpoint } = require('librechat-data-provider');
|
const { FileContext, EModelEndpoint } = require('librechat-data-provider');
|
||||||
const { updateAssistant, getAssistants } = require('~/models/Assistant');
|
const {
|
||||||
const { initializeClient } = require('~/server/services/Endpoints/assistant');
|
initializeClient,
|
||||||
|
listAssistantsForAzure,
|
||||||
|
listAssistants,
|
||||||
|
} = require('~/server/services/Endpoints/assistants');
|
||||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
||||||
const { uploadImageBuffer } = require('~/server/services/Files/process');
|
const { uploadImageBuffer } = require('~/server/services/Files/process');
|
||||||
|
const { updateAssistant, getAssistants } = require('~/models/Assistant');
|
||||||
const { deleteFileByFilter } = require('~/models/File');
|
const { deleteFileByFilter } = require('~/models/File');
|
||||||
const { logger } = require('~/config');
|
const { logger } = require('~/config');
|
||||||
const actions = require('./actions');
|
const actions = require('./actions');
|
||||||
|
@ -48,6 +52,10 @@ router.post('/', async (req, res) => {
|
||||||
})
|
})
|
||||||
.filter((tool) => tool);
|
.filter((tool) => tool);
|
||||||
|
|
||||||
|
if (openai.locals?.azureOptions) {
|
||||||
|
assistantData.model = openai.locals.azureOptions.azureOpenAIApiDeploymentName;
|
||||||
|
}
|
||||||
|
|
||||||
const assistant = await openai.beta.assistants.create(assistantData);
|
const assistant = await openai.beta.assistants.create(assistantData);
|
||||||
logger.debug('/assistants/', assistant);
|
logger.debug('/assistants/', assistant);
|
||||||
res.status(201).json(assistant);
|
res.status(201).json(assistant);
|
||||||
|
@ -101,6 +109,10 @@ router.patch('/:id', async (req, res) => {
|
||||||
})
|
})
|
||||||
.filter((tool) => tool);
|
.filter((tool) => tool);
|
||||||
|
|
||||||
|
if (openai.locals?.azureOptions && updateData.model) {
|
||||||
|
updateData.model = openai.locals.azureOptions.azureOpenAIApiDeploymentName;
|
||||||
|
}
|
||||||
|
|
||||||
const updatedAssistant = await openai.beta.assistants.update(assistant_id, updateData);
|
const updatedAssistant = await openai.beta.assistants.update(assistant_id, updateData);
|
||||||
res.json(updatedAssistant);
|
res.json(updatedAssistant);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
@ -137,19 +149,18 @@ router.delete('/:id', async (req, res) => {
|
||||||
*/
|
*/
|
||||||
router.get('/', async (req, res) => {
|
router.get('/', async (req, res) => {
|
||||||
try {
|
try {
|
||||||
/** @type {{ openai: OpenAI }} */
|
|
||||||
const { openai } = await initializeClient({ req, res });
|
|
||||||
|
|
||||||
const { limit, order, after, before } = req.query;
|
const { limit, order, after, before } = req.query;
|
||||||
const response = await openai.beta.assistants.list({
|
const query = { limit, order, after, before };
|
||||||
limit,
|
|
||||||
order,
|
|
||||||
after,
|
|
||||||
before,
|
|
||||||
});
|
|
||||||
|
|
||||||
|
const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
|
||||||
/** @type {AssistantListResponse} */
|
/** @type {AssistantListResponse} */
|
||||||
let body = response.body;
|
let body;
|
||||||
|
|
||||||
|
if (azureConfig?.assistants) {
|
||||||
|
body = await listAssistantsForAzure({ req, res, azureConfig, query });
|
||||||
|
} else {
|
||||||
|
({ body } = await listAssistants({ req, res, query }));
|
||||||
|
}
|
||||||
|
|
||||||
if (req.app.locals?.[EModelEndpoint.assistants]) {
|
if (req.app.locals?.[EModelEndpoint.assistants]) {
|
||||||
/** @type {Partial<TAssistantEndpoint>} */
|
/** @type {Partial<TAssistantEndpoint>} */
|
||||||
|
@ -165,7 +176,7 @@ router.get('/', async (req, res) => {
|
||||||
res.json(body);
|
res.json(body);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[/assistants] Error listing assistants', error);
|
logger.error('[/assistants] Error listing assistants', error);
|
||||||
res.status(500).json({ error: error.message });
|
res.status(500).json({ message: 'Error listing assistants' });
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
|
@ -10,9 +10,9 @@ const {
|
||||||
saveAssistantMessage,
|
saveAssistantMessage,
|
||||||
} = require('~/server/services/Threads');
|
} = require('~/server/services/Threads');
|
||||||
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
|
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
|
||||||
const { addTitle, initializeClient } = require('~/server/services/Endpoints/assistant');
|
const { addTitle, initializeClient } = require('~/server/services/Endpoints/assistants');
|
||||||
const { sendResponse, sendMessage } = require('~/server/utils');
|
const { sendResponse, sendMessage, sleep } = require('~/server/utils');
|
||||||
const { createRun, sleep } = require('~/server/services/Runs');
|
const { createRun } = require('~/server/services/Runs');
|
||||||
const { getConvo } = require('~/models/Conversation');
|
const { getConvo } = require('~/models/Conversation');
|
||||||
const getLogStores = require('~/cache/getLogStores');
|
const getLogStores = require('~/cache/getLogStores');
|
||||||
const { logger } = require('~/config');
|
const { logger } = require('~/config');
|
||||||
|
@ -101,6 +101,8 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
|
||||||
let completedRun;
|
let completedRun;
|
||||||
|
|
||||||
const handleError = async (error) => {
|
const handleError = async (error) => {
|
||||||
|
const defaultErrorMessage =
|
||||||
|
'The Assistant run failed to initialize. Try sending a message in a new conversation.';
|
||||||
const messageData = {
|
const messageData = {
|
||||||
thread_id,
|
thread_id,
|
||||||
assistant_id,
|
assistant_id,
|
||||||
|
@ -119,12 +121,19 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
|
||||||
return;
|
return;
|
||||||
} else if (error.message === 'Request closed') {
|
} else if (error.message === 'Request closed') {
|
||||||
logger.debug('[/assistants/chat/] Request aborted on close');
|
logger.debug('[/assistants/chat/] Request aborted on close');
|
||||||
|
} else if (/Files.*are invalid/.test(error.message)) {
|
||||||
|
const errorMessage = `Files are invalid, or may not have uploaded yet.${
|
||||||
|
req.app.locals?.[EModelEndpoint.azureOpenAI].assistants
|
||||||
|
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
|
||||||
|
: ''
|
||||||
|
}`;
|
||||||
|
return sendResponse(res, messageData, errorMessage);
|
||||||
} else {
|
} else {
|
||||||
logger.error('[/assistants/chat/]', error);
|
logger.error('[/assistants/chat/]', error);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!openai || !thread_id || !run_id) {
|
if (!openai || !thread_id || !run_id) {
|
||||||
return sendResponse(res, messageData, 'The Assistant run failed to initialize');
|
return sendResponse(res, messageData, defaultErrorMessage);
|
||||||
}
|
}
|
||||||
|
|
||||||
await sleep(3000);
|
await sleep(3000);
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
const express = require('express');
|
const express = require('express');
|
||||||
const { CacheKeys } = require('librechat-data-provider');
|
const { CacheKeys } = require('librechat-data-provider');
|
||||||
const { initializeClient } = require('~/server/services/Endpoints/assistant');
|
const { initializeClient } = require('~/server/services/Endpoints/assistants');
|
||||||
const { getConvosByPage, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation');
|
const { getConvosByPage, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation');
|
||||||
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
|
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
|
||||||
const { sleep } = require('~/server/services/Runs/handle');
|
|
||||||
const getLogStores = require('~/cache/getLogStores');
|
const getLogStores = require('~/cache/getLogStores');
|
||||||
|
const { sleep } = require('~/server/utils');
|
||||||
const { logger } = require('~/config');
|
const { logger } = require('~/config');
|
||||||
|
|
||||||
const router = express.Router();
|
const router = express.Router();
|
||||||
|
|
|
@ -44,7 +44,7 @@ router.delete('/', async (req, res) => {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (/^file-/.test(file.file_id)) {
|
if (/^(file|assistant)-/.test(file.file_id)) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,6 +5,7 @@ const {
|
||||||
defaultSocialLogins,
|
defaultSocialLogins,
|
||||||
validateAzureGroups,
|
validateAzureGroups,
|
||||||
mapModelToAzureConfig,
|
mapModelToAzureConfig,
|
||||||
|
assistantEndpointSchema,
|
||||||
deprecatedAzureVariables,
|
deprecatedAzureVariables,
|
||||||
conflictingAzureVariables,
|
conflictingAzureVariables,
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
|
@ -68,8 +69,7 @@ const AppService = async (app) => {
|
||||||
const endpointLocals = {};
|
const endpointLocals = {};
|
||||||
|
|
||||||
if (config?.endpoints?.[EModelEndpoint.azureOpenAI]) {
|
if (config?.endpoints?.[EModelEndpoint.azureOpenAI]) {
|
||||||
const { groups, titleModel, titleConvo, titleMethod, plugins } =
|
const { groups, ...azureConfiguration } = config.endpoints[EModelEndpoint.azureOpenAI];
|
||||||
config.endpoints[EModelEndpoint.azureOpenAI];
|
|
||||||
const { isValid, modelNames, modelGroupMap, groupMap, errors } = validateAzureGroups(groups);
|
const { isValid, modelNames, modelGroupMap, groupMap, errors } = validateAzureGroups(groups);
|
||||||
|
|
||||||
if (!isValid) {
|
if (!isValid) {
|
||||||
|
@ -79,18 +79,32 @@ const AppService = async (app) => {
|
||||||
throw new Error(errorMessage);
|
throw new Error(errorMessage);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const assistantModels = [];
|
||||||
|
const assistantGroups = new Set();
|
||||||
for (const modelName of modelNames) {
|
for (const modelName of modelNames) {
|
||||||
mapModelToAzureConfig({ modelName, modelGroupMap, groupMap });
|
mapModelToAzureConfig({ modelName, modelGroupMap, groupMap });
|
||||||
|
const groupName = modelGroupMap?.[modelName]?.group;
|
||||||
|
const modelGroup = groupMap?.[groupName];
|
||||||
|
let supportsAssistants = modelGroup?.assistants || modelGroup?.[modelName]?.assistants;
|
||||||
|
if (supportsAssistants) {
|
||||||
|
assistantModels.push(modelName);
|
||||||
|
!assistantGroups.has(groupName) && assistantGroups.add(groupName);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (azureConfiguration.assistants && assistantModels.length === 0) {
|
||||||
|
throw new Error(
|
||||||
|
'No Azure models are configured to support assistants. Please remove the `assistants` field or configure at least one model to support assistants.',
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
endpointLocals[EModelEndpoint.azureOpenAI] = {
|
endpointLocals[EModelEndpoint.azureOpenAI] = {
|
||||||
modelNames,
|
modelNames,
|
||||||
modelGroupMap,
|
modelGroupMap,
|
||||||
groupMap,
|
groupMap,
|
||||||
titleConvo,
|
assistantModels,
|
||||||
titleMethod,
|
assistantGroups: Array.from(assistantGroups),
|
||||||
titleModel,
|
...azureConfiguration,
|
||||||
plugins,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
deprecatedAzureVariables.forEach(({ key, description }) => {
|
deprecatedAzureVariables.forEach(({ key, description }) => {
|
||||||
|
@ -111,10 +125,9 @@ const AppService = async (app) => {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config?.endpoints?.[EModelEndpoint.assistants]) {
|
if (config?.endpoints?.[EModelEndpoint.assistants]) {
|
||||||
const { disableBuilder, pollIntervalMs, timeoutMs, supportedIds, excludedIds } =
|
const assistantsConfig = config.endpoints[EModelEndpoint.assistants];
|
||||||
config.endpoints[EModelEndpoint.assistants];
|
const parsedConfig = assistantEndpointSchema.parse(assistantsConfig);
|
||||||
|
if (assistantsConfig.supportedIds?.length && assistantsConfig.excludedIds?.length) {
|
||||||
if (supportedIds?.length && excludedIds?.length) {
|
|
||||||
logger.warn(
|
logger.warn(
|
||||||
`Both \`supportedIds\` and \`excludedIds\` are defined for the ${EModelEndpoint.assistants} endpoint; \`excludedIds\` field will be ignored.`,
|
`Both \`supportedIds\` and \`excludedIds\` are defined for the ${EModelEndpoint.assistants} endpoint; \`excludedIds\` field will be ignored.`,
|
||||||
);
|
);
|
||||||
|
@ -122,11 +135,13 @@ const AppService = async (app) => {
|
||||||
|
|
||||||
/** @type {Partial<TAssistantEndpoint>} */
|
/** @type {Partial<TAssistantEndpoint>} */
|
||||||
endpointLocals[EModelEndpoint.assistants] = {
|
endpointLocals[EModelEndpoint.assistants] = {
|
||||||
disableBuilder,
|
retrievalModels: parsedConfig.retrievalModels,
|
||||||
pollIntervalMs,
|
disableBuilder: parsedConfig.disableBuilder,
|
||||||
timeoutMs,
|
pollIntervalMs: parsedConfig.pollIntervalMs,
|
||||||
supportedIds,
|
supportedIds: parsedConfig.supportedIds,
|
||||||
excludedIds,
|
capabilities: parsedConfig.capabilities,
|
||||||
|
excludedIds: parsedConfig.excludedIds,
|
||||||
|
timeoutMs: parsedConfig.timeoutMs,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -13,9 +13,9 @@ const {
|
||||||
defaultOrderQuery,
|
defaultOrderQuery,
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
const { retrieveAndProcessFile } = require('~/server/services/Files/process');
|
const { retrieveAndProcessFile } = require('~/server/services/Files/process');
|
||||||
const { RunManager, waitForRun, sleep } = require('~/server/services/Runs');
|
const { RunManager, waitForRun } = require('~/server/services/Runs');
|
||||||
const { processRequiredActions } = require('~/server/services/ToolService');
|
const { processRequiredActions } = require('~/server/services/ToolService');
|
||||||
const { createOnProgress, sendMessage } = require('~/server/utils');
|
const { createOnProgress, sendMessage, sleep } = require('~/server/utils');
|
||||||
const { TextStream } = require('~/app/clients');
|
const { TextStream } = require('~/app/clients');
|
||||||
const { logger } = require('~/config');
|
const { logger } = require('~/config');
|
||||||
|
|
||||||
|
|
|
@ -51,6 +51,13 @@ async function loadConfigEndpoints(req) {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
|
||||||
|
/** @type {Omit<TConfig, 'order'>} */
|
||||||
|
endpointsConfig[EModelEndpoint.assistants] = {
|
||||||
|
userProvide: false,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
return endpointsConfig;
|
return endpointsConfig;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -17,15 +17,20 @@ async function loadConfigModels(req) {
|
||||||
|
|
||||||
const { endpoints = {} } = customConfig ?? {};
|
const { endpoints = {} } = customConfig ?? {};
|
||||||
const modelsConfig = {};
|
const modelsConfig = {};
|
||||||
const azureModels = req.app.locals[EModelEndpoint.azureOpenAI]?.modelNames;
|
|
||||||
const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
|
const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
|
||||||
|
const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
|
||||||
|
const { modelNames } = azureConfig ?? {};
|
||||||
|
|
||||||
if (azureModels && azureEndpoint) {
|
if (modelNames && azureEndpoint) {
|
||||||
modelsConfig[EModelEndpoint.azureOpenAI] = azureModels;
|
modelsConfig[EModelEndpoint.azureOpenAI] = modelNames;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (azureModels && azureEndpoint && azureEndpoint.plugins) {
|
if (modelNames && azureEndpoint && azureEndpoint.plugins) {
|
||||||
modelsConfig[EModelEndpoint.gptPlugins] = azureModels;
|
modelsConfig[EModelEndpoint.gptPlugins] = modelNames;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (azureEndpoint?.assistants && azureConfig.assistantModels) {
|
||||||
|
modelsConfig[EModelEndpoint.assistants] = azureConfig.assistantModels;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
|
if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
|
||||||
|
|
|
@ -24,7 +24,7 @@ async function loadDefaultModels(req) {
|
||||||
azure: useAzurePlugins,
|
azure: useAzurePlugins,
|
||||||
plugins: true,
|
plugins: true,
|
||||||
});
|
});
|
||||||
const assistant = await getOpenAIModels({ assistants: true });
|
const assistants = await getOpenAIModels({ assistants: true });
|
||||||
|
|
||||||
return {
|
return {
|
||||||
[EModelEndpoint.openAI]: openAI,
|
[EModelEndpoint.openAI]: openAI,
|
||||||
|
@ -34,7 +34,7 @@ async function loadDefaultModels(req) {
|
||||||
[EModelEndpoint.azureOpenAI]: azureOpenAI,
|
[EModelEndpoint.azureOpenAI]: azureOpenAI,
|
||||||
[EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
|
[EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
|
||||||
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
|
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
|
||||||
[EModelEndpoint.assistants]: assistant,
|
[EModelEndpoint.assistants]: assistants,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,9 +0,0 @@
|
||||||
const addTitle = require('./addTitle');
|
|
||||||
const buildOptions = require('./buildOptions');
|
|
||||||
const initializeClient = require('./initializeClient');
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
addTitle,
|
|
||||||
buildOptions,
|
|
||||||
initializeClient,
|
|
||||||
};
|
|
73
api/server/services/Endpoints/assistants/index.js
Normal file
73
api/server/services/Endpoints/assistants/index.js
Normal file
|
@ -0,0 +1,73 @@
|
||||||
|
const addTitle = require('./addTitle');
|
||||||
|
const buildOptions = require('./buildOptions');
|
||||||
|
const initializeClient = require('./initializeClient');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Asynchronously lists assistants based on provided query parameters.
|
||||||
|
*
|
||||||
|
* Initializes the client with the current request and response objects and lists assistants
|
||||||
|
* according to the query parameters. This function abstracts the logic for non-Azure paths.
|
||||||
|
*
|
||||||
|
* @async
|
||||||
|
* @param {object} params - The parameters object.
|
||||||
|
* @param {object} params.req - The request object, used for initializing the client.
|
||||||
|
* @param {object} params.res - The response object, used for initializing the client.
|
||||||
|
* @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
|
||||||
|
* @returns {Promise<object>} A promise that resolves to the response from the `openai.beta.assistants.list` method call.
|
||||||
|
*/
|
||||||
|
const listAssistants = async ({ req, res, query }) => {
|
||||||
|
const { openai } = await initializeClient({ req, res });
|
||||||
|
return openai.beta.assistants.list(query);
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Asynchronously lists assistants for Azure configured groups.
|
||||||
|
*
|
||||||
|
* Iterates through Azure configured assistant groups, initializes the client with the current request and response objects,
|
||||||
|
* lists assistants based on the provided query parameters, and merges their data alongside the model information into a single array.
|
||||||
|
*
|
||||||
|
* @async
|
||||||
|
* @param {object} params - The parameters object.
|
||||||
|
* @param {object} params.req - The request object, used for initializing the client and manipulating the request body.
|
||||||
|
* @param {object} params.res - The response object, used for initializing the client.
|
||||||
|
* @param {TAzureConfig} params.azureConfig - The Azure configuration object containing assistantGroups and groupMap.
|
||||||
|
* @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
|
||||||
|
* @returns {Promise<AssistantListResponse>} A promise that resolves to an array of assistant data merged with their respective model information.
|
||||||
|
*/
|
||||||
|
const listAssistantsForAzure = async ({ req, res, azureConfig = {}, query }) => {
|
||||||
|
const promises = [];
|
||||||
|
const models = [];
|
||||||
|
|
||||||
|
const { groupMap, assistantGroups } = azureConfig;
|
||||||
|
|
||||||
|
for (const groupName of assistantGroups) {
|
||||||
|
const group = groupMap[groupName];
|
||||||
|
req.body.model = Object.keys(group?.models)[0];
|
||||||
|
models.push(req.body.model);
|
||||||
|
promises.push(listAssistants({ req, res, query }));
|
||||||
|
}
|
||||||
|
|
||||||
|
const resolvedQueries = await Promise.all(promises);
|
||||||
|
const data = resolvedQueries.flatMap((res, i) =>
|
||||||
|
res.data.map((assistant) => {
|
||||||
|
const model = models[i];
|
||||||
|
return { ...assistant, model } ?? {};
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
return {
|
||||||
|
first_id: data[0]?.id,
|
||||||
|
last_id: data[data.length - 1]?.id,
|
||||||
|
object: 'list',
|
||||||
|
has_more: false,
|
||||||
|
data,
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
addTitle,
|
||||||
|
buildOptions,
|
||||||
|
initializeClient,
|
||||||
|
listAssistants,
|
||||||
|
listAssistantsForAzure,
|
||||||
|
};
|
|
@ -1,6 +1,10 @@
|
||||||
const OpenAI = require('openai');
|
const OpenAI = require('openai');
|
||||||
const { HttpsProxyAgent } = require('https-proxy-agent');
|
const { HttpsProxyAgent } = require('https-proxy-agent');
|
||||||
const { EModelEndpoint } = require('librechat-data-provider');
|
const {
|
||||||
|
EModelEndpoint,
|
||||||
|
resolveHeaders,
|
||||||
|
mapModelToAzureConfig,
|
||||||
|
} = require('librechat-data-provider');
|
||||||
const {
|
const {
|
||||||
getUserKey,
|
getUserKey,
|
||||||
getUserKeyExpiry,
|
getUserKeyExpiry,
|
||||||
|
@ -8,6 +12,7 @@ const {
|
||||||
} = require('~/server/services/UserService');
|
} = require('~/server/services/UserService');
|
||||||
const OpenAIClient = require('~/app/clients/OpenAIClient');
|
const OpenAIClient = require('~/app/clients/OpenAIClient');
|
||||||
const { isUserProvided } = require('~/server/utils');
|
const { isUserProvided } = require('~/server/utils');
|
||||||
|
const { constructAzureURL } = require('~/utils');
|
||||||
|
|
||||||
const initializeClient = async ({ req, res, endpointOption, initAppClient = false }) => {
|
const initializeClient = async ({ req, res, endpointOption, initAppClient = false }) => {
|
||||||
const { PROXY, OPENAI_ORGANIZATION, ASSISTANTS_API_KEY, ASSISTANTS_BASE_URL } = process.env;
|
const { PROXY, OPENAI_ORGANIZATION, ASSISTANTS_API_KEY, ASSISTANTS_BASE_URL } = process.env;
|
||||||
|
@ -38,12 +43,68 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
|
||||||
let apiKey = userProvidesKey ? userValues.apiKey : ASSISTANTS_API_KEY;
|
let apiKey = userProvidesKey ? userValues.apiKey : ASSISTANTS_API_KEY;
|
||||||
let baseURL = userProvidesURL ? userValues.baseURL : ASSISTANTS_BASE_URL;
|
let baseURL = userProvidesURL ? userValues.baseURL : ASSISTANTS_BASE_URL;
|
||||||
|
|
||||||
|
const opts = {};
|
||||||
|
|
||||||
|
const clientOptions = {
|
||||||
|
reverseProxyUrl: baseURL ?? null,
|
||||||
|
proxy: PROXY ?? null,
|
||||||
|
req,
|
||||||
|
res,
|
||||||
|
...endpointOption,
|
||||||
|
};
|
||||||
|
|
||||||
|
/** @type {TAzureConfig | undefined} */
|
||||||
|
const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
|
||||||
|
|
||||||
|
/** @type {AzureOptions | undefined} */
|
||||||
|
let azureOptions;
|
||||||
|
|
||||||
|
if (azureConfig && azureConfig.assistants) {
|
||||||
|
const { modelGroupMap, groupMap, assistantModels } = azureConfig;
|
||||||
|
const modelName = req.body.model ?? req.query.model ?? assistantModels[0];
|
||||||
|
const {
|
||||||
|
azureOptions: currentOptions,
|
||||||
|
baseURL: azureBaseURL,
|
||||||
|
headers = {},
|
||||||
|
serverless,
|
||||||
|
} = mapModelToAzureConfig({
|
||||||
|
modelName,
|
||||||
|
modelGroupMap,
|
||||||
|
groupMap,
|
||||||
|
});
|
||||||
|
|
||||||
|
azureOptions = currentOptions;
|
||||||
|
|
||||||
|
baseURL = constructAzureURL({
|
||||||
|
baseURL: azureBaseURL ?? 'https://${INSTANCE_NAME}.openai.azure.com/openai',
|
||||||
|
azureOptions,
|
||||||
|
});
|
||||||
|
|
||||||
|
apiKey = azureOptions.azureOpenAIApiKey;
|
||||||
|
opts.defaultQuery = { 'api-version': azureOptions.azureOpenAIApiVersion };
|
||||||
|
opts.defaultHeaders = resolveHeaders({ ...headers, 'api-key': apiKey });
|
||||||
|
opts.model = azureOptions.azureOpenAIApiDeploymentName;
|
||||||
|
|
||||||
|
if (initAppClient) {
|
||||||
|
clientOptions.titleConvo = azureConfig.titleConvo;
|
||||||
|
clientOptions.titleModel = azureConfig.titleModel;
|
||||||
|
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
|
||||||
|
|
||||||
|
const groupName = modelGroupMap[modelName].group;
|
||||||
|
clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
|
||||||
|
clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
|
||||||
|
clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
|
||||||
|
|
||||||
|
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
|
||||||
|
clientOptions.headers = opts.defaultHeaders;
|
||||||
|
clientOptions.azure = !serverless && azureOptions;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (!apiKey) {
|
if (!apiKey) {
|
||||||
throw new Error('Assistants API key not provided. Please provide it again.');
|
throw new Error('Assistants API key not provided. Please provide it again.');
|
||||||
}
|
}
|
||||||
|
|
||||||
const opts = {};
|
|
||||||
|
|
||||||
if (baseURL) {
|
if (baseURL) {
|
||||||
opts.baseURL = baseURL;
|
opts.baseURL = baseURL;
|
||||||
}
|
}
|
||||||
|
@ -61,18 +122,15 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
|
||||||
apiKey,
|
apiKey,
|
||||||
...opts,
|
...opts,
|
||||||
});
|
});
|
||||||
|
|
||||||
openai.req = req;
|
openai.req = req;
|
||||||
openai.res = res;
|
openai.res = res;
|
||||||
|
|
||||||
if (endpointOption && initAppClient) {
|
if (azureOptions) {
|
||||||
const clientOptions = {
|
openai.locals = { ...(openai.locals ?? {}), azureOptions };
|
||||||
reverseProxyUrl: baseURL,
|
}
|
||||||
proxy: PROXY ?? null,
|
|
||||||
req,
|
|
||||||
res,
|
|
||||||
...endpointOption,
|
|
||||||
};
|
|
||||||
|
|
||||||
|
if (endpointOption && initAppClient) {
|
||||||
const client = new OpenAIClient(apiKey, clientOptions);
|
const client = new OpenAIClient(apiKey, clientOptions);
|
||||||
return {
|
return {
|
||||||
client,
|
client,
|
|
@ -57,7 +57,7 @@ describe('initializeClient', () => {
|
||||||
);
|
);
|
||||||
getUserKeyExpiry.mockResolvedValue(isoString);
|
getUserKeyExpiry.mockResolvedValue(isoString);
|
||||||
|
|
||||||
const req = { user: { id: 'user123' } };
|
const req = { user: { id: 'user123' }, app };
|
||||||
const res = {};
|
const res = {};
|
||||||
|
|
||||||
const { openai, openAIApiKey } = await initializeClient({ req, res });
|
const { openai, openAIApiKey } = await initializeClient({ req, res });
|
||||||
|
@ -80,7 +80,7 @@ describe('initializeClient', () => {
|
||||||
test('throws error if API key is not provided', async () => {
|
test('throws error if API key is not provided', async () => {
|
||||||
delete process.env.ASSISTANTS_API_KEY; // Simulate missing API key
|
delete process.env.ASSISTANTS_API_KEY; // Simulate missing API key
|
||||||
|
|
||||||
const req = { user: { id: 'user123' } };
|
const req = { user: { id: 'user123' }, app };
|
||||||
const res = {};
|
const res = {};
|
||||||
|
|
||||||
await expect(initializeClient({ req, res })).rejects.toThrow(/Assistants API key not/);
|
await expect(initializeClient({ req, res })).rejects.toThrow(/Assistants API key not/);
|
|
@ -1,7 +1,7 @@
|
||||||
const {
|
const {
|
||||||
EModelEndpoint,
|
EModelEndpoint,
|
||||||
mapModelToAzureConfig,
|
|
||||||
resolveHeaders,
|
resolveHeaders,
|
||||||
|
mapModelToAzureConfig,
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
|
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
|
||||||
const { isEnabled, isUserProvided } = require('~/server/utils');
|
const { isEnabled, isUserProvided } = require('~/server/utils');
|
||||||
|
|
|
@ -1,4 +1,7 @@
|
||||||
const fs = require('fs');
|
const fs = require('fs');
|
||||||
|
const { FilePurpose } = require('librechat-data-provider');
|
||||||
|
const { sleep } = require('~/server/utils');
|
||||||
|
const { logger } = require('~/config');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Uploads a file that can be used across various OpenAI services.
|
* Uploads a file that can be used across various OpenAI services.
|
||||||
|
@ -6,23 +9,31 @@ const fs = require('fs');
|
||||||
* @param {Express.Request} req - The request object from Express. It should have a `user` property with an `id`
|
* @param {Express.Request} req - The request object from Express. It should have a `user` property with an `id`
|
||||||
* representing the user, and an `app.locals.paths` object with an `imageOutput` path.
|
* representing the user, and an `app.locals.paths` object with an `imageOutput` path.
|
||||||
* @param {Express.Multer.File} file - The file uploaded to the server via multer.
|
* @param {Express.Multer.File} file - The file uploaded to the server via multer.
|
||||||
* @param {OpenAI} openai - The initialized OpenAI client.
|
* @param {OpenAIClient} openai - The initialized OpenAI client.
|
||||||
* @returns {Promise<OpenAIFile>}
|
* @returns {Promise<OpenAIFile>}
|
||||||
*/
|
*/
|
||||||
async function uploadOpenAIFile(req, file, openai) {
|
async function uploadOpenAIFile(req, file, openai) {
|
||||||
try {
|
|
||||||
const uploadedFile = await openai.files.create({
|
const uploadedFile = await openai.files.create({
|
||||||
file: fs.createReadStream(file.path),
|
file: fs.createReadStream(file.path),
|
||||||
purpose: 'assistants',
|
purpose: FilePurpose.Assistants,
|
||||||
});
|
});
|
||||||
|
|
||||||
console.log('File uploaded successfully to OpenAI');
|
logger.debug(
|
||||||
|
`[uploadOpenAIFile] User ${req.user.id} successfully uploaded file to OpenAI`,
|
||||||
|
uploadedFile,
|
||||||
|
);
|
||||||
|
|
||||||
|
if (uploadedFile.status !== 'processed') {
|
||||||
|
const sleepTime = 2500;
|
||||||
|
logger.debug(
|
||||||
|
`[uploadOpenAIFile] File ${
|
||||||
|
uploadedFile.id
|
||||||
|
} is not yet processed. Waiting for it to be processed (${sleepTime / 1000}s)...`,
|
||||||
|
);
|
||||||
|
await sleep(sleepTime);
|
||||||
|
}
|
||||||
|
|
||||||
return uploadedFile;
|
return uploadedFile;
|
||||||
} catch (error) {
|
|
||||||
console.error('Error uploading file to OpenAI:', error.message);
|
|
||||||
throw error;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -39,9 +50,11 @@ async function deleteOpenAIFile(req, file, openai) {
|
||||||
if (!res.deleted) {
|
if (!res.deleted) {
|
||||||
throw new Error('OpenAI returned `false` for deleted status');
|
throw new Error('OpenAI returned `false` for deleted status');
|
||||||
}
|
}
|
||||||
console.log('File deleted successfully from OpenAI');
|
logger.debug(
|
||||||
|
`[deleteOpenAIFile] User ${req.user.id} successfully deleted ${file.file_id} from OpenAI`,
|
||||||
|
);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error deleting file from OpenAI:', error.message);
|
logger.error('[deleteOpenAIFile] Error deleting file from OpenAI: ' + error.message);
|
||||||
throw error;
|
throw error;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,7 +11,7 @@ const {
|
||||||
mergeFileConfig,
|
mergeFileConfig,
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
const { convertToWebP, resizeAndConvert } = require('~/server/services/Files/images');
|
const { convertToWebP, resizeAndConvert } = require('~/server/services/Files/images');
|
||||||
const { initializeClient } = require('~/server/services/Endpoints/assistant');
|
const { initializeClient } = require('~/server/services/Endpoints/assistants');
|
||||||
const { createFile, updateFileUsage, deleteFiles } = require('~/models/File');
|
const { createFile, updateFileUsage, deleteFiles } = require('~/models/File');
|
||||||
const { isEnabled, determineFileType } = require('~/server/utils');
|
const { isEnabled, determineFileType } = require('~/server/utils');
|
||||||
const { LB_QueueAsyncCall } = require('~/server/utils/queue');
|
const { LB_QueueAsyncCall } = require('~/server/utils/queue');
|
||||||
|
@ -286,7 +286,7 @@ const processFileUpload = async ({ req, res, file, metadata }) => {
|
||||||
file_id: id ?? file_id,
|
file_id: id ?? file_id,
|
||||||
temp_file_id,
|
temp_file_id,
|
||||||
bytes,
|
bytes,
|
||||||
filepath: isAssistantUpload ? `https://api.openai.com/v1/files/${id}` : filepath,
|
filepath: isAssistantUpload ? `${openai.baseURL}/files/${id}` : filepath,
|
||||||
filename: filename ?? file.originalname,
|
filename: filename ?? file.originalname,
|
||||||
context: isAssistantUpload ? FileContext.assistants : FileContext.message_attachment,
|
context: isAssistantUpload ? FileContext.assistants : FileContext.message_attachment,
|
||||||
source,
|
source,
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
const { RunStatus, defaultOrderQuery, CacheKeys } = require('librechat-data-provider');
|
const { RunStatus, defaultOrderQuery, CacheKeys } = require('librechat-data-provider');
|
||||||
const getLogStores = require('~/cache/getLogStores');
|
const getLogStores = require('~/cache/getLogStores');
|
||||||
const { retrieveRun } = require('./methods');
|
const { retrieveRun } = require('./methods');
|
||||||
|
const { sleep } = require('~/server/utils');
|
||||||
const RunManager = require('./RunManager');
|
const RunManager = require('./RunManager');
|
||||||
const { logger } = require('~/config');
|
const { logger } = require('~/config');
|
||||||
|
|
||||||
|
@ -46,16 +47,6 @@ async function createRun({ openai, thread_id, body }) {
|
||||||
return await openai.beta.threads.runs.create(thread_id, body);
|
return await openai.beta.threads.runs.create(thread_id, body);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Delays the execution for a specified number of milliseconds.
|
|
||||||
*
|
|
||||||
* @param {number} ms - The number of milliseconds to delay.
|
|
||||||
* @return {Promise<void>} A promise that resolves after the specified delay.
|
|
||||||
*/
|
|
||||||
function sleep(ms) {
|
|
||||||
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Waits for a run to complete by repeatedly checking its status. It uses a RunManager instance to fetch and manage run steps based on the run status.
|
* Waits for a run to complete by repeatedly checking its status. It uses a RunManager instance to fetch and manage run steps based on the run status.
|
||||||
*
|
*
|
||||||
|
|
|
@ -1,4 +1,5 @@
|
||||||
const axios = require('axios');
|
const axios = require('axios');
|
||||||
|
const { EModelEndpoint } = require('librechat-data-provider');
|
||||||
const { logger } = require('~/config');
|
const { logger } = require('~/config');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -18,9 +19,9 @@ const { logger } = require('~/config');
|
||||||
*/
|
*/
|
||||||
async function retrieveRun({ thread_id, run_id, timeout, openai }) {
|
async function retrieveRun({ thread_id, run_id, timeout, openai }) {
|
||||||
const { apiKey, baseURL, httpAgent, organization } = openai;
|
const { apiKey, baseURL, httpAgent, organization } = openai;
|
||||||
const url = `${baseURL}/threads/${thread_id}/runs/${run_id}`;
|
let url = `${baseURL}/threads/${thread_id}/runs/${run_id}`;
|
||||||
|
|
||||||
const headers = {
|
let headers = {
|
||||||
Authorization: `Bearer ${apiKey}`,
|
Authorization: `Bearer ${apiKey}`,
|
||||||
'OpenAI-Beta': 'assistants=v1',
|
'OpenAI-Beta': 'assistants=v1',
|
||||||
};
|
};
|
||||||
|
@ -29,6 +30,16 @@ async function retrieveRun({ thread_id, run_id, timeout, openai }) {
|
||||||
headers['OpenAI-Organization'] = organization;
|
headers['OpenAI-Organization'] = organization;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** @type {TAzureConfig | undefined} */
|
||||||
|
const azureConfig = openai.req.app.locals[EModelEndpoint.azureOpenAI];
|
||||||
|
|
||||||
|
if (azureConfig && azureConfig.assistants) {
|
||||||
|
delete headers.Authorization;
|
||||||
|
headers = { ...headers, ...openai._options.defaultHeaders };
|
||||||
|
const queryParams = new URLSearchParams(openai._options.defaultQuery).toString();
|
||||||
|
url = `${url}?${queryParams}`;
|
||||||
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const axiosConfig = {
|
const axiosConfig = {
|
||||||
headers: headers,
|
headers: headers,
|
||||||
|
|
|
@ -14,7 +14,7 @@ const { loadActionSets, createActionTool } = require('./ActionService');
|
||||||
const { processFileURL } = require('~/server/services/Files/process');
|
const { processFileURL } = require('~/server/services/Files/process');
|
||||||
const { loadTools } = require('~/app/clients/tools/util');
|
const { loadTools } = require('~/app/clients/tools/util');
|
||||||
const { redactMessage } = require('~/config/parsers');
|
const { redactMessage } = require('~/config/parsers');
|
||||||
const { sleep } = require('./Runs/handle');
|
const { sleep } = require('~/server/utils');
|
||||||
const { logger } = require('~/config');
|
const { logger } = require('~/config');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -5,6 +5,7 @@ const handleText = require('./handleText');
|
||||||
const cryptoUtils = require('./crypto');
|
const cryptoUtils = require('./crypto');
|
||||||
const citations = require('./citations');
|
const citations = require('./citations');
|
||||||
const sendEmail = require('./sendEmail');
|
const sendEmail = require('./sendEmail');
|
||||||
|
const queue = require('./queue');
|
||||||
const files = require('./files');
|
const files = require('./files');
|
||||||
const math = require('./math');
|
const math = require('./math');
|
||||||
|
|
||||||
|
@ -17,5 +18,6 @@ module.exports = {
|
||||||
removePorts,
|
removePorts,
|
||||||
sendEmail,
|
sendEmail,
|
||||||
...files,
|
...files,
|
||||||
|
...queue,
|
||||||
math,
|
math,
|
||||||
};
|
};
|
||||||
|
|
|
@ -53,6 +53,17 @@ function LB_QueueAsyncCall(asyncFunc, args, callback) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delays the execution for a specified number of milliseconds.
|
||||||
|
*
|
||||||
|
* @param {number} ms - The number of milliseconds to delay.
|
||||||
|
* @return {Promise<void>} A promise that resolves after the specified delay.
|
||||||
|
*/
|
||||||
|
function sleep(ms) {
|
||||||
|
return new Promise((resolve) => setTimeout(resolve, ms));
|
||||||
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
|
sleep,
|
||||||
LB_QueueAsyncCall,
|
LB_QueueAsyncCall,
|
||||||
};
|
};
|
||||||
|
|
|
@ -743,6 +743,8 @@
|
||||||
* @property {Set<string>} completeToolCallSteps - A set of completed tool call steps.
|
* @property {Set<string>} completeToolCallSteps - A set of completed tool call steps.
|
||||||
* @property {Set<string>} seenCompletedMessages - A set of completed messages that have been seen/processed.
|
* @property {Set<string>} seenCompletedMessages - A set of completed messages that have been seen/processed.
|
||||||
* @property {Map<string, StepToolCall>} seenToolCalls - A map of tool calls that have been seen/processed.
|
* @property {Map<string, StepToolCall>} seenToolCalls - A map of tool calls that have been seen/processed.
|
||||||
|
* @property {object | undefined} locals - Local variables for the request.
|
||||||
|
* @property {AzureOptions} locals.azureOptions - Local Azure options for the request.
|
||||||
* @property {(data: TContentData) => void} addContentData - Updates the response message's relevant
|
* @property {(data: TContentData) => void} addContentData - Updates the response message's relevant
|
||||||
* @property {InProgressFunction} in_progress - Updates the response message's relevant
|
* @property {InProgressFunction} in_progress - Updates the response message's relevant
|
||||||
* content array with the part by index & sends intermediate SSE message with content data.
|
* content array with the part by index & sends intermediate SSE message with content data.
|
||||||
|
|
|
@ -78,16 +78,19 @@ const getAzureCredentials = () => {
|
||||||
*
|
*
|
||||||
* @param {Object} params - The parameters object.
|
* @param {Object} params - The parameters object.
|
||||||
* @param {string} params.baseURL - The baseURL to inspect for replacement placeholders.
|
* @param {string} params.baseURL - The baseURL to inspect for replacement placeholders.
|
||||||
* @param {AzureOptions} params.azure - The baseURL to inspect for replacement placeholders.
|
* @param {AzureOptions} params.azureOptions - The azure options object containing the instance and deployment names.
|
||||||
* @returns {string} The complete baseURL with credentials injected for the Azure OpenAI API.
|
* @returns {string} The complete baseURL with credentials injected for the Azure OpenAI API.
|
||||||
*/
|
*/
|
||||||
function constructAzureURL({ baseURL, azure }) {
|
function constructAzureURL({ baseURL, azureOptions }) {
|
||||||
let finalURL = baseURL;
|
let finalURL = baseURL;
|
||||||
|
|
||||||
// Replace INSTANCE_NAME and DEPLOYMENT_NAME placeholders with actual values if available
|
// Replace INSTANCE_NAME and DEPLOYMENT_NAME placeholders with actual values if available
|
||||||
if (azure) {
|
if (azureOptions) {
|
||||||
finalURL = finalURL.replace('${INSTANCE_NAME}', azure.azureOpenAIApiInstanceName ?? '');
|
finalURL = finalURL.replace('${INSTANCE_NAME}', azureOptions.azureOpenAIApiInstanceName ?? '');
|
||||||
finalURL = finalURL.replace('${DEPLOYMENT_NAME}', azure.azureOpenAIApiDeploymentName ?? '');
|
finalURL = finalURL.replace(
|
||||||
|
'${DEPLOYMENT_NAME}',
|
||||||
|
azureOptions.azureOpenAIApiDeploymentName ?? '',
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
return finalURL;
|
return finalURL;
|
||||||
|
|
|
@ -199,7 +199,7 @@ describe('constructAzureURL', () => {
|
||||||
test('replaces both placeholders when both properties are provided', () => {
|
test('replaces both placeholders when both properties are provided', () => {
|
||||||
const url = constructAzureURL({
|
const url = constructAzureURL({
|
||||||
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
||||||
azure: {
|
azureOptions: {
|
||||||
azureOpenAIApiInstanceName: 'instance1',
|
azureOpenAIApiInstanceName: 'instance1',
|
||||||
azureOpenAIApiDeploymentName: 'deployment1',
|
azureOpenAIApiDeploymentName: 'deployment1',
|
||||||
},
|
},
|
||||||
|
@ -210,7 +210,7 @@ describe('constructAzureURL', () => {
|
||||||
test('replaces only INSTANCE_NAME when only azureOpenAIApiInstanceName is provided', () => {
|
test('replaces only INSTANCE_NAME when only azureOpenAIApiInstanceName is provided', () => {
|
||||||
const url = constructAzureURL({
|
const url = constructAzureURL({
|
||||||
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
||||||
azure: {
|
azureOptions: {
|
||||||
azureOpenAIApiInstanceName: 'instance2',
|
azureOpenAIApiInstanceName: 'instance2',
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
@ -220,7 +220,7 @@ describe('constructAzureURL', () => {
|
||||||
test('replaces only DEPLOYMENT_NAME when only azureOpenAIApiDeploymentName is provided', () => {
|
test('replaces only DEPLOYMENT_NAME when only azureOpenAIApiDeploymentName is provided', () => {
|
||||||
const url = constructAzureURL({
|
const url = constructAzureURL({
|
||||||
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
||||||
azure: {
|
azureOptions: {
|
||||||
azureOpenAIApiDeploymentName: 'deployment2',
|
azureOpenAIApiDeploymentName: 'deployment2',
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
@ -230,12 +230,12 @@ describe('constructAzureURL', () => {
|
||||||
test('does not replace any placeholders when azure object is empty', () => {
|
test('does not replace any placeholders when azure object is empty', () => {
|
||||||
const url = constructAzureURL({
|
const url = constructAzureURL({
|
||||||
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
||||||
azure: {},
|
azureOptions: {},
|
||||||
});
|
});
|
||||||
expect(url).toBe('https://example.com//');
|
expect(url).toBe('https://example.com//');
|
||||||
});
|
});
|
||||||
|
|
||||||
test('returns baseURL as is when azure object is not provided', () => {
|
test('returns baseURL as is when `azureOptions` object is not provided', () => {
|
||||||
const url = constructAzureURL({
|
const url = constructAzureURL({
|
||||||
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
|
||||||
});
|
});
|
||||||
|
@ -245,7 +245,7 @@ describe('constructAzureURL', () => {
|
||||||
test('returns baseURL as is when no placeholders are set', () => {
|
test('returns baseURL as is when no placeholders are set', () => {
|
||||||
const url = constructAzureURL({
|
const url = constructAzureURL({
|
||||||
baseURL: 'https://example.com/my_custom_instance/my_deployment',
|
baseURL: 'https://example.com/my_custom_instance/my_deployment',
|
||||||
azure: {
|
azureOptions: {
|
||||||
azureOpenAIApiInstanceName: 'instance1',
|
azureOpenAIApiInstanceName: 'instance1',
|
||||||
azureOpenAIApiDeploymentName: 'deployment1',
|
azureOpenAIApiDeploymentName: 'deployment1',
|
||||||
},
|
},
|
||||||
|
@ -258,7 +258,7 @@ describe('constructAzureURL', () => {
|
||||||
'https://${INSTANCE_NAME}.openai.azure.com/openai/deployments/${DEPLOYMENT_NAME}';
|
'https://${INSTANCE_NAME}.openai.azure.com/openai/deployments/${DEPLOYMENT_NAME}';
|
||||||
const url = constructAzureURL({
|
const url = constructAzureURL({
|
||||||
baseURL,
|
baseURL,
|
||||||
azure: {
|
azureOptions: {
|
||||||
azureOpenAIApiInstanceName: 'instance1',
|
azureOpenAIApiInstanceName: 'instance1',
|
||||||
azureOpenAIApiDeploymentName: 'deployment1',
|
azureOpenAIApiDeploymentName: 'deployment1',
|
||||||
},
|
},
|
||||||
|
|
BIN
bun.lockb
BIN
bun.lockb
Binary file not shown.
|
@ -1,16 +1,17 @@
|
||||||
import { useRecoilState } from 'recoil';
|
import { useRecoilState } from 'recoil';
|
||||||
import { memo, useCallback, useRef } from 'react';
|
|
||||||
import TextareaAutosize from 'react-textarea-autosize';
|
|
||||||
import { useForm } from 'react-hook-form';
|
import { useForm } from 'react-hook-form';
|
||||||
|
import TextareaAutosize from 'react-textarea-autosize';
|
||||||
|
import { memo, useCallback, useRef, useMemo } from 'react';
|
||||||
import {
|
import {
|
||||||
supportsFiles,
|
supportsFiles,
|
||||||
mergeFileConfig,
|
mergeFileConfig,
|
||||||
fileConfig as defaultFileConfig,
|
fileConfig as defaultFileConfig,
|
||||||
|
EModelEndpoint,
|
||||||
} from 'librechat-data-provider';
|
} from 'librechat-data-provider';
|
||||||
|
import { useChatContext, useAssistantsMapContext } from '~/Providers';
|
||||||
import { useRequiresKey, useTextarea } from '~/hooks';
|
import { useRequiresKey, useTextarea } from '~/hooks';
|
||||||
import { useGetFileConfig } from '~/data-provider';
|
import { useGetFileConfig } from '~/data-provider';
|
||||||
import { cn, removeFocusOutlines } from '~/utils';
|
import { cn, removeFocusOutlines } from '~/utils';
|
||||||
import { useChatContext } from '~/Providers';
|
|
||||||
import AttachFile from './Files/AttachFile';
|
import AttachFile from './Files/AttachFile';
|
||||||
import StopButton from './StopButton';
|
import StopButton from './StopButton';
|
||||||
import SendButton from './SendButton';
|
import SendButton from './SendButton';
|
||||||
|
@ -37,6 +38,7 @@ const ChatForm = ({ index = 0 }) => {
|
||||||
setFilesLoading,
|
setFilesLoading,
|
||||||
} = useChatContext();
|
} = useChatContext();
|
||||||
|
|
||||||
|
const assistantMap = useAssistantsMapContext();
|
||||||
const methods = useForm<{ text: string }>({
|
const methods = useForm<{ text: string }>({
|
||||||
defaultValues: { text: '' },
|
defaultValues: { text: '' },
|
||||||
});
|
});
|
||||||
|
@ -61,6 +63,16 @@ const ChatForm = ({ index = 0 }) => {
|
||||||
});
|
});
|
||||||
|
|
||||||
const endpointFileConfig = fileConfig.endpoints[endpoint ?? ''];
|
const endpointFileConfig = fileConfig.endpoints[endpoint ?? ''];
|
||||||
|
const invalidAssistant = useMemo(
|
||||||
|
() =>
|
||||||
|
conversation?.endpoint === EModelEndpoint.assistants &&
|
||||||
|
(!conversation?.assistant_id || !assistantMap?.[conversation?.assistant_id ?? '']),
|
||||||
|
[conversation?.assistant_id, conversation?.endpoint, assistantMap],
|
||||||
|
);
|
||||||
|
const disableInputs = useMemo(
|
||||||
|
() => !!(requiresKey || invalidAssistant),
|
||||||
|
[requiresKey, invalidAssistant],
|
||||||
|
);
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<form
|
<form
|
||||||
|
@ -92,7 +104,7 @@ const ChatForm = ({ index = 0 }) => {
|
||||||
ref={(e) => {
|
ref={(e) => {
|
||||||
textAreaRef.current = e;
|
textAreaRef.current = e;
|
||||||
}}
|
}}
|
||||||
disabled={!!requiresKey}
|
disabled={disableInputs}
|
||||||
onPaste={handlePaste}
|
onPaste={handlePaste}
|
||||||
onKeyUp={handleKeyUp}
|
onKeyUp={handleKeyUp}
|
||||||
onKeyDown={handleKeyDown}
|
onKeyDown={handleKeyDown}
|
||||||
|
@ -116,7 +128,7 @@ const ChatForm = ({ index = 0 }) => {
|
||||||
<AttachFile
|
<AttachFile
|
||||||
endpoint={_endpoint ?? ''}
|
endpoint={_endpoint ?? ''}
|
||||||
endpointType={endpointType}
|
endpointType={endpointType}
|
||||||
disabled={requiresKey}
|
disabled={disableInputs}
|
||||||
/>
|
/>
|
||||||
{isSubmitting && showStopButton ? (
|
{isSubmitting && showStopButton ? (
|
||||||
<StopButton stop={handleStopGenerating} setShowStopButton={setShowStopButton} />
|
<StopButton stop={handleStopGenerating} setShowStopButton={setShowStopButton} />
|
||||||
|
@ -125,7 +137,7 @@ const ChatForm = ({ index = 0 }) => {
|
||||||
<SendButton
|
<SendButton
|
||||||
ref={submitButtonRef}
|
ref={submitButtonRef}
|
||||||
control={methods.control}
|
control={methods.control}
|
||||||
disabled={!!(filesLoading || isSubmitting || requiresKey)}
|
disabled={!!(filesLoading || isSubmitting || disableInputs)}
|
||||||
/>
|
/>
|
||||||
)
|
)
|
||||||
)}
|
)}
|
||||||
|
|
|
@ -87,7 +87,9 @@ export default function Landing({ Header }: { Header?: ReactNode }) {
|
||||||
</div>
|
</div>
|
||||||
) : (
|
) : (
|
||||||
<div className="mb-5 text-2xl font-medium dark:text-white">
|
<div className="mb-5 text-2xl font-medium dark:text-white">
|
||||||
{localize('com_nav_welcome_message')}
|
{endpoint === EModelEndpoint.assistants
|
||||||
|
? localize('com_nav_welcome_assistant')
|
||||||
|
: localize('com_nav_welcome_message')}
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
|
|
|
@ -48,7 +48,7 @@ export const icons = {
|
||||||
return <AssistantIcon className={cn('text-token-secondary', className)} size={size} />;
|
return <AssistantIcon className={cn('text-token-secondary', className)} size={size} />;
|
||||||
}
|
}
|
||||||
|
|
||||||
return <Sparkles className={className} />;
|
return <Sparkles className={cn(assistantName === '' ? 'icon-2xl' : '', className)} />;
|
||||||
},
|
},
|
||||||
unknown: UnknownIcon,
|
unknown: UnknownIcon,
|
||||||
};
|
};
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
import * as Popover from '@radix-ui/react-popover';
|
import * as Popover from '@radix-ui/react-popover';
|
||||||
import { useState, useEffect, useRef } from 'react';
|
import { useState, useEffect, useRef, useMemo } from 'react';
|
||||||
import { useQueryClient } from '@tanstack/react-query';
|
import { useQueryClient } from '@tanstack/react-query';
|
||||||
import {
|
import {
|
||||||
fileConfig as defaultFileConfig,
|
fileConfig as defaultFileConfig,
|
||||||
|
@ -16,7 +16,7 @@ import type {
|
||||||
} from 'librechat-data-provider';
|
} from 'librechat-data-provider';
|
||||||
import { useUploadAssistantAvatarMutation, useGetFileConfig } from '~/data-provider';
|
import { useUploadAssistantAvatarMutation, useGetFileConfig } from '~/data-provider';
|
||||||
import { AssistantAvatar, NoImage, AvatarMenu } from './Images';
|
import { AssistantAvatar, NoImage, AvatarMenu } from './Images';
|
||||||
import { useToastContext } from '~/Providers';
|
import { useToastContext, useAssistantsMapContext } from '~/Providers';
|
||||||
// import { Spinner } from '~/components/svg';
|
// import { Spinner } from '~/components/svg';
|
||||||
import { useLocalize } from '~/hooks';
|
import { useLocalize } from '~/hooks';
|
||||||
// import { cn } from '~/utils/';
|
// import { cn } from '~/utils/';
|
||||||
|
@ -32,6 +32,7 @@ function Avatar({
|
||||||
}) {
|
}) {
|
||||||
// console.log('Avatar', assistant_id, metadata, createMutation);
|
// console.log('Avatar', assistant_id, metadata, createMutation);
|
||||||
const queryClient = useQueryClient();
|
const queryClient = useQueryClient();
|
||||||
|
const assistantsMap = useAssistantsMapContext();
|
||||||
const [menuOpen, setMenuOpen] = useState(false);
|
const [menuOpen, setMenuOpen] = useState(false);
|
||||||
const [progress, setProgress] = useState<number>(1);
|
const [progress, setProgress] = useState<number>(1);
|
||||||
const [input, setInput] = useState<File | null>(null);
|
const [input, setInput] = useState<File | null>(null);
|
||||||
|
@ -44,6 +45,10 @@ function Avatar({
|
||||||
const localize = useLocalize();
|
const localize = useLocalize();
|
||||||
const { showToast } = useToastContext();
|
const { showToast } = useToastContext();
|
||||||
|
|
||||||
|
const activeModel = useMemo(() => {
|
||||||
|
return assistantsMap[assistant_id ?? '']?.model ?? '';
|
||||||
|
}, [assistant_id, assistantsMap]);
|
||||||
|
|
||||||
const { mutate: uploadAvatar } = useUploadAssistantAvatarMutation({
|
const { mutate: uploadAvatar } = useUploadAssistantAvatarMutation({
|
||||||
onMutate: () => {
|
onMutate: () => {
|
||||||
setProgress(0.4);
|
setProgress(0.4);
|
||||||
|
@ -141,11 +146,12 @@ function Avatar({
|
||||||
|
|
||||||
uploadAvatar({
|
uploadAvatar({
|
||||||
assistant_id: createMutation.data.id,
|
assistant_id: createMutation.data.id,
|
||||||
|
model: activeModel,
|
||||||
postCreation: true,
|
postCreation: true,
|
||||||
formData,
|
formData,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}, [createMutation.data, createMutation.isSuccess, input, previewUrl, uploadAvatar]);
|
}, [createMutation.data, createMutation.isSuccess, input, previewUrl, uploadAvatar, activeModel]);
|
||||||
|
|
||||||
const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>): void => {
|
const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>): void => {
|
||||||
const file = event.target.files?.[0];
|
const file = event.target.files?.[0];
|
||||||
|
@ -175,6 +181,7 @@ function Avatar({
|
||||||
|
|
||||||
uploadAvatar({
|
uploadAvatar({
|
||||||
assistant_id,
|
assistant_id,
|
||||||
|
model: activeModel,
|
||||||
formData,
|
formData,
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -1,17 +1,17 @@
|
||||||
import { useState, useMemo, useEffect } from 'react';
|
import { useState, useMemo, useEffect } from 'react';
|
||||||
import { useQueryClient } from '@tanstack/react-query';
|
import { useQueryClient } from '@tanstack/react-query';
|
||||||
import { useGetModelsQuery } from 'librechat-data-provider/react-query';
|
|
||||||
import { useForm, FormProvider, Controller, useWatch } from 'react-hook-form';
|
import { useForm, FormProvider, Controller, useWatch } from 'react-hook-form';
|
||||||
|
import { useGetModelsQuery, useGetEndpointsQuery } from 'librechat-data-provider/react-query';
|
||||||
import {
|
import {
|
||||||
Tools,
|
Tools,
|
||||||
QueryKeys,
|
QueryKeys,
|
||||||
|
Capabilities,
|
||||||
EModelEndpoint,
|
EModelEndpoint,
|
||||||
actionDelimiter,
|
actionDelimiter,
|
||||||
supportsRetrieval,
|
|
||||||
defaultAssistantFormValues,
|
defaultAssistantFormValues,
|
||||||
} from 'librechat-data-provider';
|
} from 'librechat-data-provider';
|
||||||
import type { FunctionTool, TPlugin } from 'librechat-data-provider';
|
|
||||||
import type { AssistantForm, AssistantPanelProps } from '~/common';
|
import type { AssistantForm, AssistantPanelProps } from '~/common';
|
||||||
|
import type { FunctionTool, TPlugin, TEndpointsConfig } from 'librechat-data-provider';
|
||||||
import { useCreateAssistantMutation, useUpdateAssistantMutation } from '~/data-provider';
|
import { useCreateAssistantMutation, useUpdateAssistantMutation } from '~/data-provider';
|
||||||
import { SelectDropDown, Checkbox, QuestionMark } from '~/components/ui';
|
import { SelectDropDown, Checkbox, QuestionMark } from '~/components/ui';
|
||||||
import { useAssistantsMapContext, useToastContext } from '~/Providers';
|
import { useAssistantsMapContext, useToastContext } from '~/Providers';
|
||||||
|
@ -42,7 +42,7 @@ export default function AssistantPanel({
|
||||||
const queryClient = useQueryClient();
|
const queryClient = useQueryClient();
|
||||||
const modelsQuery = useGetModelsQuery();
|
const modelsQuery = useGetModelsQuery();
|
||||||
const assistantMap = useAssistantsMapContext();
|
const assistantMap = useAssistantsMapContext();
|
||||||
const [showToolDialog, setShowToolDialog] = useState(false);
|
const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery();
|
||||||
const allTools = queryClient.getQueryData<TPlugin[]>([QueryKeys.tools]) ?? [];
|
const allTools = queryClient.getQueryData<TPlugin[]>([QueryKeys.tools]) ?? [];
|
||||||
const { onSelect: onSelectAssistant } = useSelectAssistant();
|
const { onSelect: onSelectAssistant } = useSelectAssistant();
|
||||||
const { showToast } = useToastContext();
|
const { showToast } = useToastContext();
|
||||||
|
@ -51,17 +51,43 @@ export default function AssistantPanel({
|
||||||
const methods = useForm<AssistantForm>({
|
const methods = useForm<AssistantForm>({
|
||||||
defaultValues: defaultAssistantFormValues,
|
defaultValues: defaultAssistantFormValues,
|
||||||
});
|
});
|
||||||
|
|
||||||
|
const [showToolDialog, setShowToolDialog] = useState(false);
|
||||||
|
|
||||||
const { control, handleSubmit, reset, setValue, getValues } = methods;
|
const { control, handleSubmit, reset, setValue, getValues } = methods;
|
||||||
const assistant_id = useWatch({ control, name: 'id' });
|
|
||||||
const assistant = useWatch({ control, name: 'assistant' });
|
const assistant = useWatch({ control, name: 'assistant' });
|
||||||
const functions = useWatch({ control, name: 'functions' });
|
const functions = useWatch({ control, name: 'functions' });
|
||||||
|
const assistant_id = useWatch({ control, name: 'id' });
|
||||||
const model = useWatch({ control, name: 'model' });
|
const model = useWatch({ control, name: 'model' });
|
||||||
|
|
||||||
|
const activeModel = useMemo(() => {
|
||||||
|
return assistantMap?.[assistant_id]?.model;
|
||||||
|
}, [assistantMap, assistant_id]);
|
||||||
|
|
||||||
|
const assistants = useMemo(() => endpointsConfig?.[EModelEndpoint.assistants], [endpointsConfig]);
|
||||||
|
const retrievalModels = useMemo(() => new Set(assistants?.retrievalModels ?? []), [assistants]);
|
||||||
|
const toolsEnabled = useMemo(
|
||||||
|
() => assistants?.capabilities?.includes(Capabilities.tools),
|
||||||
|
[assistants],
|
||||||
|
);
|
||||||
|
const actionsEnabled = useMemo(
|
||||||
|
() => assistants?.capabilities?.includes(Capabilities.actions),
|
||||||
|
[assistants],
|
||||||
|
);
|
||||||
|
const retrievalEnabled = useMemo(
|
||||||
|
() => assistants?.capabilities?.includes(Capabilities.retrieval),
|
||||||
|
[assistants],
|
||||||
|
);
|
||||||
|
const codeEnabled = useMemo(
|
||||||
|
() => assistants?.capabilities?.includes(Capabilities.code_interpreter),
|
||||||
|
[assistants],
|
||||||
|
);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (model && !supportsRetrieval.has(model)) {
|
if (model && !retrievalModels.has(model)) {
|
||||||
setValue('retrieval', false);
|
setValue(Capabilities.retrieval, false);
|
||||||
}
|
}
|
||||||
}, [model, setValue]);
|
}, [model, setValue, retrievalModels]);
|
||||||
|
|
||||||
/* Mutations */
|
/* Mutations */
|
||||||
const update = useUpdateAssistantMutation({
|
const update = useUpdateAssistantMutation({
|
||||||
|
@ -300,18 +326,23 @@ export default function AssistantPanel({
|
||||||
/>
|
/>
|
||||||
</div>
|
</div>
|
||||||
{/* Knowledge */}
|
{/* Knowledge */}
|
||||||
|
{(codeEnabled || retrievalEnabled) && (
|
||||||
<Knowledge assistant_id={assistant_id} files={files} />
|
<Knowledge assistant_id={assistant_id} files={files} />
|
||||||
|
)}
|
||||||
{/* Capabilities */}
|
{/* Capabilities */}
|
||||||
<div className="mb-6">
|
<div className="mb-6">
|
||||||
<div className="mb-1.5 flex items-center">
|
<div className="mb-1.5 flex items-center">
|
||||||
<span>
|
<span>
|
||||||
<label className="text-token-text-primary block font-medium">Capabilities</label>
|
<label className="text-token-text-primary block font-medium">
|
||||||
|
{localize('com_assistants_capabilities')}
|
||||||
|
</label>
|
||||||
</span>
|
</span>
|
||||||
</div>
|
</div>
|
||||||
<div className="flex flex-col items-start gap-2">
|
<div className="flex flex-col items-start gap-2">
|
||||||
|
{codeEnabled && (
|
||||||
<div className="flex items-center">
|
<div className="flex items-center">
|
||||||
<Controller
|
<Controller
|
||||||
name={'code_interpreter'}
|
name={Capabilities.code_interpreter}
|
||||||
control={control}
|
control={control}
|
||||||
render={({ field }) => (
|
render={({ field }) => (
|
||||||
<Checkbox
|
<Checkbox
|
||||||
|
@ -325,11 +356,15 @@ export default function AssistantPanel({
|
||||||
/>
|
/>
|
||||||
<label
|
<label
|
||||||
className="form-check-label text-token-text-primary w-full cursor-pointer"
|
className="form-check-label text-token-text-primary w-full cursor-pointer"
|
||||||
htmlFor="code_interpreter"
|
htmlFor={Capabilities.code_interpreter}
|
||||||
onClick={() =>
|
onClick={() =>
|
||||||
setValue('code_interpreter', !getValues('code_interpreter'), {
|
setValue(
|
||||||
|
Capabilities.code_interpreter,
|
||||||
|
!getValues(Capabilities.code_interpreter),
|
||||||
|
{
|
||||||
shouldDirty: true,
|
shouldDirty: true,
|
||||||
})
|
},
|
||||||
|
)
|
||||||
}
|
}
|
||||||
>
|
>
|
||||||
<div className="flex items-center">
|
<div className="flex items-center">
|
||||||
|
@ -338,15 +373,17 @@ export default function AssistantPanel({
|
||||||
</div>
|
</div>
|
||||||
</label>
|
</label>
|
||||||
</div>
|
</div>
|
||||||
|
)}
|
||||||
|
{retrievalEnabled && (
|
||||||
<div className="flex items-center">
|
<div className="flex items-center">
|
||||||
<Controller
|
<Controller
|
||||||
name={'retrieval'}
|
name={Capabilities.retrieval}
|
||||||
control={control}
|
control={control}
|
||||||
render={({ field }) => (
|
render={({ field }) => (
|
||||||
<Checkbox
|
<Checkbox
|
||||||
{...field}
|
{...field}
|
||||||
checked={field.value}
|
checked={field.value}
|
||||||
disabled={!supportsRetrieval.has(model)}
|
disabled={!retrievalModels.has(model)}
|
||||||
onCheckedChange={field.onChange}
|
onCheckedChange={field.onChange}
|
||||||
className="relative float-left mr-2 inline-flex h-4 w-4 cursor-pointer"
|
className="relative float-left mr-2 inline-flex h-4 w-4 cursor-pointer"
|
||||||
value={field?.value?.toString()}
|
value={field?.value?.toString()}
|
||||||
|
@ -356,22 +393,29 @@ export default function AssistantPanel({
|
||||||
<label
|
<label
|
||||||
className={cn(
|
className={cn(
|
||||||
'form-check-label text-token-text-primary w-full',
|
'form-check-label text-token-text-primary w-full',
|
||||||
!supportsRetrieval.has(model) ? 'cursor-no-drop opacity-50' : 'cursor-pointer',
|
!retrievalModels.has(model) ? 'cursor-no-drop opacity-50' : 'cursor-pointer',
|
||||||
)}
|
)}
|
||||||
htmlFor="retrieval"
|
htmlFor={Capabilities.retrieval}
|
||||||
onClick={() =>
|
onClick={() =>
|
||||||
supportsRetrieval.has(model) &&
|
retrievalModels.has(model) &&
|
||||||
setValue('retrieval', !getValues('retrieval'), { shouldDirty: true })
|
setValue(Capabilities.retrieval, !getValues(Capabilities.retrieval), {
|
||||||
|
shouldDirty: true,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
>
|
>
|
||||||
{localize('com_assistants_retrieval')}
|
{localize('com_assistants_retrieval')}
|
||||||
</label>
|
</label>
|
||||||
</div>
|
</div>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
{/* Tools */}
|
{/* Tools */}
|
||||||
<div className="mb-6">
|
<div className="mb-6">
|
||||||
<label className={labelClass}>{localize('com_assistants_tools_section')}</label>
|
<label className={labelClass}>
|
||||||
|
{`${toolsEnabled ? localize('com_assistants_tools') : ''}
|
||||||
|
${toolsEnabled && actionsEnabled ? ' + ' : ''}
|
||||||
|
${actionsEnabled ? localize('com_assistants_actions') : ''}`}
|
||||||
|
</label>
|
||||||
<div className="space-y-1">
|
<div className="space-y-1">
|
||||||
{functions.map((func) => (
|
{functions.map((func) => (
|
||||||
<AssistantTool
|
<AssistantTool
|
||||||
|
@ -388,6 +432,7 @@ export default function AssistantPanel({
|
||||||
<AssistantAction key={i} action={action} onClick={() => setAction(action)} />
|
<AssistantAction key={i} action={action} onClick={() => setAction(action)} />
|
||||||
);
|
);
|
||||||
})}
|
})}
|
||||||
|
{toolsEnabled && (
|
||||||
<button
|
<button
|
||||||
type="button"
|
type="button"
|
||||||
onClick={() => setShowToolDialog(true)}
|
onClick={() => setShowToolDialog(true)}
|
||||||
|
@ -397,6 +442,8 @@ export default function AssistantPanel({
|
||||||
{localize('com_assistants_add_tools')}
|
{localize('com_assistants_add_tools')}
|
||||||
</div>
|
</div>
|
||||||
</button>
|
</button>
|
||||||
|
)}
|
||||||
|
{actionsEnabled && (
|
||||||
<button
|
<button
|
||||||
type="button"
|
type="button"
|
||||||
disabled={!assistant_id}
|
disabled={!assistant_id}
|
||||||
|
@ -415,12 +462,14 @@ export default function AssistantPanel({
|
||||||
{localize('com_assistants_add_actions')}
|
{localize('com_assistants_add_actions')}
|
||||||
</div>
|
</div>
|
||||||
</button>
|
</button>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div className="flex items-center justify-end gap-2">
|
<div className="flex items-center justify-end gap-2">
|
||||||
{/* Context Button */}
|
{/* Context Button */}
|
||||||
<ContextButton
|
<ContextButton
|
||||||
assistant_id={assistant_id}
|
assistant_id={assistant_id}
|
||||||
|
activeModel={activeModel}
|
||||||
setCurrentAssistantId={setCurrentAssistantId}
|
setCurrentAssistantId={setCurrentAssistantId}
|
||||||
createMutation={create}
|
createMutation={create}
|
||||||
/>
|
/>
|
||||||
|
|
|
@ -10,10 +10,12 @@ import { NewTrashIcon } from '~/components/svg';
|
||||||
import { useChatContext } from '~/Providers';
|
import { useChatContext } from '~/Providers';
|
||||||
|
|
||||||
export default function ContextButton({
|
export default function ContextButton({
|
||||||
|
activeModel,
|
||||||
assistant_id,
|
assistant_id,
|
||||||
setCurrentAssistantId,
|
setCurrentAssistantId,
|
||||||
createMutation,
|
createMutation,
|
||||||
}: {
|
}: {
|
||||||
|
activeModel: string;
|
||||||
assistant_id: string;
|
assistant_id: string;
|
||||||
setCurrentAssistantId: React.Dispatch<React.SetStateAction<string | undefined>>;
|
setCurrentAssistantId: React.Dispatch<React.SetStateAction<string | undefined>>;
|
||||||
createMutation: UseMutationResult<Assistant, Error, AssistantCreateParams>;
|
createMutation: UseMutationResult<Assistant, Error, AssistantCreateParams>;
|
||||||
|
@ -136,7 +138,7 @@ export default function ContextButton({
|
||||||
</>
|
</>
|
||||||
}
|
}
|
||||||
selection={{
|
selection={{
|
||||||
selectHandler: () => deleteAssistant.mutate({ assistant_id }),
|
selectHandler: () => deleteAssistant.mutate({ assistant_id, model: activeModel }),
|
||||||
selectClasses: 'bg-red-600 hover:bg-red-700 dark:hover:bg-red-800 text-white',
|
selectClasses: 'bg-red-600 hover:bg-red-700 dark:hover:bg-red-800 text-white',
|
||||||
selectText: localize('com_ui_delete'),
|
selectText: localize('com_ui_delete'),
|
||||||
}}
|
}}
|
||||||
|
|
|
@ -26,6 +26,7 @@ import type {
|
||||||
CreateAssistantMutationOptions,
|
CreateAssistantMutationOptions,
|
||||||
UpdateAssistantMutationOptions,
|
UpdateAssistantMutationOptions,
|
||||||
DeleteAssistantMutationOptions,
|
DeleteAssistantMutationOptions,
|
||||||
|
DeleteAssistantBody,
|
||||||
DeleteConversationOptions,
|
DeleteConversationOptions,
|
||||||
UpdateActionOptions,
|
UpdateActionOptions,
|
||||||
UpdateActionVariables,
|
UpdateActionVariables,
|
||||||
|
@ -369,10 +370,11 @@ export const useUpdateAssistantMutation = (
|
||||||
*/
|
*/
|
||||||
export const useDeleteAssistantMutation = (
|
export const useDeleteAssistantMutation = (
|
||||||
options?: DeleteAssistantMutationOptions,
|
options?: DeleteAssistantMutationOptions,
|
||||||
): UseMutationResult<void, Error, { assistant_id: string }> => {
|
): UseMutationResult<void, Error, DeleteAssistantBody> => {
|
||||||
const queryClient = useQueryClient();
|
const queryClient = useQueryClient();
|
||||||
return useMutation(
|
return useMutation(
|
||||||
({ assistant_id }: { assistant_id: string }) => dataService.deleteAssistant(assistant_id),
|
({ assistant_id, model }: DeleteAssistantBody) =>
|
||||||
|
dataService.deleteAssistant(assistant_id, model),
|
||||||
{
|
{
|
||||||
onMutate: (variables) => options?.onMutate?.(variables),
|
onMutate: (variables) => options?.onMutate?.(variables),
|
||||||
onError: (error, variables, context) => options?.onError?.(error, variables, context),
|
onError: (error, variables, context) => options?.onError?.(error, variables, context),
|
||||||
|
|
|
@ -139,6 +139,7 @@ const useFileHandling = (params?: UseFileHandling) => {
|
||||||
conversation?.assistant_id
|
conversation?.assistant_id
|
||||||
) {
|
) {
|
||||||
formData.append('assistant_id', conversation.assistant_id);
|
formData.append('assistant_id', conversation.assistant_id);
|
||||||
|
formData.append('model', conversation?.model ?? '');
|
||||||
formData.append('message_file', 'true');
|
formData.append('message_file', 'true');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -55,8 +55,14 @@ export default function useTextarea({
|
||||||
disabled?: boolean;
|
disabled?: boolean;
|
||||||
}) {
|
}) {
|
||||||
const assistantMap = useAssistantsMapContext();
|
const assistantMap = useAssistantsMapContext();
|
||||||
const { conversation, isSubmitting, latestMessage, setShowBingToneSetting, setFilesLoading } =
|
const {
|
||||||
useChatContext();
|
conversation,
|
||||||
|
isSubmitting,
|
||||||
|
latestMessage,
|
||||||
|
setShowBingToneSetting,
|
||||||
|
filesLoading,
|
||||||
|
setFilesLoading,
|
||||||
|
} = useChatContext();
|
||||||
const isComposing = useRef(false);
|
const isComposing = useRef(false);
|
||||||
const { handleFiles } = useFileHandling();
|
const { handleFiles } = useFileHandling();
|
||||||
const getSender = useGetSender();
|
const getSender = useGetSender();
|
||||||
|
@ -103,9 +109,16 @@ export default function useTextarea({
|
||||||
}
|
}
|
||||||
|
|
||||||
const getPlaceholderText = () => {
|
const getPlaceholderText = () => {
|
||||||
|
if (
|
||||||
|
conversation?.endpoint === EModelEndpoint.assistants &&
|
||||||
|
(!conversation?.assistant_id || !assistantMap?.[conversation?.assistant_id ?? ''])
|
||||||
|
) {
|
||||||
|
return localize('com_endpoint_assistant_placeholder');
|
||||||
|
}
|
||||||
if (disabled) {
|
if (disabled) {
|
||||||
return localize('com_endpoint_config_placeholder');
|
return localize('com_endpoint_config_placeholder');
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isNotAppendable) {
|
if (isNotAppendable) {
|
||||||
return localize('com_endpoint_message_not_appendable');
|
return localize('com_endpoint_message_not_appendable');
|
||||||
}
|
}
|
||||||
|
@ -145,6 +158,7 @@ export default function useTextarea({
|
||||||
getSender,
|
getSender,
|
||||||
assistantName,
|
assistantName,
|
||||||
textAreaRef,
|
textAreaRef,
|
||||||
|
assistantMap,
|
||||||
]);
|
]);
|
||||||
|
|
||||||
const handleKeyDown = (e: KeyEvent) => {
|
const handleKeyDown = (e: KeyEvent) => {
|
||||||
|
@ -152,11 +166,17 @@ export default function useTextarea({
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (e.key === 'Enter' && !e.shiftKey) {
|
const isNonShiftEnter = e.key === 'Enter' && !e.shiftKey;
|
||||||
|
|
||||||
|
if (isNonShiftEnter && filesLoading) {
|
||||||
e.preventDefault();
|
e.preventDefault();
|
||||||
}
|
}
|
||||||
|
|
||||||
if (e.key === 'Enter' && !e.shiftKey && !isComposing?.current) {
|
if (isNonShiftEnter) {
|
||||||
|
e.preventDefault();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (isNonShiftEnter && !isComposing?.current) {
|
||||||
submitButtonRef.current?.click();
|
submitButtonRef.current?.click();
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
|
@ -7,6 +7,7 @@ export default {
|
||||||
com_sidepanel_assistant_builder: 'Assistant Builder',
|
com_sidepanel_assistant_builder: 'Assistant Builder',
|
||||||
com_sidepanel_attach_files: 'Attach Files',
|
com_sidepanel_attach_files: 'Attach Files',
|
||||||
com_sidepanel_manage_files: 'Manage Files',
|
com_sidepanel_manage_files: 'Manage Files',
|
||||||
|
com_assistants_capabilities: 'Capabilities',
|
||||||
com_assistants_knowledge: 'Knowledge',
|
com_assistants_knowledge: 'Knowledge',
|
||||||
com_assistants_knowledge_info:
|
com_assistants_knowledge_info:
|
||||||
'If you upload files under Knowledge, conversations with your Assistant may include file contents.',
|
'If you upload files under Knowledge, conversations with your Assistant may include file contents.',
|
||||||
|
@ -16,7 +17,8 @@ export default {
|
||||||
com_assistants_code_interpreter_files:
|
com_assistants_code_interpreter_files:
|
||||||
'The following files are only available for Code Interpreter:',
|
'The following files are only available for Code Interpreter:',
|
||||||
com_assistants_retrieval: 'Retrieval',
|
com_assistants_retrieval: 'Retrieval',
|
||||||
com_assistants_tools_section: 'Actions, Tools',
|
com_assistants_tools: 'Tools',
|
||||||
|
com_assistants_actions: 'Actions',
|
||||||
com_assistants_add_tools: 'Add Tools',
|
com_assistants_add_tools: 'Add Tools',
|
||||||
com_assistants_add_actions: 'Add Actions',
|
com_assistants_add_actions: 'Add Actions',
|
||||||
com_assistants_name_placeholder: 'Optional: The name of the assistant',
|
com_assistants_name_placeholder: 'Optional: The name of the assistant',
|
||||||
|
@ -285,6 +287,7 @@ export default {
|
||||||
com_endpoint_skip_hover:
|
com_endpoint_skip_hover:
|
||||||
'Enable skipping the completion step, which reviews the final answer and generated steps',
|
'Enable skipping the completion step, which reviews the final answer and generated steps',
|
||||||
com_endpoint_config_key: 'Set API Key',
|
com_endpoint_config_key: 'Set API Key',
|
||||||
|
com_endpoint_assistant_placeholder: 'Please select an Assistant from the right-hand Side Panel',
|
||||||
com_endpoint_config_placeholder: 'Set your Key in the Header menu to chat.',
|
com_endpoint_config_placeholder: 'Set your Key in the Header menu to chat.',
|
||||||
com_endpoint_config_key_for: 'Set API Key for',
|
com_endpoint_config_key_for: 'Set API Key for',
|
||||||
com_endpoint_config_key_name: 'Key',
|
com_endpoint_config_key_name: 'Key',
|
||||||
|
@ -316,6 +319,7 @@ export default {
|
||||||
com_endpoint_config_key_google_service_account: 'Create a Service Account',
|
com_endpoint_config_key_google_service_account: 'Create a Service Account',
|
||||||
com_endpoint_config_key_google_vertex_api_role:
|
com_endpoint_config_key_google_vertex_api_role:
|
||||||
'Make sure to click \'Create and Continue\' to give at least the \'Vertex AI User\' role. Lastly, create a JSON key to import here.',
|
'Make sure to click \'Create and Continue\' to give at least the \'Vertex AI User\' role. Lastly, create a JSON key to import here.',
|
||||||
|
com_nav_welcome_assistant: 'Please Select an Assistant',
|
||||||
com_nav_welcome_message: 'How can I help you today?',
|
com_nav_welcome_message: 'How can I help you today?',
|
||||||
com_nav_auto_scroll: 'Auto-scroll to Newest on Open',
|
com_nav_auto_scroll: 'Auto-scroll to Newest on Open',
|
||||||
com_nav_hide_panel: 'Hide Right-most Side Panel',
|
com_nav_hide_panel: 'Hide Right-most Side Panel',
|
||||||
|
|
|
@ -10,23 +10,123 @@ weight: -10
|
||||||
|
|
||||||
LibreChat boasts compatibility with Azure OpenAI API services, treating the endpoint as a first-class citizen. To properly utilize Azure OpenAI within LibreChat, it's crucial to configure the [`librechat.yaml` file](./custom_config.md#azure-openai-object-structure) according to your specific needs. This document guides you through the essential setup process which allows seamless use of multiple deployments and models with as much flexibility as needed.
|
LibreChat boasts compatibility with Azure OpenAI API services, treating the endpoint as a first-class citizen. To properly utilize Azure OpenAI within LibreChat, it's crucial to configure the [`librechat.yaml` file](./custom_config.md#azure-openai-object-structure) according to your specific needs. This document guides you through the essential setup process which allows seamless use of multiple deployments and models with as much flexibility as needed.
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
Here's a quick snapshot of what a comprehensive configuration might look like, including many of the options and features discussed below.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
endpoints:
|
||||||
|
azureOpenAI:
|
||||||
|
# Endpoint-level configuration
|
||||||
|
titleModel: "llama-70b-chat"
|
||||||
|
plugins: true
|
||||||
|
assistants: true
|
||||||
|
groups:
|
||||||
|
# Group-level configuration
|
||||||
|
- group: "my-resource-westus"
|
||||||
|
apiKey: "${WESTUS_API_KEY}"
|
||||||
|
instanceName: "my-resource-westus"
|
||||||
|
version: "2024-03-01-preview"
|
||||||
|
# Model-level configuration
|
||||||
|
models:
|
||||||
|
gpt-4-vision-preview:
|
||||||
|
deploymentName: gpt-4-vision-preview
|
||||||
|
version: "2024-03-01-preview"
|
||||||
|
gpt-3.5-turbo:
|
||||||
|
deploymentName: gpt-35-turbo
|
||||||
|
gpt-4-1106-preview:
|
||||||
|
deploymentName: gpt-4-1106-preview
|
||||||
|
# Group-level configuration
|
||||||
|
- group: "mistral-inference"
|
||||||
|
apiKey: "${AZURE_MISTRAL_API_KEY}"
|
||||||
|
baseURL: "https://Mistral-large-vnpet-serverless.region.inference.ai.azure.com/v1/chat/completions"
|
||||||
|
serverless: true
|
||||||
|
# Model-level configuration
|
||||||
|
models:
|
||||||
|
mistral-large: true
|
||||||
|
# Group-level configuration
|
||||||
|
- group: "my-resource-sweden"
|
||||||
|
apiKey: "${SWEDEN_API_KEY}"
|
||||||
|
instanceName: "my-resource-sweden"
|
||||||
|
deploymentName: gpt-4-1106-preview
|
||||||
|
version: "2024-03-01-preview"
|
||||||
|
assistants: true
|
||||||
|
# Model-level configuration
|
||||||
|
models:
|
||||||
|
gpt-4-turbo: true
|
||||||
|
```
|
||||||
|
|
||||||
|
Here's another working example configured according to the specifications of the [Azure OpenAI Endpoint Configuration Docs:](./custom_config.md#azure-openai-object-structure)
|
||||||
|
|
||||||
|
Each level of configuration is extensively detailed in their respective sections:
|
||||||
|
|
||||||
|
1. [Endpoint-level config](#endpoint-level-configuration)
|
||||||
|
|
||||||
|
2. [Group-level config](#group-level-configuration)
|
||||||
|
|
||||||
|
3. [Model-level config](#model-level-configuration)
|
||||||
|
|
||||||
## Setup
|
## Setup
|
||||||
|
|
||||||
1. **Open `librechat.yaml` for Editing**: Use your preferred text editor or IDE to open and edit the `librechat.yaml` file.
|
1. **Open `librechat.yaml` for Editing**: Use your preferred text editor or IDE to open and edit the `librechat.yaml` file.
|
||||||
|
|
||||||
|
- Optional: use a remote or custom file path with the following environment variable:
|
||||||
|
|
||||||
|
```.env
|
||||||
|
CONFIG_PATH="/alternative/path/to/librechat.yaml"
|
||||||
|
```
|
||||||
|
|
||||||
2. **Configure Azure OpenAI Settings**: Follow the detailed structure outlined below to populate your Azure OpenAI settings appropriately. This includes specifying API keys, instance names, model groups, and other essential configurations.
|
2. **Configure Azure OpenAI Settings**: Follow the detailed structure outlined below to populate your Azure OpenAI settings appropriately. This includes specifying API keys, instance names, model groups, and other essential configurations.
|
||||||
|
|
||||||
3. **Save Your Changes**: After accurately inputting your settings, save the `librechat.yaml` file.
|
3. **Make sure to Remove Legacy Settings**: If you are using any of the [legacy configurations](#legacy-setup), be sure to remove. The LibreChat server will also detect these and remind you.
|
||||||
|
|
||||||
4. **Restart LibreChat**: For the changes to take effect, restart your LibreChat application. This ensures that the updated configurations are loaded and utilized.
|
4. **Save Your Changes**: After accurately inputting your settings, save the `librechat.yaml` file.
|
||||||
|
|
||||||
Here's a working example configured according to the specifications of the [Azure OpenAI Endpoint Configuration Docs:](./custom_config.md#azure-openai-object-structure)
|
5. **Restart LibreChat**: For the changes to take effect, restart your LibreChat application. This ensures that the updated configurations are loaded and utilized.
|
||||||
|
|
||||||
## Required Fields
|
## Required Fields
|
||||||
|
|
||||||
To properly integrate Azure OpenAI with LibreChat, specific fields must be accurately configured in your `librechat.yaml` file. These fields are validated through a combination of custom and environmental variables to ensure the correct setup. Here are the detailed requirements based on the validation process:
|
To properly integrate Azure OpenAI with LibreChat, specific fields must be accurately configured in your `librechat.yaml` file. These fields are validated through a combination of custom and environmental variables to ensure the correct setup. Here are the detailed requirements based on the validation process:
|
||||||
|
|
||||||
### Group-Level Configuration
|
## Endpoint-Level Configuration
|
||||||
|
|
||||||
|
These settings apply globally to all Azure models and groups within the endpoint. Here are the available fields:
|
||||||
|
|
||||||
|
1. **titleModel** (String, Optional): Specifies the model to use for generating conversation titles. If not provided, the default model is set as `gpt-3.5-turbo`, which will result in no titles if lacking this model.
|
||||||
|
|
||||||
|
2. **plugins** (Boolean, Optional): Enables the use of plugins through Azure. Set to `true` to activate Plugins endpoint support through your Azure config. Default: `false`.
|
||||||
|
|
||||||
|
3. **assistants** (Boolean, Optional): Enables the use of assistants through Azure. Set to `true` to activate Assistants endpoint through your Azure config. Default: `false`. Note: this requires an assistants-compatible region.
|
||||||
|
|
||||||
|
4. **summarize** (Boolean, Optional): Enables conversation summarization for all Azure models. Set to `true` to activate summarization. Default: `false`.
|
||||||
|
|
||||||
|
5. **summaryModel** (String, Optional): Specifies the model to use for generating conversation summaries. If not provided, the default behavior is to use the first model in the `default` array of the first group.
|
||||||
|
|
||||||
|
6. **titleConvo** (Boolean, Optional): Enables conversation title generation for all Azure models. Set to `true` to activate title generation. Default: `false`.
|
||||||
|
|
||||||
|
7. **titleMethod** (String, Optional): Specifies the method to use for generating conversation titles. Valid options are `"completion"` and `"functions"`. If not provided, the default behavior is to use the `"completion"` method.
|
||||||
|
|
||||||
|
8. **groups** (Array/List, Required): Specifies the list of Azure OpenAI model groups. Each group represents a set of models with shared configurations. The groups field is an array of objects, where each object defines the settings for a specific group. This is a required field at the endpoint level, and at least one group must be defined. The group-level configurations are detailed in the Group-Level Configuration section.
|
||||||
|
|
||||||
|
<!-- 9. **customOrder** (Number, Optional): Allows you to specify a custom order for the Azure endpoint in the user interface. Higher numbers will appear lower in the list. If not provided, the default order is determined by the order in which the endpoints are defined in the `librechat.yaml` file. -->
|
||||||
|
|
||||||
|
Here's an example of how you can configure these endpoint-level settings in your `librechat.yaml` file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
endpoints:
|
||||||
|
azureOpenAI:
|
||||||
|
titleModel: "gpt-3.5-turbo-1106"
|
||||||
|
plugins: true
|
||||||
|
assistants: true
|
||||||
|
summarize: true
|
||||||
|
summaryModel: "gpt-3.5-turbo-1106"
|
||||||
|
titleConvo: true
|
||||||
|
titleMethod: "functions"
|
||||||
|
groups:
|
||||||
|
# ... (group-level and model-level configurations)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Group-Level Configuration
|
||||||
|
|
||||||
This is a breakdown of the fields configurable as defined for the Custom Config (`librechat.yaml`) file. For more information on each field, see the [Azure OpenAI section in the Custom Config Docs](./custom_config.md#azure-openai-object-structure).
|
This is a breakdown of the fields configurable as defined for the Custom Config (`librechat.yaml`) file. For more information on each field, see the [Azure OpenAI section in the Custom Config Docs](./custom_config.md#azure-openai-object-structure).
|
||||||
|
|
||||||
|
@ -38,7 +138,7 @@ This is a breakdown of the fields configurable as defined for the Custom Config
|
||||||
|
|
||||||
4. **deploymentName** (String, Optional): The deployment name at the group level is optional but required if any model within the group is set to `true`.
|
4. **deploymentName** (String, Optional): The deployment name at the group level is optional but required if any model within the group is set to `true`.
|
||||||
|
|
||||||
5. **version** (String, Optional): The version of the Azure OpenAI service at the group level is optional but required if any model within the group is set to `true`.
|
5. **version** (String, Optional): The Azure OpenAI API version at the group level is optional but required if any model within the group is set to `true`.
|
||||||
|
|
||||||
6. **baseURL** (String, Optional): Custom base URL for the Azure OpenAI API requests. Environment variable references are supported. This is optional and can be used for advanced routing scenarios.
|
6. **baseURL** (String, Optional): Custom base URL for the Azure OpenAI API requests. Environment variable references are supported. This is optional and can be used for advanced routing scenarios.
|
||||||
|
|
||||||
|
@ -52,16 +152,61 @@ This is a breakdown of the fields configurable as defined for the Custom Config
|
||||||
|
|
||||||
11. **forcePrompt** (Boolean, Optional): Dictates whether to send a `prompt` parameter instead of `messages` in the request body. This option is useful when needing to format the request in a manner consistent with OpenAI's API expectations, particularly for scenarios preferring a single text payload.
|
11. **forcePrompt** (Boolean, Optional): Dictates whether to send a `prompt` parameter instead of `messages` in the request body. This option is useful when needing to format the request in a manner consistent with OpenAI's API expectations, particularly for scenarios preferring a single text payload.
|
||||||
|
|
||||||
### Model-Level Configuration
|
12. **models** (Object, Required): Specifies the mapping of model identifiers to their configurations within the group. The keys represent the model identifiers, which must match the corresponding OpenAI model names. The values can be either boolean (true) or objects containing model-specific settings. If a model is set to true, it inherits the group-level deploymentName and version. If a model is configured as an object, it can have its own deploymentName and version. This field is required, and at least one model must be defined within each group. [More info here](#model-level-configuration)
|
||||||
|
|
||||||
Within each group, the `models` field must contain a mapping of records, or model identifiers to either boolean values or object configurations.
|
Here's an example of a group-level configuration in the librechat.yaml file
|
||||||
|
|
||||||
- The key or model identifier must match its corresponding OpenAI model name in order for it to properly reflect its known context limits and/or function in the case of vision. For example, if you intend to use gpt-4-vision, it must be configured like so:
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
models:
|
endpoints:
|
||||||
|
azureOpenAI:
|
||||||
|
# ... (endpoint-level configurations)
|
||||||
|
groups:
|
||||||
|
- group: "my-resource-group"
|
||||||
|
apiKey: "${AZURE_API_KEY}"
|
||||||
|
instanceName: "my-instance"
|
||||||
|
deploymentName: "gpt-35-turbo"
|
||||||
|
version: "2023-03-15-preview"
|
||||||
|
baseURL: "https://my-instance.openai.azure.com/"
|
||||||
|
additionalHeaders:
|
||||||
|
CustomHeader: "HeaderValue"
|
||||||
|
addParams:
|
||||||
|
max_tokens: 2048
|
||||||
|
temperature: 0.7
|
||||||
|
dropParams:
|
||||||
|
- "frequency_penalty"
|
||||||
|
- "presence_penalty"
|
||||||
|
forcePrompt: false
|
||||||
|
models:
|
||||||
|
# ... (model-level configurations)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Model-Level Configuration
|
||||||
|
|
||||||
|
Within each group, the `models` field contains a mapping of model identifiers to their configurations:
|
||||||
|
|
||||||
|
1. **Model Identifier** (String, Required): Must match the corresponding OpenAI model name. Can be a partial match.
|
||||||
|
|
||||||
|
2. **Model Configuration** (Boolean or Object, Required):
|
||||||
|
- Boolean `true`: Uses the group-level `deploymentName` and `version`.
|
||||||
|
- Object: Specifies model-specific `deploymentName` and `version`. If not provided, inherits from the group.
|
||||||
|
- **deploymentName** (String, Optional): The deployment name for this specific model.
|
||||||
|
- **version** (String, Optional): The Azure OpenAI API version for this specific model.
|
||||||
|
|
||||||
|
3. **Serverless Inference Endpoints**: For serverless models, set the model to `true`.
|
||||||
|
|
||||||
|
- The **model identifier must match its corresponding OpenAI model name** in order for it to properly reflect its known context limits and/or function in the case of vision. For example, if you intend to use gpt-4-vision, it must be configured like so:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
endpoints:
|
||||||
|
azureOpenAI:
|
||||||
|
# ... (endpoint-level configurations)
|
||||||
|
groups:
|
||||||
|
# ... (group-level configurations)
|
||||||
|
- group: "example_group"
|
||||||
|
models:
|
||||||
|
# Model identifiers must match OpenAI Model name (can be a partial match)
|
||||||
|
gpt-4-vision-preview:
|
||||||
# Object setting: must include at least "deploymentName" and/or "version"
|
# Object setting: must include at least "deploymentName" and/or "version"
|
||||||
gpt-4-vision-preview: # Must match OpenAI Model name
|
|
||||||
deploymentName: "arbitrary-deployment-name"
|
deploymentName: "arbitrary-deployment-name"
|
||||||
version: "2024-02-15-preview" # version can be any that supports vision
|
version: "2024-02-15-preview" # version can be any that supports vision
|
||||||
# Boolean setting, must be "true"
|
# Boolean setting, must be "true"
|
||||||
|
@ -122,6 +267,60 @@ endpoints:
|
||||||
|
|
||||||
The above configuration would enable `gpt-4-vision-preview`, `gpt-3.5-turbo` and `gpt-4-turbo` for your users in the order they were defined.
|
The above configuration would enable `gpt-4-vision-preview`, `gpt-3.5-turbo` and `gpt-4-turbo` for your users in the order they were defined.
|
||||||
|
|
||||||
|
### Using Assistants with Azure
|
||||||
|
|
||||||
|
To enable use of Assistants with Azure OpenAI, there are 2 main steps.
|
||||||
|
|
||||||
|
1) Set the `assistants` field at the [Endpoint-level](#endpoint-level-configuration) to `true`, like so:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
endpoints:
|
||||||
|
azureOpenAI:
|
||||||
|
# Enable use of Assistants with Azure
|
||||||
|
assistants: true
|
||||||
|
```
|
||||||
|
|
||||||
|
2) Add the `assistants` field to all groups compatible with Azure's Assistants API integration.
|
||||||
|
|
||||||
|
- At least one of your group configurations must be compatible.
|
||||||
|
- You can check the [compatible regions and models in the Azure docs here](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#assistants-preview).
|
||||||
|
- The version must also be "2024-02-15-preview" or later, preferably later for access to the latest features.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
endpoints:
|
||||||
|
azureOpenAI:
|
||||||
|
assistants: true
|
||||||
|
groups:
|
||||||
|
- group: "my-sweden-group"
|
||||||
|
apiKey: "${SWEDEN_API_KEY}"
|
||||||
|
instanceName: "actual-instance-name"
|
||||||
|
# Mark this group as assistants compatible
|
||||||
|
assistants: true
|
||||||
|
# version must be "2024-02-15-preview" or later
|
||||||
|
version: "2024-03-01-preview"
|
||||||
|
models:
|
||||||
|
# ... (model-level configuration)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Notes:**
|
||||||
|
|
||||||
|
- If you mark multiple regions as assistants-compatible, assistants you create will be aggregated across regions to the main assistant selection list.
|
||||||
|
- Files you upload to Azure OpenAI, whether at the message or assistant level, will only be available in the region the current assistant's model is part of.
|
||||||
|
- For this reason, it's recommended you use only one region or resource group for Azure OpenAI Assistants, or you will experience an error.
|
||||||
|
- Uploading to "OpenAI" is the default behavior for official `code_interpreter` and `retrieval` capabilities.
|
||||||
|
- Downloading files that assistants generate will soon be supported.
|
||||||
|
- As of March 14th 2024, retrieval and streaming are not supported through Azure OpenAI.
|
||||||
|
- To avoid any errors with retrieval while it's not supported, it's recommended to disable the capability altogether through the `assistants` endpoint config:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
endpoints:
|
||||||
|
assistants:
|
||||||
|
# "retrieval" omitted.
|
||||||
|
capabilities: ["code_interpreter", "actions", "tools"]
|
||||||
|
```
|
||||||
|
|
||||||
|
- By default, all capabilities are enabled.
|
||||||
|
|
||||||
### Using Plugins with Azure
|
### Using Plugins with Azure
|
||||||
|
|
||||||
To use the Plugins endpoint with Azure OpenAI, you need a deployment supporting **[function calling](https://techcommunity.microsoft.com/t5/azure-ai-services-blog/function-calling-is-now-available-in-azure-openai-service/ba-p/3879241)**. Otherwise, you need to set "Functions" off in the Agent settings. When you are not using "functions" mode, it's recommended to have "skip completion" off as well, which is a review step of what the agent generated.
|
To use the Plugins endpoint with Azure OpenAI, you need a deployment supporting **[function calling](https://techcommunity.microsoft.com/t5/azure-ai-services-blog/function-calling-is-now-available-in-azure-openai-service/ba-p/3879241)**. Otherwise, you need to set "Functions" off in the Agent settings. When you are not using "functions" mode, it's recommended to have "skip completion" off as well, which is a review step of what the agent generated.
|
||||||
|
|
|
@ -71,33 +71,42 @@ docker compose up # no need to rebuild
|
||||||
## Example Config
|
## Example Config
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
version: 1.0.3
|
version: 1.0.5
|
||||||
cache: true
|
cache: true
|
||||||
# fileStrategy: "firebase" # If using Firebase CDN
|
# fileStrategy: "firebase" # If using Firebase CDN
|
||||||
fileConfig:
|
fileConfig:
|
||||||
endpoints:
|
endpoints:
|
||||||
assistants:
|
assistants:
|
||||||
fileLimit: 5
|
fileLimit: 5
|
||||||
fileSizeLimit: 10 # Maximum size for an individual file in MB
|
# Maximum size for an individual file in MB
|
||||||
totalSizeLimit: 50 # Maximum total size for all files in a single request in MB
|
fileSizeLimit: 10
|
||||||
# supportedMimeTypes: # In case you wish to limit certain filetypes
|
# Maximum total size for all files in a single request in MB
|
||||||
|
totalSizeLimit: 50
|
||||||
|
# In case you wish to limit certain filetypes
|
||||||
|
# supportedMimeTypes:
|
||||||
# - "image/.*"
|
# - "image/.*"
|
||||||
# - "application/pdf"
|
# - "application/pdf"
|
||||||
openAI:
|
openAI:
|
||||||
disabled: true # Disables file uploading to the OpenAI endpoint
|
# Disables file uploading to the OpenAI endpoint
|
||||||
|
disabled: true
|
||||||
default:
|
default:
|
||||||
totalSizeLimit: 20
|
totalSizeLimit: 20
|
||||||
# YourCustomEndpointName: # Example for custom endpoints
|
# Example for custom endpoints
|
||||||
|
# YourCustomEndpointName:
|
||||||
# fileLimit: 2
|
# fileLimit: 2
|
||||||
# fileSizeLimit: 5
|
# fileSizeLimit: 5
|
||||||
serverFileSizeLimit: 100 # Global server file size limit in MB
|
# Global server file size limit in MB
|
||||||
avatarSizeLimit: 4 # Limit for user avatar image size in MB, default: 2 MB
|
serverFileSizeLimit: 100
|
||||||
|
# Limit for user avatar image size in MB, default: 2 MB
|
||||||
|
avatarSizeLimit: 4
|
||||||
rateLimits:
|
rateLimits:
|
||||||
fileUploads:
|
fileUploads:
|
||||||
ipMax: 100
|
ipMax: 100
|
||||||
ipWindowInMinutes: 60 # Rate limit window for file uploads per IP
|
# Rate limit window for file uploads per IP
|
||||||
|
ipWindowInMinutes: 60
|
||||||
userMax: 50
|
userMax: 50
|
||||||
userWindowInMinutes: 60 # Rate limit window for file uploads per user
|
# Rate limit window for file uploads per user
|
||||||
|
userWindowInMinutes: 60
|
||||||
registration:
|
registration:
|
||||||
socialLogins: ["google", "facebook", "github", "discord", "openid"]
|
socialLogins: ["google", "facebook", "github", "discord", "openid"]
|
||||||
allowedDomains:
|
allowedDomains:
|
||||||
|
@ -105,26 +114,35 @@ registration:
|
||||||
- "anotherdomain.com"
|
- "anotherdomain.com"
|
||||||
endpoints:
|
endpoints:
|
||||||
assistants:
|
assistants:
|
||||||
disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
|
# Disable Assistants Builder Interface by setting to `true`
|
||||||
pollIntervalMs: 750 # Polling interval for checking assistant updates
|
disableBuilder: false
|
||||||
timeoutMs: 180000 # Timeout for assistant operations
|
# Polling interval for checking assistant updates
|
||||||
|
pollIntervalMs: 750
|
||||||
|
# Timeout for assistant operations
|
||||||
|
timeoutMs: 180000
|
||||||
# Should only be one or the other, either `supportedIds` or `excludedIds`
|
# Should only be one or the other, either `supportedIds` or `excludedIds`
|
||||||
supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
|
supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
|
||||||
# excludedIds: ["asst_excludedAssistantId"]
|
# excludedIds: ["asst_excludedAssistantId"]
|
||||||
|
# (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
|
||||||
|
# retrievalModels: ["gpt-4-turbo-preview"]
|
||||||
|
# (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
|
||||||
|
# capabilities: ["code_interpreter", "retrieval", "actions", "tools"]
|
||||||
custom:
|
custom:
|
||||||
- name: "Mistral"
|
- name: "Mistral"
|
||||||
apiKey: "${MISTRAL_API_KEY}"
|
apiKey: "${MISTRAL_API_KEY}"
|
||||||
baseURL: "https://api.mistral.ai/v1"
|
baseURL: "https://api.mistral.ai/v1"
|
||||||
models:
|
models:
|
||||||
default: ["mistral-tiny", "mistral-small", "mistral-medium", "mistral-large-latest"]
|
default: ["mistral-tiny", "mistral-small", "mistral-medium", "mistral-large-latest"]
|
||||||
fetch: true # Attempt to dynamically fetch available models
|
# Attempt to dynamically fetch available models
|
||||||
|
fetch: true
|
||||||
userIdQuery: false
|
userIdQuery: false
|
||||||
iconURL: "https://example.com/mistral-icon.png"
|
iconURL: "https://example.com/mistral-icon.png"
|
||||||
titleConvo: true
|
titleConvo: true
|
||||||
titleModel: "mistral-tiny"
|
titleModel: "mistral-tiny"
|
||||||
modelDisplayLabel: "Mistral AI"
|
modelDisplayLabel: "Mistral AI"
|
||||||
# addParams:
|
# addParams:
|
||||||
# safe_prompt: true # Mistral specific value for moderating messages
|
# Mistral API specific value for moderating messages
|
||||||
|
# safe_prompt: true
|
||||||
dropParams:
|
dropParams:
|
||||||
- "stop"
|
- "stop"
|
||||||
- "user"
|
- "user"
|
||||||
|
@ -170,7 +188,7 @@ This example configuration file sets up LibreChat with detailed options across s
|
||||||
- **Key**: `version`
|
- **Key**: `version`
|
||||||
- **Type**: String
|
- **Type**: String
|
||||||
- **Description**: Specifies the version of the configuration file.
|
- **Description**: Specifies the version of the configuration file.
|
||||||
- **Example**: `version: 1.0.1`
|
- **Example**: `version: 1.0.5`
|
||||||
- **Required**
|
- **Required**
|
||||||
|
|
||||||
### Cache Settings
|
### Cache Settings
|
||||||
|
@ -454,6 +472,10 @@ endpoints:
|
||||||
# Use either `supportedIds` or `excludedIds` but not both
|
# Use either `supportedIds` or `excludedIds` but not both
|
||||||
supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
|
supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
|
||||||
# excludedIds: ["asst_excludedAssistantId"]
|
# excludedIds: ["asst_excludedAssistantId"]
|
||||||
|
# (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
|
||||||
|
# retrievalModels: ["gpt-4-turbo-preview"]
|
||||||
|
# (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
|
||||||
|
# capabilities: ["code_interpreter", "retrieval", "actions", "tools"]
|
||||||
```
|
```
|
||||||
> This configuration enables the builder interface for assistants, sets a polling interval of 500ms to check for run updates, and establishes a timeout of 10 seconds for assistant run operations.
|
> This configuration enables the builder interface for assistants, sets a polling interval of 500ms to check for run updates, and establishes a timeout of 10 seconds for assistant run operations.
|
||||||
|
|
||||||
|
@ -502,6 +524,28 @@ In addition to custom endpoints, you can configure settings specific to the assi
|
||||||
- **Description**: List of excluded assistant Ids. Use this or `supportedIds` but not both (the `excludedIds` field will be ignored if so).
|
- **Description**: List of excluded assistant Ids. Use this or `supportedIds` but not both (the `excludedIds` field will be ignored if so).
|
||||||
- **Example**: `excludedIds: ["asst_excludedAssistantId1", "asst_excludedAssistantId2"]`
|
- **Example**: `excludedIds: ["asst_excludedAssistantId1", "asst_excludedAssistantId2"]`
|
||||||
|
|
||||||
|
### **retrievalModels**:
|
||||||
|
|
||||||
|
> Specifies the models that support retrieval for the assistants endpoint.
|
||||||
|
|
||||||
|
- **Type**: Array/List of Strings
|
||||||
|
- **Example**: `retrievalModels: ["gpt-4-turbo-preview"]`
|
||||||
|
- **Description**: Defines the models that support retrieval capabilities for the assistants endpoint. By default, it uses the latest known OpenAI models that support the official Retrieval feature.
|
||||||
|
- **Note**: This field is optional. If omitted, the default behavior is to use the latest known OpenAI models that support retrieval.
|
||||||
|
|
||||||
|
### **capabilities**:
|
||||||
|
|
||||||
|
> Specifies the assistant capabilities available to all users for the assistants endpoint.
|
||||||
|
|
||||||
|
- **Type**: Array/List of Strings
|
||||||
|
- **Example**: `capabilities: ["code_interpreter", "retrieval", "actions", "tools"]`
|
||||||
|
- **Description**: Defines the assistant capabilities that are available to all users for the assistants endpoint. You can omit the capabilities you wish to exclude from the list. The available capabilities are:
|
||||||
|
- `code_interpreter`: Enables code interpretation capabilities for the assistant.
|
||||||
|
- `retrieval`: Enables retrieval capabilities for the assistant.
|
||||||
|
- `actions`: Enables action capabilities for the assistant.
|
||||||
|
- `tools`: Enables tool capabilities for the assistant.
|
||||||
|
- **Note**: This field is optional. If omitted, the default behavior is to include all the capabilities listed in the example.
|
||||||
|
|
||||||
## Custom Endpoint Object Structure
|
## Custom Endpoint Object Structure
|
||||||
Each endpoint in the `custom` array should have the following structure:
|
Each endpoint in the `custom` array should have the following structure:
|
||||||
|
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# https://docs.librechat.ai/install/configuration/custom_config.html
|
# https://docs.librechat.ai/install/configuration/custom_config.html
|
||||||
|
|
||||||
# Configuration version (required)
|
# Configuration version (required)
|
||||||
version: 1.0.4
|
version: 1.0.5
|
||||||
|
|
||||||
# Cache settings: Set to true to enable caching
|
# Cache settings: Set to true to enable caching
|
||||||
cache: true
|
cache: true
|
||||||
|
@ -59,6 +59,10 @@ endpoints:
|
||||||
# # Should only be one or the other, either `supportedIds` or `excludedIds`
|
# # Should only be one or the other, either `supportedIds` or `excludedIds`
|
||||||
# supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
|
# supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
|
||||||
# # excludedIds: ["asst_excludedAssistantId"]
|
# # excludedIds: ["asst_excludedAssistantId"]
|
||||||
|
# # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
|
||||||
|
# retrievalModels: ["gpt-4-turbo-preview"]
|
||||||
|
# # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
|
||||||
|
# capabilities: ["code_interpreter", "retrieval", "actions", "tools"]
|
||||||
custom:
|
custom:
|
||||||
# Groq Example
|
# Groq Example
|
||||||
- name: 'groq'
|
- name: 'groq'
|
||||||
|
|
8
package-lock.json
generated
8
package-lock.json
generated
|
@ -80,7 +80,7 @@
|
||||||
"multer": "^1.4.5-lts.1",
|
"multer": "^1.4.5-lts.1",
|
||||||
"nodejs-gpt": "^1.37.4",
|
"nodejs-gpt": "^1.37.4",
|
||||||
"nodemailer": "^6.9.4",
|
"nodemailer": "^6.9.4",
|
||||||
"openai": "^4.20.1",
|
"openai": "^4.28.4",
|
||||||
"openai-chat-tokens": "^0.2.8",
|
"openai-chat-tokens": "^0.2.8",
|
||||||
"openid-client": "^5.4.2",
|
"openid-client": "^5.4.2",
|
||||||
"passport": "^0.6.0",
|
"passport": "^0.6.0",
|
||||||
|
@ -115,9 +115,9 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"api/node_modules/openai": {
|
"api/node_modules/openai": {
|
||||||
"version": "4.26.1",
|
"version": "4.28.4",
|
||||||
"resolved": "https://registry.npmjs.org/openai/-/openai-4.26.1.tgz",
|
"resolved": "https://registry.npmjs.org/openai/-/openai-4.28.4.tgz",
|
||||||
"integrity": "sha512-DvWbjhWbappsFRatOWmu4Dp1/Q4RG9oOz6CfOSjy0/Drb8G+5iAiqWAO4PfpGIkhOOKtvvNfQri2SItl+U7LhQ==",
|
"integrity": "sha512-RNIwx4MT/F0zyizGcwS+bXKLzJ8QE9IOyigDG/ttnwB220d58bYjYFp0qjvGwEFBO6+pvFVIDABZPGDl46RFsg==",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@types/node": "^18.11.18",
|
"@types/node": "^18.11.18",
|
||||||
"@types/node-fetch": "^2.6.4",
|
"@types/node-fetch": "^2.6.4",
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{
|
{
|
||||||
"name": "librechat-data-provider",
|
"name": "librechat-data-provider",
|
||||||
"version": "0.4.7",
|
"version": "0.4.8",
|
||||||
"description": "data services for librechat apps",
|
"description": "data services for librechat apps",
|
||||||
"main": "dist/index.js",
|
"main": "dist/index.js",
|
||||||
"module": "dist/index.es.js",
|
"module": "dist/index.es.js",
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
import type { TAzureGroups } from '../src/config';
|
import type { TAzureGroups } from '../src/config';
|
||||||
import { validateAzureGroups, mapModelToAzureConfig } from '../src/azure';
|
import { validateAzureGroups, mapModelToAzureConfig, mapGroupToAzureConfig } from '../src/azure';
|
||||||
|
|
||||||
describe('validateAzureGroups', () => {
|
describe('validateAzureGroups', () => {
|
||||||
it('should validate a correct configuration', () => {
|
it('should validate a correct configuration', () => {
|
||||||
|
@ -785,3 +785,57 @@ describe('validateAzureGroups with modelGroupMap and groupMap', () => {
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
describe('mapGroupToAzureConfig', () => {
|
||||||
|
// Test setup for a basic config with 2 groups
|
||||||
|
const groupMap = {
|
||||||
|
group1: {
|
||||||
|
apiKey: 'key-for-group1',
|
||||||
|
instanceName: 'instance-group1',
|
||||||
|
models: {
|
||||||
|
model1: { deploymentName: 'deployment1', version: '1.0' },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
group2: {
|
||||||
|
apiKey: 'key-for-group2',
|
||||||
|
instanceName: 'instance-group2',
|
||||||
|
serverless: true,
|
||||||
|
baseURL: 'https://group2.example.com',
|
||||||
|
models: {
|
||||||
|
model2: true, // demonstrating a boolean style model configuration
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
it('should successfully map non-serverless group configuration', () => {
|
||||||
|
const groupName = 'group1';
|
||||||
|
const result = mapGroupToAzureConfig({ groupName, groupMap });
|
||||||
|
expect(result).toEqual({
|
||||||
|
azureOptions: expect.objectContaining({
|
||||||
|
azureOpenAIApiKey: 'key-for-group1',
|
||||||
|
azureOpenAIApiInstanceName: 'instance-group1',
|
||||||
|
azureOpenAIApiDeploymentName: expect.any(String),
|
||||||
|
azureOpenAIApiVersion: expect.any(String),
|
||||||
|
}),
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should successfully map serverless group configuration', () => {
|
||||||
|
const groupName = 'group2';
|
||||||
|
const result = mapGroupToAzureConfig({ groupName, groupMap });
|
||||||
|
expect(result).toEqual({
|
||||||
|
azureOptions: expect.objectContaining({
|
||||||
|
azureOpenAIApiKey: 'key-for-group2',
|
||||||
|
}),
|
||||||
|
baseURL: 'https://group2.example.com',
|
||||||
|
serverless: true,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should throw error for nonexistent group name', () => {
|
||||||
|
const groupName = 'nonexistent-group';
|
||||||
|
expect(() => {
|
||||||
|
mapGroupToAzureConfig({ groupName, groupMap });
|
||||||
|
}).toThrow(`Group named "${groupName}" not found in configuration.`);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
|
@ -66,7 +66,20 @@ export const plugins = () => '/api/plugins';
|
||||||
|
|
||||||
export const config = () => '/api/config';
|
export const config = () => '/api/config';
|
||||||
|
|
||||||
export const assistants = (id?: string) => `/api/assistants${id ? `/${id}` : ''}`;
|
export const assistants = (id?: string, options?: Record<string, string>) => {
|
||||||
|
let url = '/api/assistants';
|
||||||
|
|
||||||
|
if (id) {
|
||||||
|
url += `/${id}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (options && Object.keys(options).length > 0) {
|
||||||
|
const queryParams = new URLSearchParams(options).toString();
|
||||||
|
url += `?${queryParams}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
return url;
|
||||||
|
};
|
||||||
|
|
||||||
export const files = () => '/api/files';
|
export const files = () => '/api/files';
|
||||||
|
|
||||||
|
|
|
@ -234,14 +234,16 @@ export function mapModelToAzureConfig({
|
||||||
}
|
}
|
||||||
|
|
||||||
const modelDetails = groupConfig.models[modelName];
|
const modelDetails = groupConfig.models[modelName];
|
||||||
const deploymentName =
|
const { deploymentName, version } =
|
||||||
typeof modelDetails === 'object'
|
typeof modelDetails === 'object'
|
||||||
? modelDetails.deploymentName || groupConfig.deploymentName
|
? {
|
||||||
: groupConfig.deploymentName;
|
deploymentName: modelDetails.deploymentName || groupConfig.deploymentName,
|
||||||
const version =
|
version: modelDetails.version || groupConfig.version,
|
||||||
typeof modelDetails === 'object'
|
}
|
||||||
? modelDetails.version || groupConfig.version
|
: {
|
||||||
: groupConfig.version;
|
deploymentName: groupConfig.deploymentName,
|
||||||
|
version: groupConfig.version,
|
||||||
|
};
|
||||||
|
|
||||||
if (!deploymentName || !version) {
|
if (!deploymentName || !version) {
|
||||||
throw new Error(
|
throw new Error(
|
||||||
|
@ -274,3 +276,86 @@ export function mapModelToAzureConfig({
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function mapGroupToAzureConfig({
|
||||||
|
groupName,
|
||||||
|
groupMap,
|
||||||
|
}: {
|
||||||
|
groupName: string;
|
||||||
|
groupMap: TAzureGroupMap;
|
||||||
|
}): MappedAzureConfig {
|
||||||
|
const groupConfig = groupMap[groupName];
|
||||||
|
if (!groupConfig) {
|
||||||
|
throw new Error(`Group named "${groupName}" not found in configuration.`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const instanceName = groupConfig.instanceName as string;
|
||||||
|
|
||||||
|
if (!instanceName && !groupConfig.serverless) {
|
||||||
|
throw new Error(
|
||||||
|
`Group "${groupName}" is missing an instanceName for non-serverless configuration.`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (groupConfig.serverless && !groupConfig.baseURL) {
|
||||||
|
throw new Error(
|
||||||
|
`Group "${groupName}" is missing the required base URL for serverless configuration.`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const models = Object.keys(groupConfig.models);
|
||||||
|
if (models.length === 0) {
|
||||||
|
throw new Error(`Group "${groupName}" does not have any models configured.`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use the first available model in the group
|
||||||
|
const firstModelName = models[0];
|
||||||
|
const modelDetails = groupConfig.models[firstModelName];
|
||||||
|
|
||||||
|
const azureOptions: AzureOptions = {
|
||||||
|
azureOpenAIApiKey: extractEnvVariable(groupConfig.apiKey),
|
||||||
|
azureOpenAIApiInstanceName: extractEnvVariable(instanceName),
|
||||||
|
// DeploymentName and Version set below
|
||||||
|
};
|
||||||
|
|
||||||
|
if (groupConfig.serverless) {
|
||||||
|
return {
|
||||||
|
azureOptions,
|
||||||
|
baseURL: extractEnvVariable(groupConfig.baseURL ?? ''),
|
||||||
|
serverless: true,
|
||||||
|
...(groupConfig.additionalHeaders && { headers: groupConfig.additionalHeaders }),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
const { deploymentName, version } =
|
||||||
|
typeof modelDetails === 'object'
|
||||||
|
? {
|
||||||
|
deploymentName: modelDetails.deploymentName || groupConfig.deploymentName,
|
||||||
|
version: modelDetails.version || groupConfig.version,
|
||||||
|
}
|
||||||
|
: {
|
||||||
|
deploymentName: groupConfig.deploymentName,
|
||||||
|
version: groupConfig.version,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!deploymentName || !version) {
|
||||||
|
throw new Error(
|
||||||
|
`Model "${firstModelName}" in group "${groupName}" or the group itself is missing a deploymentName ("${deploymentName}") or version ("${version}").`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
azureOptions.azureOpenAIApiDeploymentName = extractEnvVariable(deploymentName);
|
||||||
|
azureOptions.azureOpenAIApiVersion = extractEnvVariable(version);
|
||||||
|
|
||||||
|
const result: MappedAzureConfig = { azureOptions };
|
||||||
|
|
||||||
|
if (groupConfig.baseURL) {
|
||||||
|
result.baseURL = extractEnvVariable(groupConfig.baseURL);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (groupConfig.additionalHeaders) {
|
||||||
|
result.headers = groupConfig.additionalHeaders;
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
|
@ -6,12 +6,25 @@ import { FileSources } from './types/files';
|
||||||
|
|
||||||
export const defaultSocialLogins = ['google', 'facebook', 'openid', 'github', 'discord'];
|
export const defaultSocialLogins = ['google', 'facebook', 'openid', 'github', 'discord'];
|
||||||
|
|
||||||
|
export const defaultRetrievalModels = [
|
||||||
|
'gpt-4-turbo-preview',
|
||||||
|
'gpt-3.5-turbo-0125',
|
||||||
|
'gpt-4-0125-preview',
|
||||||
|
'gpt-4-1106-preview',
|
||||||
|
'gpt-3.5-turbo-1106',
|
||||||
|
'gpt-3.5-turbo-0125',
|
||||||
|
'gpt-4-turbo',
|
||||||
|
'gpt-4-0125',
|
||||||
|
'gpt-4-1106',
|
||||||
|
];
|
||||||
|
|
||||||
export const fileSourceSchema = z.nativeEnum(FileSources);
|
export const fileSourceSchema = z.nativeEnum(FileSources);
|
||||||
|
|
||||||
export const modelConfigSchema = z
|
export const modelConfigSchema = z
|
||||||
.object({
|
.object({
|
||||||
deploymentName: z.string().optional(),
|
deploymentName: z.string().optional(),
|
||||||
version: z.string().optional(),
|
version: z.string().optional(),
|
||||||
|
assistants: z.boolean().optional(),
|
||||||
})
|
})
|
||||||
.or(z.boolean());
|
.or(z.boolean());
|
||||||
|
|
||||||
|
@ -22,6 +35,7 @@ export const azureBaseSchema = z.object({
|
||||||
serverless: z.boolean().optional(),
|
serverless: z.boolean().optional(),
|
||||||
instanceName: z.string().optional(),
|
instanceName: z.string().optional(),
|
||||||
deploymentName: z.string().optional(),
|
deploymentName: z.string().optional(),
|
||||||
|
assistants: z.boolean().optional(),
|
||||||
addParams: z.record(z.any()).optional(),
|
addParams: z.record(z.any()).optional(),
|
||||||
dropParams: z.array(z.string()).optional(),
|
dropParams: z.array(z.string()).optional(),
|
||||||
forcePrompt: z.boolean().optional(),
|
forcePrompt: z.boolean().optional(),
|
||||||
|
@ -61,6 +75,13 @@ export type TValidatedAzureConfig = {
|
||||||
groupMap: TAzureGroupMap;
|
groupMap: TAzureGroupMap;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
export enum Capabilities {
|
||||||
|
code_interpreter = 'code_interpreter',
|
||||||
|
retrieval = 'retrieval',
|
||||||
|
actions = 'actions',
|
||||||
|
tools = 'tools',
|
||||||
|
}
|
||||||
|
|
||||||
export const assistantEndpointSchema = z.object({
|
export const assistantEndpointSchema = z.object({
|
||||||
/* assistants specific */
|
/* assistants specific */
|
||||||
disableBuilder: z.boolean().optional(),
|
disableBuilder: z.boolean().optional(),
|
||||||
|
@ -68,6 +89,16 @@ export const assistantEndpointSchema = z.object({
|
||||||
timeoutMs: z.number().optional(),
|
timeoutMs: z.number().optional(),
|
||||||
supportedIds: z.array(z.string()).min(1).optional(),
|
supportedIds: z.array(z.string()).min(1).optional(),
|
||||||
excludedIds: z.array(z.string()).min(1).optional(),
|
excludedIds: z.array(z.string()).min(1).optional(),
|
||||||
|
retrievalModels: z.array(z.string()).min(1).optional().default(defaultRetrievalModels),
|
||||||
|
capabilities: z
|
||||||
|
.array(z.nativeEnum(Capabilities))
|
||||||
|
.optional()
|
||||||
|
.default([
|
||||||
|
Capabilities.code_interpreter,
|
||||||
|
Capabilities.retrieval,
|
||||||
|
Capabilities.actions,
|
||||||
|
Capabilities.tools,
|
||||||
|
]),
|
||||||
/* general */
|
/* general */
|
||||||
apiKey: z.string().optional(),
|
apiKey: z.string().optional(),
|
||||||
baseURL: z.string().optional(),
|
baseURL: z.string().optional(),
|
||||||
|
@ -116,6 +147,7 @@ export const azureEndpointSchema = z
|
||||||
.object({
|
.object({
|
||||||
groups: azureGroupConfigsSchema,
|
groups: azureGroupConfigsSchema,
|
||||||
plugins: z.boolean().optional(),
|
plugins: z.boolean().optional(),
|
||||||
|
assistants: z.boolean().optional(),
|
||||||
})
|
})
|
||||||
.and(
|
.and(
|
||||||
endpointSchema
|
endpointSchema
|
||||||
|
@ -288,14 +320,6 @@ export const defaultModels = {
|
||||||
],
|
],
|
||||||
};
|
};
|
||||||
|
|
||||||
export const supportsRetrieval = new Set([
|
|
||||||
'gpt-3.5-turbo-0125',
|
|
||||||
'gpt-4-0125-preview',
|
|
||||||
'gpt-4-turbo-preview',
|
|
||||||
'gpt-4-1106-preview',
|
|
||||||
'gpt-3.5-turbo-1106',
|
|
||||||
]);
|
|
||||||
|
|
||||||
export const EndpointURLs: { [key in EModelEndpoint]: string } = {
|
export const EndpointURLs: { [key in EModelEndpoint]: string } = {
|
||||||
[EModelEndpoint.openAI]: `/api/ask/${EModelEndpoint.openAI}`,
|
[EModelEndpoint.openAI]: `/api/ask/${EModelEndpoint.openAI}`,
|
||||||
[EModelEndpoint.bingAI]: `/api/ask/${EModelEndpoint.bingAI}`,
|
[EModelEndpoint.bingAI]: `/api/ask/${EModelEndpoint.bingAI}`,
|
||||||
|
@ -485,7 +509,7 @@ export enum Constants {
|
||||||
/**
|
/**
|
||||||
* Key for the Custom Config's version (librechat.yaml).
|
* Key for the Custom Config's version (librechat.yaml).
|
||||||
*/
|
*/
|
||||||
CONFIG_VERSION = '1.0.4',
|
CONFIG_VERSION = '1.0.5',
|
||||||
/**
|
/**
|
||||||
* Standard value for the first message's `parentMessageId` value, to indicate no parent exists.
|
* Standard value for the first message's `parentMessageId` value, to indicate no parent exists.
|
||||||
*/
|
*/
|
||||||
|
|
|
@ -186,8 +186,8 @@ export const updateAssistant = (
|
||||||
return request.patch(endpoints.assistants(assistant_id), data);
|
return request.patch(endpoints.assistants(assistant_id), data);
|
||||||
};
|
};
|
||||||
|
|
||||||
export const deleteAssistant = (assistant_id: string): Promise<void> => {
|
export const deleteAssistant = (assistant_id: string, model: string): Promise<void> => {
|
||||||
return request.delete(endpoints.assistants(assistant_id));
|
return request.delete(endpoints.assistants(assistant_id, { model }));
|
||||||
};
|
};
|
||||||
|
|
||||||
export const listAssistants = (
|
export const listAssistants = (
|
||||||
|
@ -225,7 +225,10 @@ export const uploadAvatar = (data: FormData): Promise<f.AvatarUploadResponse> =>
|
||||||
};
|
};
|
||||||
|
|
||||||
export const uploadAssistantAvatar = (data: m.AssistantAvatarVariables): Promise<a.Assistant> => {
|
export const uploadAssistantAvatar = (data: m.AssistantAvatarVariables): Promise<a.Assistant> => {
|
||||||
return request.postMultiPart(endpoints.assistants(`avatar/${data.assistant_id}`), data.formData);
|
return request.postMultiPart(
|
||||||
|
endpoints.assistants(`avatar/${data.assistant_id}`, { model: data.model }),
|
||||||
|
data.formData,
|
||||||
|
);
|
||||||
};
|
};
|
||||||
|
|
||||||
export const updateAction = (data: m.UpdateActionVariables): Promise<m.UpdateActionResponse> => {
|
export const updateAction = (data: m.UpdateActionVariables): Promise<m.UpdateActionResponse> => {
|
||||||
|
|
|
@ -146,6 +146,8 @@ export type TConfig = {
|
||||||
userProvide?: boolean | null;
|
userProvide?: boolean | null;
|
||||||
userProvideURL?: boolean | null;
|
userProvideURL?: boolean | null;
|
||||||
disableBuilder?: boolean;
|
disableBuilder?: boolean;
|
||||||
|
retrievalModels?: string[];
|
||||||
|
capabilities?: string[];
|
||||||
};
|
};
|
||||||
|
|
||||||
export type TEndpointsConfig =
|
export type TEndpointsConfig =
|
||||||
|
|
|
@ -46,6 +46,7 @@ export type LogoutOptions = {
|
||||||
|
|
||||||
export type AssistantAvatarVariables = {
|
export type AssistantAvatarVariables = {
|
||||||
assistant_id: string;
|
assistant_id: string;
|
||||||
|
model: string;
|
||||||
formData: FormData;
|
formData: FormData;
|
||||||
postCreation?: boolean;
|
postCreation?: boolean;
|
||||||
};
|
};
|
||||||
|
@ -86,6 +87,8 @@ export type UpdateAssistantMutationOptions = {
|
||||||
) => void;
|
) => void;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
export type DeleteAssistantBody = { assistant_id: string; model: string };
|
||||||
|
|
||||||
export type DeleteAssistantMutationOptions = {
|
export type DeleteAssistantMutationOptions = {
|
||||||
onSuccess?: (data: void, variables: { assistant_id: string }, context?: unknown) => void;
|
onSuccess?: (data: void, variables: { assistant_id: string }, context?: unknown) => void;
|
||||||
onMutate?: (variables: { assistant_id: string }) => void | Promise<unknown>;
|
onMutate?: (variables: { assistant_id: string }) => void | Promise<unknown>;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue