🤖 feat: OpenAI Assistants v2 (initial support) (#2781)

* 🤖 Assistants V2 Support: Part 1

- Separated Azure Assistants into its own endpoint
- File Search / Vector Store integration is incomplete, but storage can be toggled and used from the playground
- Code Interpreter resource files can be added but not deleted
- GPT-4o is supported
- Many improvements to the Assistants Endpoint overall

data-provider v2 changes

copy existing route as v1

chore: rename new endpoint to reduce comparison operations and add new azure filesource

api: add azureAssistants part 1

force use of version for assistants/assistantsAzure

chore: switch name back to azureAssistants

refactor type version: string | number

Ensure assistants endpoints have version set
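
A minimal sketch of how an endpoint's version might be resolved, assuming a per-endpoint default map like the `defaultAssistantsVersion` value imported in the config diff further down (the resolver function itself is illustrative, not the actual implementation):

    const { defaultAssistantsVersion } = require('librechat-data-provider');

    // Resolve the Assistants API version for an endpoint, preferring an explicit
    // config value and falling back to the per-endpoint default.
    function resolveAssistantsVersion(endpoint, endpointConfig = {}) {
      return endpointConfig.version ?? defaultAssistantsVersion[endpoint];
    }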

fix: isArchived type issue in ConversationListParams

refactor: update assistants mutations/queries with endpoint/version definitions, update Assistants Map structure
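
Illustrative sketch of the updated map structure (names are examples, not the real type definitions): assistants are grouped by endpoint first, then keyed by assistant id:

    const assistantsMap = {
      assistants: { asst_openai123: { id: 'asst_openai123', name: 'OpenAI Helper' } },
      azureAssistants: { asst_azure456: { id: 'asst_azure456', name: 'Azure Helper' } },
    };

    // Lookups become endpoint-aware:
    const getAssistant = (map, endpoint, assistant_id) => map[endpoint]?.[assistant_id];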

chore: FilePreview component ExtendedFile type assertion

feat: isAssistantsEndpoint helper
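
Sketch of the helper's assumed shape (the actual implementation lives in librechat-data-provider and may differ):

    const assistantsEndpoints = new Set(['assistants', 'azureAssistants']);

    /** Returns true when the given endpoint is any assistants-style endpoint. */
    function isAssistantsEndpoint(endpoint) {
      return endpoint != null && assistantsEndpoints.has(endpoint);
    }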

chore: remove unused useGenerations

chore(buildTree): type issue

chore(Advanced): type issue (unused component, maybe in future)

first pass for multi-assistant endpoint rewrite

fix(listAssistants): pass params correctly

feat: list separate assistants by endpoint

fix(useTextarea): access assistantMap correctly

fix: assistant endpoint switching, resetting ID

fix: broken during rewrite, selecting assistant mention

fix: set/invalidate assistants endpoint query data correctly

feat: Fix issue with assistant ID not being reset correctly

getOpenAIClient helper function
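
getOpenAIClient appears later in the file-processing diff with `{ req, overrideEndpoint }`; a hedged sketch of what such a helper could look like (module paths and the version lookup are assumptions — the actual helpers file is not shown in this diff):

    const { EModelEndpoint } = require('librechat-data-provider');
    const azureAssistants = require('~/server/services/Endpoints/azureAssistants');
    const assistants = require('~/server/services/Endpoints/assistants');

    async function getOpenAIClient({ req, res, endpointOption, overrideEndpoint }) {
      const endpoint = overrideEndpoint ?? req.body.endpoint;
      // assumption: the resolved version is stored on the endpoint's app locals
      const version = req.app.locals[endpoint]?.version;
      const initialize =
        endpoint === EModelEndpoint.azureAssistants
          ? azureAssistants.initializeClient
          : assistants.initializeClient;
      return initialize({ req, res, version, endpointOption });
    }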

feat: add toast for assistant deletion

fix: azure issue where assistants were deleted right after creation

fix: assistant patching

refactor: actions to use getOpenAIClient

refactor: consolidate logic into helpers file

fix: issue where conversation data was not initially available

v1 chat support

refactor(spendTokens): only early return if completionTokens isNaN

fix(OpenAIClient): ensure spendTokens has all necessary params

refactor: route/controller logic

fix(assistants/initializeClient): use defaultHeaders field

fix: sanitize default operation id

chore: bump openai package

first pass v2 action service

feat: retroactive domain parsing for actions added via v1
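
For context, a hedged usage sketch of `domainParser` (its signature appears in the Actions diff below); `inverse = true` decodes a value produced by a prior encoding pass:

    // Encode when saving an action (periods/long names violate the tool-name
    // regex), decode with inverse=true when resolving the original domain.
    async function roundTripExample(req) {
      const encoded = await domainParser(req, 'api.example.com', false);
      const original = await domainParser(req, encoded, true);
      return original; // expected: 'api.example.com'
    }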

feat: delete db records of actions/assistants on openai assistant deletion

chore: remove vision tools from v2 assistants

feat: v2 upload and delete assistant vision images

WIP first pass, thread attachments

fix: show assistant vision files (save local/firebase copy)

v2 image continue

fix: annotations

fix: refine annotations
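
A hedged usage sketch of the closure-based replacement introduced in the Threads diff below (`createReplaceAnnotation`): the internal offset keeps later annotation ranges valid after earlier replacements shrink or grow the text:

    const replaceAnnotation = createReplaceAnnotation();
    let text = 'See [source] and [other].';
    // First annotation: indices 4-12 cover '[source]'.
    text = replaceAnnotation({
      currentText: text,
      start_index: 4,
      end_index: 12,
      expectedText: '[source]',
      replacementText: '^1^',
    });
    // Second annotation still uses the original indices (17-24); the closure
    // shifts them by the accumulated offset before slicing.
    text = replaceAnnotation({
      currentText: text,
      start_index: 17,
      end_index: 24,
      expectedText: '[other]',
      replacementText: '^2^',
    });
    // text === 'See ^1^ and ^2^.'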

show analyze as an error if no longer submitting before progress reaches 1, and show file_search as a retrieval tool

fix: abort run, undefined endpoint issue

refactor: consolidate capabilities logic and anticipate versioning

frontend version 2 changes

fix: query selection and filter

add endpoint to unknown filepath

add file ids to tool resource; deletion still in progress

enable/disable file search

remove version log

* 🤖 Assistants V2 Support: Part 2

🎹 fix: Autocompletion Chrome Bug on Action API Key Input

chore: remove `useOriginNavigate`

chore: set correct OpenAI Storage Source

fix: azure file deletions, instantiate clients by source for deletion

update code interpreter files info

feat: deleteResourceFileId
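
A hedged sketch of what `deleteResourceFileId` might do (the real controller in `~/server/controllers/assistants/v2` is not shown in this diff): drop a file id from the assistant's tool_resources and persist the change:

    // Assumes a file_ids-based resource such as code_interpreter.
    async function deleteResourceFileId({ openai, assistant_id, file_id, tool_resource }) {
      const assistant = await openai.beta.assistants.retrieve(assistant_id);
      const toolResources = assistant.tool_resources ?? {};
      const resource = toolResources[tool_resource] ?? {};
      const file_ids = (resource.file_ids ?? []).filter((id) => id !== file_id);
      return openai.beta.assistants.update(assistant_id, {
        tool_resources: { ...toolResources, [tool_resource]: { ...resource, file_ids } },
      });
    }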

chore: increase poll interval as azure easily rate limits

fix: openai file deletions; TODO: evaluate rejected promises among the settled deletions to determine which to remove from db records

file source icons

update table file filters

chore: file search info and versioning

fix: retrieval update with necessary tool_resources if specified

fix(useMentions): add optional chaining in case listMap value is undefined

fix: force assistant avatar roundedness

fix: azure assistants, check correct flag

chore: bump data-provider

* fix: merge conflict

* ci: fix backend tests due to new updates

* chore: update .env.example

* meilisearch improvements

* localization updates

* chore: update comparisons

* feat: add additional metadata: endpoint, author ID

* chore: azureAssistants ENDPOINTS exclusion warning
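
Illustrative .env excerpt covering the new variables and the ENDPOINTS inclusion mentioned above (values are placeholders; variable names match the EndpointService diff below):

    # Existing OpenAI Assistants endpoint
    ASSISTANTS_API_KEY=user_provided
    # New, separate Azure Assistants endpoint
    AZURE_ASSISTANTS_API_KEY=user_provided
    AZURE_ASSISTANTS_BASE_URL=https://your-resource.openai.azure.com/openai
    # Include azureAssistants so the endpoint is not excluded (see warning above)
    ENDPOINTS=openAI,assistants,azureAssistants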
Danny Avila 2024-05-19 12:56:55 -04:00 committed by GitHub
parent af8bcb08d6
commit 1a452121fa
158 changed files with 4184 additions and 1204 deletions

@ -1,20 +1,59 @@
const {
AuthTypeEnum,
EModelEndpoint,
actionDomainSeparator,
CacheKeys,
Constants,
AuthTypeEnum,
actionDelimiter,
isImageVisionTool,
actionDomainSeparator,
} = require('librechat-data-provider');
const { encryptV2, decryptV2 } = require('~/server/utils/crypto');
const { getActions } = require('~/models/Action');
const { getActions, deleteActions } = require('~/models/Action');
const { deleteAssistant } = require('~/models/Assistant');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');
const toolNameRegex = /^[a-zA-Z0-9_-]+$/;
/**
* Validates tool name against regex pattern and updates if necessary.
* @param {object} params - The parameters for the function.
* @param {object} params.req - Express Request.
* @param {FunctionTool} params.tool - The tool object.
* @param {string} params.assistant_id - The assistant ID
* @returns {object|null} - Updated tool object or null if invalid and not an action.
*/
const validateAndUpdateTool = async ({ req, tool, assistant_id }) => {
let actions;
if (isImageVisionTool(tool)) {
return null;
}
if (!toolNameRegex.test(tool.function.name)) {
const [functionName, domain] = tool.function.name.split(actionDelimiter);
actions = await getActions({ assistant_id, user: req.user.id }, true);
const matchingActions = actions.filter((action) => {
const metadata = action.metadata;
return metadata && metadata.domain === domain;
});
const action = matchingActions[0];
if (!action) {
return null;
}
const parsedDomain = await domainParser(req, domain, true);
if (!parsedDomain) {
return null;
}
tool.function.name = `${functionName}${actionDelimiter}${parsedDomain}`;
}
return tool;
};
/**
* Encodes or decodes a domain name to/from base64, replacing periods with a custom separator.
*
* Necessary because Azure OpenAI Assistants API doesn't support periods in function
* names due to `[a-zA-Z0-9_-]*` Regex Validation, limited to a 64-character maximum.
* Necessary due to `[a-zA-Z0-9_-]*` Regex Validation, limited to a 64-character maximum.
*
* @param {Express.Request} req - The Express Request object.
* @param {string} domain - The domain name to encode/decode.
@ -26,10 +65,6 @@ async function domainParser(req, domain, inverse = false) {
return;
}
if (!req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
return domain;
}
const domainsCache = getLogStores(CacheKeys.ENCODED_DOMAINS);
const cachedDomain = await domainsCache.get(domain);
if (inverse && cachedDomain) {
@ -170,10 +205,29 @@ function decryptMetadata(metadata) {
return decryptedMetadata;
}
/**
* Deletes an action and its corresponding assistant.
* @param {Object} params - The parameters for the function.
* @param {Express.Request} params.req - The Express Request object.
* @param {string} params.assistant_id - The ID of the assistant.
*/
const deleteAssistantActions = async ({ req, assistant_id }) => {
try {
await deleteActions({ assistant_id, user: req.user.id });
await deleteAssistant({ assistant_id, user: req.user.id });
} catch (error) {
const message = 'Trouble deleting Assistant Actions for Assistant ID: ' + assistant_id;
logger.error(message, error);
throw new Error(message);
}
};
module.exports = {
loadActionSets,
deleteAssistantActions,
validateAndUpdateTool,
createActionTool,
encryptMetadata,
decryptMetadata,
loadActionSets,
domainParser,
};

@ -73,12 +73,12 @@ describe('domainParser', () => {
const TLD = '.com';
// Non-azure request
it('returns domain as is if not azure', async () => {
it('does not return domain as is if not azure', async () => {
const domain = `example.com${actionDomainSeparator}test${actionDomainSeparator}`;
const result1 = await domainParser(reqNoAzure, domain, false);
const result2 = await domainParser(reqNoAzure, domain, true);
expect(result1).toEqual(domain);
expect(result2).toEqual(domain);
expect(result1).not.toEqual(domain);
expect(result2).not.toEqual(domain);
});
// Test for Empty or Null Inputs

@ -72,7 +72,14 @@ const AppService = async (app) => {
}
if (config?.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
endpointLocals[EModelEndpoint.assistants] = azureAssistantsDefaults();
endpointLocals[EModelEndpoint.azureAssistants] = azureAssistantsDefaults();
}
if (config?.endpoints?.[EModelEndpoint.azureAssistants]) {
endpointLocals[EModelEndpoint.azureAssistants] = assistantsConfigSetup(
config,
endpointLocals[EModelEndpoint.azureAssistants],
);
}
if (config?.endpoints?.[EModelEndpoint.assistants]) {

@ -253,8 +253,8 @@ describe('AppService', () => {
process.env.EASTUS_API_KEY = 'eastus-key';
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.assistants);
expect(app.locals[EModelEndpoint.assistants].capabilities.length).toEqual(3);
expect(app.locals).toHaveProperty(EModelEndpoint.azureAssistants);
expect(app.locals[EModelEndpoint.azureAssistants].capabilities.length).toEqual(3);
});
it('should correctly configure Azure OpenAI endpoint based on custom config', async () => {

@ -78,7 +78,7 @@ async function createOnTextProgress({
* @return {Promise<OpenAIAssistantFinish | OpenAIAssistantAction[] | ThreadMessage[] | RequiredActionFunctionToolCall[]>}
*/
async function getResponse({ openai, run_id, thread_id }) {
const run = await waitForRun({ openai, run_id, thread_id, pollIntervalMs: 500 });
const run = await waitForRun({ openai, run_id, thread_id, pollIntervalMs: 2000 });
if (run.status === RunStatus.COMPLETED) {
const messages = await openai.beta.threads.messages.list(thread_id, defaultOrderQuery);
@ -393,8 +393,9 @@ async function runAssistant({
},
});
const { endpoint = EModelEndpoint.azureAssistants } = openai.req.body;
/** @type {TCustomConfig.endpoints.assistants} */
const assistantsEndpointConfig = openai.req.app.locals?.[EModelEndpoint.assistants] ?? {};
const assistantsEndpointConfig = openai.req.app.locals?.[endpoint] ?? {};
const { pollIntervalMs, timeoutMs } = assistantsEndpointConfig;
const run = await waitForRun({

@ -3,6 +3,7 @@ const { isUserProvided, generateConfig } = require('~/server/utils');
const {
OPENAI_API_KEY: openAIApiKey,
AZURE_ASSISTANTS_API_KEY: azureAssistantsApiKey,
ASSISTANTS_API_KEY: assistantsApiKey,
AZURE_API_KEY: azureOpenAIApiKey,
ANTHROPIC_API_KEY: anthropicApiKey,
@ -13,6 +14,7 @@ const {
OPENAI_REVERSE_PROXY,
AZURE_OPENAI_BASEURL,
ASSISTANTS_BASE_URL,
AZURE_ASSISTANTS_BASE_URL,
} = process.env ?? {};
const useAzurePlugins = !!PLUGINS_USE_AZURE;
@ -28,11 +30,20 @@ module.exports = {
useAzurePlugins,
userProvidedOpenAI,
googleKey,
[EModelEndpoint.openAI]: generateConfig(openAIApiKey, OPENAI_REVERSE_PROXY),
[EModelEndpoint.assistants]: generateConfig(assistantsApiKey, ASSISTANTS_BASE_URL, true),
[EModelEndpoint.azureOpenAI]: generateConfig(azureOpenAIApiKey, AZURE_OPENAI_BASEURL),
[EModelEndpoint.chatGPTBrowser]: generateConfig(chatGPTToken),
[EModelEndpoint.anthropic]: generateConfig(anthropicApiKey),
[EModelEndpoint.bingAI]: generateConfig(bingToken),
[EModelEndpoint.anthropic]: generateConfig(anthropicApiKey),
[EModelEndpoint.chatGPTBrowser]: generateConfig(chatGPTToken),
[EModelEndpoint.openAI]: generateConfig(openAIApiKey, OPENAI_REVERSE_PROXY),
[EModelEndpoint.azureOpenAI]: generateConfig(azureOpenAIApiKey, AZURE_OPENAI_BASEURL),
[EModelEndpoint.assistants]: generateConfig(
assistantsApiKey,
ASSISTANTS_BASE_URL,
EModelEndpoint.assistants,
),
[EModelEndpoint.azureAssistants]: generateConfig(
azureAssistantsApiKey,
AZURE_ASSISTANTS_BASE_URL,
EModelEndpoint.azureAssistants,
),
},
};

@ -53,7 +53,7 @@ async function loadConfigEndpoints(req) {
if (req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
/** @type {Omit<TConfig, 'order'>} */
endpointsConfig[EModelEndpoint.assistants] = {
endpointsConfig[EModelEndpoint.azureAssistants] = {
userProvide: false,
};
}

@ -30,7 +30,7 @@ async function loadConfigModels(req) {
}
if (azureEndpoint?.assistants && azureConfig.assistantModels) {
modelsConfig[EModelEndpoint.assistants] = azureConfig.assistantModels;
modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels;
}
if (!Array.isArray(endpoints[EModelEndpoint.custom])) {

@ -9,13 +9,15 @@ const { config } = require('./EndpointService');
*/
async function loadDefaultEndpointsConfig(req) {
const { google, gptPlugins } = await loadAsyncEndpoints(req);
const { openAI, assistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config;
const { openAI, assistants, azureAssistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } =
config;
const enabledEndpoints = getEnabledEndpoints();
const endpointConfig = {
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.assistants]: assistants,
[EModelEndpoint.azureAssistants]: azureAssistants,
[EModelEndpoint.azureOpenAI]: azureOpenAI,
[EModelEndpoint.google]: google,
[EModelEndpoint.bingAI]: bingAI,

@ -25,6 +25,7 @@ async function loadDefaultModels(req) {
plugins: true,
});
const assistants = await getOpenAIModels({ assistants: true });
const azureAssistants = await getOpenAIModels({ azureAssistants: true });
return {
[EModelEndpoint.openAI]: openAI,
@ -35,6 +36,7 @@ async function loadDefaultModels(req) {
[EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
[EModelEndpoint.assistants]: assistants,
[EModelEndpoint.azureAssistants]: azureAssistants,
};
}

@ -2,95 +2,8 @@ const addTitle = require('./addTitle');
const buildOptions = require('./buildOptions');
const initializeClient = require('./initializeClient');
/**
* Asynchronously lists assistants based on provided query parameters.
*
* Initializes the client with the current request and response objects and lists assistants
* according to the query parameters. This function abstracts the logic for non-Azure paths.
*
* @async
* @param {object} params - The parameters object.
* @param {object} params.req - The request object, used for initializing the client.
* @param {object} params.res - The response object, used for initializing the client.
* @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
* @returns {Promise<object>} A promise that resolves to the response from the `openai.beta.assistants.list` method call.
*/
const listAssistants = async ({ req, res, query }) => {
const { openai } = await initializeClient({ req, res });
return openai.beta.assistants.list(query);
};
/**
* Asynchronously lists assistants for Azure configured groups.
*
* Iterates through Azure configured assistant groups, initializes the client with the current request and response objects,
* lists assistants based on the provided query parameters, and merges their data alongside the model information into a single array.
*
* @async
* @param {object} params - The parameters object.
* @param {object} params.req - The request object, used for initializing the client and manipulating the request body.
* @param {object} params.res - The response object, used for initializing the client.
* @param {TAzureConfig} params.azureConfig - The Azure configuration object containing assistantGroups and groupMap.
* @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
* @returns {Promise<AssistantListResponse>} A promise that resolves to an array of assistant data merged with their respective model information.
*/
const listAssistantsForAzure = async ({ req, res, azureConfig = {}, query }) => {
/** @type {Array<[string, TAzureModelConfig]>} */
const groupModelTuples = [];
const promises = [];
/** @type {Array<TAzureGroup>} */
const groups = [];
const { groupMap, assistantGroups } = azureConfig;
for (const groupName of assistantGroups) {
const group = groupMap[groupName];
groups.push(group);
const currentModelTuples = Object.entries(group?.models);
groupModelTuples.push(currentModelTuples);
/* The specified model is only necessary to
fetch assistants for the shared instance */
req.body.model = currentModelTuples[0][0];
promises.push(listAssistants({ req, res, query }));
}
const resolvedQueries = await Promise.all(promises);
const data = resolvedQueries.flatMap((res, i) =>
res.data.map((assistant) => {
const deploymentName = assistant.model;
const currentGroup = groups[i];
const currentModelTuples = groupModelTuples[i];
const firstModel = currentModelTuples[0][0];
if (currentGroup.deploymentName === deploymentName) {
return { ...assistant, model: firstModel };
}
for (const [model, modelConfig] of currentModelTuples) {
if (modelConfig.deploymentName === deploymentName) {
return { ...assistant, model };
}
}
return { ...assistant, model: firstModel };
}),
);
return {
first_id: data[0]?.id,
last_id: data[data.length - 1]?.id,
object: 'list',
has_more: false,
data,
};
};
module.exports = {
addTitle,
buildOptions,
initializeClient,
listAssistants,
listAssistantsForAzure,
};

@ -1,11 +1,6 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
ErrorTypes,
EModelEndpoint,
resolveHeaders,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { ErrorTypes, EModelEndpoint } = require('librechat-data-provider');
const {
getUserKeyValues,
getUserKeyExpiry,
@ -13,9 +8,8 @@ const {
} = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
const { constructAzureURL } = require('~/utils');
const initializeClient = async ({ req, res, endpointOption, initAppClient = false }) => {
const initializeClient = async ({ req, res, endpointOption, version, initAppClient = false }) => {
const { PROXY, OPENAI_ORGANIZATION, ASSISTANTS_API_KEY, ASSISTANTS_BASE_URL } = process.env;
const userProvidesKey = isUserProvided(ASSISTANTS_API_KEY);
@ -34,7 +28,11 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
let apiKey = userProvidesKey ? userValues.apiKey : ASSISTANTS_API_KEY;
let baseURL = userProvidesURL ? userValues.baseURL : ASSISTANTS_BASE_URL;
const opts = {};
const opts = {
defaultHeaders: {
'OpenAI-Beta': `assistants=${version}`,
},
};
const clientOptions = {
reverseProxyUrl: baseURL ?? null,
@ -44,54 +42,6 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
...endpointOption,
};
/** @type {TAzureConfig | undefined} */
const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
/** @type {AzureOptions | undefined} */
let azureOptions;
if (azureConfig && azureConfig.assistants) {
const { modelGroupMap, groupMap, assistantModels } = azureConfig;
const modelName = req.body.model ?? req.query.model ?? assistantModels[0];
const {
azureOptions: currentOptions,
baseURL: azureBaseURL,
headers = {},
serverless,
} = mapModelToAzureConfig({
modelName,
modelGroupMap,
groupMap,
});
azureOptions = currentOptions;
baseURL = constructAzureURL({
baseURL: azureBaseURL ?? 'https://${INSTANCE_NAME}.openai.azure.com/openai',
azureOptions,
});
apiKey = azureOptions.azureOpenAIApiKey;
opts.defaultQuery = { 'api-version': azureOptions.azureOpenAIApiVersion };
opts.defaultHeaders = resolveHeaders({ ...headers, 'api-key': apiKey });
opts.model = azureOptions.azureOpenAIApiDeploymentName;
if (initAppClient) {
clientOptions.titleConvo = azureConfig.titleConvo;
clientOptions.titleModel = azureConfig.titleModel;
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
const groupName = modelGroupMap[modelName].group;
clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = opts.defaultHeaders;
clientOptions.azure = !serverless && azureOptions;
}
}
if (userProvidesKey && !apiKey) {
throw new Error(
JSON.stringify({
@ -125,10 +75,6 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
openai.req = req;
openai.res = res;
if (azureOptions) {
openai.locals = { ...(openai.locals ?? {}), azureOptions };
}
if (endpointOption && initAppClient) {
const client = new OpenAIClient(apiKey, clientOptions);
return {

@ -0,0 +1,19 @@
const buildOptions = (endpoint, parsedBody) => {
// eslint-disable-next-line no-unused-vars
const { promptPrefix, assistant_id, iconURL, greeting, spec, ...rest } = parsedBody;
const endpointOption = {
endpoint,
promptPrefix,
assistant_id,
iconURL,
greeting,
spec,
modelOptions: {
...rest,
},
};
return endpointOption;
};
module.exports = buildOptions;

@ -0,0 +1,7 @@
const buildOptions = require('./buildOptions');
const initializeClient = require('./initializeClient');
module.exports = {
buildOptions,
initializeClient,
};

@ -0,0 +1,195 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
ErrorTypes,
EModelEndpoint,
resolveHeaders,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const {
getUserKeyValues,
getUserKeyExpiry,
checkUserKeyExpiry,
} = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
const { constructAzureURL } = require('~/utils');
class Files {
constructor(client) {
this._client = client;
}
/**
* Create an assistant file by attaching a
* [File](https://platform.openai.com/docs/api-reference/files) to an
* [assistant](https://platform.openai.com/docs/api-reference/assistants).
*/
create(assistantId, body, options) {
return this._client.post(`/assistants/${assistantId}/files`, {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
});
}
/**
* Retrieves an AssistantFile.
*/
retrieve(assistantId, fileId, options) {
return this._client.get(`/assistants/${assistantId}/files/${fileId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
});
}
/**
* Delete an assistant file.
*/
del(assistantId, fileId, options) {
return this._client.delete(`/assistants/${assistantId}/files/${fileId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
});
}
}
const initializeClient = async ({ req, res, version, endpointOption, initAppClient = false }) => {
const { PROXY, OPENAI_ORGANIZATION, AZURE_ASSISTANTS_API_KEY, AZURE_ASSISTANTS_BASE_URL } =
process.env;
const userProvidesKey = isUserProvided(AZURE_ASSISTANTS_API_KEY);
const userProvidesURL = isUserProvided(AZURE_ASSISTANTS_BASE_URL);
let userValues = null;
if (userProvidesKey || userProvidesURL) {
const expiresAt = await getUserKeyExpiry({
userId: req.user.id,
name: EModelEndpoint.azureAssistants,
});
checkUserKeyExpiry(expiresAt, EModelEndpoint.azureAssistants);
userValues = await getUserKeyValues({
userId: req.user.id,
name: EModelEndpoint.azureAssistants,
});
}
let apiKey = userProvidesKey ? userValues.apiKey : AZURE_ASSISTANTS_API_KEY;
let baseURL = userProvidesURL ? userValues.baseURL : AZURE_ASSISTANTS_BASE_URL;
const opts = {};
const clientOptions = {
reverseProxyUrl: baseURL ?? null,
proxy: PROXY ?? null,
req,
res,
...endpointOption,
};
/** @type {TAzureConfig | undefined} */
const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
/** @type {AzureOptions | undefined} */
let azureOptions;
if (azureConfig && azureConfig.assistants) {
const { modelGroupMap, groupMap, assistantModels } = azureConfig;
const modelName = req.body.model ?? req.query.model ?? assistantModels[0];
const {
azureOptions: currentOptions,
baseURL: azureBaseURL,
headers = {},
serverless,
} = mapModelToAzureConfig({
modelName,
modelGroupMap,
groupMap,
});
azureOptions = currentOptions;
baseURL = constructAzureURL({
baseURL: azureBaseURL ?? 'https://${INSTANCE_NAME}.openai.azure.com/openai',
azureOptions,
});
apiKey = azureOptions.azureOpenAIApiKey;
opts.defaultQuery = { 'api-version': azureOptions.azureOpenAIApiVersion };
opts.defaultHeaders = resolveHeaders({
...headers,
'api-key': apiKey,
'OpenAI-Beta': `assistants=${version}`,
});
opts.model = azureOptions.azureOpenAIApiDeploymentName;
if (initAppClient) {
clientOptions.titleConvo = azureConfig.titleConvo;
clientOptions.titleModel = azureConfig.titleModel;
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
const groupName = modelGroupMap[modelName].group;
clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = opts.defaultHeaders;
clientOptions.azure = !serverless && azureOptions;
}
}
if (userProvidesKey && !apiKey) {
throw new Error(
JSON.stringify({
type: ErrorTypes.NO_USER_KEY,
}),
);
}
if (!apiKey) {
throw new Error('Assistants API key not provided. Please provide it again.');
}
if (baseURL) {
opts.baseURL = baseURL;
}
if (PROXY) {
opts.httpAgent = new HttpsProxyAgent(PROXY);
}
if (OPENAI_ORGANIZATION) {
opts.organization = OPENAI_ORGANIZATION;
}
/** @type {OpenAIClient} */
const openai = new OpenAI({
apiKey,
...opts,
});
openai.beta.assistants.files = new Files(openai);
openai.req = req;
openai.res = res;
if (azureOptions) {
openai.locals = { ...(openai.locals ?? {}), azureOptions };
}
if (endpointOption && initAppClient) {
const client = new OpenAIClient(apiKey, clientOptions);
return {
client,
openai,
openAIApiKey: apiKey,
};
}
return {
openai,
openAIApiKey: apiKey,
};
};
module.exports = initializeClient;

@ -0,0 +1,112 @@
// const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { ErrorTypes } = require('librechat-data-provider');
const { getUserKey, getUserKeyExpiry, getUserKeyValues } = require('~/server/services/UserService');
const initializeClient = require('./initializeClient');
// const { OpenAIClient } = require('~/app');
jest.mock('~/server/services/UserService', () => ({
getUserKey: jest.fn(),
getUserKeyExpiry: jest.fn(),
getUserKeyValues: jest.fn(),
checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry,
}));
const today = new Date();
const tenDaysFromToday = new Date(today.setDate(today.getDate() + 10));
const isoString = tenDaysFromToday.toISOString();
describe('initializeClient', () => {
// Set up environment variables
const originalEnvironment = process.env;
const app = {
locals: {},
};
beforeEach(() => {
jest.resetModules(); // Clears the cache
process.env = { ...originalEnvironment }; // Make a copy
});
afterAll(() => {
process.env = originalEnvironment; // Restore original env vars
});
test('initializes OpenAI client with default API key and URL', async () => {
process.env.AZURE_ASSISTANTS_API_KEY = 'default-api-key';
process.env.AZURE_ASSISTANTS_BASE_URL = 'https://default.api.url';
// Assuming 'isUserProvided' to return false for this test case
jest.mock('~/server/utils', () => ({
isUserProvided: jest.fn().mockReturnValueOnce(false),
}));
const req = { user: { id: 'user123' }, app };
const res = {};
const { openai, openAIApiKey } = await initializeClient({ req, res });
expect(openai.apiKey).toBe('default-api-key');
expect(openAIApiKey).toBe('default-api-key');
expect(openai.baseURL).toBe('https://default.api.url');
});
test('initializes OpenAI client with user-provided API key and URL', async () => {
process.env.AZURE_ASSISTANTS_API_KEY = 'user_provided';
process.env.AZURE_ASSISTANTS_BASE_URL = 'user_provided';
getUserKeyValues.mockResolvedValue({ apiKey: 'user-api-key', baseURL: 'https://user.api.url' });
getUserKeyExpiry.mockResolvedValue(isoString);
const req = { user: { id: 'user123' }, app };
const res = {};
const { openai, openAIApiKey } = await initializeClient({ req, res });
expect(openAIApiKey).toBe('user-api-key');
expect(openai.apiKey).toBe('user-api-key');
expect(openai.baseURL).toBe('https://user.api.url');
});
test('throws error for invalid JSON in user-provided values', async () => {
process.env.AZURE_ASSISTANTS_API_KEY = 'user_provided';
getUserKey.mockResolvedValue('invalid-json');
getUserKeyExpiry.mockResolvedValue(isoString);
getUserKeyValues.mockImplementation(() => {
let userValues = getUserKey();
try {
userValues = JSON.parse(userValues);
} catch (e) {
throw new Error(
JSON.stringify({
type: ErrorTypes.INVALID_USER_KEY,
}),
);
}
return userValues;
});
const req = { user: { id: 'user123' } };
const res = {};
await expect(initializeClient({ req, res })).rejects.toThrow(/invalid_user_key/);
});
test('throws error if API key is not provided', async () => {
delete process.env.AZURE_ASSISTANTS_API_KEY; // Simulate missing API key
const req = { user: { id: 'user123' }, app };
const res = {};
await expect(initializeClient({ req, res })).rejects.toThrow(/Assistants API key not/);
});
test('initializes OpenAI client with proxy configuration', async () => {
process.env.AZURE_ASSISTANTS_API_KEY = 'test-key';
process.env.PROXY = 'http://proxy.server';
const req = { user: { id: 'user123' }, app };
const res = {};
const { openai } = await initializeClient({ req, res });
expect(openai.httpAgent).toBeInstanceOf(HttpsProxyAgent);
});
});

@ -180,7 +180,15 @@ const deleteFirebaseFile = async (req, file) => {
if (!fileName.includes(req.user.id)) {
throw new Error('Invalid file path');
}
await deleteFile('', fileName);
try {
await deleteFile('', fileName);
} catch (error) {
logger.error('Error deleting file from Firebase:', error);
if (error.code === 'storage/object-not-found') {
return;
}
throw error;
}
};
/**

@ -14,9 +14,11 @@ const { logger } = require('~/config');
* @returns {Promise<OpenAIFile>}
*/
async function uploadOpenAIFile({ req, file, openai }) {
const { height, width } = req.body;
const isImage = height && width;
const uploadedFile = await openai.files.create({
file: fs.createReadStream(file.path),
purpose: FilePurpose.Assistants,
purpose: isImage ? FilePurpose.Vision : FilePurpose.Assistants,
});
logger.debug(
@ -34,7 +36,7 @@ async function uploadOpenAIFile({ req, file, openai }) {
await sleep(sleepTime);
}
return uploadedFile;
return isImage ? { ...uploadedFile, height, width } : uploadedFile;
}
/**

@ -10,10 +10,13 @@ const {
EModelEndpoint,
mergeFileConfig,
hostImageIdSuffix,
checkOpenAIStorage,
hostImageNamePrefix,
isAssistantsEndpoint,
} = require('librechat-data-provider');
const { addResourceFileId, deleteResourceFileId } = require('~/server/controllers/assistants/v2');
const { convertImage, resizeAndConvert } = require('~/server/services/Files/images');
const { initializeClient } = require('~/server/services/Endpoints/assistants');
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
const { createFile, updateFileUsage, deleteFiles } = require('~/models/File');
const { LB_QueueAsyncCall } = require('~/server/utils/queue');
const { getStrategyFunctions } = require('./strategies');
@ -34,14 +37,16 @@ const processFiles = async (files) => {
/**
* Enqueues the delete operation to the leaky bucket queue if necessary, or adds it directly to promises.
*
* @param {Express.Request} req - The express request object.
* @param {MongoFile} file - The file object to delete.
* @param {Function} deleteFile - The delete file function.
* @param {Promise[]} promises - The array of promises to await.
* @param {OpenAI | undefined} [openai] - If an OpenAI file, the initialized OpenAI client.
* @param {object} params - The passed parameters.
* @param {Express.Request} params.req - The express request object.
* @param {MongoFile} params.file - The file object to delete.
* @param {Function} params.deleteFile - The delete file function.
* @param {Promise[]} params.promises - The array of promises to await.
* @param {string[]} params.resolvedFileIds - The array of file IDs that were successfully deleted.
* @param {OpenAI | undefined} [params.openai] - If an OpenAI file, the initialized OpenAI client.
*/
function enqueueDeleteOperation(req, file, deleteFile, promises, openai) {
if (file.source === FileSources.openai) {
function enqueueDeleteOperation({ req, file, deleteFile, promises, resolvedFileIds, openai }) {
if (checkOpenAIStorage(file.source)) {
// Enqueue to leaky bucket
promises.push(
new Promise((resolve, reject) => {
@ -53,6 +58,7 @@ function enqueueDeleteOperation(req, file, deleteFile, promises, openai) {
logger.error('Error deleting file from OpenAI source', err);
reject(err);
} else {
resolvedFileIds.push(file.file_id);
resolve(result);
}
},
@ -62,10 +68,12 @@ function enqueueDeleteOperation(req, file, deleteFile, promises, openai) {
} else {
// Add directly to promises
promises.push(
deleteFile(req, file).catch((err) => {
logger.error('Error deleting file', err);
return Promise.reject(err);
}),
deleteFile(req, file)
.then(() => resolvedFileIds.push(file.file_id))
.catch((err) => {
logger.error('Error deleting file', err);
return Promise.reject(err);
}),
);
}
}
@ -80,35 +88,71 @@ function enqueueDeleteOperation(req, file, deleteFile, promises, openai) {
* @param {Express.Request} params.req - The express request object.
* @param {DeleteFilesBody} params.req.body - The request body.
* @param {string} [params.req.body.assistant_id] - The assistant ID if file uploaded is associated to an assistant.
* @param {string} [params.req.body.tool_resource] - The tool resource if assistant file uploaded is associated to a tool resource.
*
* @returns {Promise<void>}
*/
const processDeleteRequest = async ({ req, files }) => {
const file_ids = files.map((file) => file.file_id);
const resolvedFileIds = [];
const deletionMethods = {};
const promises = [];
promises.push(deleteFiles(file_ids));
/** @type {OpenAI | undefined} */
let openai;
if (req.body.assistant_id) {
({ openai } = await initializeClient({ req }));
/** @type {Record<string, OpenAI | undefined>} */
const client = { [FileSources.openai]: undefined, [FileSources.azure]: undefined };
const initializeClients = async () => {
const openAIClient = await getOpenAIClient({
req,
overrideEndpoint: EModelEndpoint.assistants,
});
client[FileSources.openai] = openAIClient.openai;
if (!req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
return;
}
const azureClient = await getOpenAIClient({
req,
overrideEndpoint: EModelEndpoint.azureAssistants,
});
client[FileSources.azure] = azureClient.openai;
};
if (req.body.assistant_id !== undefined) {
await initializeClients();
}
for (const file of files) {
const source = file.source ?? FileSources.local;
if (source === FileSources.openai && !openai) {
({ openai } = await initializeClient({ req }));
if (checkOpenAIStorage(source) && !client[source]) {
await initializeClients();
}
if (req.body.assistant_id) {
const openai = client[source];
if (req.body.assistant_id && req.body.tool_resource) {
promises.push(
deleteResourceFileId({
req,
openai,
file_id: file.file_id,
assistant_id: req.body.assistant_id,
tool_resource: req.body.tool_resource,
}),
);
} else if (req.body.assistant_id) {
promises.push(openai.beta.assistants.files.del(req.body.assistant_id, file.file_id));
}
if (deletionMethods[source]) {
enqueueDeleteOperation(req, file, deletionMethods[source], promises, openai);
enqueueDeleteOperation({
req,
file,
deleteFile: deletionMethods[source],
promises,
resolvedFileIds,
openai,
});
continue;
}
@ -118,10 +162,11 @@ const processDeleteRequest = async ({ req, files }) => {
}
deletionMethods[source] = deleteFile;
enqueueDeleteOperation(req, file, deleteFile, promises, openai);
enqueueDeleteOperation({ req, file, deleteFile, promises, resolvedFileIds, openai });
}
await Promise.allSettled(promises);
await deleteFiles(resolvedFileIds);
};
/**
@ -180,12 +225,13 @@ const processFileURL = async ({ fileStrategy, userId, URL, fileName, basePath, c
*
* @param {Object} params - The parameters object.
* @param {Express.Request} params.req - The Express request object.
* @param {Express.Response} params.res - The Express response object.
* @param {Express.Response} [params.res] - The Express response object.
* @param {Express.Multer.File} params.file - The uploaded file.
* @param {ImageMetadata} params.metadata - Additional metadata for the file.
* @param {boolean} params.returnFile - Whether to return the file metadata or return response as normal.
* @returns {Promise<void>}
*/
const processImageFile = async ({ req, res, file, metadata }) => {
const processImageFile = async ({ req, res, file, metadata, returnFile = false }) => {
const source = req.app.locals.fileStrategy;
const { handleImageUpload } = getStrategyFunctions(source);
const { file_id, temp_file_id, endpoint } = metadata;
@ -213,6 +259,10 @@ const processImageFile = async ({ req, res, file, metadata }) => {
},
true,
);
if (returnFile) {
return result;
}
res.status(200).json({ message: 'File uploaded and processed successfully', ...result });
};
@ -274,28 +324,57 @@ const uploadImageBuffer = async ({ req, context, metadata = {}, resize = true })
* @returns {Promise<void>}
*/
const processFileUpload = async ({ req, res, file, metadata }) => {
const isAssistantUpload = metadata.endpoint === EModelEndpoint.assistants;
const source = isAssistantUpload ? FileSources.openai : FileSources.vectordb;
const isAssistantUpload = isAssistantsEndpoint(metadata.endpoint);
const assistantSource =
metadata.endpoint === EModelEndpoint.azureAssistants ? FileSources.azure : FileSources.openai;
const source = isAssistantUpload ? assistantSource : FileSources.vectordb;
const { handleFileUpload } = getStrategyFunctions(source);
const { file_id, temp_file_id } = metadata;
/** @type {OpenAI | undefined} */
let openai;
if (source === FileSources.openai) {
({ openai } = await initializeClient({ req }));
if (checkOpenAIStorage(source)) {
({ openai } = await getOpenAIClient({ req }));
}
const { id, bytes, filename, filepath, embedded } = await handleFileUpload({
const {
id,
bytes,
filename,
filepath: _filepath,
embedded,
height,
width,
} = await handleFileUpload({
req,
file,
file_id,
openai,
});
if (isAssistantUpload && !metadata.message_file) {
if (isAssistantUpload && !metadata.message_file && !metadata.tool_resource) {
await openai.beta.assistants.files.create(metadata.assistant_id, {
file_id: id,
});
} else if (isAssistantUpload && !metadata.message_file) {
await addResourceFileId({
req,
openai,
file_id: id,
assistant_id: metadata.assistant_id,
tool_resource: metadata.tool_resource,
});
}
let filepath = isAssistantUpload ? `${openai.baseURL}/files/${id}` : _filepath;
if (isAssistantUpload && file.mimetype.startsWith('image')) {
const result = await processImageFile({
req,
file,
metadata: { file_id: v4() },
returnFile: true,
});
filepath = result.filepath;
}
const result = await createFile(
@ -304,13 +383,15 @@ const processFileUpload = async ({ req, res, file, metadata }) => {
file_id: id ?? file_id,
temp_file_id,
bytes,
filepath,
filename: filename ?? file.originalname,
filepath: isAssistantUpload ? `${openai.baseURL}/files/${id}` : filepath,
context: isAssistantUpload ? FileContext.assistants : FileContext.message_attachment,
model: isAssistantUpload ? req.body.model : undefined,
type: file.mimetype,
embedded,
source,
height,
width,
},
true,
);
@ -340,7 +421,10 @@ const processOpenAIFile = async ({
originalName ? `/${originalName}` : ''
}`;
const type = mime.getType(originalName ?? file_id);
const source =
openai.req.body.endpoint === EModelEndpoint.azureAssistants
? FileSources.azure
: FileSources.openai;
const file = {
..._file,
type,
@ -349,7 +433,7 @@ const processOpenAIFile = async ({
usage: 1,
user: userId,
context: _file.purpose,
source: FileSources.openai,
source,
model: openai.req.body.model,
filename: originalName ?? file_id,
};
@ -394,12 +478,14 @@ const processOpenAIImageOutput = async ({ req, buffer, file_id, filename, fileEx
filename: `${hostImageNamePrefix}${filename}`,
};
createFile(file, true);
const source =
req.body.endpoint === EModelEndpoint.azureAssistants ? FileSources.azure : FileSources.openai;
createFile(
{
...file,
file_id,
filename,
source: FileSources.openai,
source,
type: mime.getType(fileExt),
},
true,
@ -500,7 +586,12 @@ async function retrieveAndProcessFile({
* Filters a file based on its size and the endpoint origin.
*
* @param {Object} params - The parameters for the function.
* @param {Express.Request} params.req - The request object from Express.
* @param {object} params.req - The request object from Express.
* @param {string} [params.req.endpoint]
* @param {string} [params.req.file_id]
* @param {number} [params.req.width]
* @param {number} [params.req.height]
* @param {number} [params.req.version]
* @param {Express.Multer.File} params.file - The file uploaded to the server via multer.
* @param {boolean} [params.image] - Whether the file expected is an image.
* @returns {void}

@ -111,6 +111,8 @@ const getStrategyFunctions = (fileSource) => {
return localStrategy();
} else if (fileSource === FileSources.openai) {
return openAIStrategy();
} else if (fileSource === FileSources.azure) {
return openAIStrategy();
} else if (fileSource === FileSources.vectordb) {
return vectorStrategy();
} else {

@ -167,6 +167,8 @@ const getOpenAIModels = async (opts) => {
if (opts.assistants) {
models = defaultModels[EModelEndpoint.assistants];
} else if (opts.azure) {
models = defaultModels[EModelEndpoint.azureAssistants];
}
if (opts.plugins) {

@ -55,7 +55,7 @@ async function createRun({ openai, thread_id, body }) {
* @param {string} params.run_id - The ID of the run to wait for.
* @param {string} params.thread_id - The ID of the thread associated with the run.
* @param {RunManager} params.runManager - The RunManager instance to manage run steps.
* @param {number} [params.pollIntervalMs=750] - The interval for polling the run status; default is 750 milliseconds.
* @param {number} [params.pollIntervalMs=2000] - The interval for polling the run status; default is 2000 milliseconds.
* @param {number} [params.timeout=180000] - The period to wait until timing out polling; default is 3 minutes (in ms).
* @return {Promise<Run>} A promise that resolves to the last fetched run object.
*/
@ -64,7 +64,7 @@ async function waitForRun({
run_id,
thread_id,
runManager,
pollIntervalMs = 750,
pollIntervalMs = 2000,
timeout = 60000 * 3,
}) {
let timeElapsed = 0;
@ -233,7 +233,7 @@ async function _handleRun({ openai, run_id, thread_id }) {
run_id,
thread_id,
runManager,
pollIntervalMs: 750,
pollIntervalMs: 2000,
timeout: 60000,
});
const actions = [];

@ -3,7 +3,6 @@ const { v4 } = require('uuid');
const {
Constants,
ContentTypes,
EModelEndpoint,
AnnotationTypes,
defaultOrderQuery,
} = require('librechat-data-provider');
@ -50,6 +49,7 @@ async function initThread({ openai, body, thread_id: _thread_id }) {
* @param {string} params.assistant_id - The current assistant Id.
* @param {string} params.thread_id - The thread Id.
* @param {string} params.conversationId - The message's conversationId
* @param {string} params.endpoint - The conversation endpoint
* @param {string} [params.parentMessageId] - Optional if initial message.
* Defaults to Constants.NO_PARENT.
* @param {string} [params.instructions] - Optional: from preset for `instructions` field.
@ -82,7 +82,7 @@ async function saveUserMessage(params) {
const userMessage = {
user: params.user,
endpoint: EModelEndpoint.assistants,
endpoint: params.endpoint,
messageId: params.messageId,
conversationId: params.conversationId,
parentMessageId: params.parentMessageId ?? Constants.NO_PARENT,
@ -96,7 +96,7 @@ async function saveUserMessage(params) {
};
const convo = {
endpoint: EModelEndpoint.assistants,
endpoint: params.endpoint,
conversationId: params.conversationId,
promptPrefix: params.promptPrefix,
instructions: params.instructions,
@ -126,6 +126,7 @@ async function saveUserMessage(params) {
* @param {string} params.model - The model used by the assistant.
* @param {ContentPart[]} params.content - The message content parts.
* @param {string} params.conversationId - The message's conversationId
* @param {string} params.endpoint - The conversation endpoint
* @param {string} params.parentMessageId - The latest user message that triggered this response.
* @param {string} [params.instructions] - Optional: from preset for `instructions` field.
* Overrides the instructions of the assistant.
@ -145,7 +146,7 @@ async function saveAssistantMessage(params) {
const message = await recordMessage({
user: params.user,
endpoint: EModelEndpoint.assistants,
endpoint: params.endpoint,
messageId: params.messageId,
conversationId: params.conversationId,
parentMessageId: params.parentMessageId,
@ -160,7 +161,7 @@ async function saveAssistantMessage(params) {
});
await saveConvo(params.user, {
endpoint: EModelEndpoint.assistants,
endpoint: params.endpoint,
conversationId: params.conversationId,
promptPrefix: params.promptPrefix,
instructions: params.instructions,
@ -205,20 +206,22 @@ async function addThreadMetadata({ openai, thread_id, messageId, messages }) {
*
* @param {Object} params - The parameters for synchronizing messages.
* @param {OpenAIClient} params.openai - The OpenAI client instance.
* @param {string} params.endpoint - The current endpoint.
* @param {string} params.thread_id - The current thread ID.
* @param {TMessage[]} params.dbMessages - The LibreChat DB messages.
* @param {ThreadMessage[]} params.apiMessages - The thread messages from the API.
* @param {string} params.conversationId - The current conversation ID.
* @param {string} params.thread_id - The current thread ID.
* @param {string} [params.assistant_id] - The current assistant ID.
* @param {string} params.conversationId - The current conversation ID.
* @return {Promise<TMessage[]>} A promise that resolves to the updated messages
*/
async function syncMessages({
openai,
apiMessages,
dbMessages,
conversationId,
endpoint,
thread_id,
dbMessages,
apiMessages,
assistant_id,
conversationId,
}) {
let result = [];
let dbMessageMap = new Map(dbMessages.map((msg) => [msg.messageId, msg]));
@ -290,7 +293,7 @@ async function syncMessages({
thread_id,
conversationId,
messageId: v4(),
endpoint: EModelEndpoint.assistants,
endpoint,
parentMessageId: lastMessage ? lastMessage.messageId : Constants.NO_PARENT,
role: apiMessage.role,
isCreatedByUser: apiMessage.role === 'user',
@ -382,13 +385,21 @@ function mapMessagesToSteps(steps, messages) {
*
* @param {Object} params - The parameters for initializing a thread.
* @param {OpenAIClient} params.openai - The OpenAI client instance.
* @param {string} params.endpoint - The current endpoint.
* @param {string} [params.latestMessageId] - Optional: The latest message ID from LibreChat.
* @param {string} params.thread_id - Response thread ID.
* @param {string} params.run_id - Response Run ID.
* @param {string} params.conversationId - LibreChat conversation ID.
* @return {Promise<TMessage[]>} A promise that resolves to the updated messages
*/
async function checkMessageGaps({ openai, latestMessageId, thread_id, run_id, conversationId }) {
async function checkMessageGaps({
openai,
endpoint,
latestMessageId,
thread_id,
run_id,
conversationId,
}) {
const promises = [];
promises.push(openai.beta.threads.messages.list(thread_id, defaultOrderQuery));
promises.push(openai.beta.threads.runs.steps.list(thread_id, run_id));
@ -406,6 +417,7 @@ async function checkMessageGaps({ openai, latestMessageId, thread_id, run_id, co
role: 'assistant',
run_id,
thread_id,
endpoint,
metadata: {
messageId: latestMessageId,
},
@ -452,11 +464,12 @@ async function checkMessageGaps({ openai, latestMessageId, thread_id, run_id, co
const syncedMessages = await syncMessages({
openai,
endpoint,
thread_id,
dbMessages,
apiMessages,
thread_id,
conversationId,
assistant_id,
conversationId,
});
return Object.values(
@ -498,41 +511,62 @@ const recordUsage = async ({
};
/**
* Safely replaces the annotated text within the specified range denoted by start_index and end_index,
* after verifying that the text within that range matches the given annotation text.
* Proceeds with the replacement even if a mismatch is found, but logs a warning.
* Creates a replaceAnnotation function with internal state for tracking the index offset.
*
* @param {string} originalText The original text content.
* @param {number} start_index The starting index where replacement should begin.
* @param {number} end_index The ending index where replacement should end.
* @param {string} expectedText The text expected to be found in the specified range.
* @param {string} replacementText The text to insert in place of the existing content.
* @returns {string} The text with the replacement applied, regardless of text match.
* @returns {function} The replaceAnnotation function with closure for index offset.
*/
function replaceAnnotation(originalText, start_index, end_index, expectedText, replacementText) {
if (start_index < 0 || end_index > originalText.length || start_index > end_index) {
logger.warn(`Invalid range specified for annotation replacement.
Attempting replacement with \`replace\` method instead...
length: ${originalText.length}
start_index: ${start_index}
end_index: ${end_index}`);
return originalText.replace(originalText, replacementText);
function createReplaceAnnotation() {
let indexOffset = 0;
/**
* Safely replaces the annotated text within the specified range denoted by start_index and end_index,
* after verifying that the text within that range matches the given annotation text.
* Proceeds with the replacement even if a mismatch is found, but logs a warning.
*
* @param {object} params The annotation replacement parameters.
* @param {string} params.currentText The current text content, with/without replacements.
* @param {number} params.start_index The starting index where replacement should begin.
* @param {number} params.end_index The ending index where replacement should end.
* @param {string} params.expectedText The text expected to be found in the specified range.
* @param {string} params.replacementText The text to insert in place of the existing content.
* @returns {string} The text with the replacement applied, regardless of text match.
*/
function replaceAnnotation({
currentText,
start_index,
end_index,
expectedText,
replacementText,
}) {
const adjustedStartIndex = start_index + indexOffset;
const adjustedEndIndex = end_index + indexOffset;
if (
adjustedStartIndex < 0 ||
adjustedEndIndex > currentText.length ||
adjustedStartIndex > adjustedEndIndex
) {
logger.warn(`Invalid range specified for annotation replacement.
Attempting replacement with \`replace\` method instead...
length: ${currentText.length}
start_index: ${adjustedStartIndex}
end_index: ${adjustedEndIndex}`);
return currentText.replace(expectedText, replacementText);
}
if (currentText.substring(adjustedStartIndex, adjustedEndIndex) !== expectedText) {
return currentText.replace(expectedText, replacementText);
}
indexOffset += replacementText.length - (adjustedEndIndex - adjustedStartIndex);
return (
currentText.slice(0, adjustedStartIndex) +
replacementText +
currentText.slice(adjustedEndIndex)
);
}
const actualTextInRange = originalText.substring(start_index, end_index);
if (actualTextInRange !== expectedText) {
logger.warn(`The text within the specified range does not match the expected annotation text.
Attempting replacement with \`replace\` method instead...
Expected: ${expectedText}
Actual: ${actualTextInRange}`);
return originalText.replace(originalText, replacementText);
}
const beforeText = originalText.substring(0, start_index);
const afterText = originalText.substring(end_index);
return beforeText + replacementText + afterText;
return replaceAnnotation;
}
/**
@ -581,6 +615,11 @@ async function processMessages({ openai, client, messages = [] }) {
continue;
}
const originalText = currentText;
text += originalText;
const replaceAnnotation = createReplaceAnnotation();
logger.debug('[processMessages] Processing annotations:', annotations);
for (const annotation of annotations) {
let file;
@ -589,14 +628,16 @@ async function processMessages({ openai, client, messages = [] }) {
const file_id = annotationType?.file_id;
const alreadyProcessed = client.processedFileIds.has(file_id);
const replaceCurrentAnnotation = (replacement = '') => {
currentText = replaceAnnotation(
const replaceCurrentAnnotation = (replacementText = '') => {
const { start_index, end_index, text: expectedText } = annotation;
currentText = replaceAnnotation({
originalText,
currentText,
annotation.start_index,
annotation.end_index,
annotation.text,
replacement,
);
start_index,
end_index,
expectedText,
replacementText,
});
edited = true;
};
@ -623,7 +664,7 @@ async function processMessages({ openai, client, messages = [] }) {
replaceCurrentAnnotation(`^${sources.length}^`);
}
text += currentText + ' ';
text = currentText;
if (!file) {
continue;

@ -2,6 +2,7 @@ const {
Capabilities,
EModelEndpoint,
assistantEndpointSchema,
defaultAssistantsVersion,
} = require('librechat-data-provider');
const { logger } = require('~/config');
@ -12,6 +13,7 @@ const { logger } = require('~/config');
function azureAssistantsDefaults() {
return {
capabilities: [Capabilities.tools, Capabilities.actions, Capabilities.code_interpreter],
version: defaultAssistantsVersion.azureAssistants,
};
}

@ -41,6 +41,17 @@ function azureConfigSetup(config) {
);
}
if (
azureConfiguration.assistants &&
process.env.ENDPOINTS &&
!process.env.ENDPOINTS.includes(EModelEndpoint.azureAssistants)
) {
logger.warn(
`Azure Assistants are configured, but the endpoint will not be accessible as it's not included in the ENDPOINTS environment variable.
Please add the value "${EModelEndpoint.azureAssistants}" to the ENDPOINTS list if expected.`,
);
}
return {
modelNames,
modelGroupMap,