🔄 fix: Assistants Endpoint & Minor Issues (#7274)

* 🔄 fix: Include usage in stream options for OpenAI and Azure endpoints (see the request sketch after this list)

* fix: Agents support for Azure serverless endpoints

* fix: Refactor condition for assistants and azureAssistants endpoint handling

* fix: AWS Titan via Bedrock: the model doesn't support system messages (Closes #6456)

* fix: Add EndpointSchemaKey type to endpoint parameters in buildDefaultConvo and ensure assistantId is always defined

* fix: Handle new conversation state for assistants endpoint in finalHandler

* fix: Add spec and iconURL parameters to `saveAssistantMessage` to persist modelSpec fields

* fix: Handle assistant unlinking even if no valid files to delete

* chore: move type definitions from callbacks.js to typedefs.js

* chore: Add StandardGraph typedef to typedefs.js

* chore: Update parameter type for graph in ModelEndHandler to StandardGraph
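
On the first item: OpenAI-compatible streaming APIs only report token usage when the request opts in. A minimal sketch of the request shape this fix enables, using the openai Node SDK; the client setup, model name, and prompt are placeholders, not LibreChat's actual call site:

```js
const OpenAI = require('openai');

const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function streamWithUsage() {
  const stream = await openai.chat.completions.create({
    model: 'gpt-4o', // placeholder model
    messages: [{ role: 'user', content: 'Hello' }],
    stream: true,
    stream_options: { include_usage: true }, // the option this commit includes
  });

  for await (const chunk of stream) {
    // Intermediate chunks carry `usage: null`; the final chunk reports
    // prompt and completion token counts for the whole request.
    if (chunk.usage) {
      console.log(chunk.usage);
    }
  }
}
```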

---------

Co-authored-by: Andres Restrepo <andres@enric.ai>
Danny Avila, 2025-05-07 17:11:33 -04:00 (committed by GitHub)
parent 3606349a0f
commit 71105cd49c
13 changed files with 119 additions and 35 deletions

@@ -14,15 +14,6 @@ const { loadAuthValues } = require('~/server/services/Tools/credentials');
 const { saveBase64Image } = require('~/server/services/Files/process');
 const { logger, sendEvent } = require('~/config');
-/** @typedef {import('@librechat/agents').Graph} Graph */
-/** @typedef {import('@librechat/agents').EventHandler} EventHandler */
-/** @typedef {import('@librechat/agents').ModelEndData} ModelEndData */
-/** @typedef {import('@librechat/agents').ToolEndData} ToolEndData */
-/** @typedef {import('@librechat/agents').ToolEndCallback} ToolEndCallback */
-/** @typedef {import('@librechat/agents').ChatModelStreamHandler} ChatModelStreamHandler */
-/** @typedef {import('@librechat/agents').ContentAggregatorResult['aggregateContent']} ContentAggregator */
-/** @typedef {import('@librechat/agents').GraphEvents} GraphEvents */
 class ModelEndHandler {
   /**
    * @param {Array<UsageMetadata>} collectedUsage
@@ -38,7 +29,7 @@ class ModelEndHandler {
    * @param {string} event
    * @param {ModelEndData | undefined} data
    * @param {Record<string, unknown> | undefined} metadata
-   * @param {Graph} graph
+   * @param {StandardGraph} graph
    * @returns
    */
   handle(event, data, metadata, graph) {
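
Per the chore items in the commit message, these typedefs now live in typedefs.js. A sketch of the relocated block, including the new StandardGraph entry; the import source for StandardGraph is an assumption matching the other definitions:

```js
// typedefs.js (sketch): the definitions removed from callbacks.js above,
// plus the new StandardGraph entry.
/** @typedef {import('@librechat/agents').Graph} Graph */
/** @typedef {import('@librechat/agents').StandardGraph} StandardGraph */
/** @typedef {import('@librechat/agents').EventHandler} EventHandler */
/** @typedef {import('@librechat/agents').ModelEndData} ModelEndData */
/** @typedef {import('@librechat/agents').ToolEndData} ToolEndData */
/** @typedef {import('@librechat/agents').ToolEndCallback} ToolEndCallback */
/** @typedef {import('@librechat/agents').ChatModelStreamHandler} ChatModelStreamHandler */
/** @typedef {import('@librechat/agents').ContentAggregatorResult['aggregateContent']} ContentAggregator */
/** @typedef {import('@librechat/agents').GraphEvents} GraphEvents */
```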

@@ -58,7 +58,7 @@ const payloadParser = ({ req, agent, endpoint }) => {
 const legacyContentEndpoints = new Set([KnownEndpoints.groq, KnownEndpoints.deepseek]);
-const noSystemModelRegex = [/\b(o1-preview|o1-mini)\b/gi];
+const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi];
 // const { processMemory, memoryInstructions } = require('~/server/services/Endpoints/agents/memory');
 // const { getFormattedMemories } = require('~/models/Memory');
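
This list gates models that reject the `system` role, now including Bedrock's `amazon.titan-text` models (#6456). A minimal sketch of how such a gate is typically applied; `supportsSystemMessages` and `adaptMessages` are illustrative helpers, not LibreChat's actual implementation:

```js
const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi];

/** True when the model accepts a `system` role message. */
function supportsSystemMessages(model) {
  return !noSystemModelRegex.some((regex) => {
    regex.lastIndex = 0; // global regexes are stateful; reset before each test
    return regex.test(model);
  });
}

/** Folds the system prompt into the first user turn when unsupported. */
function adaptMessages(model, systemPrompt, messages) {
  if (supportsSystemMessages(model)) {
    return [{ role: 'system', content: systemPrompt }, ...messages];
  }
  // Assumes plain string content for simplicity.
  const [first, ...rest] = messages;
  return [{ ...first, content: `${systemPrompt}\n\n${first.content}` }, ...rest];
}
```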

@@ -119,7 +119,7 @@ const chatV1 = async (req, res) => {
   } else if (/Files.*are invalid/.test(error.message)) {
     const errorMessage = `Files are invalid, or may not have uploaded yet.${
       endpoint === EModelEndpoint.azureAssistants
-        ? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
+        ? " If using Azure OpenAI, files are only available in the region of the assistant's model at the time of upload."
         : ''
     }`;
     return sendResponse(req, res, messageData, errorMessage);
@@ -379,8 +379,8 @@ const chatV1 = async (req, res) => {
     body.additional_instructions ? `${body.additional_instructions}\n` : ''
   }The user has uploaded ${imageCount} image${pluralized}.
   Use the \`${ImageVisionTool.function.name}\` tool to retrieve ${
-  plural ? '' : 'a '
-}detailed text description${pluralized} for ${plural ? 'each' : 'the'} image${pluralized}.`;
+    plural ? '' : 'a '
+  }detailed text description${pluralized} for ${plural ? 'each' : 'the'} image${pluralized}.`;
   return files;
 };
@@ -576,6 +576,8 @@ const chatV1 = async (req, res) => {
       thread_id,
       model: assistant_id,
       endpoint,
+      spec: endpointOption.spec,
+      iconURL: endpointOption.iconURL,
     };
     sendMessage(res, {

@@ -428,6 +428,8 @@ const chatV2 = async (req, res) => {
       thread_id,
       model: assistant_id,
       endpoint,
+      spec: endpointOption.spec,
+      iconURL: endpointOption.iconURL,
     };
     sendMessage(res, {
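
Both chat controllers (v1 and v2) now thread `spec` and `iconURL` into the response data, so modelSpec fields reach the client for new assistant conversations; the `saveAssistantMessage` changes below persist the same fields server-side.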

@@ -21,6 +21,7 @@ const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
 const { loadAuthValues } = require('~/server/services/Tools/credentials');
 const { refreshS3FileUrls } = require('~/server/services/Files/S3/crud');
 const { getFiles, batchUpdateFiles } = require('~/models/File');
+const { getAssistant } = require('~/models/Assistant');
 const { getAgent } = require('~/models/Agent');
 const { getLogStores } = require('~/cache');
 const { logger } = require('~/config');
@@ -94,7 +95,7 @@ router.delete('/', async (req, res) => {
     });
   }
-  /* Handle entity unlinking even if no valid files to delete */
+  /* Handle agent unlinking even if no valid files to delete */
   if (req.body.agent_id && req.body.tool_resource && dbFiles.length === 0) {
     const agent = await getAgent({
       id: req.body.agent_id,
@@ -104,7 +105,21 @@ router.delete('/', async (req, res) => {
     const agentFiles = files.filter((f) => toolResourceFiles.includes(f.file_id));
     await processDeleteRequest({ req, files: agentFiles });
-    res.status(200).json({ message: 'File associations removed successfully' });
+    res.status(200).json({ message: 'File associations removed successfully from agent' });
     return;
   }
+
+  /* Handle assistant unlinking even if no valid files to delete */
+  if (req.body.assistant_id && req.body.tool_resource && dbFiles.length === 0) {
+    const assistant = await getAssistant({
+      id: req.body.assistant_id,
+    });
+    const toolResourceFiles = assistant.tool_resources?.[req.body.tool_resource]?.file_ids ?? [];
+    const assistantFiles = files.filter((f) => toolResourceFiles.includes(f.file_id));
+    await processDeleteRequest({ req, files: assistantFiles });
+    res.status(200).json({ message: 'File associations removed successfully from assistant' });
+    return;
+  }
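
A hypothetical client request exercising the new assistant branch; the `/api/files` mount path, the ids, and the payload field names are assumptions read off the handler above:

```js
// Unlink a tool resource from an assistant when none of its files
// remain deletable; path and ids are placeholders.
await fetch('/api/files', {
  method: 'DELETE',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    assistant_id: 'asst_example',
    tool_resource: 'code_interpreter',
    files: [], // no valid files to delete; remove associations only
  }),
});
```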

@@ -233,6 +233,13 @@ const initializeAgentOptions = async ({
     endpointOption: _endpointOption,
   });
+  if (
+    agent.endpoint === EModelEndpoint.azureOpenAI &&
+    options.llmConfig?.azureOpenAIApiInstanceName == null
+  ) {
+    agent.provider = Providers.OPENAI;
+  }
+
   if (options.provider != null) {
     agent.provider = options.provider;
   }
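
The apparent intent, matching the Azure serverless item in the commit message: serverless deployments carry no `azureOpenAIApiInstanceName`, so the agent falls back to the OpenAI-compatible provider path instead of the instance-based Azure client.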

@@ -3,7 +3,6 @@ const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
-const { getAssistant } = require('~/models/Assistant');
 const buildOptions = async (endpoint, parsedBody) => {
   const { promptPrefix, assistant_id, iconURL, greeting, spec, artifacts, ...modelOptions } =
     parsedBody;
   const endpointOption = removeNullishValues({

@@ -132,6 +132,8 @@ async function saveUserMessage(req, params) {
  * @param {string} params.endpoint - The conversation endpoint
  * @param {string} params.parentMessageId - The latest user message that triggered this response.
  * @param {string} [params.instructions] - Optional: from preset for `instructions` field.
+ * @param {string} [params.spec] - Optional: Model spec identifier.
+ * @param {string} [params.iconURL]
  * Overrides the instructions of the assistant.
  * @param {string} [params.promptPrefix] - Optional: from preset for `additional_instructions` field.
  * @return {Promise<Run>} A promise that resolves to the created run object.
@@ -154,6 +156,8 @@ async function saveAssistantMessage(req, params) {
     text: params.text,
     unfinished: false,
     // tokenCount,
+    iconURL: params.iconURL,
+    spec: params.spec,
   });
   await saveConvo(
@@ -165,6 +169,8 @@ async function saveAssistantMessage(req, params) {
       instructions: params.instructions,
       assistant_id: params.assistant_id,
       model: params.model,
+      iconURL: params.iconURL,
+      spec: params.spec,
     },
     { context: 'api/server/services/Threads/manage.js #saveAssistantMessage' },
   );