🚧 chore: merge latest dev build to main repo (#3844)

* agents - phase 1 (#30)

* chore: copy assistant files

* feat: frontend and data-provider

* feat: backend get endpoint test

* fix(MessageEndpointIcon): switched to AgentName and AgentAvatar

* fix: small fixes

* fix: agent endpoint config

* fix: show Agent Builder

* chore: install agentus

* chore: initial scaffolding for agents

* fix: updated Assistant logic to Agent Logic for some Agent components

* WIP first pass, demo of agent package

* WIP: initial backend infra for agents

* fix: agent list error

* wip: agents routing

* chore: Refactor useSSE hook to handle different data events

* wip: correctly emit events

* chore: Update @librechat/agentus npm dependency to version 1.0.9

* remove comment

* first pass: streaming agent text

* chore: Remove @librechat/agentus root-level workspace npm dependency

* feat: Agent Schema and Model

* fix: content handling fixes

* fix: content message save

* WIP: new content data

* fix: run step issue with tool calls

* chore: Update @librechat/agentus npm dependency to version 1.1.5

* feat: update controller and agent routes

* wip: initial backend tool and tool error handling support

* wip: tool chunks

* chore: Update @librechat/agentus npm dependency to version 1.1.7

* chore: update tool_call typing, add test conditions and logs

* fix: create agent

* fix: create agent

* first pass: render completed content parts

* fix: remove logging, fix step handler typing

* chore: Update @librechat/agentus npm dependency to version 1.1.9

* refactor: cleanup maps on unmount

* chore: Update BaseClient.js to safely count tokens for string, number, and boolean values

* fix: support subsequent messages with tool_calls

* chore: export order

* fix: select agent

* fix: tool call types and handling

* chore: switch to anthropic for testing

* fix: AgentSelect

* refactor: experimental: OpenAIClient to use array for intermediateReply

* fix(useSSE): revert old condition for streaming legacy client tokens

* fix: lint

* revert `agent_id` to `id`

* chore: update localization keys for agent-related components

* feat: zod schema handling for actions

* refactor(actions): if no params, no zodSchema

* chore: Update @librechat/agentus npm dependency to version 1.2.1

* feat: first pass, actions

* refactor: empty schema for actions without params

* feat: Update createRun function to accept additional options

* fix: message payload formatting; feat: add more client options

* fix: ToolCall component rendering when action has no args but has output

* refactor(ToolCall): allow non-stringy args

* WIP: first pass, correctly formatted tool_calls between providers

* refactor: Remove duplicate import of 'roles' module

* refactor: Exclude 'vite.config.ts' from TypeScript compilation

* refactor: fix agent related types
- no need to use endpoint/model fields for identifying agent metadata
- add `provider` distinction for agent-configured 'endpoint'
- no need for agent-endpoint map
- reduce complexity of tools as functions into tools as string[]
- fix types related to above changes
- reduce unnecessary variables for queries/mutations and corresponding react-query keys

* refactor: Add tools and tool_kwargs fields to agent schema

* refactor: Remove unused code and update dependencies

* refactor: Update updateAgentHandler to use req.body directly

* refactor: Update AgentSelect component to use localized hooks

* refactor: Update agent schema to include tools and provider fields

* refactor(AgentPanel): add scrollbar gutter, add provider field to form, fix agent schema required values

* refactor: Update AgentSwitcher component to use selectedAgentId instead of selectedAgent

* refactor: Update AgentPanel component to include alternateName import and defaultAgentFormValues

* refactor(SelectDropDown): allow setting value as option while still supporting legacy usage (string values only)

* refactor: SelectDropdown changes - Only necessary when the available values are objects with label/value fields and the selected value is expected to be a string.

* refactor: TypeError issues and handle provider as option

* feat: Add placeholder for provider selection in AgentPanel component

* refactor: Update agent schema to include author and provider fields

* fix: show expected 'create agent' placeholder when creating agent

* chore: fix localization strings, hide capabilities form for now

* chore: typing

* refactor: import order and use compact agents schema for now

* chore: typing

* refactor: Update AgentForm type to use AgentCapabilities

* fix: agent form agent selection issues

* feat: responsive agent selection

* fix: Handle cancelled fetch in useSelectAgent hook

* fix: reset agent form on accordion close/open

* feat: Add agent_id to default conversation for agents endpoint

* feat: agents endpoint request handling

* refactor: reset conversation model on agent select

* refactor: add `additional_instructions` to conversation schema, organize other fields

* chore: casing

* chore: types

* refactor(loadAgentTools): explicitly pass agent_id, do not pass `model` to loadAgentTools for now, load action sets by agent_id

* WIP: initial draft of real agent client initialization

* WIP: first pass, anthropic agent requests

* feat: remember last selected agent

* feat: openai and azure connected

* fix: prioritize agent model for runs unless an explicit override model is passed from client

* feat: Agent Actions

* fix: save agent id to convo

* feat: model panel (#29)

* feat: model panel

* bring back comments

* fix: method still null

* fix: AgentPanel FormContext

* feat: add more parameters

* fix: style issues; refactor: Agent Controller

* fix: cherry-pick

* fix: Update AgentAvatar component to use AssistantIcon instead of BrainCircuit

* feat: OGDialog for delete agent; feat(assistant): update Agent types, introduced `model_parameters`

* feat: icon and general `model_parameters` update

* feat: use react-hook-form better

* fix: agent builder form reset issue when switching panels

* refactor: modularize agent builder form

---------

Co-authored-by: Danny Avila <danny@librechat.ai>

* fix: AgentPanel and ModelPanel type issues; use `useFormContext` and `watch` instead of `methods` directly and `useWatch`.

* fix: tool call issues due to invalid input (anthropic) of empty string

* fix: handle empty text in Part component

---------

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>

* refactor: remove form from ModelPanel and fix nested ternary expressions in AgentConfig

* fix: Model Parameters not saved correctly

* refactor: remove console log

* feat: avatar upload and get for Agents (#36)

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>

* chore: update to public package

* fix: typing, optional chaining

* fix: cursor not showing for content parts

* chore: conditionally enable agents

* ci: fix azure test

* ci: fix frontend tests, fix eslint api

* refactor: Remove unused errorContentPart variable

* continuation of the agent message PR (#40)

* last fixes

* fix: agentMap

* pr merge test (#41)

* fix: model icon not fetching correctly

* remove console logs

* feat: agent name

* refactor: pass documentsMap as a prop to allow re-render of assistant form

* refactor: pass documentsMap as a prop to allow re-render of assistant form

* chore: Bump version to 0.7.419

* fix: TypeError: Cannot read properties of undefined (reading 'id')

* refactor: update AgentSwitcher component to use ControlCombobox instead of Combobox

---------

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>
Danny Avila 2024-08-31 16:33:51 -04:00 committed by GitHub
parent 618be4bf2b
commit a0291ed155
141 changed files with 14473 additions and 5714 deletions

View file

@@ -2,6 +2,9 @@ const { CacheKeys } = require('librechat-data-provider');
const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
/**
* @param {ServerRequest} req
*/
const getModelsConfig = async (req) => {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
let modelsConfig = await cache.get(CacheKeys.MODELS_CONFIG);
@@ -14,7 +17,7 @@ const getModelsConfig = async (req) => {
/**
* Loads the models from the config.
* @param {Express.Request} req - The Express request object.
* @param {ServerRequest} req - The Express request object.
* @returns {Promise<TModelsConfig>} The models config.
*/
async function loadModels(req) {

View file

@@ -0,0 +1,83 @@
const { GraphEvents, ToolEndHandler, ChatModelStreamHandler } = require('@librechat/agents');
/** @typedef {import('@librechat/agents').EventHandler} EventHandler */
/** @typedef {import('@librechat/agents').ChatModelStreamHandler} ChatModelStreamHandler */
/** @typedef {import('@librechat/agents').GraphEvents} GraphEvents */
/**
* Sends message data in Server Sent Events format.
* @param {ServerResponse} res - The server response.
* @param {{ data: string | Record<string, unknown>, event?: string }} event - The message event.
* @param {string} event.event - The type of event.
* @param {string} event.data - The message to be sent.
*/
const sendEvent = (res, event) => {
if (typeof event.data === 'string' && event.data.length === 0) {
return;
}
res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
};
/**
* Get default handlers for stream events.
* @param {{ res?: ServerResponse }} options - The options object.
* @returns {Record<string, t.EventHandler>} The default handlers.
* @throws {Error} If the response object is not provided.
*/
function getDefaultHandlers({ res }) {
if (!res) {
throw new Error('Response not found');
}
const handlers = {
// [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
[GraphEvents.TOOL_END]: new ToolEndHandler(),
[GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
[GraphEvents.ON_RUN_STEP]: {
/**
* Handle ON_RUN_STEP event.
* @param {string} event - The event name.
* @param {StreamEventData} data - The event data.
*/
handle: (event, data) => {
sendEvent(res, { event, data });
},
},
[GraphEvents.ON_RUN_STEP_DELTA]: {
/**
* Handle ON_RUN_STEP_DELTA event.
* @param {string} event - The event name.
* @param {StreamEventData} data - The event data.
*/
handle: (event, data) => {
sendEvent(res, { event, data });
},
},
[GraphEvents.ON_RUN_STEP_COMPLETED]: {
/**
* Handle ON_RUN_STEP_COMPLETED event.
* @param {string} event - The event name.
* @param {StreamEventData & { result: ToolEndData }} data - The event data.
*/
handle: (event, data) => {
sendEvent(res, { event, data });
},
},
[GraphEvents.ON_MESSAGE_DELTA]: {
/**
* Handle ON_MESSAGE_DELTA event.
* @param {string} event - The event name.
* @param {StreamEventData} data - The event data.
*/
handle: (event, data) => {
sendEvent(res, { event, data });
},
},
};
return handlers;
}
module.exports = {
sendEvent,
getDefaultHandlers,
};
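
For context, a minimal sketch (not part of this commit) of how these helpers could be wired together; the require path, the SSE header setup, and the literal event name are assumptions:

// Hypothetical wiring of the handlers above into an SSE response.
const { sendEvent, getDefaultHandlers } = require('./callbacks'); // path is an assumption

function streamRun(res) {
  // Standard SSE headers; the middleware in this commit sets these elsewhere.
  res.setHeader('Content-Type', 'text/event-stream');
  res.setHeader('Cache-Control', 'no-cache');
  const eventHandlers = getDefaultHandlers({ res });
  // Each handler forwards a graph event as a frame such as:
  // event: message
  // data: {"event":"on_run_step","data":{"id":"step_1"}}
  sendEvent(res, { event: 'on_run_step', data: { id: 'step_1' } });
  return eventHandlers;
}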

View file

@@ -0,0 +1,462 @@
// const { HttpsProxyAgent } = require('https-proxy-agent');
// const {
// Constants,
// ImageDetail,
// EModelEndpoint,
// resolveHeaders,
// validateVisionModel,
// mapModelToAzureConfig,
// } = require('librechat-data-provider');
const { Callback } = require('@librechat/agents');
const {
EModelEndpoint,
providerEndpointMap,
removeNullishValues,
} = require('librechat-data-provider');
const {
extractBaseURL,
// constructAzureURL,
// genAzureChatCompletion,
} = require('~/utils');
const {
formatMessage,
formatAgentMessages,
createContextHandlers,
} = require('~/app/clients/prompts');
const Tokenizer = require('~/server/services/Tokenizer');
const BaseClient = require('~/app/clients/BaseClient');
// const { sleep } = require('~/server/utils');
const { createRun } = require('./run');
const { logger } = require('~/config');
class AgentClient extends BaseClient {
constructor(options = {}) {
super(options);
/** @type {'discard' | 'summarize'} */
this.contextStrategy = 'discard';
/** @deprecated @type {true} - Is a Chat Completion Request */
this.isChatCompletion = true;
const { maxContextTokens, modelOptions = {}, ...clientOptions } = options;
this.modelOptions = modelOptions;
this.maxContextTokens = maxContextTokens;
this.options = Object.assign({ endpoint: EModelEndpoint.agents }, clientOptions);
}
setOptions(options) {
logger.info('[api/server/controllers/agents/client.js] setOptions', options);
}
/**
*
* Checks if the model is a vision model based on request attachments and sets the appropriate options:
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
* - Sets `this.isVisionModel` to `true` if vision request.
* - Deletes `this.modelOptions.stop` if vision request.
* @param {MongoFile[]} attachments
*/
checkVisionRequest(attachments) {
logger.info(
'[api/server/controllers/agents/client.js #checkVisionRequest] not implemented',
attachments,
);
// if (!attachments) {
// return;
// }
// const availableModels = this.options.modelsConfig?.[this.options.endpoint];
// if (!availableModels) {
// return;
// }
// let visionRequestDetected = false;
// for (const file of attachments) {
// if (file?.type?.includes('image')) {
// visionRequestDetected = true;
// break;
// }
// }
// if (!visionRequestDetected) {
// return;
// }
// this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
// if (this.isVisionModel) {
// delete this.modelOptions.stop;
// return;
// }
// for (const model of availableModels) {
// if (!validateVisionModel({ model, availableModels })) {
// continue;
// }
// this.modelOptions.model = model;
// this.isVisionModel = true;
// delete this.modelOptions.stop;
// return;
// }
// if (!availableModels.includes(this.defaultVisionModel)) {
// return;
// }
// if (!validateVisionModel({ model: this.defaultVisionModel, availableModels })) {
// return;
// }
// this.modelOptions.model = this.defaultVisionModel;
// this.isVisionModel = true;
// delete this.modelOptions.stop;
}
getSaveOptions() {
return removeNullishValues(
Object.assign(
{
agent_id: this.options.agent.id,
modelLabel: this.options.modelLabel,
maxContextTokens: this.options.maxContextTokens,
resendFiles: this.options.resendFiles,
imageDetail: this.options.imageDetail,
spec: this.options.spec,
},
this.modelOptions,
{
model: undefined,
// TODO:
// would need to be override settings; otherwise, model needs to be undefined
// model: this.override.model,
// instructions: this.override.instructions,
// additional_instructions: this.override.additional_instructions,
},
),
);
}
getBuildMessagesOptions(opts) {
return {
instructions: opts.instructions,
additional_instructions: opts.additional_instructions,
};
}
async buildMessages(
messages,
parentMessageId,
{ instructions = null, additional_instructions = null },
opts,
) {
let orderedMessages = this.constructor.getMessagesForConversation({
messages,
parentMessageId,
summary: this.shouldSummarize,
});
let payload;
/** @type {{ role: string; name: string; content: string } | undefined} */
let systemMessage;
/** @type {number | undefined} */
let promptTokens;
/** @type {string} */
let systemContent = `${instructions ?? ''}${additional_instructions ?? ''}`;
if (this.options.attachments) {
const attachments = await this.options.attachments;
if (this.message_file_map) {
this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments;
} else {
this.message_file_map = {
[orderedMessages[orderedMessages.length - 1].messageId]: attachments,
};
}
const files = await this.addImageURLs(
orderedMessages[orderedMessages.length - 1],
attachments,
);
this.options.attachments = files;
}
if (this.message_file_map) {
this.contextHandlers = createContextHandlers(
this.options.req,
orderedMessages[orderedMessages.length - 1].text,
);
}
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = formatMessage({
message,
userName: this.options?.name,
assistantName: this.options?.modelLabel,
});
const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
/* If tokens were never counted, or, is a Vision request and the message has files, count again */
if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage);
}
/* If message has files, calculate image token cost */
// if (this.message_file_map && this.message_file_map[message.messageId]) {
// const attachments = this.message_file_map[message.messageId];
// for (const file of attachments) {
// if (file.embedded) {
// this.contextHandlers?.processFile(file);
// continue;
// }
// orderedMessages[i].tokenCount += this.calculateImageTokenCost({
// width: file.width,
// height: file.height,
// detail: this.options.imageDetail ?? ImageDetail.auto,
// });
// }
// }
return formattedMessage;
});
if (this.contextHandlers) {
this.augmentedPrompt = await this.contextHandlers.createContext();
systemContent = this.augmentedPrompt + systemContent;
}
if (systemContent) {
systemContent = systemContent.trim();
systemMessage = {
role: 'system',
name: 'instructions',
content: systemContent,
};
if (this.contextStrategy) {
const instructionTokens = this.getTokenCountForMessage(systemMessage);
if (instructionTokens >= 0) {
const firstMessageTokens = orderedMessages[0].tokenCount ?? 0;
orderedMessages[0].tokenCount = firstMessageTokens + instructionTokens;
}
}
}
if (this.contextStrategy) {
({ payload, promptTokens, messages } = await this.handleContextStrategy({
orderedMessages,
formattedMessages,
/* prefer usage_metadata from final message */
buildTokenMap: false,
}));
}
const result = {
prompt: payload,
promptTokens,
messages,
};
if (promptTokens >= 0 && typeof opts?.getReqData === 'function') {
opts.getReqData({ promptTokens });
}
return result;
}
/** @type {sendCompletion} */
async sendCompletion(payload, opts = {}) {
this.modelOptions.user = this.user;
return await this.chatCompletion({
payload,
onProgress: opts.onProgress,
abortController: opts.abortController,
});
}
// async recordTokenUsage({ promptTokens, completionTokens, context = 'message' }) {
// await spendTokens(
// {
// context,
// model: this.modelOptions.model,
// conversationId: this.conversationId,
// user: this.user ?? this.options.req.user?.id,
// endpointTokenConfig: this.options.endpointTokenConfig,
// },
// { promptTokens, completionTokens },
// );
// }
async chatCompletion({ payload, abortController = null }) {
try {
if (!abortController) {
abortController = new AbortController();
}
const baseURL = extractBaseURL(this.completionsUrl);
logger.debug('[api/server/controllers/agents/client.js] chatCompletion', {
baseURL,
payload,
});
// if (this.useOpenRouter) {
// opts.defaultHeaders = {
// 'HTTP-Referer': 'https://librechat.ai',
// 'X-Title': 'LibreChat',
// };
// }
// if (this.options.headers) {
// opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers };
// }
// if (this.options.proxy) {
// opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
// }
// if (this.isVisionModel) {
// modelOptions.max_tokens = 4000;
// }
// /** @type {TAzureConfig | undefined} */
// const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
// if (
// (this.azure && this.isVisionModel && azureConfig) ||
// (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
// ) {
// const { modelGroupMap, groupMap } = azureConfig;
// const {
// azureOptions,
// baseURL,
// headers = {},
// serverless,
// } = mapModelToAzureConfig({
// modelName: modelOptions.model,
// modelGroupMap,
// groupMap,
// });
// opts.defaultHeaders = resolveHeaders(headers);
// this.langchainProxy = extractBaseURL(baseURL);
// this.apiKey = azureOptions.azureOpenAIApiKey;
// const groupName = modelGroupMap[modelOptions.model].group;
// this.options.addParams = azureConfig.groupMap[groupName].addParams;
// this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
// // Note: `forcePrompt` not re-assigned as only chat models are vision models
// this.azure = !serverless && azureOptions;
// this.azureEndpoint =
// !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
// }
// if (this.azure || this.options.azure) {
// /* Azure Bug, extremely short default `max_tokens` response */
// if (!modelOptions.max_tokens && modelOptions.model === 'gpt-4-vision-preview') {
// modelOptions.max_tokens = 4000;
// }
// /* Azure does not accept `model` in the body, so we need to remove it. */
// delete modelOptions.model;
// opts.baseURL = this.langchainProxy
// ? constructAzureURL({
// baseURL: this.langchainProxy,
// azureOptions: this.azure,
// })
// : this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
// opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
// opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
// }
// if (process.env.OPENAI_ORGANIZATION) {
// opts.organization = process.env.OPENAI_ORGANIZATION;
// }
// if (this.options.addParams && typeof this.options.addParams === 'object') {
// modelOptions = {
// ...modelOptions,
// ...this.options.addParams,
// };
// logger.debug('[api/server/controllers/agents/client.js #chatCompletion] added params', {
// addParams: this.options.addParams,
// modelOptions,
// });
// }
// if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
// this.options.dropParams.forEach((param) => {
// delete modelOptions[param];
// });
// logger.debug('[api/server/controllers/agents/client.js #chatCompletion] dropped params', {
// dropParams: this.options.dropParams,
// modelOptions,
// });
// }
// const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
const run = await createRun({
agent: this.options.agent,
tools: this.options.tools,
toolMap: this.options.toolMap,
runId: this.responseMessageId,
modelOptions: this.modelOptions,
customHandlers: this.options.eventHandlers,
});
const config = {
configurable: {
provider: providerEndpointMap[this.options.agent.provider],
thread_id: this.conversationId,
},
run_id: this.responseMessageId,
streamMode: 'values',
version: 'v2',
};
if (!run) {
throw new Error('Failed to create run');
}
const messages = formatAgentMessages(payload);
const runMessages = await run.processStream({ messages }, config, {
[Callback.TOOL_ERROR]: (graph, error, toolId) => {
logger.error(
'[api/server/controllers/agents/client.js #chatCompletion] Tool Error',
error,
toolId,
);
},
});
// console.dir(runMessages, { depth: null });
return runMessages;
} catch (err) {
logger.error(
'[api/server/controllers/agents/client.js #chatCompletion] Unhandled error type',
err,
);
throw err;
}
}
getEncoding() {
return this.modelOptions.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
}
/**
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
* @param {string} text - The text to get the token count for.
* @returns {number} The token count of the given text.
*/
getTokenCount(text) {
const encoding = this.getEncoding();
return Tokenizer.getTokenCount(text, encoding);
}
}
module.exports = AgentClient;

View file

@@ -0,0 +1,44 @@
// Import the necessary modules
const path = require('path');
const base = path.resolve(__dirname, '..', '..', '..', '..', 'api');
console.log(base);
//api/server/controllers/agents/demo.js
require('module-alias')({ base });
const connectDb = require('~/lib/db/connectDb');
const AgentClient = require('./client');
// Define the user and message options
const user = 'user123';
const parentMessageId = 'pmid123';
const conversationId = 'cid456';
const maxContextTokens = 200000;
const req = {
user: { id: user },
};
const progressOptions = {
res: {},
};
// Define the message options
const messageOptions = {
user,
parentMessageId,
conversationId,
progressOptions,
};
async function main() {
await connectDb();
const client = new AgentClient({ req, maxContextTokens });
const text = 'Hello, this is a test message.';
try {
let response = await client.sendMessage(text, messageOptions);
console.log('Response:', response);
} catch (error) {
console.error('Error sending message:', error);
}
}
main();

View file

@@ -0,0 +1,153 @@
// errorHandler.js
const { logger } = require('~/config');
const getLogStores = require('~/cache/getLogStores');
const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
const { recordUsage } = require('~/server/services/Threads');
const { getConvo } = require('~/models/Conversation');
const { sendResponse } = require('~/server/utils');
/**
* @typedef {Object} ErrorHandlerContext
* @property {OpenAIClient} openai - The OpenAI client
* @property {string} run_id - The run ID
* @property {boolean} completedRun - Whether the run has completed
* @property {string} assistant_id - The assistant ID
* @property {string} conversationId - The conversation ID
* @property {string} parentMessageId - The parent message ID
* @property {string} responseMessageId - The response message ID
* @property {string} endpoint - The endpoint being used
* @property {string} cacheKey - The cache key for the current request
*/
/**
* @typedef {Object} ErrorHandlerDependencies
* @property {Express.Request} req - The Express request object
* @property {Express.Response} res - The Express response object
* @property {() => ErrorHandlerContext} getContext - Function to get the current context
* @property {string} [originPath] - The origin path for the error handler
*/
/**
* Creates an error handler function with the given dependencies
* @param {ErrorHandlerDependencies} dependencies - The dependencies for the error handler
* @returns {(error: Error) => Promise<void>} The error handler function
*/
const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/chat/' }) => {
const cache = getLogStores(CacheKeys.ABORT_KEYS);
/**
* Handles errors that occur during the chat process
* @param {Error} error - The error that occurred
* @returns {Promise<void>}
*/
return async (error) => {
const {
openai,
run_id,
endpoint,
cacheKey,
completedRun,
assistant_id,
conversationId,
parentMessageId,
responseMessageId,
} = getContext();
const defaultErrorMessage =
'The Assistant run failed to initialize. Try sending a message in a new conversation.';
const messageData = {
assistant_id,
conversationId,
parentMessageId,
sender: 'System',
user: req.user.id,
shouldSaveMessage: false,
messageId: responseMessageId,
endpoint,
};
if (error.message === 'Run cancelled') {
return res.end();
} else if (error.message === 'Request closed' && completedRun) {
return;
} else if (error.message === 'Request closed') {
logger.debug(`[${originPath}] Request aborted on close`);
} else if (/Files.*are invalid/.test(error.message)) {
const errorMessage = `Files are invalid, or may not have uploaded yet.${
endpoint === 'azureAssistants'
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
: ''
}`;
return sendResponse(req, res, messageData, errorMessage);
} else if (error?.message?.includes('string too long')) {
return sendResponse(
req,
res,
messageData,
'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
);
} else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
return sendResponse(req, res, messageData, error.message);
} else {
logger.error(`[${originPath}]`, error);
}
if (!openai || !run_id) {
return sendResponse(req, res, messageData, defaultErrorMessage);
}
await new Promise((resolve) => setTimeout(resolve, 2000));
try {
const status = await cache.get(cacheKey);
if (status === 'cancelled') {
logger.debug(`[${originPath}] Run already cancelled`);
return res.end();
}
await cache.delete(cacheKey);
// const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
// logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
} catch (error) {
logger.error(`[${originPath}] Error cancelling run`, error);
}
await new Promise((resolve) => setTimeout(resolve, 2000));
let run;
try {
// run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
// `run` stays undefined while the retrieval above is commented out,
// so only record usage once it is actually populated.
if (run) {
await recordUsage({
...run.usage,
model: run.model,
user: req.user.id,
conversationId,
});
}
} catch (error) {
logger.error(`[${originPath}] Error fetching or processing run`, error);
}
let finalEvent;
try {
// const errorContentPart = {
// text: {
// value:
// error?.message ?? 'There was an error processing your request. Please try again later.',
// },
// type: ContentTypes.ERROR,
// };
finalEvent = {
final: true,
conversation: await getConvo(req.user.id, conversationId),
// runMessages,
};
} catch (error) {
logger.error(`[${originPath}] Error finalizing error process`, error);
return sendResponse(req, res, messageData, 'The Assistant run failed');
}
return sendResponse(req, res, finalEvent);
};
};
module.exports = { createErrorHandler };
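
A sketch of how this factory might be consumed (field names mirror the ErrorHandlerContext typedef; the require path and the surrounding variables are assumptions):

const { createErrorHandler } = require('./errorHandler'); // path is an assumption

// Inside a chat request flow, state captured during the run is exposed
// through getContext so the handler sees the latest values on failure:
// const handleError = createErrorHandler({
//   req,
//   res,
//   originPath: '/agents/chat/',
//   getContext: () => ({
//     openai,            // may still be undefined before client init
//     run_id,
//     endpoint,
//     cacheKey,
//     completedRun,
//     assistant_id,
//     conversationId,
//     parentMessageId,
//     responseMessageId,
//   }),
// });
// try { /* execute the run */ } catch (error) { await handleError(error); }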

View file

@@ -0,0 +1,106 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { resolveHeaders } = require('librechat-data-provider');
const { createLLM } = require('~/app/clients/llm');
/**
* Initializes and returns a Large Language Model (LLM) instance.
*
* @param {Object} options - Configuration options for the LLM.
* @param {string} options.model - The model identifier.
* @param {string} options.modelName - The specific name of the model.
* @param {number} options.temperature - The temperature setting for the model.
* @param {number} options.presence_penalty - The presence penalty for the model.
* @param {number} options.frequency_penalty - The frequency penalty for the model.
* @param {number} options.max_tokens - The maximum number of tokens for the model output.
* @param {boolean} options.streaming - Whether to use streaming for the model output.
* @param {Object} options.context - The context for the conversation.
* @param {number} options.tokenBuffer - The token buffer size.
* @param {number} options.initialMessageCount - The initial message count.
* @param {string} options.conversationId - The ID of the conversation.
* @param {string} options.user - The user identifier.
* @param {string} options.langchainProxy - The langchain proxy URL.
* @param {boolean} options.useOpenRouter - Whether to use OpenRouter.
* @param {Object} options.options - Additional options.
* @param {Object} options.options.headers - Custom headers for the request.
* @param {string} options.options.proxy - Proxy URL.
* @param {Object} options.options.req - The request object.
* @param {Object} options.options.res - The response object.
* @param {boolean} options.options.debug - Whether to enable debug mode.
* @param {string} options.apiKey - The API key for authentication.
* @param {Object} options.azure - Azure-specific configuration.
* @param {Object} options.abortController - The AbortController instance.
* @returns {Object} The initialized LLM instance.
*/
function initializeLLM(options) {
const {
model,
modelName,
temperature,
presence_penalty,
frequency_penalty,
max_tokens,
streaming,
user,
langchainProxy,
useOpenRouter,
options: { headers, proxy },
apiKey,
azure,
} = options;
const modelOptions = {
modelName: modelName || model,
temperature,
presence_penalty,
frequency_penalty,
user,
};
if (max_tokens) {
modelOptions.max_tokens = max_tokens;
}
const configOptions = {};
if (langchainProxy) {
configOptions.basePath = langchainProxy;
}
if (useOpenRouter) {
configOptions.basePath = 'https://openrouter.ai/api/v1';
configOptions.baseOptions = {
headers: {
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
},
};
}
if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
configOptions.baseOptions = {
headers: resolveHeaders({
...headers,
...configOptions?.baseOptions?.headers,
}),
};
}
if (proxy) {
configOptions.httpAgent = new HttpsProxyAgent(proxy);
configOptions.httpsAgent = new HttpsProxyAgent(proxy);
}
const llm = createLLM({
modelOptions,
configOptions,
openAIApiKey: apiKey,
azure,
streaming,
});
return llm;
}
module.exports = {
initializeLLM,
};
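
A hypothetical call for illustration; parameter names follow the JSDoc above, and all values are placeholders:

const llm = initializeLLM({
  model: 'gpt-4o',
  modelName: 'gpt-4o',
  temperature: 0.7,
  presence_penalty: 0,
  frequency_penalty: 0,
  max_tokens: 1024,
  streaming: true,
  user: 'user123',
  apiKey: process.env.OPENAI_API_KEY, // placeholder credential
  options: { headers: {}, proxy: null },
});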

View file

@@ -0,0 +1,150 @@
const { Constants, getResponseSender } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage } = require('~/server/utils');
const { saveMessage } = require('~/models');
const { logger } = require('~/config');
const AgentController = async (req, res, next, initializeClient, addTitle) => {
let {
text,
endpointOption,
conversationId,
modelDisplayLabel,
parentMessageId = null,
overrideParentMessageId = null,
} = req.body;
let userMessage;
let userMessagePromise;
let promptTokens;
let userMessageId;
let responseMessageId;
const sender = getResponseSender({
...endpointOption,
model: endpointOption.modelOptions.model,
modelDisplayLabel,
});
const newConvo = !conversationId;
const user = req.user.id;
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
userMessage = data[key];
userMessageId = data[key].messageId;
} else if (key === 'userMessagePromise') {
userMessagePromise = data[key];
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
promptTokens = data[key];
} else if (!conversationId && key === 'conversationId') {
conversationId = data[key];
}
}
};
try {
const { client } = await initializeClient({ req, res, endpointOption });
const getAbortData = () => ({
sender,
userMessage,
promptTokens,
conversationId,
userMessagePromise,
// text: getPartialText(),
messageId: responseMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
res.on('close', () => {
logger.debug('[AgentController] Request closed');
if (!abortController) {
return;
} else if (abortController.signal.aborted) {
return;
} else if (abortController.requestCompleted) {
return;
}
abortController.abort();
logger.debug('[AgentController] Request aborted on close');
});
const messageOptions = {
user,
onStart,
getReqData,
conversationId,
parentMessageId,
abortController,
overrideParentMessageId,
progressOptions: {
res,
// parentMessageId: overrideParentMessageId || userMessageId,
},
};
let response = await client.sendMessage(text, messageOptions);
if (overrideParentMessageId) {
response.parentMessageId = overrideParentMessageId;
}
response.endpoint = endpointOption.endpoint;
const { conversation = {} } = await client.responsePromise;
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
if (client.options.attachments) {
userMessage.files = client.options.attachments;
conversation.model = endpointOption.modelOptions.model;
delete userMessage.image_urls;
}
if (!abortController.signal.aborted) {
sendMessage(res, {
final: true,
conversation,
title: conversation.title,
requestMessage: userMessage,
responseMessage: response,
});
res.end();
await saveMessage(
req,
{ ...response, user },
{ context: 'api/server/controllers/agents/request.js - response end' },
);
}
if (!client.skipSaveUserMessage) {
await saveMessage(req, userMessage, {
context: 'api/server/controllers/agents/request.js - don\'t skip saving user message',
});
}
if (addTitle && parentMessageId === Constants.NO_PARENT && newConvo) {
addTitle(req, {
text,
response,
client,
});
}
} catch (error) {
handleAbortError(res, req, error, {
conversationId,
sender,
messageId: responseMessageId,
parentMessageId: userMessageId ?? parentMessageId,
});
}
};
module.exports = AgentController;

View file

@@ -0,0 +1,59 @@
const { Run } = require('@librechat/agents');
const { providerEndpointMap } = require('librechat-data-provider');
/**
* @typedef {import('@librechat/agents').t} t
* @typedef {import('@librechat/agents').StreamEventData} StreamEventData
* @typedef {import('@librechat/agents').ClientOptions} ClientOptions
* @typedef {import('@librechat/agents').EventHandler} EventHandler
* @typedef {import('@librechat/agents').GraphEvents} GraphEvents
* @typedef {import('@librechat/agents').IState} IState
*/
/**
* Creates a new Run instance with custom handlers and configuration.
*
* @param {Object} options - The options for creating the Run instance.
* @param {Agent} options.agent - The agent for this run.
* @param {StructuredTool[] | undefined} [options.tools] - The tools to use in the run.
* @param {Record<string, StructuredTool[]> | undefined} [options.toolMap] - The tool map for the run.
* @param {Record<GraphEvents, EventHandler> | undefined} [options.customHandlers] - Custom event handlers.
* @param {string | undefined} [options.runId] - Optional run ID; otherwise, a new run ID will be generated.
* @param {ClientOptions} [options.modelOptions] - Optional model to use; if not provided, it will use the default from modelMap.
* @param {boolean} [options.streaming=true] - Whether to use streaming.
* @param {boolean} [options.streamUsage=true] - Whether to stream usage information.
* @returns {Promise<Run<IState>>} A promise that resolves to a new Run instance.
*/
async function createRun({
runId,
tools,
agent,
toolMap,
modelOptions,
customHandlers,
streaming = true,
streamUsage = true,
}) {
const llmConfig = Object.assign(
{
provider: providerEndpointMap[agent.provider],
streaming,
streamUsage,
},
modelOptions,
);
return Run.create({
graphConfig: {
runId,
llmConfig,
tools,
toolMap,
instructions: agent.instructions,
additional_instructions: agent.additional_instructions,
},
customHandlers,
});
}
module.exports = { createRun };
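
A sketch of a createRun call, mirroring the invocation in the AgentClient's chatCompletion above (the handlers path and all values are placeholders):

const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks'); // path is an assumption

async function demoCreateRun({ agent, tools, toolMap, res, responseMessageId }) {
  return createRun({
    agent,                 // includes provider and instructions
    tools,                 // StructuredTool[] loaded for the agent
    toolMap,
    runId: responseMessageId,
    modelOptions: { model: 'claude-3-5-sonnet-20240620', temperature: 0.7 },
    customHandlers: getDefaultHandlers({ res }),
  });
}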

View file

@@ -0,0 +1,208 @@
const { nanoid } = require('nanoid');
const { FileContext } = require('librechat-data-provider');
const {
getAgent,
createAgent,
updateAgent,
deleteAgent,
getListAgents,
} = require('~/models/Agent');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { uploadImageBuffer } = require('~/server/services/Files/process');
const { deleteFileByFilter } = require('~/models/File');
const { logger } = require('~/config');
/**
* Creates an Agent.
* @route POST /Agents
* @param {ServerRequest} req - The request object.
* @param {AgentCreateParams} req.body - The request body.
* @param {ServerResponse} res - The response object.
* @returns {Agent} 201 - success response - application/json
*/
const createAgentHandler = async (req, res) => {
try {
const { tools = [], provider, name, description, instructions, model, ...agentData } = req.body;
const { id: userId } = req.user;
agentData.tools = tools
.map((tool) => (typeof tool === 'string' ? req.app.locals.availableTools[tool] : tool))
.filter(Boolean);
Object.assign(agentData, {
author: userId,
name,
description,
instructions,
provider,
model,
});
agentData.id = `agent_${nanoid()}`;
const agent = await createAgent(agentData);
res.status(201).json(agent);
} catch (error) {
logger.error('[/Agents] Error creating agent', error);
res.status(500).json({ error: error.message });
}
};
/**
* Retrieves an Agent by ID.
* @route GET /Agents/:id
* @param {object} req - Express Request
* @param {object} req.params - Request params
* @param {string} req.params.id - Agent identifier.
* @returns {Agent} 200 - success response - application/json
* @returns {Error} 404 - Agent not found
*/
const getAgentHandler = async (req, res) => {
try {
const id = req.params.id;
const agent = await getAgent({ id });
if (!agent) {
return res.status(404).json({ error: 'Agent not found' });
}
return res.status(200).json(agent);
} catch (error) {
logger.error('[/Agents/:id] Error retrieving agent', error);
res.status(500).json({ error: error.message });
}
};
/**
* Updates an Agent.
* @route PATCH /Agents/:id
* @param {object} req - Express Request
* @param {object} req.params - Request params
* @param {string} req.params.id - Agent identifier.
* @param {AgentUpdateParams} req.body - The Agent update parameters.
* @returns {Agent} 200 - success response - application/json
*/
const updateAgentHandler = async (req, res) => {
try {
const id = req.params.id;
const updatedAgent = await updateAgent({ id, author: req.user.id }, req.body);
return res.json(updatedAgent);
} catch (error) {
logger.error('[/Agents/:id] Error updating Agent', error);
res.status(500).json({ error: error.message });
}
};
/**
* Deletes an Agent based on the provided ID.
* @route DELETE /Agents/:id
* @param {object} req - Express Request
* @param {object} req.params - Request params
* @param {string} req.params.id - Agent identifier.
* @returns {Agent} 200 - success response - application/json
*/
const deleteAgentHandler = async (req, res) => {
try {
const id = req.params.id;
const agent = await getAgent({ id });
if (!agent) {
return res.status(404).json({ error: 'Agent not found' });
}
await deleteAgent({ id, author: req.user.id });
return res.json({ message: 'Agent deleted' });
} catch (error) {
logger.error('[/Agents/:id] Error deleting Agent', error);
res.status(500).json({ error: error.message });
}
};
/**
* Lists Agents, optionally filtered by the author's user ID.
* @route GET /Agents
* @param {object} req - Express Request
* @param {object} req.query - Request query
* @param {string} [req.query.user] - The user ID of the agent's author.
* @returns {AgentListResponse} 200 - success response - application/json
*/
const getListAgentsHandler = async (req, res) => {
try {
const { user } = req.query;
const filter = user ? { author: user } : {};
const data = await getListAgents(filter);
return res.json(data);
} catch (error) {
logger.error('[/Agents] Error listing Agents', error);
res.status(500).json({ error: error.message });
}
};
/**
* Uploads and updates an avatar for a specific agent.
* @route POST /avatar/:agent_id
* @param {object} req - Express Request
* @param {object} req.params - Request params
* @param {string} req.params.agent_id - The ID of the agent.
* @param {Express.Multer.File} req.file - The avatar image file.
* @param {object} req.body - Request body
* @param {string} [req.body.avatar] - Optional JSON string of the agent's existing avatar metadata, used to delete the previous file.
* @returns {Object} 200 - success response - application/json
*/
const uploadAgentAvatarHandler = async (req, res) => {
try {
const { agent_id } = req.params;
if (!agent_id) {
return res.status(400).json({ message: 'Agent ID is required' });
}
let { avatar: _avatar = '{}' } = req.body;
const image = await uploadImageBuffer({
req,
context: FileContext.avatar,
metadata: {
buffer: req.file.buffer,
},
});
try {
_avatar = JSON.parse(_avatar);
} catch (error) {
logger.error('[/avatar/:agent_id] Error parsing avatar', error);
_avatar = {};
}
if (_avatar && _avatar.source) {
const { deleteFile } = getStrategyFunctions(_avatar.source);
try {
await deleteFile(req, { filepath: _avatar.filepath });
await deleteFileByFilter({ filepath: _avatar.filepath });
} catch (error) {
logger.error('[/avatar/:agent_id] Error deleting old avatar', error);
}
}
const promises = [];
const data = {
avatar: {
filepath: image.filepath,
source: req.app.locals.fileStrategy,
},
};
promises.push(updateAgent({ id: agent_id, author: req.user.id }, data));
const resolved = await Promise.all(promises);
res.status(201).json(resolved[0]);
} catch (error) {
const message = 'An error occurred while updating the Agent Avatar';
logger.error(message, error);
res.status(500).json({ message });
}
};
module.exports = {
createAgent: createAgentHandler,
getAgent: getAgentHandler,
updateAgent: updateAgentHandler,
deleteAgent: deleteAgentHandler,
getListAgents: getListAgentsHandler,
uploadAgentAvatar: uploadAgentAvatarHandler,
};

View file

@@ -105,6 +105,7 @@ const startServer = async () => {
app.use('/images/', validateImageRequest, routes.staticRoute);
app.use('/api/share', routes.share);
app.use('/api/roles', routes.roles);
app.use('/api/agents', routes.agents);
app.use('/api/tags', routes.tags);

View file

@@ -1,4 +1,4 @@
const { parseCompactConvo, EModelEndpoint } = require('librechat-data-provider');
const { parseCompactConvo, EModelEndpoint, isAgentsEndpoint } = require('librechat-data-provider');
const { getModelsConfig } = require('~/server/controllers/ModelController');
const azureAssistants = require('~/server/services/Endpoints/azureAssistants');
const assistants = require('~/server/services/Endpoints/assistants');
@@ -6,6 +6,7 @@ const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
const { processFiles } = require('~/server/services/Files/process');
const anthropic = require('~/server/services/Endpoints/anthropic');
const openAI = require('~/server/services/Endpoints/openAI');
const agents = require('~/server/services/Endpoints/agents');
const custom = require('~/server/services/Endpoints/custom');
const google = require('~/server/services/Endpoints/google');
const enforceModelSpec = require('./enforceModelSpec');
@@ -15,6 +16,7 @@ const buildFunction = {
[EModelEndpoint.openAI]: openAI.buildOptions,
[EModelEndpoint.google]: google.buildOptions,
[EModelEndpoint.custom]: custom.buildOptions,
[EModelEndpoint.agents]: agents.buildOptions,
[EModelEndpoint.azureOpenAI]: openAI.buildOptions,
[EModelEndpoint.anthropic]: anthropic.buildOptions,
[EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
@@ -59,12 +61,13 @@ async function buildEndpointOption(req, res, next) {
}
}
req.body.endpointOption = buildFunction[endpointType ?? endpoint](
endpoint,
parsedBody,
endpointType,
);
const endpointFn = buildFunction[endpointType ?? endpoint];
const builder = isAgentsEndpoint(endpoint) ? (...args) => endpointFn(req, ...args) : endpointFn;
// TODO: use object params
req.body.endpointOption = builder(endpoint, parsedBody, endpointType);
// TODO: use `getModelsConfig` only when necessary
const modelsConfig = await getModelsConfig(req);
req.body.endpointOption.modelsConfig = modelsConfig;
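
Illustrative only: agents.buildOptions is assumed to take the request first, so the middleware partially applies `req` to preserve the shared (endpoint, parsedBody, endpointType) call shape. A toy version of the pattern:

// Toy functions demonstrating the partial application above.
const buildOptions = (req, endpoint, parsedBody) => ({ user: req.user.id, endpoint, ...parsedBody });
const builder = (...args) => buildOptions(req, ...args); // `req` captured from middleware scope
// builder('agents', { agent_id: 'agent_abc123' }) now matches the shared call shape.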

View file

@@ -0,0 +1,166 @@
const express = require('express');
const { nanoid } = require('nanoid');
const { actionDelimiter } = require('librechat-data-provider');
const { encryptMetadata, domainParser } = require('~/server/services/ActionService');
const { updateAction, getActions, deleteAction } = require('~/models/Action');
const { getAgent, updateAgent } = require('~/models/Agent');
const { logger } = require('~/config');
const router = express.Router();
/**
* Retrieves all of the user's actions
* @route GET /actions/
* @returns {Action[]} 200 - success response - application/json
*/
router.get('/', async (req, res) => {
try {
res.json(await getActions({ user: req.user.id }));
} catch (error) {
res.status(500).json({ error: error.message });
}
});
/**
* Adds or updates actions for a specific agent.
* @route POST /actions/:agent_id
* @param {string} req.params.agent_id - The ID of the agent.
* @param {FunctionTool[]} req.body.functions - The functions to be added or updated.
* @param {string} [req.body.action_id] - Optional ID for the action.
* @param {ActionMetadata} req.body.metadata - Metadata for the action.
* @returns {Object} 200 - success response - application/json
*/
router.post('/:agent_id', async (req, res) => {
try {
const { agent_id } = req.params;
/** @type {{ functions: FunctionTool[], action_id: string, metadata: ActionMetadata }} */
const { functions, action_id: _action_id, metadata: _metadata } = req.body;
if (!functions.length) {
return res.status(400).json({ message: 'No functions provided' });
}
let metadata = encryptMetadata(_metadata);
let { domain } = metadata;
domain = await domainParser(req, domain, true);
if (!domain) {
return res.status(400).json({ message: 'No domain provided' });
}
const action_id = _action_id ?? nanoid();
const initialPromises = [];
// TODO: share agents
initialPromises.push(getAgent({ id: agent_id, author: req.user.id }));
if (_action_id) {
initialPromises.push(getActions({ action_id }, true));
}
/** @type {[Agent, [Action|undefined]]} */
const [agent, actions_result] = await Promise.all(initialPromises);
if (!agent) {
return res.status(404).json({ message: 'Agent not found for adding action' });
}
if (actions_result && actions_result.length) {
const action = actions_result[0];
metadata = { ...action.metadata, ...metadata };
}
const { actions: _actions = [] } = agent ?? {};
const actions = [];
for (const action of _actions) {
const [_action_domain, current_action_id] = action.split(actionDelimiter);
if (current_action_id === action_id) {
continue;
}
actions.push(action);
}
actions.push(`${domain}${actionDelimiter}${action_id}`);
/** @type {string[]} */
const { tools: _tools = [] } = agent;
const tools = _tools
.filter((tool) => !(tool && (tool.includes(domain) || tool.includes(action_id))))
.concat(functions.map((tool) => `${tool.function.name}${actionDelimiter}${domain}`));
const updatedAgent = await updateAgent(
{ id: agent_id, author: req.user.id },
{ tools, actions },
);
/** @type {[Action]} */
const updatedAction = await updateAction(
{ action_id },
{ metadata, agent_id, user: req.user.id },
);
const sensitiveFields = ['api_key', 'oauth_client_id', 'oauth_client_secret'];
for (let field of sensitiveFields) {
if (updatedAction.metadata[field]) {
delete updatedAction.metadata[field];
}
}
res.json([updatedAgent, updatedAction]);
} catch (error) {
const message = 'Trouble updating the Agent Action';
logger.error(message, error);
res.status(500).json({ message });
}
});
/**
* Deletes an action for a specific agent.
* @route DELETE /actions/:agent_id/:action_id
* @param {string} req.params.agent_id - The ID of the agent.
* @param {string} req.params.action_id - The ID of the action to delete.
* @returns {Object} 200 - success response - application/json
*/
router.delete('/:agent_id/:action_id', async (req, res) => {
try {
const { agent_id, action_id } = req.params;
const agent = await getAgent({ id: agent_id, author: req.user.id });
if (!agent) {
return res.status(404).json({ message: 'Agent not found for deleting action' });
}
const { tools = [], actions = [] } = agent;
let domain = '';
const updatedActions = actions.filter((action) => {
if (action.includes(action_id)) {
[domain] = action.split(actionDelimiter);
return false;
}
return true;
});
domain = await domainParser(req, domain, true);
if (!domain) {
return res.status(400).json({ message: 'No domain provided' });
}
const updatedTools = tools.filter((tool) => !(tool && tool.includes(domain)));
await updateAgent(
{ id: agent_id, author: req.user.id },
{ tools: updatedTools, actions: updatedActions },
);
await deleteAction({ action_id });
res.status(200).json({ message: 'Action deleted successfully' });
} catch (error) {
const message = 'Trouble deleting the Agent Action';
logger.error(message, error);
res.status(500).json({ message });
}
});
module.exports = router;
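
For reference, the string shapes these handlers persist on the agent document, as used in the filters above (values are placeholders; actionDelimiter's literal value comes from librechat-data-provider):

const { actionDelimiter } = require('librechat-data-provider');

const domain = 'api.example.com';
const action_id = 'V1StGXR8_Z5jdHi6B-myT';
const actionEntry = `${domain}${actionDelimiter}${action_id}`; // stored in agent.actions
const toolEntry = `getWeather${actionDelimiter}${domain}`;     // stored in agent.tools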

View file

@@ -0,0 +1,35 @@
const express = require('express');
const router = express.Router();
const {
setHeaders,
handleAbort,
// validateModel,
// validateEndpoint,
buildEndpointOption,
} = require('~/server/middleware');
const { initializeClient } = require('~/server/services/Endpoints/agents');
const AgentController = require('~/server/controllers/agents/request');
router.post('/abort', handleAbort());
/**
* @route POST /
* @desc Chat with an agent
* @access Private
* @param {express.Request} req - The request object, containing the request data.
* @param {express.Response} res - The response object, used to send back a response.
* @returns {void}
*/
router.post(
'/',
// validateModel,
// validateEndpoint,
buildEndpointOption,
setHeaders,
async (req, res, next) => {
await AgentController(req, res, next, initializeClient);
},
);
module.exports = router;
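
A hypothetical client-side request shape (fields inferred from the controller and middleware above; the endpoint field and the NO_PARENT value are assumptions):

async function demoAgentChat() {
  const res = await fetch('/api/agents/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      text: 'Hello',
      endpoint: 'agents',
      agent_id: 'agent_abc123',
      conversationId: null,
      parentMessageId: '00000000-0000-0000-0000-000000000000', // assumed Constants.NO_PARENT value
    }),
  });
  // The response streams "event: message" SSE frames until a final event
  // carrying the saved conversation and response message.
  return res.body;
}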

View file

@@ -0,0 +1,21 @@
const express = require('express');
const router = express.Router();
const {
uaParser,
checkBan,
requireJwtAuth,
// concurrentLimiter,
// messageIpLimiter,
// messageUserLimiter,
} = require('~/server/middleware');
const v1 = require('./v1');
const chat = require('./chat');
router.use(requireJwtAuth);
router.use(checkBan);
router.use(uaParser);
router.use('/', v1);
router.use('/chat', chat);
module.exports = router;

View file

@@ -0,0 +1,77 @@
const multer = require('multer');
const express = require('express');
const v1 = require('~/server/controllers/agents/v1');
const actions = require('./actions');
const upload = multer();
const router = express.Router();
/**
* Agent actions route.
* @route GET|POST /agents/actions
*/
router.use('/actions', actions);
/**
* Get a list of available tools for agents.
* @route GET /agents/tools
* @returns {TPlugin[]} 200 - application/json
*/
router.use('/tools', (req, res) => {
res.json([]);
});
/**
* Creates an agent.
* @route POST /agents
* @param {AgentCreateParams} req.body - The agent creation parameters.
* @returns {Agent} 201 - Success response - application/json
*/
router.post('/', v1.createAgent);
/**
* Retrieves an agent.
* @route GET /agents/:id
* @param {string} req.params.id - Agent identifier.
* @returns {Agent} 200 - Success response - application/json
*/
router.get('/:id', v1.getAgent);
/**
* Updates an agent.
* @route PATCH /agents/:id
* @param {string} req.params.id - Agent identifier.
* @param {AgentUpdateParams} req.body - The agent update parameters.
* @returns {Agent} 200 - Success response - application/json
*/
router.patch('/:id', v1.updateAgent);
/**
* Deletes an agent.
* @route DELETE /agents/:id
* @param {string} req.params.id - Agent identifier.
* @returns {Agent} 200 - success response - application/json
*/
router.delete('/:id', v1.deleteAgent);
/**
* Returns a list of agents.
* @route GET /agents
* @param {AgentListParams} req.query - The agent list parameters for pagination and sorting.
* @returns {AgentListResponse} 200 - success response - application/json
*/
router.get('/', v1.getListAgents);
// TODO: handle private agents
/**
* Uploads and updates an avatar for a specific agent.
* @route POST /avatar/:agent_id
* @param {string} req.params.agent_id - The ID of the agent.
* @param {Express.Multer.File} req.file - The avatar image file.
* @param {string} [req.body.metadata] - Optional metadata for the agent's avatar.
* @returns {Object} 200 - success response - application/json
*/
router.post('/avatar/:agent_id', upload.single('file'), v1.uploadAgentAvatar);
module.exports = router;
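
Hypothetical client-side usage of the CRUD routes above (mounted at /api/agents per server/index.js; auth omitted and all values are placeholders):

async function demoAgentCrud() {
  const created = await fetch('/api/agents', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      name: 'Docs Helper',
      provider: 'anthropic',
      model: 'claude-3-5-sonnet-20240620',
      instructions: 'You answer questions about the docs.',
      tools: [],
    }),
  }).then((r) => r.json()); // 201 -> { id: 'agent_...', ... }

  await fetch(`/api/agents/${created.id}`); // GET one
  await fetch(`/api/agents/${created.id}`, {
    method: 'PATCH',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ description: 'Updated description' }),
  });
  await fetch(`/api/agents/${created.id}`, { method: 'DELETE' });
}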

View file

@@ -1,5 +1,5 @@
const { v4 } = require('uuid');
const express = require('express');
const { nanoid } = require('nanoid');
const { encryptMetadata, domainParser } = require('~/server/services/ActionService');
const { actionDelimiter, EModelEndpoint } = require('librechat-data-provider');
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
@@ -9,20 +9,6 @@ const { logger } = require('~/config');
const router = express.Router();
/**
* Retrieves all user's actions
* @route GET /actions/
* @param {string} req.params.id - Assistant identifier.
* @returns {Action[]} 200 - success response - application/json
*/
router.get('/', async (req, res) => {
try {
res.json(await getActions());
} catch (error) {
res.status(500).json({ error: error.message });
}
});
/**
* Adds or updates actions for a specific assistant.
* @route POST /actions/:assistant_id
@@ -51,7 +37,7 @@ router.post('/:assistant_id', async (req, res) => {
return res.status(400).json({ message: 'No domain provided' });
}
const action_id = _action_id ?? v4();
const action_id = _action_id ?? nanoid();
const initialPromises = [];
const { openai } = await getOpenAIClient({ req, res });
@@ -178,6 +164,10 @@ router.delete('/:assistant_id/:action_id/:model', async (req, res) => {
domain = await domainParser(req, domain, true);
if (!domain) {
return res.status(400).json({ message: 'No domain provided' });
}
const updatedTools = tools.filter(
(tool) => !(tool.function && tool.function.name.includes(domain)),
);

View file

@@ -1,51 +1,53 @@
const ask = require('./ask');
const edit = require('./edit');
const assistants = require('./assistants');
const categories = require('./categories');
const tokenizer = require('./tokenizer');
const endpoints = require('./endpoints');
const staticRoute = require('./static');
const messages = require('./messages');
const convos = require('./convos');
const presets = require('./presets');
const prompts = require('./prompts');
const search = require('./search');
const tokenizer = require('./tokenizer');
const auth = require('./auth');
const keys = require('./keys');
const oauth = require('./oauth');
const endpoints = require('./endpoints');
const balance = require('./balance');
const models = require('./models');
const plugins = require('./plugins');
const user = require('./user');
const search = require('./search');
const models = require('./models');
const convos = require('./convos');
const config = require('./config');
const assistants = require('./assistants');
const files = require('./files');
const staticRoute = require('./static');
const share = require('./share');
const categories = require('./categories');
const agents = require('./agents');
const roles = require('./roles');
const oauth = require('./oauth');
const files = require('./files');
const share = require('./share');
const tags = require('./tags');
const auth = require('./auth');
const edit = require('./edit');
const keys = require('./keys');
const user = require('./user');
const ask = require('./ask');
module.exports = {
search,
ask,
edit,
messages,
convos,
presets,
prompts,
auth,
keys,
oauth,
user,
tokenizer,
endpoints,
balance,
tags,
roles,
oauth,
files,
share,
agents,
convos,
search,
prompts,
config,
models,
plugins,
config,
presets,
balance,
messages,
endpoints,
tokenizer,
assistants,
files,
staticRoute,
share,
categories,
roles,
tags,
staticRoute,
};

@@ -6,6 +6,7 @@ const {
isImageVisionTool,
actionDomainSeparator,
} = require('librechat-data-provider');
const { tool } = require('@langchain/core/tools');
const { encryptV2, decryptV2 } = require('~/server/utils/crypto');
const { getActions, deleteActions } = require('~/models/Action');
const { deleteAssistant } = require('~/models/Assistant');
@@ -101,7 +102,8 @@ async function domainParser(req, domain, inverse = false) {
*
* @param {Object} searchParams - The parameters for loading action sets.
* @param {string} searchParams.user - The user identifier.
* @param {string} searchParams.assistant_id - The assistant identifier.
* @param {string} [searchParams.agent_id] - The agent identifier.
* @param {string} [searchParams.assistant_id] - The assistant identifier.
* @returns {Promise<Action[] | null>} A promise that resolves to an array of actions or `null` if no match.
*/
async function loadActionSets(searchParams) {
@@ -114,10 +116,14 @@ async function loadActionSets(searchParams) {
* @param {Object} params - The parameters for loading action sets.
* @param {Action} params.action - The action set. Necessary for decrypting authentication values.
* @param {ActionRequest} params.requestBuilder - The ActionRequest builder class to execute the API call.
* @returns { { _call: (toolInput: Object) => unknown} } An object with `_call` method to execute the tool input.
* @param {string | undefined} [params.name] - The name of the tool.
* @param {string | undefined} [params.description] - The description for the tool.
* @param {import('zod').ZodTypeAny | undefined} [params.zodSchema] - The Zod schema for tool input validation/definition.
* @returns { Promise<typeof tool | { _call: (toolInput: Object | string) => unknown}> } An object with a `_call` method to execute the tool input.
*/
async function createActionTool({ action, requestBuilder }) {
async function createActionTool({ action, requestBuilder, zodSchema, name, description }) {
action.metadata = await decryptMetadata(action.metadata);
/** @type {(toolInput: Object | string) => Promise<unknown>} */
const _call = async (toolInput) => {
try {
requestBuilder.setParams(toolInput);
@@ -142,6 +148,14 @@ async function createActionTool({ action, requestBuilder }) {
}
};
if (name) {
return tool(_call, {
name,
description: description || '',
schema: zodSchema,
});
}
return {
_call,
};
@@ -180,7 +194,7 @@ async function encryptMetadata(metadata) {
* Decrypts sensitive metadata values for an action.
*
* @param {ActionMetadata} metadata - The action metadata to decrypt.
* @returns {ActionMetadata} The updated action metadata with decrypted values.
* @returns {Promise<ActionMetadata>} The updated action metadata with decrypted values.
*/
async function decryptMetadata(metadata) {
const decryptedMetadata = { ...metadata };
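The `if (name)` branch added above returns a real LangChain tool rather than the bare `{ _call }` object. A self-contained sketch of that pattern, using only the public `tool()` helper from @langchain/core/tools (tool name, description, and schema invented):

const { z } = require('zod');
const { tool } = require('@langchain/core/tools');

// Hypothetical action tool, mirroring the shape createActionTool returns
// when a `name` is provided.
const echoAction = tool(async (toolInput) => JSON.stringify(toolInput), {
  name: 'echo_action', // invented
  description: 'Echoes validated input back to the model',
  schema: z.object({ message: z.string() }),
});

// await echoAction.invoke({ message: 'hi' }); // -> '{"message":"hi"}'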

@@ -45,5 +45,7 @@ module.exports = {
AZURE_ASSISTANTS_BASE_URL,
EModelEndpoint.azureAssistants,
),
/* key will be part of separate config */
[EModelEndpoint.agents]: generateConfig(process.env.I_AM_A_TEAPOT),
},
};

@@ -9,13 +9,22 @@ const { config } = require('./EndpointService');
*/
async function loadDefaultEndpointsConfig(req) {
const { google, gptPlugins } = await loadAsyncEndpoints(req);
const { openAI, assistants, azureAssistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } =
config;
const {
openAI,
agents,
assistants,
azureAssistants,
bingAI,
anthropic,
azureOpenAI,
chatGPTBrowser,
} = config;
const enabledEndpoints = getEnabledEndpoints();
const endpointConfig = {
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.agents]: agents,
[EModelEndpoint.assistants]: assistants,
[EModelEndpoint.azureAssistants]: azureAssistants,
[EModelEndpoint.azureOpenAI]: azureOpenAI,

@@ -29,6 +29,7 @@ async function loadDefaultModels(req) {
return {
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.agents]: openAI,
[EModelEndpoint.google]: google,
[EModelEndpoint.anthropic]: anthropic,
[EModelEndpoint.gptPlugins]: gptPlugins,

@@ -0,0 +1,30 @@
const { getAgent } = require('~/models/Agent');
const { logger } = require('~/config');
const buildOptions = (req, endpoint, parsedBody) => {
const { agent_id, instructions, spec, ...rest } = parsedBody;
const agentPromise = getAgent({
id: agent_id,
// TODO: better author handling
author: req.user.id,
}).catch((error) => {
logger.error(`[/agents/:${agent_id}] Error retrieving agent during build options step`, error);
return undefined;
});
const endpointOption = {
agent: agentPromise,
endpoint,
agent_id,
instructions,
spec,
modelOptions: {
...rest,
},
};
return endpointOption;
};
module.exports = { buildOptions };
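Note that `agent` is intentionally left as a pending promise so the lookup overlaps with the rest of request setup; errors resolve to `undefined` rather than rejecting. A hedged usage sketch (agent id and options invented):

// Inside an Express handler, with `req` in scope:
const endpointOption = buildOptions(req, 'agents', {
  agent_id: 'agent_abc123', // invented id
  instructions: 'Be concise.',
  temperature: 0.5,
});
// endpointOption.agent is a Promise<Agent|undefined>; the agents
// initializeClient below awaits it before building the client.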

@@ -0,0 +1,7 @@
const build = require('./build');
const initialize = require('./initialize');
module.exports = {
...build,
...initialize,
};

@@ -0,0 +1,119 @@
// const {
// ErrorTypes,
// EModelEndpoint,
// resolveHeaders,
// mapModelToAzureConfig,
// } = require('librechat-data-provider');
// const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
// const { isEnabled, isUserProvided } = require('~/server/utils');
// const { getAzureCredentials } = require('~/utils');
// const { OpenAIClient } = require('~/app');
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { EModelEndpoint, providerEndpointMap } = require('librechat-data-provider');
const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks');
// for testing purposes
// const createTavilySearchTool = require('~/app/clients/tools/structured/TavilySearch');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initializeClient');
const initOpenAI = require('~/server/services/Endpoints/openAI/initializeClient');
const { loadAgentTools } = require('~/server/services/ToolService');
const AgentClient = require('~/server/controllers/agents/client');
const { getModelMaxTokens } = require('~/utils');
/* For testing errors */
const _getWeather = tool(
async ({ location }) => {
if (location === 'SAN FRANCISCO') {
return 'It\'s 60 degrees and foggy';
} else if (location.toLowerCase() === 'san francisco') {
throw new Error('Input queries must be all capitals');
} else {
throw new Error('Invalid input.');
}
},
{
name: 'get_weather',
description: 'Call to get the current weather',
schema: z.object({
location: z.string(),
}),
},
);
const providerConfigMap = {
[EModelEndpoint.openAI]: initOpenAI,
[EModelEndpoint.azureOpenAI]: initOpenAI,
[EModelEndpoint.anthropic]: initAnthropic,
};
const initializeClient = async ({ req, res, endpointOption }) => {
if (!endpointOption) {
throw new Error('Endpoint option not provided');
}
// TODO: use endpointOption to determine options/modelOptions
const eventHandlers = getDefaultHandlers({ res });
// const tools = [createTavilySearchTool()];
// const tools = [_getWeather];
// const tool_calls = [{ name: 'getPeople_action_swapi---dev' }];
// const tool_calls = [{ name: 'dalle' }];
// const tool_calls = [{ name: 'getItmOptions_action_YWlhcGkzLn' }];
// const tool_calls = [{ name: 'tavily_search_results_json' }];
// const tool_calls = [
// { name: 'searchListings_action_emlsbG93NT' },
// { name: 'searchAddress_action_emlsbG93NT' },
// { name: 'searchMLS_action_emlsbG93NT' },
// { name: 'searchCoordinates_action_emlsbG93NT' },
// { name: 'searchUrl_action_emlsbG93NT' },
// { name: 'getPropertyDetails_action_emlsbG93NT' },
// ];
if (!endpointOption.agent) {
throw new Error('No agent promise provided');
}
/** @type {Agent} */
const agent = await endpointOption.agent;
const { tools, toolMap } = await loadAgentTools({
req,
tools: agent.tools,
agent_id: agent.id,
// openAIApiKey: process.env.OPENAI_API_KEY,
});
let modelOptions = { model: agent.model };
const getOptions = providerConfigMap[agent.provider];
if (!getOptions) {
throw new Error(`Provider ${agent.provider} not supported`);
}
// TODO: pass-in override settings that are specific to current run
endpointOption.modelOptions.model = agent.model;
const options = await getOptions({
req,
res,
endpointOption,
optionsOnly: true,
overrideEndpoint: agent.provider,
overrideModel: agent.model,
});
modelOptions = Object.assign(modelOptions, options.llmConfig);
const client = new AgentClient({
req,
agent,
tools,
toolMap,
modelOptions,
eventHandlers,
configOptions: options.configOptions,
maxContextTokens:
agent.max_context_tokens ??
getModelMaxTokens(modelOptions.model, providerEndpointMap[agent.provider]),
});
return { client };
};
module.exports = { initializeClient };
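This initializer depends on the `optionsOnly` contract added to the provider initializers in the diffs below: when set, they return `{ llmConfig, configOptions }` instead of constructing a client. A sketch of that call shape (model value invented):

const options = await initOpenAI({
  req,
  res,
  endpointOption,
  optionsOnly: true,
  overrideEndpoint: EModelEndpoint.openAI,
  overrideModel: 'gpt-4o-mini', // invented
});
// options.llmConfig is merged into modelOptions above;
// options.configOptions (proxy/baseURL) is passed to AgentClient as-is.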

@@ -1,8 +1,9 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');
const { AnthropicClient } = require('~/app');
const initializeClient = async ({ req, res, endpointOption }) => {
const initializeClient = async ({ req, res, endpointOption, optionsOnly }) => {
const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env;
const expiresAt = req.body.key;
const isUserProvided = ANTHROPIC_API_KEY === 'user_provided';
@@ -34,6 +35,18 @@ const initializeClient = async ({ req, res, endpointOption }) => {
clientOptions.streamRate = allConfig.streamRate;
}
if (optionsOnly) {
const requestOptions = Object.assign(
{
reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
proxy: PROXY ?? null,
modelOptions: endpointOption.modelOptions,
},
clientOptions,
);
return getLLMConfig(anthropicApiKey, requestOptions);
}
const client = new AnthropicClient(anthropicApiKey, {
req,
res,

@@ -0,0 +1,55 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
/**
* Generates configuration options for creating an Anthropic language model (LLM) instance.
*
* @param {string} apiKey - The API key for authentication with Anthropic.
* @param {Object} [options={}] - Additional options for configuring the LLM.
* @param {Object} [options.modelOptions] - Model-specific options.
* @param {string} [options.modelOptions.model] - The name of the model to use.
* @param {number} [options.modelOptions.maxOutputTokens] - The maximum number of tokens to generate.
* @param {number} [options.modelOptions.temperature] - Controls randomness in output generation.
* @param {number} [options.modelOptions.topP] - Controls diversity of output generation.
* @param {number} [options.modelOptions.topK] - Controls the number of top tokens to consider.
* @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
* @param {boolean} [options.modelOptions.stream] - Whether to stream the response.
* @param {string} [options.proxy] - Proxy server URL.
* @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
*
* @returns {Object} Configuration options for creating an Anthropic LLM instance, with null and undefined values removed.
*/
function getLLMConfig(apiKey, options = {}) {
const defaultOptions = {
model: anthropicSettings.model.default,
maxOutputTokens: anthropicSettings.maxOutputTokens.default,
stream: true,
};
const mergedOptions = Object.assign(defaultOptions, options.modelOptions);
const requestOptions = {
apiKey,
model: mergedOptions.model,
stream: mergedOptions.stream,
temperature: mergedOptions.temperature,
top_p: mergedOptions.topP,
top_k: mergedOptions.topK,
stop_sequences: mergedOptions.stop,
max_tokens:
mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
};
const configOptions = {};
if (options.proxy) {
configOptions.httpAgent = new HttpsProxyAgent(options.proxy);
}
if (options.reverseProxyUrl) {
configOptions.baseURL = options.reverseProxyUrl;
}
return { llmConfig: removeNullishValues(requestOptions), configOptions };
}
module.exports = { getLLMConfig };
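To make the mapping concrete, a hedged example (key and values invented): camelCase model options become the snake_case fields the Anthropic API expects, and nullish fields are stripped by removeNullishValues:

const { llmConfig, configOptions } = getLLMConfig('sk-ant-xxx', {
  modelOptions: { model: 'claude-3-sonnet', temperature: 0.7, topP: 0.9 },
});
// llmConfig -> { apiKey, model, stream: true, temperature: 0.7,
//                top_p: 0.9, max_tokens: <default for the model> }
// configOptions stays {} unless proxy or reverseProxyUrl is set.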

@@ -5,11 +5,19 @@ const {
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
const { isEnabled, isUserProvided } = require('~/server/utils');
const { getAzureCredentials } = require('~/utils');
const { OpenAIClient } = require('~/app');
const initializeClient = async ({ req, res, endpointOption }) => {
const initializeClient = async ({
req,
res,
endpointOption,
optionsOnly,
overrideEndpoint,
overrideModel,
}) => {
const {
PROXY,
OPENAI_API_KEY,
@@ -19,7 +27,9 @@ const initializeClient = async ({ req, res, endpointOption }) => {
OPENAI_SUMMARIZE,
DEBUG_OPENAI,
} = process.env;
const { key: expiresAt, endpoint, model: modelName } = req.body;
const { key: expiresAt } = req.body;
const modelName = overrideModel ?? req.body.model;
const endpoint = overrideEndpoint ?? req.body.endpoint;
const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
const credentials = {
@@ -45,12 +55,10 @@ const initializeClient = async ({ req, res, endpointOption }) => {
let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint];
const clientOptions = {
debug: isEnabled(DEBUG_OPENAI),
contextStrategy,
reverseProxyUrl: baseURL ? baseURL : null,
proxy: PROXY ?? null,
req,
res,
debug: isEnabled(DEBUG_OPENAI),
reverseProxyUrl: baseURL ? baseURL : null,
...endpointOption,
};
@@ -119,7 +127,17 @@ const initializeClient = async ({ req, res, endpointOption }) => {
throw new Error(`${endpoint} API Key not provided.`);
}
const client = new OpenAIClient(apiKey, clientOptions);
if (optionsOnly) {
const requestOptions = Object.assign(
{
modelOptions: endpointOption.modelOptions,
},
clientOptions,
);
return getLLMConfig(apiKey, requestOptions);
}
const client = new OpenAIClient(apiKey, Object.assign({ req, res }, clientOptions));
return {
client,
openAIApiKey: apiKey,

@@ -0,0 +1,120 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { sanitizeModelName, constructAzureURL } = require('~/utils');
const { isEnabled } = require('~/server/utils');
/**
* Generates configuration options for creating a language model (LLM) instance.
* @param {string} apiKey - The API key for authentication.
* @param {Object} options - Additional options for configuring the LLM.
* @param {Object} [options.modelOptions] - Model-specific options.
* @param {string} [options.modelOptions.model] - The name of the model to use.
* @param {number} [options.modelOptions.temperature] - Controls randomness in output generation (0-2).
* @param {number} [options.modelOptions.top_p] - Controls diversity via nucleus sampling (0-1).
* @param {number} [options.modelOptions.frequency_penalty] - Reduces repetition of token sequences (-2 to 2).
* @param {number} [options.modelOptions.presence_penalty] - Encourages discussing new topics (-2 to 2).
* @param {number} [options.modelOptions.max_tokens] - The maximum number of tokens to generate.
* @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
* @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
* @param {boolean} [options.useOpenRouter] - Flag to use OpenRouter API.
* @param {Object} [options.headers] - Additional headers for API requests.
* @param {string} [options.proxy] - Proxy server URL.
* @param {Object} [options.azure] - Azure-specific configurations.
* @param {boolean} [options.streaming] - Whether to use streaming mode.
* @param {Object} [options.addParams] - Additional parameters to add to the model options.
* @param {string[]} [options.dropParams] - Parameters to remove from the model options.
* @returns {Object} Configuration options for creating an LLM instance.
*/
function getLLMConfig(apiKey, options = {}) {
const {
modelOptions = {},
reverseProxyUrl,
useOpenRouter,
headers,
proxy,
azure,
streaming = true,
addParams,
dropParams,
} = options;
let llmConfig = {
model: 'gpt-4o-mini',
streaming,
};
Object.assign(llmConfig, modelOptions);
if (addParams && typeof addParams === 'object') {
Object.assign(llmConfig, addParams);
}
if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
delete llmConfig[param];
});
}
const configOptions = {};
// Handle OpenRouter or custom reverse proxy
if (useOpenRouter || reverseProxyUrl === 'https://openrouter.ai/api/v1') {
configOptions.basePath = 'https://openrouter.ai/api/v1';
configOptions.baseOptions = {
headers: Object.assign(
{
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
},
headers,
),
};
} else if (reverseProxyUrl) {
configOptions.basePath = reverseProxyUrl;
if (headers) {
configOptions.baseOptions = { headers };
}
}
if (proxy) {
const proxyAgent = new HttpsProxyAgent(proxy);
Object.assign(configOptions, {
httpAgent: proxyAgent,
httpsAgent: proxyAgent,
});
}
if (azure) {
const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);
azure.azureOpenAIApiDeploymentName = useModelName
? sanitizeModelName(llmConfig.model)
: azure.azureOpenAIApiDeploymentName;
if (process.env.AZURE_OPENAI_DEFAULT_MODEL) {
llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
}
if (configOptions.basePath) {
const azureURL = constructAzureURL({
baseURL: configOptions.basePath,
azureOptions: azure,
});
azure.azureOpenAIBasePath = azureURL.split(`/${azure.azureOpenAIApiDeploymentName}`)[0];
}
Object.assign(llmConfig, azure);
llmConfig.model = llmConfig.azureOpenAIApiDeploymentName;
} else {
llmConfig.openAIApiKey = apiKey;
// Object.assign(llmConfig, {
// configuration: { apiKey },
// });
}
if (process.env.OPENAI_ORGANIZATION && azure) {
llmConfig.organization = process.env.OPENAI_ORGANIZATION;
}
return { llmConfig, configOptions };
}
module.exports = { getLLMConfig };
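A quick illustration of the addParams/dropParams precedence (values invented): addParams merges in after modelOptions, and dropParams deletes keys last, so a dropped key is removed even if addParams set it:

const { llmConfig } = getLLMConfig('sk-xxx', {
  modelOptions: { model: 'gpt-4o-mini', temperature: 1 },
  addParams: { seed: 42 },
  dropParams: ['temperature'],
});
// llmConfig -> { model: 'gpt-4o-mini', streaming: true, seed: 42,
//                openAIApiKey: 'sk-xxx' } (no azure branch taken)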

@@ -0,0 +1,64 @@
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { logger } = require('~/config');
class Tokenizer {
constructor() {
this.tokenizersCache = {};
this.tokenizerCallsCount = 0;
}
getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
let tokenizer;
if (this.tokenizersCache[encoding]) {
tokenizer = this.tokenizersCache[encoding];
} else {
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
this.tokenizersCache[encoding] = tokenizer;
}
return tokenizer;
}
freeAndResetAllEncoders() {
try {
Object.keys(this.tokenizersCache).forEach((key) => {
if (this.tokenizersCache[key]) {
this.tokenizersCache[key].free();
delete this.tokenizersCache[key];
}
});
this.tokenizerCallsCount = 1;
} catch (error) {
logger.error('[Tokenizer] Free and reset encoders error', error);
}
}
resetTokenizersIfNecessary() {
if (this.tokenizerCallsCount >= 25) {
if (this.options?.debug) {
logger.debug('[Tokenizer] freeAndResetAllEncoders: reached 25 encodings, resetting...');
}
this.freeAndResetAllEncoders();
}
this.tokenizerCallsCount++;
}
getTokenCount(text, encoding = 'cl100k_base') {
this.resetTokenizersIfNecessary();
try {
const tokenizer = this.getTokenizer(encoding);
return tokenizer.encode(text, 'all').length;
} catch (error) {
this.freeAndResetAllEncoders();
const tokenizer = this.getTokenizer(encoding);
return tokenizer.encode(text, 'all').length;
}
}
}
const tokenizerService = new Tokenizer();
module.exports = tokenizerService;
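Since the module exports a singleton, all callers share one encoder cache, and the periodic reset bounds tiktoken's memory use. Minimal usage sketch (require path assumed):

const tokenizer = require('~/server/services/Tokenizer'); // path assumed

// Defaults to the cl100k_base encoding; the second call reuses the
// cached encoder until 25 calls trigger freeAndResetAllEncoders().
const a = tokenizer.getTokenCount('Hello, world!');
const b = tokenizer.getTokenCount('Another string', 'cl100k_base');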

@@ -1,6 +1,7 @@
const fs = require('fs');
const path = require('path');
const { StructuredTool } = require('langchain/tools');
const { tool: toolFn } = require('@langchain/core/tools');
const { zodToJsonSchema } = require('zod-to-json-schema');
const { Calculator } = require('langchain/tools/calculator');
const {
@@ -180,7 +181,7 @@ async function processRequiredActions(client, requiredActions) {
const tools = requiredActions.map((action) => action.tool);
const loadedTools = await loadTools({
user: client.req.user.id,
model: client.req.body.model ?? 'gpt-3.5-turbo-1106',
model: client.req.body.model ?? 'gpt-4o-mini',
tools,
functions: true,
options: {
@@ -372,8 +373,120 @@ async function processRequiredActions(client, requiredActions) {
};
}
/**
* Loads tools for the specified agent and returns them with a combined toolMap.
* @param {Object} params - Run params containing user and request information.
* @param {ServerRequest} params.req - The request object.
* @param {string} params.agent_id - The agent ID.
* @param {string[]} params.tools - The agent's available tools.
* @param {string | undefined} [params.openAIApiKey] - The OpenAI API key.
* @returns {Promise<{ tools?: StructuredTool[]; toolMap?: Record<string, StructuredTool>}>} The loaded tools and combined toolMap.
*/
async function loadAgentTools({ req, agent_id, tools, openAIApiKey }) {
if (!tools || tools.length === 0) {
return {};
}
const loadedTools = await loadTools({
user: req.user.id,
// model: req.body.model ?? 'gpt-4o-mini',
tools,
functions: true,
options: {
req,
openAIApiKey,
returnMetadata: true,
processFileURL,
uploadImageBuffer,
fileStrategy: req.app.locals.fileStrategy,
},
skipSpecs: true,
});
const agentTools = [];
for (let i = 0; i < loadedTools.length; i++) {
const tool = loadedTools[i];
const toolInstance = toolFn(
async (...args) => {
return tool['_call'](...args);
},
{
name: tool.name,
description: tool.description,
schema: tool.schema,
},
);
agentTools.push(toolInstance);
}
const ToolMap = loadedTools.reduce((map, tool) => {
map[tool.name] = tool;
return map;
}, {});
let actionSets = [];
const ActionToolMap = {};
for (const toolName of tools) {
if (!ToolMap[toolName]) {
if (!actionSets.length) {
actionSets = (await loadActionSets({ agent_id })) ?? [];
}
let actionSet = null;
let currentDomain = '';
for (let action of actionSets) {
const domain = await domainParser(req, action.metadata.domain, true);
if (toolName.includes(domain)) {
currentDomain = domain;
actionSet = action;
break;
}
}
if (actionSet) {
const validationResult = validateAndParseOpenAPISpec(actionSet.metadata.raw_spec);
if (validationResult.spec) {
const { requestBuilders, functionSignatures, zodSchemas } = openapiToFunction(
validationResult.spec,
true,
);
const functionName = toolName.replace(`${actionDelimiter}${currentDomain}`, '');
const functionSig = functionSignatures.find((sig) => sig.name === functionName);
const requestBuilder = requestBuilders[functionName];
const zodSchema = zodSchemas[functionName];
if (requestBuilder) {
const tool = await createActionTool({
action: actionSet,
requestBuilder,
zodSchema,
name: toolName,
description: functionSig.description,
});
agentTools.push(tool);
ActionToolMap[toolName] = tool;
}
}
}
}
}
if (tools.length > 0 && agentTools.length === 0) {
throw new Error('No tools found for the specified tool calls.');
}
const toolMap = { ...ToolMap, ...ActionToolMap };
return {
tools: agentTools,
toolMap,
};
}
module.exports = {
formatToOpenAIAssistantTool,
loadAgentTools,
loadAndFormatTools,
processRequiredActions,
};
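Putting it together, a hedged sketch of how the agents initializeClient above consumes this (agent id and tool names invented; action tools resolve through the loadActionSets/domainParser path):

const { tools, toolMap } = await loadAgentTools({
  req,
  agent_id: 'agent_abc123', // invented
  tools: ['dalle', 'getWeather_action_example--dev'], // invented names
});
// `tools` (LangChain tool instances) are handed to AgentClient;
// `toolMap` supports lookup by name when handling tool calls.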