🚧 chore: merge latest dev build to main repo (#3844)

* agents - phase 1 (#30)

* chore: copy assistant files

* feat: frontend and data-provider

* feat: backend get endpoint test

* fix(MessageEndpointIcon): switched to AgentName and AgentAvatar

* fix: small fixes

* fix: agent endpoint config

* fix: show Agent Builder

* chore: install agentus

* chore: initial scaffolding for agents

* fix: updated Assistant logic to Agent Logic for some Agent components

* WIP first pass, demo of agent package

* WIP: initial backend infra for agents

* fix: agent list error

* wip: agents routing

* chore: Refactor useSSE hook to handle different data events

* wip: correctly emit events

* chore: Update @librechat/agentus npm dependency to version 1.0.9

* remove comment

* first pass: streaming agent text

* chore: Remove @librechat/agentus root-level workspace npm dependency

* feat: Agent Schema and Model

* fix: content handling fixes

* fix: content message save

* WIP: new content data

* fix: run step issue with tool calls

* chore: Update @librechat/agentus npm dependency to version 1.1.5

* feat: update controller and agent routes

* wip: initial backend tool and tool error handling support

* wip: tool chunks

* chore: Update @librechat/agentus npm dependency to version 1.1.7

* chore: update tool_call typing, add test conditions and logs

* fix: create agent

* fix: create agent

* first pass: render completed content parts

* fix: remove logging, fix step handler typing

* chore: Update @librechat/agentus npm dependency to version 1.1.9

* refactor: cleanup maps on unmount

* chore: Update BaseClient.js to safely count tokens for string, number, and boolean values

* fix: support subsequent messages with tool_calls

* chore: export order

* fix: select agent

* fix: tool call types and handling

* chore: switch to anthropic for testing

* fix: AgentSelect

* refactor: experimental: OpenAIClient to use array for intermediateReply

* fix(useSSE): revert old condition for streaming legacy client tokens

* fix: lint

* revert `agent_id` to `id`

* chore: update localization keys for agent-related components

* feat: zod schema handling for actions

* refactor(actions): if no params, no zodSchema

* chore: Update @librechat/agentus npm dependency to version 1.2.1

* feat: first pass, actions

* refactor: empty schema for actions without params

* feat: Update createRun function to accept additional options

* fix: message payload formatting; feat: add more client options

* fix: ToolCall component rendering when action has no args but has output

* refactor(ToolCall): allow non-stringy args

* WIP: first pass, correctly formatted tool_calls between providers

* refactor: Remove duplicate import of 'roles' module

* refactor: Exclude 'vite.config.ts' from TypeScript compilation

* refactor: fix agent related types
  - no need to use endpoint/model fields for identifying agent metadata
  - add `provider` distinction for agent-configured 'endpoint'
  - no need for agent-endpoint map
  - reduce complexity of tools as functions into tools as string[]
  - fix types related to above changes
  - reduce unnecessary variables for queries/mutations and corresponding react-query keys

* refactor: Add tools and tool_kwargs fields to agent schema

* refactor: Remove unused code and update dependencies

* refactor: Update updateAgentHandler to use req.body directly

* refactor: Update AgentSelect component to use localized hooks

* refactor: Update agent schema to include tools and provider fields

* refactor(AgentPanel): add scrollbar gutter, add provider field to form, fix agent schema required values

* refactor: Update AgentSwitcher component to use selectedAgentId instead of selectedAgent

* refactor: Update AgentPanel component to include alternateName import and defaultAgentFormValues

* refactor(SelectDropDown): allow setting value as option while still supporting legacy usage (string values only)

* refactor: SelectDropdown changes - Only necessary when the available values are objects with label/value fields and the selected value is expected to be a string.

* refactor: TypeError issues and handle provider as option

* feat: Add placeholder for provider selection in AgentPanel component

* refactor: Update agent schema to include author and provider fields

* fix: show expected 'create agent' placeholder when creating agent

* chore: fix localization strings, hide capabilities form for now

* chore: typing

* refactor: import order and use compact agents schema for now

* chore: typing

* refactor: Update AgentForm type to use AgentCapabilities

* fix: agent selection issues in agent form

* feat: responsive agent selection

* fix: Handle cancelled fetch in useSelectAgent hook

* fix: reset agent form on accordion close/open

* feat: Add agent_id to default conversation for agents endpoint

* feat: agents endpoint request handling

* refactor: reset conversation model on agent select

* refactor: add `additional_instructions` to conversation schema, organize other fields

* chore: casing

* chore: types

* refactor(loadAgentTools): explicitly pass agent_id, do not pass `model` to loadAgentTools for now, load action sets by agent_id

* WIP: initial draft of real agent client initialization

* WIP: first pass, anthropic agent requests

* feat: remember last selected agent

* feat: openai and azure connected

* fix: prioritize agent model for runs unless an explicit override model is passed from client

* feat: Agent Actions

* fix: save agent id to convo

* feat: model panel (#29)

* feat: model panel

* bring back comments

* fix: method still null

* fix: AgentPanel FormContext

* feat: add more parameters

* fix: style issues; refactor: Agent Controller

* fix: cherry-pick

* fix: Update AgentAvatar component to use AssistantIcon instead of BrainCircuit

* feat: OGDialog for delete agent; feat(assistant): update Agent types, introduced `model_parameters`

* feat: icon and general `model_parameters` update

* feat: use react-hook-form better

* fix: agent builder form reset issue when switching panels

* refactor: modularize agent builder form

---------

Co-authored-by: Danny Avila <danny@librechat.ai>

* fix: AgentPanel and ModelPanel type issues; use `useFormContext` and `watch` instead of accessing `methods` directly and `useWatch`

* fix: tool call issues due to invalid input (anthropic) of empty string

* fix: handle empty text in Part component

---------

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>

* refactor: remove form from ModelPanel and fix nested ternary expressions in AgentConfig

* fix: Model Parameters not saved correctly

* refactor: remove console log

* feat: avatar upload and get for Agents (#36)

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>

* chore: update to public package

* fix: typing, optional chaining

* fix: cursor not showing for content parts

* chore: conditionally enable agents

* ci: fix azure test

* ci: fix frontend tests, fix eslint api

* refactor: Remove unused errorContentPart variable

* continuation of the agent message PR (#40)

* last fixes

* fix: agentMap

* pr merge test (#41)

* fix: model icon not fetching correctly

* remove console logs

* feat: agent name

* refactor: pass documentsMap as a prop to allow re-render of assistant form

* refactor: pass documentsMap as a prop to allow re-render of assistant form

* chore: Bump version to 0.7.419

* fix: TypeError: Cannot read properties of undefined (reading 'id')

* refactor: update AgentSwitcher component to use ControlCombobox instead of Combobox

---------

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>
Commit a0291ed155 (parent 618be4bf2b)
Authored by Danny Avila on 2024-08-31 16:33:51 -04:00, committed by GitHub
141 changed files with 14473 additions and 5714 deletions

View file

@@ -6,6 +6,7 @@ const {
isImageVisionTool,
actionDomainSeparator,
} = require('librechat-data-provider');
const { tool } = require('@langchain/core/tools');
const { encryptV2, decryptV2 } = require('~/server/utils/crypto');
const { getActions, deleteActions } = require('~/models/Action');
const { deleteAssistant } = require('~/models/Assistant');
@@ -101,7 +102,8 @@ async function domainParser(req, domain, inverse = false) {
*
* @param {Object} searchParams - The parameters for loading action sets.
* @param {string} searchParams.user - The user identifier.
* @param {string} searchParams.assistant_id - The assistant identifier.
* @param {string} [searchParams.agent_id] - The agent identifier.
* @param {string} [searchParams.assistant_id] - The assistant identifier.
* @returns {Promise<Action[] | null>} A promise that resolves to an array of actions or `null` if no match.
*/
async function loadActionSets(searchParams) {
@@ -114,10 +116,14 @@ async function loadActionSets(searchParams) {
* @param {Object} params - The parameters for loading action sets.
* @param {Action} params.action - The action set. Necessary for decrypting authentication values.
* @param {ActionRequest} params.requestBuilder - The ActionRequest builder class to execute the API call.
* @returns { { _call: (toolInput: Object) => unknown} } An object with `_call` method to execute the tool input.
* @param {string | undefined} [params.name] - The name of the tool.
* @param {string | undefined} [params.description] - The description for the tool.
* @param {import('zod').ZodTypeAny | undefined} [params.zodSchema] - The Zod schema for tool input validation/definition
* @returns { Promise<typeof tool | { _call: (toolInput: Object | string) => unknown}> } An object with `_call` method to execute the tool input.
*/
async function createActionTool({ action, requestBuilder }) {
async function createActionTool({ action, requestBuilder, zodSchema, name, description }) {
action.metadata = await decryptMetadata(action.metadata);
/** @type {(toolInput: Object | string) => Promise<unknown>} */
const _call = async (toolInput) => {
try {
requestBuilder.setParams(toolInput);
@@ -142,6 +148,14 @@ async function createActionTool({ action, requestBuilder }) {
}
};
if (name) {
return tool(_call, {
name,
description: description || '',
schema: zodSchema,
});
}
return {
_call,
};
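// Sketch: how the `tool()` wrapper above behaves once a name and zod schema are supplied.
// The handler, tool name, and schema below are hypothetical stand-ins, not LibreChat internals;
// createActionTool's real `_call` delegates to the ActionRequest builder instead.
const { z } = require('zod');

const lookupOrder = async ({ orderId }) => `Order ${orderId}: shipped`;

const orderTool = tool(lookupOrder, {
  name: 'lookupOrder_action_example---dev',
  description: 'Look up an order by id',
  schema: z.object({ orderId: z.string() }),
});

// LangChain validates the input against the schema before invoking the handler:
// await orderTool.invoke({ orderId: '42' }); // => 'Order 42: shipped'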
@@ -180,7 +194,7 @@ async function encryptMetadata(metadata) {
* Decrypts sensitive metadata values for an action.
*
* @param {ActionMetadata} metadata - The action metadata to decrypt.
* @returns {ActionMetadata} The updated action metadata with decrypted values.
* @returns {Promise<ActionMetadata>} The updated action metadata with decrypted values.
*/
async function decryptMetadata(metadata) {
const decryptedMetadata = { ...metadata };

View file

@@ -45,5 +45,7 @@ module.exports = {
AZURE_ASSISTANTS_BASE_URL,
EModelEndpoint.azureAssistants,
),
/* key will be part of separate config */
[EModelEndpoint.agents]: generateConfig(process.env.I_AM_A_TEAPOT),
},
};

View file

@@ -9,13 +9,22 @@ const { config } = require('./EndpointService');
*/
async function loadDefaultEndpointsConfig(req) {
const { google, gptPlugins } = await loadAsyncEndpoints(req);
const { openAI, assistants, azureAssistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } =
config;
const {
openAI,
agents,
assistants,
azureAssistants,
bingAI,
anthropic,
azureOpenAI,
chatGPTBrowser,
} = config;
const enabledEndpoints = getEnabledEndpoints();
const endpointConfig = {
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.agents]: agents,
[EModelEndpoint.assistants]: assistants,
[EModelEndpoint.azureAssistants]: azureAssistants,
[EModelEndpoint.azureOpenAI]: azureOpenAI,

View file

@@ -29,6 +29,7 @@ async function loadDefaultModels(req) {
return {
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.agents]: openAI,
[EModelEndpoint.google]: google,
[EModelEndpoint.anthropic]: anthropic,
[EModelEndpoint.gptPlugins]: gptPlugins,

View file

@@ -0,0 +1,30 @@
const { getAgent } = require('~/models/Agent');
const { logger } = require('~/config');
const buildOptions = (req, endpoint, parsedBody) => {
const { agent_id, instructions, spec, ...rest } = parsedBody;
const agentPromise = getAgent({
id: agent_id,
// TODO: better author handling
author: req.user.id,
}).catch((error) => {
logger.error(`[/agents/:${agent_id}] Error retrieving agent during build options step`, error);
return undefined;
});
const endpointOption = {
agent: agentPromise,
endpoint,
agent_id,
instructions,
spec,
modelOptions: {
...rest,
},
};
return endpointOption;
};
module.exports = { buildOptions };
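A minimal sketch of how this builder might be exercised, assuming an Express-style `req` with an authenticated user; the ids and body fields are hypothetical:

const endpointOption = buildOptions(
  { user: { id: 'user_123' } }, // req: only req.user.id is read here
  'agents',
  { agent_id: 'agent_abc', instructions: 'Be terse', temperature: 0.2 },
);

// `agent` is a promise that resolves to the agent record, or to undefined if the lookup fails;
// it is awaited later, e.g. in initializeClient. Remaining body fields land in modelOptions:
// endpointOption.modelOptions => { temperature: 0.2 }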

View file

@@ -0,0 +1,7 @@
const build = require('./build');
const initialize = require('./initialize');
module.exports = {
...build,
...initialize,
};

View file

@@ -0,0 +1,119 @@
// const {
// ErrorTypes,
// EModelEndpoint,
// resolveHeaders,
// mapModelToAzureConfig,
// } = require('librechat-data-provider');
// const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
// const { isEnabled, isUserProvided } = require('~/server/utils');
// const { getAzureCredentials } = require('~/utils');
// const { OpenAIClient } = require('~/app');
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { EModelEndpoint, providerEndpointMap } = require('librechat-data-provider');
const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks');
// for testing purposes
// const createTavilySearchTool = require('~/app/clients/tools/structured/TavilySearch');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initializeClient');
const initOpenAI = require('~/server/services/Endpoints/openAI/initializeClient');
const { loadAgentTools } = require('~/server/services/ToolService');
const AgentClient = require('~/server/controllers/agents/client');
const { getModelMaxTokens } = require('~/utils');
/* For testing errors */
const _getWeather = tool(
async ({ location }) => {
if (location === 'SAN FRANCISCO') {
return 'It\'s 60 degrees and foggy';
} else if (location.toLowerCase() === 'san francisco') {
throw new Error('Input queries must be all capitals');
} else {
throw new Error('Invalid input.');
}
},
{
name: 'get_weather',
description: 'Call to get the current weather',
schema: z.object({
location: z.string(),
}),
},
);
const providerConfigMap = {
[EModelEndpoint.openAI]: initOpenAI,
[EModelEndpoint.azureOpenAI]: initOpenAI,
[EModelEndpoint.anthropic]: initAnthropic,
};
const initializeClient = async ({ req, res, endpointOption }) => {
if (!endpointOption) {
throw new Error('Endpoint option not provided');
}
// TODO: use endpointOption to determine options/modelOptions
const eventHandlers = getDefaultHandlers({ res });
// const tools = [createTavilySearchTool()];
// const tools = [_getWeather];
// const tool_calls = [{ name: 'getPeople_action_swapi---dev' }];
// const tool_calls = [{ name: 'dalle' }];
// const tool_calls = [{ name: 'getItmOptions_action_YWlhcGkzLn' }];
// const tool_calls = [{ name: 'tavily_search_results_json' }];
// const tool_calls = [
// { name: 'searchListings_action_emlsbG93NT' },
// { name: 'searchAddress_action_emlsbG93NT' },
// { name: 'searchMLS_action_emlsbG93NT' },
// { name: 'searchCoordinates_action_emlsbG93NT' },
// { name: 'searchUrl_action_emlsbG93NT' },
// { name: 'getPropertyDetails_action_emlsbG93NT' },
// ];
if (!endpointOption.agent) {
throw new Error('No agent promise provided');
}
/** @type {Agent} */
const agent = await endpointOption.agent;
const { tools, toolMap } = await loadAgentTools({
req,
tools: agent.tools,
agent_id: agent.id,
// openAIApiKey: process.env.OPENAI_API_KEY,
});
let modelOptions = { model: agent.model };
const getOptions = providerConfigMap[agent.provider];
if (!getOptions) {
throw new Error(`Provider ${agent.provider} not supported`);
}
// TODO: pass-in override settings that are specific to current run
endpointOption.modelOptions.model = agent.model;
const options = await getOptions({
req,
res,
endpointOption,
optionsOnly: true,
overrideEndpoint: agent.provider,
overrideModel: agent.model,
});
modelOptions = Object.assign(modelOptions, options.llmConfig);
const client = new AgentClient({
req,
agent,
tools,
toolMap,
modelOptions,
eventHandlers,
configOptions: options.configOptions,
maxContextTokens:
agent.max_context_tokens ??
getModelMaxTokens(modelOptions.model, providerEndpointMap[agent.provider]),
});
return { client };
};
module.exports = { initializeClient };
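For reference, the `_getWeather` tool above is kept for exercising the tool-error path; a minimal sketch of how it behaves when invoked directly:

const demoWeatherTool = async () => {
  // Valid input returns a plain string result.
  console.log(await _getWeather.invoke({ location: 'SAN FRANCISCO' })); // It's 60 degrees and foggy
  try {
    // Lowercase input intentionally throws so downstream error handling can be observed.
    await _getWeather.invoke({ location: 'san francisco' });
  } catch (err) {
    console.log(err.message); // Input queries must be all capitals
  }
};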

View file

@@ -1,8 +1,9 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');
const { AnthropicClient } = require('~/app');
const initializeClient = async ({ req, res, endpointOption }) => {
const initializeClient = async ({ req, res, endpointOption, optionsOnly }) => {
const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env;
const expiresAt = req.body.key;
const isUserProvided = ANTHROPIC_API_KEY === 'user_provided';
@@ -34,6 +35,18 @@ const initializeClient = async ({ req, res, endpointOption }) => {
clientOptions.streamRate = allConfig.streamRate;
}
if (optionsOnly) {
const requestOptions = Object.assign(
{
reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
proxy: PROXY ?? null,
modelOptions: endpointOption.modelOptions,
},
clientOptions,
);
return getLLMConfig(anthropicApiKey, requestOptions);
}
const client = new AnthropicClient(anthropicApiKey, {
req,
res,

View file

@@ -0,0 +1,55 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
/**
* Generates configuration options for creating an Anthropic language model (LLM) instance.
*
* @param {string} apiKey - The API key for authentication with Anthropic.
* @param {Object} [options={}] - Additional options for configuring the LLM.
* @param {Object} [options.modelOptions] - Model-specific options.
* @param {string} [options.modelOptions.model] - The name of the model to use.
* @param {number} [options.modelOptions.maxOutputTokens] - The maximum number of tokens to generate.
* @param {number} [options.modelOptions.temperature] - Controls randomness in output generation.
* @param {number} [options.modelOptions.topP] - Controls diversity of output generation.
* @param {number} [options.modelOptions.topK] - Controls the number of top tokens to consider.
* @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
* @param {boolean} [options.modelOptions.stream] - Whether to stream the response.
* @param {string} [options.proxy] - Proxy server URL.
* @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
*
* @returns {Object} Configuration options for creating an Anthropic LLM instance, with null and undefined values removed.
*/
function getLLMConfig(apiKey, options = {}) {
const defaultOptions = {
model: anthropicSettings.model.default,
maxOutputTokens: anthropicSettings.maxOutputTokens.default,
stream: true,
};
const mergedOptions = Object.assign(defaultOptions, options.modelOptions);
const requestOptions = {
apiKey,
model: mergedOptions.model,
stream: mergedOptions.stream,
temperature: mergedOptions.temperature,
top_p: mergedOptions.topP,
top_k: mergedOptions.topK,
stop_sequences: mergedOptions.stop,
max_tokens:
mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
};
const configOptions = {};
if (options.proxy) {
configOptions.httpAgent = new HttpsProxyAgent(options.proxy);
}
if (options.reverseProxyUrl) {
configOptions.baseURL = options.reverseProxyUrl;
}
return { llmConfig: removeNullishValues(requestOptions), configOptions };
}
module.exports = { getLLMConfig };
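A minimal usage sketch; the key and model options are hypothetical, and nullish fields are stripped by `removeNullishValues`:

const { llmConfig, configOptions } = getLLMConfig('sk-ant-example', {
  modelOptions: { model: 'claude-3-haiku-20240307', temperature: 0.7, topP: 0.9 },
});
// llmConfig => { apiKey, model, stream: true, temperature: 0.7, top_p: 0.9, max_tokens: <anthropicSettings default> }
// configOptions => {} (httpAgent/baseURL are only set when proxy/reverseProxyUrl are passed)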

View file

@@ -5,11 +5,19 @@ const {
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
const { isEnabled, isUserProvided } = require('~/server/utils');
const { getAzureCredentials } = require('~/utils');
const { OpenAIClient } = require('~/app');
const initializeClient = async ({ req, res, endpointOption }) => {
const initializeClient = async ({
req,
res,
endpointOption,
optionsOnly,
overrideEndpoint,
overrideModel,
}) => {
const {
PROXY,
OPENAI_API_KEY,
@@ -19,7 +27,9 @@ const initializeClient = async ({ req, res, endpointOption }) => {
OPENAI_SUMMARIZE,
DEBUG_OPENAI,
} = process.env;
const { key: expiresAt, endpoint, model: modelName } = req.body;
const { key: expiresAt } = req.body;
const modelName = overrideModel ?? req.body.model;
const endpoint = overrideEndpoint ?? req.body.endpoint;
const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
const credentials = {
@@ -45,12 +55,10 @@ const initializeClient = async ({ req, res, endpointOption }) => {
let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint];
const clientOptions = {
debug: isEnabled(DEBUG_OPENAI),
contextStrategy,
reverseProxyUrl: baseURL ? baseURL : null,
proxy: PROXY ?? null,
req,
res,
debug: isEnabled(DEBUG_OPENAI),
reverseProxyUrl: baseURL ? baseURL : null,
...endpointOption,
};
@@ -119,7 +127,17 @@ const initializeClient = async ({ req, res, endpointOption }) => {
throw new Error(`${endpoint} API Key not provided.`);
}
const client = new OpenAIClient(apiKey, clientOptions);
if (optionsOnly) {
const requestOptions = Object.assign(
{
modelOptions: endpointOption.modelOptions,
},
clientOptions,
);
return getLLMConfig(apiKey, requestOptions);
}
const client = new OpenAIClient(apiKey, Object.assign({ req, res }, clientOptions));
return {
client,
openAIApiKey: apiKey,

View file

@@ -0,0 +1,120 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { sanitizeModelName, constructAzureURL } = require('~/utils');
const { isEnabled } = require('~/server/utils');
/**
* Generates configuration options for creating a language model (LLM) instance.
* @param {string} apiKey - The API key for authentication.
* @param {Object} options - Additional options for configuring the LLM.
* @param {Object} [options.modelOptions] - Model-specific options.
* @param {string} [options.modelOptions.model] - The name of the model to use.
* @param {number} [options.modelOptions.temperature] - Controls randomness in output generation (0-2).
* @param {number} [options.modelOptions.top_p] - Controls diversity via nucleus sampling (0-1).
* @param {number} [options.modelOptions.frequency_penalty] - Reduces repetition of token sequences (-2 to 2).
* @param {number} [options.modelOptions.presence_penalty] - Encourages discussing new topics (-2 to 2).
* @param {number} [options.modelOptions.max_tokens] - The maximum number of tokens to generate.
* @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
* @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
* @param {boolean} [options.useOpenRouter] - Flag to use OpenRouter API.
* @param {Object} [options.headers] - Additional headers for API requests.
* @param {string} [options.proxy] - Proxy server URL.
* @param {Object} [options.azure] - Azure-specific configurations.
* @param {boolean} [options.streaming] - Whether to use streaming mode.
* @param {Object} [options.addParams] - Additional parameters to add to the model options.
* @param {string[]} [options.dropParams] - Parameters to remove from the model options.
* @returns {Object} Configuration options for creating an LLM instance.
*/
function getLLMConfig(apiKey, options = {}) {
const {
modelOptions = {},
reverseProxyUrl,
useOpenRouter,
headers,
proxy,
azure,
streaming = true,
addParams,
dropParams,
} = options;
let llmConfig = {
model: 'gpt-4o-mini',
streaming,
};
Object.assign(llmConfig, modelOptions);
if (addParams && typeof addParams === 'object') {
Object.assign(llmConfig, addParams);
}
if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
delete llmConfig[param];
});
}
const configOptions = {};
// Handle OpenRouter or custom reverse proxy
if (useOpenRouter || reverseProxyUrl === 'https://openrouter.ai/api/v1') {
configOptions.basePath = 'https://openrouter.ai/api/v1';
configOptions.baseOptions = {
headers: Object.assign(
{
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
},
headers,
),
};
} else if (reverseProxyUrl) {
configOptions.basePath = reverseProxyUrl;
if (headers) {
configOptions.baseOptions = { headers };
}
}
if (proxy) {
const proxyAgent = new HttpsProxyAgent(proxy);
Object.assign(configOptions, {
httpAgent: proxyAgent,
httpsAgent: proxyAgent,
});
}
if (azure) {
const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);
azure.azureOpenAIApiDeploymentName = useModelName
? sanitizeModelName(llmConfig.model)
: azure.azureOpenAIApiDeploymentName;
if (process.env.AZURE_OPENAI_DEFAULT_MODEL) {
llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
}
if (configOptions.basePath) {
const azureURL = constructAzureURL({
baseURL: configOptions.basePath,
azureOptions: azure,
});
azure.azureOpenAIBasePath = azureURL.split(`/${azure.azureOpenAIApiDeploymentName}`)[0];
}
Object.assign(llmConfig, azure);
llmConfig.model = llmConfig.azureOpenAIApiDeploymentName;
} else {
llmConfig.openAIApiKey = apiKey;
// Object.assign(llmConfig, {
// configuration: { apiKey },
// });
}
if (process.env.OPENAI_ORGANIZATION && this.azure) {
llmConfig.organization = process.env.OPENAI_ORGANIZATION;
}
return { llmConfig, configOptions };
}
module.exports = { getLLMConfig };
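A minimal usage sketch; the key and options are hypothetical. Routing through OpenRouter is detected from the reverse-proxy URL, and `dropParams` removes unwanted request fields:

const { llmConfig, configOptions } = getLLMConfig('sk-example', {
  modelOptions: { model: 'gpt-4o-mini', temperature: 0.5, max_tokens: 1024 },
  reverseProxyUrl: 'https://openrouter.ai/api/v1',
  dropParams: ['frequency_penalty'],
});
// configOptions.basePath => 'https://openrouter.ai/api/v1' (with LibreChat referer headers)
// llmConfig => { model: 'gpt-4o-mini', streaming: true, temperature: 0.5, max_tokens: 1024, openAIApiKey: 'sk-example' }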

View file

@@ -0,0 +1,64 @@
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { logger } = require('~/config');
class Tokenizer {
constructor() {
this.tokenizersCache = {};
this.tokenizerCallsCount = 0;
}
getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
let tokenizer;
if (this.tokenizersCache[encoding]) {
tokenizer = this.tokenizersCache[encoding];
} else {
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
this.tokenizersCache[encoding] = tokenizer;
}
return tokenizer;
}
freeAndResetAllEncoders() {
try {
Object.keys(this.tokenizersCache).forEach((key) => {
if (this.tokenizersCache[key]) {
this.tokenizersCache[key].free();
delete this.tokenizersCache[key];
}
});
this.tokenizerCallsCount = 1;
} catch (error) {
logger.error('[Tokenizer] Free and reset encoders error', error);
}
}
resetTokenizersIfNecessary() {
if (this.tokenizerCallsCount >= 25) {
if (this.options?.debug) {
logger.debug('[Tokenizer] freeAndResetAllEncoders: reached 25 encodings, resetting...');
}
this.freeAndResetAllEncoders();
}
this.tokenizerCallsCount++;
}
getTokenCount(text, encoding = 'cl100k_base') {
this.resetTokenizersIfNecessary();
try {
const tokenizer = this.getTokenizer(encoding);
return tokenizer.encode(text, 'all').length;
} catch (error) {
this.freeAndResetAllEncoders();
const tokenizer = this.getTokenizer(encoding);
return tokenizer.encode(text, 'all').length;
}
}
}
const tokenizerService = new Tokenizer();
module.exports = tokenizerService;
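A minimal usage sketch of the singleton service defined above:

const promptTokens = tokenizerService.getTokenCount('Hello, world!');              // cl100k_base by default
const legacyTokens = tokenizerService.getTokenCount('Hello, world!', 'p50k_base'); // alternate encoding
// Encoders are cached per encoding name; after roughly 25 counts the cache is freed and
// rebuilt to bound the memory held by tiktoken encoders.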

View file

@@ -1,6 +1,7 @@
const fs = require('fs');
const path = require('path');
const { StructuredTool } = require('langchain/tools');
const { tool: toolFn } = require('@langchain/core/tools');
const { zodToJsonSchema } = require('zod-to-json-schema');
const { Calculator } = require('langchain/tools/calculator');
const {
@@ -180,7 +181,7 @@ async function processRequiredActions(client, requiredActions) {
const tools = requiredActions.map((action) => action.tool);
const loadedTools = await loadTools({
user: client.req.user.id,
model: client.req.body.model ?? 'gpt-3.5-turbo-1106',
model: client.req.body.model ?? 'gpt-4o-mini',
tools,
functions: true,
options: {
@@ -372,8 +373,120 @@ async function processRequiredActions(client, requiredActions) {
};
}
/**
* Processes the runtime tool calls and returns a combined toolMap.
* @param {Object} params - Run params containing user and request information.
* @param {ServerRequest} params.req - The request object.
* @param {string} params.agent_id - The agent ID.
* @param {string[]} params.tools - The agent's available tools.
* @param {string | undefined} [params.openAIApiKey] - The OpenAI API key.
* @returns {Promise<{ tools?: StructuredTool[]; toolMap?: Record<string, StructuredTool>}>} The combined toolMap.
*/
async function loadAgentTools({ req, agent_id, tools, openAIApiKey }) {
if (!tools || tools.length === 0) {
return {};
}
const loadedTools = await loadTools({
user: req.user.id,
// model: req.body.model ?? 'gpt-4o-mini',
tools,
functions: true,
options: {
req,
openAIApiKey,
returnMetadata: true,
processFileURL,
uploadImageBuffer,
fileStrategy: req.app.locals.fileStrategy,
},
skipSpecs: true,
});
const agentTools = [];
for (let i = 0; i < loadedTools.length; i++) {
const tool = loadedTools[i];
const toolInstance = toolFn(
async (...args) => {
return tool['_call'](...args);
},
{
name: tool.name,
description: tool.description,
schema: tool.schema,
},
);
agentTools.push(toolInstance);
}
const ToolMap = loadedTools.reduce((map, tool) => {
map[tool.name] = tool;
return map;
}, {});
let actionSets = [];
const ActionToolMap = {};
for (const toolName of tools) {
if (!ToolMap[toolName]) {
if (!actionSets.length) {
actionSets = (await loadActionSets({ agent_id })) ?? [];
}
let actionSet = null;
let currentDomain = '';
for (let action of actionSets) {
const domain = await domainParser(req, action.metadata.domain, true);
if (toolName.includes(domain)) {
currentDomain = domain;
actionSet = action;
break;
}
}
if (actionSet) {
const validationResult = validateAndParseOpenAPISpec(actionSet.metadata.raw_spec);
if (validationResult.spec) {
const { requestBuilders, functionSignatures, zodSchemas } = openapiToFunction(
validationResult.spec,
true,
);
const functionName = toolName.replace(`${actionDelimiter}${currentDomain}`, '');
const functionSig = functionSignatures.find((sig) => sig.name === functionName);
const requestBuilder = requestBuilders[functionName];
const zodSchema = zodSchemas[functionName];
if (requestBuilder) {
const tool = await createActionTool({
action: actionSet,
requestBuilder,
zodSchema,
name: toolName,
description: functionSig.description,
});
agentTools.push(tool);
ActionToolMap[toolName] = tool;
}
}
}
}
}
if (tools.length > 0 && agentTools.length === 0) {
throw new Error('No tools found for the specified tool calls.');
}
const toolMap = { ...ToolMap, ...ActionToolMap };
return {
tools: agentTools,
toolMap,
};
}
module.exports = {
formatToOpenAIAssistantTool,
loadAgentTools,
loadAndFormatTools,
processRequiredActions,
formatToOpenAIAssistantTool,
};
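A minimal sketch of how `loadAgentTools` might be called from an agent initializer, assuming an Express `req` with an authenticated user; the agent id and tool names are hypothetical (the `_action_` suffix follows the action-tool naming pattern above):

// Inside an async initializer:
const { tools, toolMap } = await loadAgentTools({
  req,                                  // provides req.user.id and req.app.locals.fileStrategy
  agent_id: 'agent_abc',
  tools: ['dalle', 'getWeather_action_example---dev'],
});
// `tools` are LangChain tool instances handed to the run; `toolMap` maps tool names back to
// the loaded implementations (built-in tools plus any action tools) for output handling.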