Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-09-22 06:00:56 +02:00

* feat: Code Interpreter API & File Search Agent Uploads
  chore: add back code files
  wip: first pass, abstract key dialog
  refactor: influence checkbox on key changes
  refactor: update localization keys for 'execute code' to 'run code'
  wip: run code button
  refactor: add throwError parameter to loadAuthValues and getUserPluginAuthValue functions
  feat: first pass, API tool calling
  fix: handle missing toolId in callTool function and return 404 for non-existent tools
  feat: show code outputs
  fix: improve error handling in callTool function and log errors
  fix: handle potential null value for filepath in attachment destructuring
  fix: normalize language before rendering and prevent null return
  fix: add loading indicator in RunCode component while executing code
  feat: add support for conditional code execution in Markdown components
  feat: attachments
  refactor: remove bash
  fix: pass abort signal to graph/run
  refactor: debounce and rate limit tool call
  refactor: increase debounce delay for execute function
  feat: set code output attachments
  feat: image attachments
  refactor: apply message context
  refactor: pass `partIndex`
  feat: toolCall schema/model/methods
  feat: block indexing
  feat: get tool calls
  chore: imports
  chore: typing
  chore: condense type imports
  feat: get tool calls
  fix: block indexing
  chore: typing
  refactor: update tool calls mapping to support multiple results
  fix: add unique key to nav link for rendering
  wip: first pass, tool call results
  refactor: update query cache from successful tool call mutation
  style: improve result switcher styling
  chore: note on using `.toObject()`
  feat: add agent_id field to conversation schema
  chore: typing
  refactor: rename agentMap to agentsMap for consistency
  feat: Agent Name as chat input placeholder
  chore: bump agents
  📦 chore: update @langchain dependencies to latest versions to match agents package
  📦 chore: update @librechat/agents dependency to version 1.8.0
  fix: Aborting agent stream removes sender; fix(bedrock): completion removes preset name label
  refactor: remove direct file parameter to use req.file, add `processAgentFileUpload` for image uploads
  feat: upload menu
  feat: prime message_file resources
  feat: implement conversation access validation in chat route
  refactor: remove file parameter from processFileUpload and use req.file instead
  feat: add savedMessageIds set to track saved message IDs in BaseClient, to prevent unnecessary double-write to db
  feat: prevent duplicate message saves by checking savedMessageIds in AgentController
  refactor: skip legacy RAG API handling for agents
  feat: add files field to convoSchema
  refactor: update request type annotations from Express.Request to ServerRequest in file processing functions
  feat: track conversation files
  fix: resendFiles, addPreviousAttachments handling
  feat: add ID validation for session_id and file_id in download route
  feat: entity_id for code file uploads/downloads
  fix: code file edge cases
  feat: delete related tool calls
  feat: add stream rate handling for LLM configuration
  feat: enhance system content with attached file information
  fix: improve error logging in resource priming function
* WIP: PoC, sequential agents
  WIP: PoC Sequential Agents, first pass content data + bump agents package
  fix: package-lock
  WIP: PoC, o1 support, refactor bufferString
  feat: convertJsonSchemaToZod
  fix: form issues and schema defining erroneous model
  fix: max length issue on agent form instructions, limit conversation messages to sequential agents
  feat: add abort signal support to createRun function and AgentClient
  feat: PoC, hide prior sequential agent steps
  fix: update parameter naming from config to metadata in event handlers for clarity, add model to usage data
  refactor: use only last contentData, track model for usage data
  chore: bump agents package
  fix: content parts issue
  refactor: filter contentParts to include tool calls and relevant indices
  feat: show function calls
  refactor: filter context messages to exclude tool calls when no tools are available to the agent
  fix: ensure tool call content is not undefined in formatMessages
  feat: add agent_id field to conversationPreset schema
  feat: hide sequential agents
  feat: increase upload toast duration to 10 seconds
* refactor: tool context handling & update Code API Key Dialog
  feat: toolContextMap
  chore: skipSpecs -> useSpecs
  ci: fix handleTools tests
  feat: API Key Dialog
* feat: Agent Permissions Admin Controls
  feat: replace label with button for prompt permission toggle
  feat: update agent permissions
  feat: enable experimental agents and streamline capability configuration
  feat: implement access control for agents and enhance endpoint menu items
  feat: add welcome message for agent selection in localization
  feat: add agents permission to access control and update version to 0.7.57
* fix: update types in useAssistantListMap and useMentions hooks for better null handling
* feat: mention agents
* fix: agent tool resource race conditions when deleting agent tool resource files
* feat: add error handling for code execution with user feedback
* refactor: rename AdminControls to AdminSettings for clarity
* style: add gap to button in AdminSettings for improved layout
* refactor: separate agent query hooks and check access to enable fetching
* fix: remove unused provider from agent initialization options, creates issue with custom endpoints
* refactor: remove redundant/deprecated modelOptions from AgentClient processes
* chore: update @librechat/agents to version 1.8.5 in package.json and package-lock.json
* fix: minor styling issues + agent panel uniformity
* fix: agent edge cases when set endpoint is no longer defined
* refactor: remove unused cleanup function call from AppService
* fix: update link in ApiKeyDialog to point to pricing page
* fix: improve type handling and layout calculations in SidePanel component
* fix: add missing localization string for agent selection in SidePanel
* chore: form styling and localizations for upload filesearch/code interpreter
* fix: model selection placeholder logic in AgentConfig component
* style: agent capabilities
* fix: add localization for provider selection and improve dropdown styling in ModelPanel
* refactor: use gpt-4o-mini > gpt-3.5-turbo
* fix: agents configuration for loadDefaultInterface and update related tests
* feat: DALLE Agents support
175 lines
5 KiB
JavaScript
const {
  CacheKeys,
  ErrorTypes,
  envVarRegex,
  FetchTokenConfig,
  extractEnvVariable,
} = require('librechat-data-provider');
const { Providers } = require('@librechat/agents');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { fetchModels } = require('~/server/services/ModelService');
const { isUserProvided, sleep } = require('~/server/utils');
const getLogStores = require('~/cache/getLogStores');
const { OpenAIClient } = require('~/app');

const { PROXY } = process.env;

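/**
 * Initializes an OpenAI-compatible client, or just its LLM options, for a custom endpoint.
 *
 * @param {object} params
 * @param {object} params.req - Server request; provides the user, request body, and app locals.
 * @param {object} params.res - Server response, passed through to the client.
 * @param {object} params.endpointOption - Parsed endpoint options, including `model_parameters`.
 * @param {boolean} [params.optionsOnly] - When true, return LLM config options instead of a client instance.
 * @param {string} [params.overrideEndpoint] - Endpoint name to use instead of `req.body.endpoint`.
 */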
const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrideEndpoint }) => {
  const { key: expiresAt } = req.body;
  const endpoint = overrideEndpoint ?? req.body.endpoint;

  const endpointConfig = await getCustomEndpointConfig(endpoint);
  if (!endpointConfig) {
    throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
  }

  const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey);
  const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL);

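  // Resolve ${ENV_VAR} references in any headers configured for this endpoint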
  let resolvedHeaders = {};
  if (endpointConfig.headers && typeof endpointConfig.headers === 'object') {
    Object.keys(endpointConfig.headers).forEach((key) => {
      resolvedHeaders[key] = extractEnvVariable(endpointConfig.headers[key]);
    });
  }

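  // Values that still match the env variable pattern were never resolved, i.e. the variable is unset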
  if (CUSTOM_API_KEY.match(envVarRegex)) {
    throw new Error(`Missing API Key for ${endpoint}.`);
  }

  if (CUSTOM_BASE_URL.match(envVarRegex)) {
    throw new Error(`Missing Base URL for ${endpoint}.`);
  }

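  // User-provided credentials are stored per user; check expiry and load them when applicable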
  const userProvidesKey = isUserProvided(CUSTOM_API_KEY);
  const userProvidesURL = isUserProvided(CUSTOM_BASE_URL);

  let userValues = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }

  let apiKey = userProvidesKey ? userValues?.apiKey : CUSTOM_API_KEY;
  let baseURL = userProvidesURL ? userValues?.baseURL : CUSTOM_BASE_URL;

  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }

  if (userProvidesURL && !baseURL) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_BASE_URL,
      }),
    );
  }

  if (!apiKey) {
    throw new Error(`${endpoint} API key not provided.`);
  }

  if (!baseURL) {
    throw new Error(`${endpoint} Base URL not provided.`);
  }

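  // Token config is cached per user for user-provided credentials, otherwise per endpoint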
  const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
  const tokenKey =
    !endpointConfig.tokenConfig && (userProvidesKey || userProvidesURL)
      ? `${endpoint}:${req.user.id}`
      : endpoint;

  let endpointTokenConfig =
    !endpointConfig.tokenConfig &&
    FetchTokenConfig[endpoint.toLowerCase()] &&
    (await cache.get(tokenKey));

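  // For providers with a known token config, fetching the model list also populates the cached token config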
  if (
    FetchTokenConfig[endpoint.toLowerCase()] &&
    endpointConfig &&
    endpointConfig.models.fetch &&
    !endpointTokenConfig
  ) {
    await fetchModels({ apiKey, baseURL, name: endpoint, user: req.user.id, tokenKey });
    endpointTokenConfig = await cache.get(tokenKey);
  }

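  // Client options derived from the custom endpoint's configuration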
  const customOptions = {
    headers: resolvedHeaders,
    addParams: endpointConfig.addParams,
    dropParams: endpointConfig.dropParams,
    titleConvo: endpointConfig.titleConvo,
    titleModel: endpointConfig.titleModel,
    forcePrompt: endpointConfig.forcePrompt,
    summaryModel: endpointConfig.summaryModel,
    modelDisplayLabel: endpointConfig.modelDisplayLabel,
    titleMethod: endpointConfig.titleMethod ?? 'completion',
    contextStrategy: endpointConfig.summarize ? 'summarize' : null,
    directEndpoint: endpointConfig.directEndpoint,
    titleMessageRole: endpointConfig.titleMessageRole,
    streamRate: endpointConfig.streamRate,
    endpointTokenConfig,
  };

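  // A global `all` endpoint config, if present, overrides the per-endpoint stream rate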
  /** @type {undefined | TBaseEndpoint} */
  const allConfig = req.app.locals.all;
  if (allConfig) {
    customOptions.streamRate = allConfig.streamRate;
  }

  const clientOptions = {
    reverseProxyUrl: baseURL ?? null,
    proxy: PROXY ?? null,
    req,
    res,
    ...customOptions,
    ...endpointOption,
  };

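  // When only options are requested, return the LLM config rather than constructing a client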
  if (optionsOnly) {
    const modelOptions = endpointOption.model_parameters;
    if (endpoint !== Providers.OLLAMA) {
      const requestOptions = Object.assign(
        {
          modelOptions,
        },
        clientOptions,
      );
      const options = getLLMConfig(apiKey, requestOptions);
      if (!customOptions.streamRate) {
        return options;
      }
      options.llmConfig.callbacks = [
        {
          handleLLMNewToken: async () => {
            await sleep(customOptions.streamRate);
          },
        },
      ];
      return options;
    }

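    // Ollama receives its base URL through modelOptions, with any '/v1' suffix stripped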
    if (clientOptions.reverseProxyUrl) {
      modelOptions.baseUrl = clientOptions.reverseProxyUrl.split('/v1')[0];
      delete clientOptions.reverseProxyUrl;
    }

    return {
      llmConfig: modelOptions,
    };
  }

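  // Default path: construct a full OpenAIClient for the custom endpoint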
  const client = new OpenAIClient(apiKey, clientOptions);
  return {
    client,
    openAIApiKey: apiKey,
  };
};

module.exports = initializeClient;
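
/*
 * Illustrative usage sketch (not part of the original file). The require path, route wiring,
 * and handler name below are assumptions for illustration only.
 *
 *   const initializeClient = require('~/server/services/Endpoints/custom/initialize');
 *
 *   async function chatRequestHandler(req, res) {
 *     const { client } = await initializeClient({ req, res, endpointOption: req.body.endpointOption });
 *     // ...use `client` to send the chat completion and stream the response
 *   }
 */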