Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-20 18:30:15 +01:00)
* feat: Code Interpreter API & File Search Agent Uploads chore: add back code files wip: first pass, abstract key dialog refactor: influence checkbox on key changes refactor: update localization keys for 'execute code' to 'run code' wip: run code button refactor: add throwError parameter to loadAuthValues and getUserPluginAuthValue functions feat: first pass, API tool calling fix: handle missing toolId in callTool function and return 404 for non-existent tools feat: show code outputs fix: improve error handling in callTool function and log errors fix: handle potential null value for filepath in attachment destructuring fix: normalize language before rendering and prevent null return fix: add loading indicator in RunCode component while executing code feat: add support for conditional code execution in Markdown components feat: attachments refactor: remove bash fix: pass abort signal to graph/run refactor: debounce and rate limit tool call refactor: increase debounce delay for execute function feat: set code output attachments feat: image attachments refactor: apply message context refactor: pass `partIndex` feat: toolCall schema/model/methods feat: block indexing feat: get tool calls chore: imports chore: typing chore: condense type imports feat: get tool calls fix: block indexing chore: typing refactor: update tool calls mapping to support multiple results fix: add unique key to nav link for rendering wip: first pass, tool call results refactor: update query cache from successful tool call mutation style: improve result switcher styling chore: note on using \`.toObject()\` feat: add agent_id field to conversation schema chore: typing refactor: rename agentMap to agentsMap for consistency feat: Agent Name as chat input placeholder chore: bump agents 📦 chore: update @langchain dependencies to latest versions to match agents package 📦 chore: update @librechat/agents dependency to version 1.8.0 fix: Aborting agent stream removes sender; fix(bedrock): completion 
removes preset name label refactor: remove direct file parameter to use req.file, add `processAgentFileUpload` for image uploads feat: upload menu feat: prime message_file resources feat: implement conversation access validation in chat route refactor: remove file parameter from processFileUpload and use req.file instead feat: add savedMessageIds set to track saved message IDs in BaseClient, to prevent unnecessary double-write to db feat: prevent duplicate message saves by checking savedMessageIds in AgentController refactor: skip legacy RAG API handling for agents feat: add files field to convoSchema refactor: update request type annotations from Express.Request to ServerRequest in file processing functions feat: track conversation files fix: resendFiles, addPreviousAttachments handling feat: add ID validation for session_id and file_id in download route feat: entity_id for code file uploads/downloads fix: code file edge cases feat: delete related tool calls feat: add stream rate handling for LLM configuration feat: enhance system content with attached file information fix: improve error logging in resource priming function * WIP: PoC, sequential agents WIP: PoC Sequential Agents, first pass content data + bump agents package fix: package-lock WIP: PoC, o1 support, refactor bufferString feat: convertJsonSchemaToZod fix: form issues and schema defining erroneous model fix: max length issue on agent form instructions, limit conversation messages to sequential agents feat: add abort signal support to createRun function and AgentClient feat: PoC, hide prior sequential agent steps fix: update parameter naming from config to metadata in event handlers for clarity, add model to usage data refactor: use only last contentData, track model for usage data chore: bump agents package fix: content parts issue refactor: filter contentParts to include tool calls and relevant indices feat: show function calls refactor: filter context messages to exclude tool calls when no tools 
are available to the agent fix: ensure tool call content is not undefined in formatMessages feat: add agent_id field to conversationPreset schema feat: hide sequential agents feat: increase upload toast duration to 10 seconds * refactor: tool context handling & update Code API Key Dialog feat: toolContextMap chore: skipSpecs -> useSpecs ci: fix handleTools tests feat: API Key Dialog * feat: Agent Permissions Admin Controls feat: replace label with button for prompt permission toggle feat: update agent permissions feat: enable experimental agents and streamline capability configuration feat: implement access control for agents and enhance endpoint menu items feat: add welcome message for agent selection in localization feat: add agents permission to access control and update version to 0.7.57 * fix: update types in useAssistantListMap and useMentions hooks for better null handling * feat: mention agents * fix: agent tool resource race conditions when deleting agent tool resource files * feat: add error handling for code execution with user feedback * refactor: rename AdminControls to AdminSettings for clarity * style: add gap to button in AdminSettings for improved layout * refactor: separate agent query hooks and check access to enable fetching * fix: remove unused provider from agent initialization options, creates issue with custom endpoints * refactor: remove redundant/deprecated modelOptions from AgentClient processes * chore: update @librechat/agents to version 1.8.5 in package.json and package-lock.json * fix: minor styling issues + agent panel uniformity * fix: agent edge cases when set endpoint is no longer defined * refactor: remove unused cleanup function call from AppService * fix: update link in ApiKeyDialog to point to pricing page * fix: improve type handling and layout calculations in SidePanel component * fix: add missing localization string for agent selection in SidePanel * chore: form styling and localizations for upload filesearch/code 
interpreter * fix: model selection placeholder logic in AgentConfig component * style: agent capabilities * fix: add localization for provider selection and improve dropdown styling in ModelPanel * refactor: use gpt-4o-mini > gpt-3.5-turbo * fix: agents configuration for loadDefaultInterface and update related tests * feat: DALLE Agents support
164 lines
4.8 KiB
JavaScript
const {
|
|
ErrorTypes,
|
|
EModelEndpoint,
|
|
resolveHeaders,
|
|
mapModelToAzureConfig,
|
|
} = require('librechat-data-provider');
|
|
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
|
|
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
|
|
const { isEnabled, isUserProvided, sleep } = require('~/server/utils');
|
|
const { getAzureCredentials } = require('~/utils');
|
|
const { OpenAIClient } = require('~/app');
|
|
|
|
const initializeClient = async ({
|
|
req,
|
|
res,
|
|
endpointOption,
|
|
optionsOnly,
|
|
overrideEndpoint,
|
|
overrideModel,
|
|
}) => {
|
|
const {
|
|
PROXY,
|
|
OPENAI_API_KEY,
|
|
AZURE_API_KEY,
|
|
OPENAI_REVERSE_PROXY,
|
|
AZURE_OPENAI_BASEURL,
|
|
OPENAI_SUMMARIZE,
|
|
DEBUG_OPENAI,
|
|
} = process.env;
|
|
const { key: expiresAt } = req.body;
|
|
const modelName = overrideModel ?? req.body.model;
|
|
const endpoint = overrideEndpoint ?? req.body.endpoint;
|
|
const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
|
|
|
|
const credentials = {
|
|
[EModelEndpoint.openAI]: OPENAI_API_KEY,
|
|
[EModelEndpoint.azureOpenAI]: AZURE_API_KEY,
|
|
};
|
|
|
|
const baseURLOptions = {
|
|
[EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
|
|
[EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL,
|
|
};
|
|
|
|
const userProvidesKey = isUserProvided(credentials[endpoint]);
|
|
const userProvidesURL = isUserProvided(baseURLOptions[endpoint]);
|
|
|
|
let userValues = null;
|
|
if (expiresAt && (userProvidesKey || userProvidesURL)) {
|
|
checkUserKeyExpiry(expiresAt, endpoint);
|
|
userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
|
|
}
|
|
|
|
let apiKey = userProvidesKey ? userValues?.apiKey : credentials[endpoint];
|
|
let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint];
|
|
|
|
const clientOptions = {
|
|
contextStrategy,
|
|
proxy: PROXY ?? null,
|
|
debug: isEnabled(DEBUG_OPENAI),
|
|
reverseProxyUrl: baseURL ? baseURL : null,
|
|
...endpointOption,
|
|
};
|
|
|
|
const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
|
|
/** @type {false | TAzureConfig} */
|
|
const azureConfig = isAzureOpenAI && req.app.locals[EModelEndpoint.azureOpenAI];
|
|
|
|
if (isAzureOpenAI && azureConfig) {
|
|
const { modelGroupMap, groupMap } = azureConfig;
|
|
const {
|
|
azureOptions,
|
|
baseURL,
|
|
headers = {},
|
|
serverless,
|
|
} = mapModelToAzureConfig({
|
|
modelName,
|
|
modelGroupMap,
|
|
groupMap,
|
|
});
|
|
|
|
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
|
|
clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
|
|
|
|
clientOptions.titleConvo = azureConfig.titleConvo;
|
|
clientOptions.titleModel = azureConfig.titleModel;
|
|
|
|
const azureRate = modelName.includes('gpt-4') ? 30 : 17;
|
|
clientOptions.streamRate = azureConfig.streamRate ?? azureRate;
|
|
|
|
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
|
|
|
|
const groupName = modelGroupMap[modelName].group;
|
|
clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
|
|
clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
|
|
clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
|
|
|
|
apiKey = azureOptions.azureOpenAIApiKey;
|
|
clientOptions.azure = !serverless && azureOptions;
|
|
if (serverless === true) {
|
|
clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
|
|
? { 'api-version': azureOptions.azureOpenAIApiVersion }
|
|
: undefined;
|
|
clientOptions.headers['api-key'] = apiKey;
|
|
}
|
|
} else if (isAzureOpenAI) {
|
|
clientOptions.azure = userProvidesKey ? JSON.parse(userValues.apiKey) : getAzureCredentials();
|
|
apiKey = clientOptions.azure.azureOpenAIApiKey;
|
|
}
|
|
|
|
/** @type {undefined | TBaseEndpoint} */
|
|
const openAIConfig = req.app.locals[EModelEndpoint.openAI];
|
|
|
|
if (!isAzureOpenAI && openAIConfig) {
|
|
clientOptions.streamRate = openAIConfig.streamRate;
|
|
}
|
|
|
|
/** @type {undefined | TBaseEndpoint} */
|
|
const allConfig = req.app.locals.all;
|
|
if (allConfig) {
|
|
clientOptions.streamRate = allConfig.streamRate;
|
|
}
|
|
|
|
if (userProvidesKey & !apiKey) {
|
|
throw new Error(
|
|
JSON.stringify({
|
|
type: ErrorTypes.NO_USER_KEY,
|
|
}),
|
|
);
|
|
}
|
|
|
|
if (!apiKey) {
|
|
throw new Error(`${endpoint} API Key not provided.`);
|
|
}
|
|
|
|
if (optionsOnly) {
|
|
const requestOptions = Object.assign(
|
|
{
|
|
modelOptions: endpointOption.model_parameters,
|
|
},
|
|
clientOptions,
|
|
);
|
|
const options = getLLMConfig(apiKey, requestOptions);
|
|
if (!clientOptions.streamRate) {
|
|
return options;
|
|
}
|
|
options.llmConfig.callbacks = [
|
|
{
|
|
handleLLMNewToken: async () => {
|
|
await sleep(clientOptions.streamRate);
|
|
},
|
|
},
|
|
];
|
|
return options;
|
|
}
|
|
|
|
const client = new OpenAIClient(apiKey, Object.assign({ req, res }, clientOptions));
|
|
return {
|
|
client,
|
|
openAIApiKey: apiKey,
|
|
};
|
|
};
|
|
|
|
module.exports = initializeClient;
|