Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-09-22 08:12:00 +02:00.

* agents - phase 1 (#30) * chore: copy assistant files * feat: frontend and data-provider * feat: backend get endpoint test * fix(MessageEndpointIcon): switched to AgentName and AgentAvatar * fix: small fixes * fix: agent endpoint config * fix: show Agent Builder * chore: install agentus * chore: initial scaffolding for agents * fix: updated Assistant logic to Agent Logic for some Agent components * WIP first pass, demo of agent package * WIP: initial backend infra for agents * fix: agent list error * wip: agents routing * chore: Refactor useSSE hook to handle different data events * wip: correctly emit events * chore: Update @librechat/agentus npm dependency to version 1.0.9 * remove comment * first pass: streaming agent text * chore: Remove @librechat/agentus root-level workspace npm dependency * feat: Agent Schema and Model * fix: content handling fixes * fix: content message save * WIP: new content data * fix: run step issue with tool calls * chore: Update @librechat/agentus npm dependency to version 1.1.5 * feat: update controller and agent routes * wip: initial backend tool and tool error handling support * wip: tool chunks * chore: Update @librechat/agentus npm dependency to version 1.1.7 * chore: update tool_call typing, add test conditions and logs * fix: create agent * fix: create agent * first pass: render completed content parts * fix: remove logging, fix step handler typing * chore: Update @librechat/agentus npm dependency to version 1.1.9 * refactor: cleanup maps on unmount * chore: Update BaseClient.js to safely count tokens for string, number, and boolean values * fix: support subsequent messages with tool_calls * chore: export order * fix: select agent * fix: tool call types and handling * chore: switch to anthropic for testing * fix: AgentSelect * refactor: experimental: OpenAIClient to use array for intermediateReply * fix(useSSE): revert old condition for streaming legacy client tokens * fix: lint * revert `agent_id` to `id` * chore: update 
localization keys for agent-related components * feat: zod schema handling for actions * refactor(actions): if no params, no zodSchema * chore: Update @librechat/agentus npm dependency to version 1.2.1 * feat: first pass, actions * refactor: empty schema for actions without params * feat: Update createRun function to accept additional options * fix: message payload formatting; feat: add more client options * fix: ToolCall component rendering when action has no args but has output * refactor(ToolCall): allow non-stringy args * WIP: first pass, correctly formatted tool_calls between providers * refactor: Remove duplicate import of 'roles' module * refactor: Exclude 'vite.config.ts' from TypeScript compilation * refactor: fix agent related types > - no need to use endpoint/model fields for identifying agent metadata > - add `provider` distinction for agent-configured 'endpoint' - no need for agent-endpoint map - reduce complexity of tools as functions into tools as string[] - fix types related to above changes - reduce unnecessary variables for queries/mutations and corresponding react-query keys * refactor: Add tools and tool_kwargs fields to agent schema * refactor: Remove unused code and update dependencies * refactor: Update updateAgentHandler to use req.body directly * refactor: Update AgentSelect component to use localized hooks * refactor: Update agent schema to include tools and provider fields * refactor(AgentPanel): add scrollbar gutter, add provider field to form, fix agent schema required values * refactor: Update AgentSwitcher component to use selectedAgentId instead of selectedAgent * refactor: Update AgentPanel component to include alternateName import and defaultAgentFormValues * refactor(SelectDropDown): allow setting value as option while still supporting legacy usage (string values only) * refactor: SelectDropdown changes - Only necessary when the available values are objects with label/value fields and the selected value is expected to be a string. 
* refactor: TypeError issues and handle provider as option * feat: Add placeholder for provider selection in AgentPanel component * refactor: Update agent schema to include author and provider fields * fix: show expected 'create agent' placeholder when creating agent * chore: fix localization strings, hide capabilities form for now * chore: typing * refactor: import order and use compact agents schema for now * chore: typing * refactor: Update AgentForm type to use AgentCapabilities * fix agent form agent selection issues * feat: responsive agent selection * fix: Handle cancelled fetch in useSelectAgent hook * fix: reset agent form on accordion close/open * feat: Add agent_id to default conversation for agents endpoint * feat: agents endpoint request handling * refactor: reset conversation model on agent select * refactor: add `additional_instructions` to conversation schema, organize other fields * chore: casing * chore: types * refactor(loadAgentTools): explicitly pass agent_id, do not pass `model` to loadAgentTools for now, load action sets by agent_id * WIP: initial draft of real agent client initialization * WIP: first pass, anthropic agent requests * feat: remember last selected agent * feat: openai and azure connected * fix: prioritize agent model for runs unless an explicit override model is passed from client * feat: Agent Actions * fix: save agent id to convo * feat: model panel (#29) * feat: model panel * bring back comments * fix: method still null * fix: AgentPanel FormContext * feat: add more parameters * fix: style issues; refactor: Agent Controller * fix: cherry-pick * fix: Update AgentAvatar component to use AssistantIcon instead of BrainCircuit * feat: OGDialog for delete agent; feat(assistant): update Agent types, introduced `model_parameters` * feat: icon and general `model_parameters` update * feat: use react-hook-form better * fix: agent builder form reset issue when switching panels * refactor: modularize agent builder form --------- 
Co-authored-by: Danny Avila <danny@librechat.ai> * fix: AgentPanel and ModelPanel type issues and use `useFormContext` and `watch` instead of `methods` directly and `useWatch`. * fix: tool call issues due to invalid input (anthropic) of empty string * fix: handle empty text in Part component --------- Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com> * refactor: remove form ModelPanel and fixed nested ternary expressions in AgentConfig * fix: Model Parameters not saved correctly * refactor: remove console log * feat: avatar upload and get for Agents (#36) Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com> * chore: update to public package * fix: typing, optional chaining * fix: cursor not showing for content parts * chore: conditionally enable agents * ci: fix azure test * ci: fix frontend tests, fix eslint api * refactor: Remove unused errorContentPart variable * continue of the agent message PR (#40) * last fixes * fix: agentMap * pr merge test (#41) * fix: model icon not fetching correctly * remove console logs * feat: agent name * refactor: pass documentsMap as a prop to allow re-render of assistant form * refactor: pass documentsMap as a prop to allow re-render of assistant form * chore: Bump version to 0.7.419 * fix: TypeError: Cannot read properties of undefined (reading 'id') * refactor: update AgentSwitcher component to use ControlCombobox instead of Combobox --------- Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>
106 lines
3.3 KiB
JavaScript
const { HttpsProxyAgent } = require('https-proxy-agent');
|
|
const { resolveHeaders } = require('librechat-data-provider');
|
|
const { createLLM } = require('~/app/clients/llm');
|
|
|
|
/**
 * Initializes and returns a Language Learning Model (LLM) instance via `createLLM`.
 *
 * Extra properties on `options` beyond those listed here are accepted and ignored.
 *
 * @param {Object} options - Configuration options for the LLM.
 * @param {string} options.model - The model identifier (fallback when `modelName` is absent).
 * @param {string} [options.modelName] - The specific name of the model; takes precedence over `model`.
 * @param {number} [options.temperature] - The temperature setting for the model.
 * @param {number} [options.presence_penalty] - The presence penalty for the model.
 * @param {number} [options.frequency_penalty] - The frequency penalty for the model.
 * @param {number} [options.max_tokens] - The maximum number of tokens for the model output; omitted from model options when falsy.
 * @param {boolean} [options.streaming] - Whether to use streaming for the model output.
 * @param {string} [options.user] - The user identifier, forwarded in model options.
 * @param {string} [options.langchainProxy] - Base path override for the API (e.g. a langchain proxy URL).
 * @param {boolean} [options.useOpenRouter] - Whether to route requests through OpenRouter; overrides `langchainProxy`'s base path.
 * @param {Object} [options.options] - Additional request options.
 * @param {Object} [options.options.headers] - Custom headers for the request (plain object; arrays are ignored).
 * @param {string} [options.options.proxy] - Proxy URL for outbound HTTP/HTTPS requests.
 * @param {string} options.apiKey - The API key for authentication.
 * @param {Object} [options.azure] - Azure-specific configuration, passed through to `createLLM`.
 * @returns {Object} The initialized LLM instance.
 */
function initializeLLM(options) {
  const {
    model,
    modelName,
    temperature,
    presence_penalty,
    frequency_penalty,
    max_tokens,
    streaming,
    user,
    langchainProxy,
    useOpenRouter,
    // Default to {} so callers that omit `options.options` don't trigger a
    // TypeError when destructuring `headers`/`proxy`.
    options: { headers, proxy } = {},
    apiKey,
    azure,
  } = options;

  const modelOptions = {
    modelName: modelName || model,
    temperature,
    presence_penalty,
    frequency_penalty,
    user,
  };

  // Only include max_tokens when explicitly set; a falsy value (0/undefined)
  // means "let the provider decide".
  if (max_tokens) {
    modelOptions.max_tokens = max_tokens;
  }

  const configOptions = {};

  if (langchainProxy) {
    configOptions.basePath = langchainProxy;
  }

  // OpenRouter takes precedence over `langchainProxy` for the base path and
  // attaches the attribution headers OpenRouter expects.
  if (useOpenRouter) {
    configOptions.basePath = 'https://openrouter.ai/api/v1';
    configOptions.baseOptions = {
      headers: {
        'HTTP-Referer': 'https://librechat.ai',
        'X-Title': 'LibreChat',
      },
    };
  }

  // Merge custom headers with any already set (e.g. by OpenRouter above).
  // NOTE: existing baseOptions headers are spread last, so they win over
  // user-supplied headers on key conflicts.
  if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
    configOptions.baseOptions = {
      headers: resolveHeaders({
        ...headers,
        ...configOptions?.baseOptions?.headers,
      }),
    };
  }

  // Route both HTTP and HTTPS traffic through the proxy when configured.
  if (proxy) {
    configOptions.httpAgent = new HttpsProxyAgent(proxy);
    configOptions.httpsAgent = new HttpsProxyAgent(proxy);
  }

  const llm = createLLM({
    modelOptions,
    configOptions,
    openAIApiKey: apiKey,
    azure,
    streaming,
  });

  return llm;
}
|
|
|
|
// Public API of this module.
module.exports = { initializeLLM };
|