mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-01-06 18:48:50 +01:00
* 🧠 feat: User Memories for Conversational Context
chore: mcp typing, use `t`
WIP: first pass, Memories UI
- Added MemoryViewer component for displaying, editing, and deleting user memories.
- Integrated data provider hooks for fetching, updating, and deleting memories.
- Implemented pagination and loading states for better user experience.
- Created unit tests for MemoryViewer to ensure functionality and interaction with data provider.
- Updated translation files to include new UI strings related to memories.
chore: move mcp-related files to own directory
chore: rename librechat-mcp to librechat-api
WIP: first pass, memory processing and data schemas
chore: linting in fileSearch.js query description
chore: rename librechat-api to @librechat/api across the project
WIP: first pass, functional memory agent
feat: add MemoryEditDialog and MemoryViewer components for managing user memories
- Introduced MemoryEditDialog for editing memory entries with validation and toast notifications.
- Updated MemoryViewer to support editing and deleting memories, including pagination and loading states.
- Enhanced data provider to handle memory updates with optional original key for better management.
- Added new localization strings for memory-related UI elements.
feat: add memory permissions management
- Implemented memory permissions in the backend, allowing roles to have specific permissions for using, creating, updating, and reading memories.
- Added new API endpoints for updating memory permissions associated with roles.
- Created a new AdminSettings component for managing memory permissions in the frontend.
- Integrated memory permissions into the existing roles and permissions schemas.
- Updated the interface to include memory settings and permissions.
- Enhanced the MemoryViewer component to conditionally render admin settings based on user roles.
- Added localization support for memory permissions in the translation files.
feat: move AdminSettings component to a new position in MemoryViewer for better visibility
refactor: clean up commented code in MemoryViewer component
feat: enhance MemoryViewer with search functionality and improve MemoryEditDialog integration
- Added a search input to filter memories in the MemoryViewer component.
- Refactored MemoryEditDialog to accept children for better customization.
- Updated MemoryViewer to utilize the new EditMemoryButton and DeleteMemoryButton components for editing and deleting memories.
- Improved localization support by adding new strings for memory filtering and deletion confirmation.
refactor: optimize memory filtering in MemoryViewer using match-sorter
- Replaced manual filtering logic with match-sorter for improved search functionality.
- Enhanced performance and readability of the filteredMemories computation.
feat: enhance MemoryEditDialog with triggerRef and improve updateMemory mutation handling
feat: implement access control for MemoryEditDialog and MemoryViewer components
refactor: remove commented out code and create runMemory method
refactor: rename role based files
feat: implement access control for memory usage in AgentClient
refactor: simplify checkVisionRequest method in AgentClient by removing commented-out code
refactor: make `agents` dir in api package
refactor: migrate Azure utilities to TypeScript and consolidate imports
refactor: move sanitizeFilename function to a new file and update imports, add related tests
refactor: update LLM configuration types and consolidate Azure options in the API package
chore: linting
chore: import order
refactor: replace getLLMConfig with getOpenAIConfig and remove unused LLM configuration file
chore: update winston-daily-rotate-file to version 5.0.0 and add object-hash dependency in package-lock.json
refactor: move primeResources and optionalChainWithEmptyCheck functions to resources.ts and update imports
refactor: move createRun function to a new run.ts file and update related imports
fix: ensure safeAttachments is correctly typed as an array of TFile
chore: add node-fetch dependency and refactor fetch-related functions into packages/api/utils, removing the old generators file
refactor: enhance TEndpointOption type by using Pick to streamline endpoint fields and add new properties for model parameters and client options
feat: implement initializeOpenAIOptions function and update OpenAI types for enhanced configuration handling
fix: update types due to new TEndpointOption typing
fix: ensure safe access to group parameters in initializeOpenAIOptions function
fix: remove redundant API key validation comment in initializeOpenAIOptions function
refactor: rename initializeOpenAIOptions to initializeOpenAI for consistency and update related documentation
refactor: decouple req.body fields and tool loading from initializeAgentOptions
chore: linting
refactor: adjust column widths in MemoryViewer for improved layout
refactor: simplify agent initialization by creating loadAgent function and removing unused code
feat: add memory configuration loading and validation functions
WIP: first pass, memory processing with config
feat: implement memory callback and artifact handling
feat: implement memory artifacts display and processing updates
feat: add memory configuration options and schema validation for validKeys
fix: update MemoryEditDialog and MemoryViewer to handle memory state and display improvements
refactor: remove padding from BookmarkTable and MemoryViewer headers for consistent styling
WIP: initial tokenLimit config and move Tokenizer to @librechat/api
refactor: update mongoMeili plugin methods to use callback for better error handling
feat: enhance memory management with token tracking and usage metrics
- Added token counting for memory entries to enforce limits and provide usage statistics.
- Updated memory retrieval and update routes to include total token usage and limit.
- Enhanced MemoryEditDialog and MemoryViewer components to display memory usage and token information.
- Refactored memory processing functions to handle token limits and provide feedback on memory capacity.
feat: implement memory artifact handling in attachment handler
- Enhanced useAttachmentHandler to process memory artifacts when receiving updates.
- Introduced handleMemoryArtifact utility to manage memory updates and deletions.
- Updated query client to reflect changes in memory state based on incoming data.
refactor: restructure web search key extraction logic
- Moved the logic for extracting API keys from the webSearchAuth configuration into a dedicated function, getWebSearchKeys.
- Updated webSearchKeys to utilize the new function for improved clarity and maintainability.
- Prevents build time errors
feat: add personalization settings and memory preferences management
- Introduced a new Personalization tab in settings to manage user memory preferences.
- Implemented API endpoints and client-side logic for updating memory preferences.
- Enhanced user interface components to reflect personalization options and memory usage.
- Updated permissions to allow users to opt out of memory features.
- Added localization support for new settings and messages related to personalization.
style: personalization switch class
feat: add PersonalizationIcon and align Side Panel UI
feat: implement memory creation functionality
- Added a new API endpoint for creating memory entries, including validation for key and value.
- Introduced MemoryCreateDialog component for user interface to facilitate memory creation.
- Integrated token limit checks to prevent exceeding user memory capacity.
- Updated MemoryViewer to include a button for opening the memory creation dialog.
- Enhanced localization support for new messages related to memory creation.
feat: enhance message processing with configurable window size
- Updated AgentClient to use a configurable message window size for processing messages.
- Introduced messageWindowSize option in memory configuration schema with a default value of 5.
- Improved logic for selecting messages to process based on the configured window size.
chore: update librechat-data-provider version to 0.7.87 in package.json and package-lock.json
chore: remove OpenAPIPlugin and its associated tests
chore: remove MIGRATION_README.md as migration tasks are completed
ci: fix backend tests
chore: remove unused translation keys from localization file
chore: remove problematic test file and unused var in AgentClient
chore: remove unused import and import directly for JSDoc
* feat: add api package build stage in Dockerfile for improved modularity
* docs: reorder build steps in contributing guide for clarity
454 lines
13 KiB
TypeScript
454 lines
13 KiB
TypeScript
import dayjs from 'dayjs';
|
||
import type { ZodIssue } from 'zod';
|
||
import type * as a from './types/assistants';
|
||
import type * as s from './schemas';
|
||
import type * as t from './types';
|
||
import { ContentTypes } from './types/runs';
|
||
import {
|
||
openAISchema,
|
||
googleSchema,
|
||
EModelEndpoint,
|
||
anthropicSchema,
|
||
assistantSchema,
|
||
gptPluginsSchema,
|
||
// agentsSchema,
|
||
compactAgentsSchema,
|
||
compactGoogleSchema,
|
||
compactPluginsSchema,
|
||
compactAssistantSchema,
|
||
} from './schemas';
|
||
import { bedrockInputSchema } from './bedrock';
|
||
import { extractEnvVariable } from './utils';
|
||
import { alternateName } from './config';
|
||
|
||
/** Union of all Zod schemas used to parse/validate a conversation for a given endpoint. */
type EndpointSchema =
  | typeof openAISchema
  | typeof googleSchema
  | typeof anthropicSchema
  | typeof gptPluginsSchema
  | typeof assistantSchema
  | typeof compactAgentsSchema
  | typeof bedrockInputSchema;

/** Every endpoint that has a parsing schema (`chatGPTBrowser` is the one exclusion). */
export type EndpointSchemaKey = Exclude<EModelEndpoint, EModelEndpoint.chatGPTBrowser>;
|
||
|
||
/**
 * Maps each known endpoint to the Zod schema used by `parseConvo` to parse a
 * conversation/preset for that endpoint.
 */
const endpointSchemas: Record<EndpointSchemaKey, EndpointSchema> = {
  [EModelEndpoint.openAI]: openAISchema,
  // azureOpenAI and custom endpoints are OpenAI-compatible, so they reuse the OpenAI schema.
  [EModelEndpoint.azureOpenAI]: openAISchema,
  [EModelEndpoint.custom]: openAISchema,
  [EModelEndpoint.google]: googleSchema,
  [EModelEndpoint.anthropic]: anthropicSchema,
  [EModelEndpoint.gptPlugins]: gptPluginsSchema,
  [EModelEndpoint.assistants]: assistantSchema,
  // azureAssistants shares the assistants schema.
  [EModelEndpoint.azureAssistants]: assistantSchema,
  [EModelEndpoint.agents]: compactAgentsSchema,
  [EModelEndpoint.bedrock]: bedrockInputSchema,
};
|
||
|
||
// const schemaCreators: Record<EModelEndpoint, (customSchema: DefaultSchemaValues) => EndpointSchema> = {
|
||
// [EModelEndpoint.google]: createGoogleSchema,
|
||
// };
|
||
|
||
/** Get the enabled endpoints from the `ENDPOINTS` environment variable */
|
||
export function getEnabledEndpoints() {
|
||
const defaultEndpoints: string[] = [
|
||
EModelEndpoint.openAI,
|
||
EModelEndpoint.agents,
|
||
EModelEndpoint.assistants,
|
||
EModelEndpoint.azureAssistants,
|
||
EModelEndpoint.azureOpenAI,
|
||
EModelEndpoint.google,
|
||
EModelEndpoint.chatGPTBrowser,
|
||
EModelEndpoint.gptPlugins,
|
||
EModelEndpoint.anthropic,
|
||
EModelEndpoint.bedrock,
|
||
];
|
||
|
||
const endpointsEnv = process.env.ENDPOINTS ?? '';
|
||
let enabledEndpoints = defaultEndpoints;
|
||
if (endpointsEnv) {
|
||
enabledEndpoints = endpointsEnv
|
||
.split(',')
|
||
.filter((endpoint) => endpoint.trim())
|
||
.map((endpoint) => endpoint.trim());
|
||
}
|
||
return enabledEndpoints;
|
||
}
|
||
|
||
/** Orders an existing EndpointsConfig object based on enabled endpoint/custom ordering */
export function orderEndpointsConfig(endpointsConfig: t.TEndpointsConfig) {
  if (!endpointsConfig) {
    return {};
  }
  const enabledEndpoints = getEnabledEndpoints();
  const endpointKeys = Object.keys(endpointsConfig);
  // All custom endpoints share one default slot: the position of `custom` in the enabled list.
  const defaultCustomIndex = enabledEndpoints.indexOf(EModelEndpoint.custom);
  return endpointKeys.reduce(
    (accumulatedConfig: Record<string, t.TConfig | null | undefined>, currentEndpointKey) => {
      // A key that is not a member of the EModelEndpoint enum is a user-defined (custom) endpoint.
      const isCustom = !(currentEndpointKey in EModelEndpoint);
      const isEnabled = enabledEndpoints.includes(currentEndpointKey);
      // Known endpoints that are not enabled are dropped; custom endpoints always pass through.
      if (!isEnabled && !isCustom) {
        return accumulatedConfig;
      }

      const index = enabledEndpoints.indexOf(currentEndpointKey);

      if (isCustom) {
        accumulatedConfig[currentEndpointKey] = {
          // Fallback of 9999 pushes custom endpoints last when `custom` is not in the enabled list.
          order: defaultCustomIndex >= 0 ? defaultCustomIndex : 9999,
          // Spread second so an explicit `order` in the incoming config wins over the default above.
          ...(endpointsConfig[currentEndpointKey] as Omit<t.TConfig, 'order'> & { order?: number }),
        };
      } else if (endpointsConfig[currentEndpointKey]) {
        accumulatedConfig[currentEndpointKey] = {
          ...endpointsConfig[currentEndpointKey],
          // Known endpoints are ordered by their position in the enabled list
          // (spread first, so this `order` always overwrites any incoming value).
          order: index,
        };
      }
      return accumulatedConfig;
    },
    {},
  );
}
|
||
|
||
/** Converts an array of Zod issues into a string. */
|
||
export function errorsToString(errors: ZodIssue[]) {
|
||
return errors
|
||
.map((error) => {
|
||
const field = error.path.join('.');
|
||
const message = error.message;
|
||
|
||
return `${field}: ${message}`;
|
||
})
|
||
.join(' ');
|
||
}
|
||
|
||
/** Resolves header values to env variables if detected */
|
||
export function resolveHeaders(headers: Record<string, string> | undefined) {
|
||
const resolvedHeaders = { ...(headers ?? {}) };
|
||
|
||
if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
|
||
Object.keys(headers).forEach((key) => {
|
||
resolvedHeaders[key] = extractEnvVariable(headers[key]);
|
||
});
|
||
}
|
||
|
||
return resolvedHeaders;
|
||
}
|
||
|
||
export function getFirstDefinedValue(possibleValues: string[]) {
|
||
let returnValue;
|
||
for (const value of possibleValues) {
|
||
if (value) {
|
||
returnValue = value;
|
||
break;
|
||
}
|
||
}
|
||
return returnValue;
|
||
}
|
||
|
||
export function getNonEmptyValue(possibleValues: string[]) {
|
||
for (const value of possibleValues) {
|
||
if (value && value.trim() !== '') {
|
||
return value;
|
||
}
|
||
}
|
||
return undefined;
|
||
}
|
||
|
||
/** Candidate model lists used to backfill a parsed conversation's model fields. */
export type TPossibleValues = {
  // Candidates for the primary `model` field (first truthy value wins).
  models: string[];
  // Optional candidates for `agentOptions.model`.
  secondaryModels?: string[];
};
|
||
|
||
export const parseConvo = ({
|
||
endpoint,
|
||
endpointType,
|
||
conversation,
|
||
possibleValues,
|
||
}: {
|
||
endpoint: EndpointSchemaKey;
|
||
endpointType?: EndpointSchemaKey | null;
|
||
conversation: Partial<s.TConversation | s.TPreset> | null;
|
||
possibleValues?: TPossibleValues;
|
||
// TODO: POC for default schema
|
||
// defaultSchema?: Partial<EndpointSchema>,
|
||
}) => {
|
||
let schema = endpointSchemas[endpoint] as EndpointSchema | undefined;
|
||
|
||
if (!schema && !endpointType) {
|
||
throw new Error(`Unknown endpoint: ${endpoint}`);
|
||
} else if (!schema && endpointType) {
|
||
schema = endpointSchemas[endpointType];
|
||
}
|
||
|
||
// if (defaultSchema && schemaCreators[endpoint]) {
|
||
// schema = schemaCreators[endpoint](defaultSchema);
|
||
// }
|
||
|
||
const convo = schema?.parse(conversation) as s.TConversation | undefined;
|
||
const { models, secondaryModels } = possibleValues ?? {};
|
||
|
||
if (models && convo) {
|
||
convo.model = getFirstDefinedValue(models) ?? convo.model;
|
||
}
|
||
|
||
if (secondaryModels && convo?.agentOptions) {
|
||
convo.agentOptions.model = getFirstDefinedValue(secondaryModels) ?? convo.agentOptions.model;
|
||
}
|
||
|
||
return convo;
|
||
};
|
||
|
||
/** Match GPT followed by digit, optional decimal, and optional suffix
|
||
*
|
||
* Examples: gpt-4, gpt-4o, gpt-4.5, gpt-5a, etc. */
|
||
const extractGPTVersion = (modelStr: string): string => {
|
||
const gptMatch = modelStr.match(/gpt-(\d+(?:\.\d+)?)([a-z])?/i);
|
||
if (gptMatch) {
|
||
const version = gptMatch[1];
|
||
const suffix = gptMatch[2] || '';
|
||
return `GPT-${version}${suffix}`;
|
||
}
|
||
return '';
|
||
};
|
||
|
||
/** Match omni models (o1, o3, etc.), "o" followed by a digit, possibly with decimal */
|
||
const extractOmniVersion = (modelStr: string): string => {
|
||
const omniMatch = modelStr.match(/\bo(\d+(?:\.\d+)?)\b/i);
|
||
if (omniMatch) {
|
||
const version = omniMatch[1];
|
||
return `o${version}`;
|
||
}
|
||
return '';
|
||
};
|
||
|
||
/**
 * Resolves the display name ("sender") for a response based on the endpoint
 * and the labels/model present in the endpoint option.
 *
 * Precedence, where applicable: explicit labels (`chatGptLabel` / `modelLabel`)
 * first, then a name derived from the model string, then a per-endpoint default.
 * Returns an empty string when no branch matches the endpoint.
 */
export const getResponseSender = (endpointOption: t.TEndpointOption): string => {
  const {
    model: _m,
    endpoint: _e,
    endpointType,
    modelDisplayLabel: _mdl,
    chatGptLabel: _cgl,
    modelLabel: _ml,
  } = endpointOption;

  const endpoint = _e as EModelEndpoint;

  // Normalize potentially-undefined fields to '' so truthiness checks below are safe.
  const model = _m ?? '';
  const modelDisplayLabel = _mdl ?? '';
  const chatGptLabel = _cgl ?? '';
  const modelLabel = _ml ?? '';
  // OpenAI-style endpoints (note: bedrock is handled here too).
  if (
    [
      EModelEndpoint.openAI,
      EModelEndpoint.bedrock,
      EModelEndpoint.gptPlugins,
      EModelEndpoint.azureOpenAI,
      EModelEndpoint.chatGPTBrowser,
    ].includes(endpoint)
  ) {
    if (chatGptLabel) {
      return chatGptLabel;
    } else if (modelLabel) {
      return modelLabel;
    } else if (model && extractOmniVersion(model)) {
      // Omni models (o1, o3, ...) display as "o<version>".
      return extractOmniVersion(model);
    } else if (model && (model.includes('mistral') || model.includes('codestral'))) {
      return 'Mistral';
    } else if (model && model.includes('deepseek')) {
      return 'Deepseek';
    } else if (model && model.includes('gpt-')) {
      const gptVersion = extractGPTVersion(model);
      return gptVersion || 'GPT';
    }
    return (alternateName[endpoint] as string | undefined) ?? 'ChatGPT';
  }

  if (endpoint === EModelEndpoint.anthropic) {
    return modelLabel || 'Claude';
  }

  // NOTE(review): unreachable — bedrock is included in the array branch above,
  // which always returns; confirm and consider removing this block.
  if (endpoint === EModelEndpoint.bedrock) {
    return modelLabel || alternateName[endpoint];
  }

  if (endpoint === EModelEndpoint.google) {
    if (modelLabel) {
      return modelLabel;
    } else if (model && (model.includes('gemini') || model.includes('learnlm'))) {
      return 'Gemini';
    } else if (model?.toLowerCase().includes('gemma') === true) {
      return 'Gemma';
    } else if (model && model.includes('code')) {
      return 'Codey';
    }

    return 'PaLM2';
  }

  // Custom endpoints: prefer explicit labels, then model-derived names, then
  // the endpoint's display label, and finally a generic "AI".
  if (endpoint === EModelEndpoint.custom || endpointType === EModelEndpoint.custom) {
    if (modelLabel) {
      return modelLabel;
    } else if (chatGptLabel) {
      return chatGptLabel;
    } else if (model && extractOmniVersion(model)) {
      return extractOmniVersion(model);
    } else if (model && (model.includes('mistral') || model.includes('codestral'))) {
      return 'Mistral';
    } else if (model && model.includes('deepseek')) {
      return 'Deepseek';
    } else if (model && model.includes('gpt-')) {
      const gptVersion = extractGPTVersion(model);
      return gptVersion || 'GPT';
    } else if (modelDisplayLabel) {
      return modelDisplayLabel;
    }

    return 'AI';
  }

  return '';
};
|
||
|
||
/** Union of the "compact" schemas accepted by `parseCompactConvo`. */
type CompactEndpointSchema =
  | typeof openAISchema
  | typeof compactAssistantSchema
  | typeof compactAgentsSchema
  | typeof compactGoogleSchema
  | typeof anthropicSchema
  | typeof bedrockInputSchema
  | typeof compactPluginsSchema;

/**
 * Maps each endpoint to the schema used by `parseCompactConvo`.
 * NOTE(review): "compact" presumably means default values are stripped by these
 * schemas — confirm against the schema definitions in `./schemas`.
 */
const compactEndpointSchemas: Record<EndpointSchemaKey, CompactEndpointSchema> = {
  [EModelEndpoint.openAI]: openAISchema,
  // azureOpenAI and custom endpoints are OpenAI-compatible, so they reuse the OpenAI schema.
  [EModelEndpoint.azureOpenAI]: openAISchema,
  [EModelEndpoint.custom]: openAISchema,
  [EModelEndpoint.assistants]: compactAssistantSchema,
  [EModelEndpoint.azureAssistants]: compactAssistantSchema,
  [EModelEndpoint.agents]: compactAgentsSchema,
  [EModelEndpoint.google]: compactGoogleSchema,
  [EModelEndpoint.bedrock]: bedrockInputSchema,
  [EModelEndpoint.anthropic]: anthropicSchema,
  [EModelEndpoint.gptPlugins]: compactPluginsSchema,
};
|
||
|
||
export const parseCompactConvo = ({
|
||
endpoint,
|
||
endpointType,
|
||
conversation,
|
||
possibleValues,
|
||
}: {
|
||
endpoint?: EndpointSchemaKey;
|
||
endpointType?: EndpointSchemaKey | null;
|
||
conversation: Partial<s.TConversation | s.TPreset>;
|
||
possibleValues?: TPossibleValues;
|
||
// TODO: POC for default schema
|
||
// defaultSchema?: Partial<EndpointSchema>,
|
||
}) => {
|
||
if (!endpoint) {
|
||
throw new Error(`undefined endpoint: ${endpoint}`);
|
||
}
|
||
|
||
let schema = compactEndpointSchemas[endpoint] as CompactEndpointSchema | undefined;
|
||
|
||
if (!schema && !endpointType) {
|
||
throw new Error(`Unknown endpoint: ${endpoint}`);
|
||
} else if (!schema && endpointType) {
|
||
schema = compactEndpointSchemas[endpointType];
|
||
}
|
||
|
||
if (!schema) {
|
||
throw new Error(`Unknown endpointType: ${endpointType}`);
|
||
}
|
||
|
||
const convo = schema.parse(conversation) as s.TConversation | null;
|
||
// const { models, secondaryModels } = possibleValues ?? {};
|
||
const { models } = possibleValues ?? {};
|
||
|
||
if (models && convo) {
|
||
convo.model = getFirstDefinedValue(models) ?? convo.model;
|
||
}
|
||
|
||
// if (secondaryModels && convo.agentOptions) {
|
||
// convo.agentOptionmodel = getFirstDefinedValue(secondaryModels) ?? convo.agentOptionmodel;
|
||
// }
|
||
|
||
return convo;
|
||
};
|
||
|
||
export function parseTextParts(
|
||
contentParts: a.TMessageContentParts[],
|
||
skipReasoning: boolean = false,
|
||
): string {
|
||
let result = '';
|
||
|
||
for (const part of contentParts) {
|
||
if (!part.type) {
|
||
continue;
|
||
}
|
||
if (part.type === ContentTypes.TEXT) {
|
||
const textValue = typeof part.text === 'string' ? part.text : part.text.value;
|
||
|
||
if (
|
||
result.length > 0 &&
|
||
textValue.length > 0 &&
|
||
result[result.length - 1] !== ' ' &&
|
||
textValue[0] !== ' '
|
||
) {
|
||
result += ' ';
|
||
}
|
||
result += textValue;
|
||
} else if (part.type === ContentTypes.THINK && !skipReasoning) {
|
||
const textValue = typeof part.think === 'string' ? part.think : '';
|
||
if (
|
||
result.length > 0 &&
|
||
textValue.length > 0 &&
|
||
result[result.length - 1] !== ' ' &&
|
||
textValue[0] !== ' '
|
||
) {
|
||
result += ' ';
|
||
}
|
||
result += textValue;
|
||
}
|
||
}
|
||
|
||
return result;
|
||
}
|
||
|
||
export const SEPARATORS = ['.', '?', '!', '۔', '。', '‥', ';', '¡', '¿', '\n', '```'];
|
||
|
||
export function findLastSeparatorIndex(text: string, separators = SEPARATORS): number {
|
||
let lastIndex = -1;
|
||
for (const separator of separators) {
|
||
const index = text.lastIndexOf(separator);
|
||
if (index > lastIndex) {
|
||
lastIndex = index;
|
||
}
|
||
}
|
||
return lastIndex;
|
||
}
|
||
|
||
export function replaceSpecialVars({ text, user }: { text: string; user?: t.TUser | null }) {
|
||
let result = text;
|
||
if (!result) {
|
||
return result;
|
||
}
|
||
|
||
// e.g., "2024-04-29 (1)" (1=Monday)
|
||
const currentDate = dayjs().format('YYYY-MM-DD');
|
||
const dayNumber = dayjs().day();
|
||
const combinedDate = `${currentDate} (${dayNumber})`;
|
||
result = result.replace(/{{current_date}}/gi, combinedDate);
|
||
|
||
const currentDatetime = dayjs().format('YYYY-MM-DD HH:mm:ss');
|
||
result = result.replace(/{{current_datetime}}/gi, `${currentDatetime} (${dayNumber})`);
|
||
|
||
const isoDatetime = dayjs().toISOString();
|
||
result = result.replace(/{{iso_datetime}}/gi, isoDatetime);
|
||
|
||
if (user && user.name) {
|
||
result = result.replace(/{{current_user}}/gi, user.name);
|
||
}
|
||
|
||
return result;
|
||
}
|