2024-05-02 08:48:26 +02:00
const { v4 : uuidv4 } = require ( 'uuid' ) ;
🏗️ refactor: Remove Redundant Caching, Migrate Config Services to TypeScript (#12466)
* ♻️ refactor: Remove redundant scopedCacheKey caching, support user-provided key model fetching
Remove redundant cache layers that used `scopedCacheKey()` (tenant-only scoping)
on top of `getAppConfig()` which already caches per-principal (role+user+tenant).
This caused config overrides for different principals within the same tenant to
be invisible due to stale cached data.
Changes:
- Add `requireJwtAuth` to `/api/endpoints` route for proper user context
- Remove ENDPOINT_CONFIG, STARTUP_CONFIG, PLUGINS, TOOLS, and MODELS_CONFIG
cache layers — all derive from `getAppConfig()` with cheap computation
- Enhance MODEL_QUERIES cache: hash(baseURL+apiKey) keys, 2-minute TTL,
caching centralized in `fetchModels()` base function
- Support fetching models with user-provided API keys in `loadConfigModels`
via `getUserKeyValues` lookup (no caching for user keys)
- Update all affected tests
Closes #1028
* ♻️ refactor: Migrate config services to TypeScript in packages/api
Move core config logic from CJS /api wrappers to typed TypeScript in
packages/api using dependency injection factories:
- `createEndpointsConfigService` — endpoint config merging + checkCapability
- `createLoadConfigModels` — custom endpoint model loading with user key support
- `createMCPToolCacheService` — MCP tool cache operations (update, merge, cache)
/api files become thin wrappers that wire dependencies (getAppConfig,
loadDefaultEndpointsConfig, getUserKeyValues, getCachedTools, etc.)
into the typed factories.
Also moves existing `endpoints/config.ts` → `endpoints/config/providers.ts`
to accommodate the new `config/` directory structure.
* 🔄 fix: Invalidate models query when user API key is set or revoked
Without this, users had to refresh the page after entering their API key
to see the updated model list fetched with their credentials.
- Invalidate QueryKeys.models in useUpdateUserKeysMutation onSuccess
- Invalidate QueryKeys.models in useRevokeUserKeyMutation onSuccess
- Invalidate QueryKeys.models in useRevokeAllUserKeysMutation onSuccess
* 🗺️ fix: Remap YAML-level override keys to AppConfig equivalents in mergeConfigOverrides
Config overrides stored in the DB use YAML-level keys (TCustomConfig),
but they're merged into the already-processed AppConfig where some fields
have been renamed by AppService. This caused mcpServers overrides to land
on a nonexistent key instead of mcpConfig, so config-override MCP servers
never appeared in the UI.
- Add OVERRIDE_KEY_MAP to remap mcpServers→mcpConfig, interface→interfaceConfig
- Apply remapping before deep merge in mergeConfigOverrides
- Add test for YAML-level key remapping behavior
- Update existing tests to use AppConfig field names in assertions
* 🧪 test: Update service.spec to use AppConfig field names after override key remapping
* 🛡️ fix: Address code review findings — reliability, types, tests, and performance
- Pass tenant context (getTenantId) in importers.js getEndpointsConfig call
- Add 5 tests for user-provided API key model fetching (key found, no key,
DB error, missing userId, apiKey-only with fixed baseURL)
- Distinguish NO_USER_KEY (debug) from infrastructure errors (warn) in catch
- Switch fetchPromisesMap from Promise.all to Promise.allSettled so one
failing provider doesn't kill the entire model config
- Parallelize getUserKeyValues DB lookups via batched Promise.allSettled
instead of sequential awaits in the loop
- Hoist standardCache instance in fetchModels to avoid double instantiation
- Replace Record<string, unknown> types with Partial<TConfig>-based types;
remove as unknown as T double-cast in endpoints config
- Narrow Bedrock availableRegions to typed destructure
- Narrow version field from string|number|undefined to string|undefined
- Fix import ordering in mcp/tools.ts and config/models.ts per AGENTS.md
- Add JSDoc to getModelsConfig alias clarifying caching semantics
* fix: Guard against null getCachedTools in mergeAppTools
* 🔍 fix: Address follow-up review — deduplicate extractEnvVariable, fix error discrimination, add log-level tests
- Deduplicate extractEnvVariable calls: resolve apiKey/baseURL once, reuse
for both the entry and isUserProvided checks (Finding A)
- Move ResolvedEndpoint interface from function closure to module scope (Finding B)
- Replace fragile msg.includes('NO_USER_KEY') with ErrorTypes.NO_USER_KEY
enum check against actual error message format (Finding C). Also handle
ErrorTypes.INVALID_USER_KEY as an expected "no key" case.
- Add test asserting logger.warn is called for infra errors (not debug)
- Add test asserting logger.debug is called for NO_USER_KEY errors (not warn)
* fix: Preserve numeric assistants version via String() coercion
* 🐛 fix: Address secondary review — Ollama cache bypass, cache tests, type safety
- Fix Ollama success path bypassing cache write in fetchModels (CRITICAL):
store result before returning so Ollama models benefit from 2-minute TTL
- Add 4 fetchModels cache behavior tests: cache write with TTL, cache hit
short-circuits HTTP, skipCache bypasses read+write, empty results not cached
- Type-safe OVERRIDE_KEY_MAP: Partial<Record<keyof TCustomConfig, keyof AppConfig>>
so compiler catches future field rename mismatches
- Fix import ordering in config/models.ts (package types longest→shortest)
- Rename ToolCacheDeps → MCPToolCacheDeps for naming consistency
- Expand getModelsConfig JSDoc to explain caching granularity
* fix: Narrow OVERRIDE_KEY_MAP index to satisfy strict tsconfig
* 🧩 fix: Add allowedProviders to TConfig, remove Record<string, unknown> from PartialEndpointEntry
The agents endpoint config includes allowedProviders (used by the frontend
AgentPanel to filter available providers), but it was missing from TConfig.
This forced PartialEndpointEntry to use & Record<string, unknown> as an
escape hatch, violating AGENTS.md type policy.
- Add allowedProviders?: (string | EModelEndpoint)[] to TConfig
- Remove Record<string, unknown> from PartialEndpointEntry — now just Partial<TConfig>
* 🛡️ fix: Isolate Ollama cache write from fetch try-catch, add Ollama cache tests
- Separate Ollama fetch and cache write into distinct scopes so a cache
failure (e.g., Redis down) doesn't misattribute the error as an Ollama
API failure and fall through to the OpenAI-compatible path (Issue A)
- Add 2 Ollama-specific cache tests: models written with TTL on fetch,
cached models returned without hitting server (Issue B)
- Replace hardcoded 120000 with Time.TWO_MINUTES constant in cache TTL
test assertion (Issue C)
- Fix OVERRIDE_KEY_MAP JSDoc to accurately describe runtime vs compile-time
type enforcement (Issue D)
- Add global beforeEach for cache mock reset to prevent cross-test leakage
* 🧪 fix: Address third review — DI consistency, cache key width, MCP tests
- Inject loadCustomEndpointsConfig via EndpointsConfigDeps with default
fallback, matching loadDefaultEndpointsConfig DI pattern (Finding 3)
- Widen modelsCacheKey from 64-bit (.slice(0,16)) to 128-bit (.slice(0,32))
for collision-sensitive cross-credential cache key (Finding 4)
- Add fetchModels.mockReset() in loadConfigModels.spec beforeEach to
prevent mock implementation leaks across tests (Finding 5)
- Add 11 unit tests for createMCPToolCacheService covering all three
functions: null/empty input, successful ops, error propagation,
cold-cache merge (Finding 2)
- Simplify getModelsConfig JSDoc to @see reference (Finding 10)
* ♻️ refactor: Address remaining follow-ups from reviews
OVERRIDE_KEY_MAP completeness:
- Add missing turnstile→turnstileConfig mapping
- Add exhaustiveness test verifying all three renamed keys are remapped
and original YAML keys don't leak through
Import role context:
- Pass userRole through importConversations job → importLibreChatConvo
so role-based endpoint overrides are honored during conversation import
- Update convos.js route to include req.user.role in the job payload
createEndpointsConfigService unit tests:
- Add 8 tests covering: default+custom merge, Azure/AzureAssistants/
Anthropic Vertex/Bedrock config enrichment, assistants version
coercion, agents allowedProviders, req.config bypass
Plugins/tools efficiency:
- Use Set for includedTools/filteredTools lookups (O(1) vs O(n) per plugin)
- Combine auth check + filter into single pass (eliminates intermediate array)
- Pre-compute toolDefKeys Set for O(1) tool definition lookups
* fix: Scope model query cache by user when userIdQuery is enabled
* fix: Skip model cache for userIdQuery endpoints, fix endpoints test types
- When userIdQuery is true, skip caching entirely (like user_provided keys)
to avoid cross-user model list leakage without duplicating cache data
- Fix AgentCapabilities type error in endpoints.spec.ts — use enum values
and appConfig() helper for partial mock typing
* 🐛 fix: Restore filteredTools+includedTools composition, add checkCapability tests
- Fix filteredTools regression: whitelist and blacklist are now applied
independently (two flat guards), matching original behavior where
includedTools=['a','b'] + filteredTools=['b'] produces ['a'] (Finding A)
- Fix Set spread in toolkit loop: pre-compute toolDefKeysList array once
alongside the Set, reuse for .some() without per-plugin allocation (Finding B)
- Add 2 filteredTools tests: blacklist-only path and combined
whitelist+blacklist composition (Finding C)
- Add 3 checkCapability tests: capability present, capability absent,
fallback to defaultAgentCapabilities for non-agents endpoints (Finding D)
* 🔑 fix: Include config-override MCP servers in filterAuthorizedTools
Config-override MCP servers (defined via admin config overrides for
roles/groups) were rejected by filterAuthorizedTools because it called
getAllServerConfigs(userId) without the configServers parameter. Only
YAML and DB-backed user servers were included in the access check.
- Add configServers parameter to filterAuthorizedTools
- Resolve config servers via resolveConfigServers(req) at all 4 callsites
(create, update, duplicate, revert) using parallel Promise.all
- Pass configServers through to getAllServerConfigs(userId, configServers)
so the registry merges config-source servers into the access check
- Update filterAuthorizedTools.spec.js mock for resolveConfigServers
* fix: Skip full model re-fetch in validateModel for user-provided key endpoints
For user-provided key endpoints (userProvide: true), skip the full model
list re-fetch during message validation — the user already selected from
a list we served them, and re-fetching with skipCache:true on every
message send is both slow and fragile (5s provider timeout = rejected model).
Instead, validate the model string format only:
- Must be a string, max 256 chars
- Must match [a-zA-Z0-9][a-zA-Z0-9_.:\-/@+ ]* (covers all known provider
model ID formats while rejecting injection attempts)
System-configured endpoints still get full model list validation as before.
* 🧪 test: Add regression tests for filterAuthorizedTools configServers and validateModel
filterAuthorizedTools:
- Add test verifying configServers is passed to getAllServerConfigs and
config-override server tools are allowed through
- Guard resolveConfigServers in createAgentHandler to only run when
MCP tools are present (skip for tool-free agent creates)
validateModel (12 new tests):
- Format validation: missing model, non-string, length overflow, leading
special char, script injection, standard model ID acceptance
- userProvide early-return: next() called immediately, getModelsConfig
not invoked (regression guard for the exact bug this fixes)
- System endpoint list validation: reject unknown model, accept known
model, handle null/missing models config
Also fix unnecessary backslash escape in MODEL_PATTERN regex.
* 🧹 fix: Remove space from MODEL_PATTERN, trim input, clean up nits
- Remove space character from MODEL_PATTERN regex — no real model ID
uses spaces; prevents spurious violation logs from whitespace artifacts
- Add model.trim() before validation to handle accidental whitespace
- Remove redundant filterUniquePlugins call on already-deduplicated output
- Add comment documenting intentional whitelist+blacklist composition
- Add getUserKeyValues.mockReset() in loadConfigModels.spec beforeEach
- Remove narrating JSDoc from getModelsConfig one-liner
- Add 2 tests: trim whitespace handling, reject spaces in model ID
* fix: Match startup tool loader semantics — includedTools takes precedence over filteredTools
The startup tool loader (loadAndFormatTools) explicitly ignores
filteredTools when includedTools is set, with a warning log. The
PluginController was applying both independently, creating inconsistent
behavior where the same config produced different results at startup
vs plugin listing time.
Restored mutually exclusive semantics: when includedTools is non-empty,
filteredTools is not evaluated.
* 🧹 chore: Simplify validateModel flow, note auth requirement on endpoints route
- Separate missing-model from invalid-model checks cleanly: type+presence
guard first, then trim+format guard (reviewer NIT)
- Add route comment noting auth is required for role/tenant scoping
* fix: Write trimmed model back to req.body.model for downstream consumers
2026-03-30 16:49:48 -04:00
const { logger , getTenantId } = require ( '@librechat/data-schemas' ) ;
const { EModelEndpoint , Constants , openAISettings } = require ( 'librechat-data-provider' ) ;
const { getEndpointsConfig } = require ( '~/server/services/Config' ) ;
2024-05-02 08:48:26 +02:00
const { createImportBatchBuilder } = require ( './importBatchBuilder' ) ;
2025-07-05 12:44:19 -04:00
const { cloneMessagesWithTimestamps } = require ( './fork' ) ;
2024-05-02 08:48:26 +02:00
/**
 * Returns the appropriate importer function based on the provided JSON data.
 *
 * @param {Object} jsonData - The JSON data to import.
 * @returns {Function} - The importer function.
 * @throws {Error} - If the import type is not supported.
 */
function getImporter(jsonData) {
  // For array-based formats (ChatGPT or Claude)
  if (Array.isArray(jsonData)) {
    // Claude format has chat_messages array in each conversation
    if (jsonData.length > 0 && jsonData[0]?.chat_messages) {
      logger.info('Importing Claude conversation');
      return importClaudeConvo;
    }
    // ChatGPT format has mapping object in each conversation
    logger.info('Importing ChatGPT conversation');
    return importChatGptConvo;
  }

  // For ChatbotUI
  if (jsonData.version && Array.isArray(jsonData.history)) {
    logger.info('Importing ChatbotUI conversation');
    return importChatBotUiConvo;
  }

  // For LibreChat
  if (jsonData.conversationId && (jsonData.messagesTree || jsonData.messages)) {
    logger.info('Importing LibreChat conversation');
    return importLibreChatConvo;
  }

  throw new Error('Unsupported import type');
}
/**
 * Imports a chatbot-ui V1 conversation from a JSON file and saves it to the database.
 *
 * @param {Object} jsonData - The JSON data containing the chatbot conversation.
 * @param {string} requestUserId - The ID of the user making the import request.
 * @param {Function} [builderFactory=createImportBatchBuilder] - The factory function to create an import batch builder.
 * @returns {Promise<void>} - A promise that resolves when the import is complete.
 * @throws {Error} - If there is an error creating the conversation from the JSON file.
 */
async function importChatBotUiConvo(
  jsonData,
  requestUserId,
  builderFactory = createImportBatchBuilder,
) {
  // this has been tested with chatbot-ui V1 export https://github.com/mckaywrigley/chatbot-ui/tree/b865b0555f53957e96727bc0bbb369c9eaecd83b#legacy-code
  try {
    /** @type {ImportBatchBuilder} */
    const importBatchBuilder = builderFactory(requestUserId);

    for (const historyItem of jsonData.history) {
      importBatchBuilder.startConversation(EModelEndpoint.openAI);
      for (const message of historyItem.messages) {
        if (message.role === 'assistant') {
          importBatchBuilder.addGptMessage(message.content, historyItem.model.id);
        } else if (message.role === 'user') {
          importBatchBuilder.addUserMessage(message.content);
        }
      }
      importBatchBuilder.finishConversation(historyItem.name, new Date());
    }
    await importBatchBuilder.saveBatch();
    logger.info(`user: ${requestUserId} | ChatbotUI conversation imported`);
  } catch (error) {
    logger.error(`user: ${requestUserId} | Error creating conversation from ChatbotUI file`, error);
  }
}
2025-12-30 03:37:52 +01:00
/**
 * Extracts text and thinking content from a Claude message.
 * @param {Object} msg - Claude message object with content array and optional text field.
 * @returns {{ textContent: string, thinkingContent: string }} Extracted text and thinking content.
 */
function extractClaudeContent(msg) {
  let textContent = '';
  let thinkingContent = '';
  for (const part of msg.content || []) {
    if (part.type === 'text' && part.text) {
      textContent += part.text;
    } else if (part.type === 'thinking' && part.thinking) {
      thinkingContent += part.thinking;
    }
  }
  // Use the text field as fallback if content array is empty
  if (!textContent && msg.text) {
    textContent = msg.text;
  }
  return { textContent, thinkingContent };
}
/**
 * Imports Claude conversations from provided JSON data.
 * Claude export format: array of conversations with chat_messages array.
 *
 * @param {Array} jsonData - Array of Claude conversation objects to be imported.
 * @param {string} requestUserId - The ID of the user who initiated the import process.
 * @param {Function} [builderFactory=createImportBatchBuilder] - Factory function to create a new import batch builder instance.
 * @returns {Promise<void>} Promise that resolves when all conversations have been imported.
 */
async function importClaudeConvo(
  jsonData,
  requestUserId,
  builderFactory = createImportBatchBuilder,
) {
  try {
    const importBatchBuilder = builderFactory(requestUserId);

    for (const conv of jsonData) {
      importBatchBuilder.startConversation(EModelEndpoint.anthropic);
      // Messages form a linear chain: each message's parent is the previous one
      let lastMessageId = Constants.NO_PARENT;
      let lastTimestamp = null;

      for (const msg of conv.chat_messages || []) {
        const isCreatedByUser = msg.sender === 'human';
        const messageId = uuidv4();
        const { textContent, thinkingContent } = extractClaudeContent(msg);

        // Skip empty messages
        if (!textContent && !thinkingContent) {
          continue;
        }

        // Parse timestamp, fallback to conversation create_time or current time
        const messageTime = msg.created_at || conv.created_at;
        let createdAt = messageTime ? new Date(messageTime) : new Date();

        // Ensure timestamp is after the previous message.
        // Messages are sorted by createdAt and buildTree expects parents to appear before children.
        // This guards against any potential ordering issues in exports.
        if (lastTimestamp && createdAt <= lastTimestamp) {
          createdAt = new Date(lastTimestamp.getTime() + 1);
        }
        lastTimestamp = createdAt;

        const message = {
          messageId,
          parentMessageId: lastMessageId,
          text: textContent,
          sender: isCreatedByUser ? 'user' : 'Claude',
          isCreatedByUser,
          user: requestUserId,
          endpoint: EModelEndpoint.anthropic,
          createdAt,
        };

        // Add content array with thinking if present
        if (thinkingContent && !isCreatedByUser) {
          message.content = [
            { type: 'think', think: thinkingContent },
            { type: 'text', text: textContent },
          ];
        }

        importBatchBuilder.saveMessage(message);
        lastMessageId = messageId;
      }

      const createdAt = conv.created_at ? new Date(conv.created_at) : new Date();
      importBatchBuilder.finishConversation(conv.name || 'Imported Claude Chat', createdAt);
    }
    await importBatchBuilder.saveBatch();
    logger.info(`user: ${requestUserId} | Claude conversation imported`);
  } catch (error) {
    logger.error(`user: ${requestUserId} | Error creating conversation from Claude file`, error);
  }
}
2024-05-02 08:48:26 +02:00
/**
 * Imports a LibreChat conversation from JSON.
 *
 * @param {Object} jsonData - The JSON data representing the conversation.
 * @param {string} requestUserId - The ID of the user making the import request.
 * @param {Function} [builderFactory=createImportBatchBuilder] - The factory function to create an import batch builder.
 * @returns {Promise<void>} - A promise that resolves when the import is complete.
 */
async function importLibreChatConvo (
jsonData ,
requestUserId ,
builderFactory = createImportBatchBuilder ,
🏗️ refactor: Remove Redundant Caching, Migrate Config Services to TypeScript (#12466)
* ♻️ refactor: Remove redundant scopedCacheKey caching, support user-provided key model fetching
Remove redundant cache layers that used `scopedCacheKey()` (tenant-only scoping)
on top of `getAppConfig()` which already caches per-principal (role+user+tenant).
This caused config overrides for different principals within the same tenant to
be invisible due to stale cached data.
Changes:
- Add `requireJwtAuth` to `/api/endpoints` route for proper user context
- Remove ENDPOINT_CONFIG, STARTUP_CONFIG, PLUGINS, TOOLS, and MODELS_CONFIG
cache layers — all derive from `getAppConfig()` with cheap computation
- Enhance MODEL_QUERIES cache: hash(baseURL+apiKey) keys, 2-minute TTL,
caching centralized in `fetchModels()` base function
- Support fetching models with user-provided API keys in `loadConfigModels`
via `getUserKeyValues` lookup (no caching for user keys)
- Update all affected tests
Closes #1028
* ♻️ refactor: Migrate config services to TypeScript in packages/api
Move core config logic from CJS /api wrappers to typed TypeScript in
packages/api using dependency injection factories:
- `createEndpointsConfigService` — endpoint config merging + checkCapability
- `createLoadConfigModels` — custom endpoint model loading with user key support
- `createMCPToolCacheService` — MCP tool cache operations (update, merge, cache)
/api files become thin wrappers that wire dependencies (getAppConfig,
loadDefaultEndpointsConfig, getUserKeyValues, getCachedTools, etc.)
into the typed factories.
Also moves existing `endpoints/config.ts` → `endpoints/config/providers.ts`
to accommodate the new `config/` directory structure.
* 🔄 fix: Invalidate models query when user API key is set or revoked
Without this, users had to refresh the page after entering their API key
to see the updated model list fetched with their credentials.
- Invalidate QueryKeys.models in useUpdateUserKeysMutation onSuccess
- Invalidate QueryKeys.models in useRevokeUserKeyMutation onSuccess
- Invalidate QueryKeys.models in useRevokeAllUserKeysMutation onSuccess
* 🗺️ fix: Remap YAML-level override keys to AppConfig equivalents in mergeConfigOverrides
Config overrides stored in the DB use YAML-level keys (TCustomConfig),
but they're merged into the already-processed AppConfig where some fields
have been renamed by AppService. This caused mcpServers overrides to land
on a nonexistent key instead of mcpConfig, so config-override MCP servers
never appeared in the UI.
- Add OVERRIDE_KEY_MAP to remap mcpServers→mcpConfig, interface→interfaceConfig
- Apply remapping before deep merge in mergeConfigOverrides
- Add test for YAML-level key remapping behavior
- Update existing tests to use AppConfig field names in assertions
* 🧪 test: Update service.spec to use AppConfig field names after override key remapping
* 🛡️ fix: Address code review findings — reliability, types, tests, and performance
- Pass tenant context (getTenantId) in importers.js getEndpointsConfig call
- Add 5 tests for user-provided API key model fetching (key found, no key,
DB error, missing userId, apiKey-only with fixed baseURL)
- Distinguish NO_USER_KEY (debug) from infrastructure errors (warn) in catch
- Switch fetchPromisesMap from Promise.all to Promise.allSettled so one
failing provider doesn't kill the entire model config
- Parallelize getUserKeyValues DB lookups via batched Promise.allSettled
instead of sequential awaits in the loop
- Hoist standardCache instance in fetchModels to avoid double instantiation
- Replace Record<string, unknown> types with Partial<TConfig>-based types;
remove as unknown as T double-cast in endpoints config
- Narrow Bedrock availableRegions to typed destructure
- Narrow version field from string|number|undefined to string|undefined
- Fix import ordering in mcp/tools.ts and config/models.ts per AGENTS.md
- Add JSDoc to getModelsConfig alias clarifying caching semantics
* fix: Guard against null getCachedTools in mergeAppTools
* 🔍 fix: Address follow-up review — deduplicate extractEnvVariable, fix error discrimination, add log-level tests
- Deduplicate extractEnvVariable calls: resolve apiKey/baseURL once, reuse
for both the entry and isUserProvided checks (Finding A)
- Move ResolvedEndpoint interface from function closure to module scope (Finding B)
- Replace fragile msg.includes('NO_USER_KEY') with ErrorTypes.NO_USER_KEY
enum check against actual error message format (Finding C). Also handle
ErrorTypes.INVALID_USER_KEY as an expected "no key" case.
- Add test asserting logger.warn is called for infra errors (not debug)
- Add test asserting logger.debug is called for NO_USER_KEY errors (not warn)
* fix: Preserve numeric assistants version via String() coercion
* 🐛 fix: Address secondary review — Ollama cache bypass, cache tests, type safety
- Fix Ollama success path bypassing cache write in fetchModels (CRITICAL):
store result before returning so Ollama models benefit from 2-minute TTL
- Add 4 fetchModels cache behavior tests: cache write with TTL, cache hit
short-circuits HTTP, skipCache bypasses read+write, empty results not cached
- Type-safe OVERRIDE_KEY_MAP: Partial<Record<keyof TCustomConfig, keyof AppConfig>>
so compiler catches future field rename mismatches
- Fix import ordering in config/models.ts (package types longest→shortest)
- Rename ToolCacheDeps → MCPToolCacheDeps for naming consistency
- Expand getModelsConfig JSDoc to explain caching granularity
* fix: Narrow OVERRIDE_KEY_MAP index to satisfy strict tsconfig
* 🧩 fix: Add allowedProviders to TConfig, remove Record<string, unknown> from PartialEndpointEntry
The agents endpoint config includes allowedProviders (used by the frontend
AgentPanel to filter available providers), but it was missing from TConfig.
This forced PartialEndpointEntry to use & Record<string, unknown> as an
escape hatch, violating AGENTS.md type policy.
- Add allowedProviders?: (string | EModelEndpoint)[] to TConfig
- Remove Record<string, unknown> from PartialEndpointEntry — now just Partial<TConfig>
* 🛡️ fix: Isolate Ollama cache write from fetch try-catch, add Ollama cache tests
- Separate Ollama fetch and cache write into distinct scopes so a cache
failure (e.g., Redis down) doesn't misattribute the error as an Ollama
API failure and fall through to the OpenAI-compatible path (Issue A)
- Add 2 Ollama-specific cache tests: models written with TTL on fetch,
cached models returned without hitting server (Issue B)
- Replace hardcoded 120000 with Time.TWO_MINUTES constant in cache TTL
test assertion (Issue C)
- Fix OVERRIDE_KEY_MAP JSDoc to accurately describe runtime vs compile-time
type enforcement (Issue D)
- Add global beforeEach for cache mock reset to prevent cross-test leakage
* 🧪 fix: Address third review — DI consistency, cache key width, MCP tests
- Inject loadCustomEndpointsConfig via EndpointsConfigDeps with default
fallback, matching loadDefaultEndpointsConfig DI pattern (Finding 3)
- Widen modelsCacheKey from 64-bit (.slice(0,16)) to 128-bit (.slice(0,32))
for collision-sensitive cross-credential cache key (Finding 4)
- Add fetchModels.mockReset() in loadConfigModels.spec beforeEach to
prevent mock implementation leaks across tests (Finding 5)
- Add 11 unit tests for createMCPToolCacheService covering all three
functions: null/empty input, successful ops, error propagation,
cold-cache merge (Finding 2)
- Simplify getModelsConfig JSDoc to @see reference (Finding 10)
* ♻️ refactor: Address remaining follow-ups from reviews
OVERRIDE_KEY_MAP completeness:
- Add missing turnstile→turnstileConfig mapping
- Add exhaustiveness test verifying all three renamed keys are remapped
and original YAML keys don't leak through
Import role context:
- Pass userRole through importConversations job → importLibreChatConvo
so role-based endpoint overrides are honored during conversation import
- Update convos.js route to include req.user.role in the job payload
createEndpointsConfigService unit tests:
- Add 8 tests covering: default+custom merge, Azure/AzureAssistants/
Anthropic Vertex/Bedrock config enrichment, assistants version
coercion, agents allowedProviders, req.config bypass
Plugins/tools efficiency:
- Use Set for includedTools/filteredTools lookups (O(1) vs O(n) per plugin)
- Combine auth check + filter into single pass (eliminates intermediate array)
- Pre-compute toolDefKeys Set for O(1) tool definition lookups
* fix: Scope model query cache by user when userIdQuery is enabled
* fix: Skip model cache for userIdQuery endpoints, fix endpoints test types
- When userIdQuery is true, skip caching entirely (like user_provided keys)
to avoid cross-user model list leakage without duplicating cache data
- Fix AgentCapabilities type error in endpoints.spec.ts — use enum values
and appConfig() helper for partial mock typing
* 🐛 fix: Restore filteredTools+includedTools composition, add checkCapability tests
- Fix filteredTools regression: whitelist and blacklist are now applied
independently (two flat guards), matching original behavior where
includedTools=['a','b'] + filteredTools=['b'] produces ['a'] (Finding A)
- Fix Set spread in toolkit loop: pre-compute toolDefKeysList array once
alongside the Set, reuse for .some() without per-plugin allocation (Finding B)
- Add 2 filteredTools tests: blacklist-only path and combined
whitelist+blacklist composition (Finding C)
- Add 3 checkCapability tests: capability present, capability absent,
fallback to defaultAgentCapabilities for non-agents endpoints (Finding D)
* 🔑 fix: Include config-override MCP servers in filterAuthorizedTools
Config-override MCP servers (defined via admin config overrides for
roles/groups) were rejected by filterAuthorizedTools because it called
getAllServerConfigs(userId) without the configServers parameter. Only
YAML and DB-backed user servers were included in the access check.
- Add configServers parameter to filterAuthorizedTools
- Resolve config servers via resolveConfigServers(req) at all 4 callsites
(create, update, duplicate, revert) using parallel Promise.all
- Pass configServers through to getAllServerConfigs(userId, configServers)
so the registry merges config-source servers into the access check
- Update filterAuthorizedTools.spec.js mock for resolveConfigServers
* fix: Skip model cache for userIdQuery endpoints, fix endpoints test types
For user-provided key endpoints (userProvide: true), skip the full model
list re-fetch during message validation — the user already selected from
a list we served them, and re-fetching with skipCache:true on every
message send is both slow and fragile (5s provider timeout = rejected model).
Instead, validate the model string format only:
- Must be a string, max 256 chars
- Must match [a-zA-Z0-9][a-zA-Z0-9_.:\-/@+ ]* (covers all known provider
model ID formats while rejecting injection attempts)
System-configured endpoints still get full model list validation as before.
* 🧪 test: Add regression tests for filterAuthorizedTools configServers and validateModel
filterAuthorizedTools:
- Add test verifying configServers is passed to getAllServerConfigs and
config-override server tools are allowed through
- Guard resolveConfigServers in createAgentHandler to only run when
MCP tools are present (skip for tool-free agent creates)
validateModel (12 new tests):
- Format validation: missing model, non-string, length overflow, leading
special char, script injection, standard model ID acceptance
- userProvide early-return: next() called immediately, getModelsConfig
not invoked (regression guard for the exact bug this fixes)
- System endpoint list validation: reject unknown model, accept known
model, handle null/missing models config
Also fix unnecessary backslash escape in MODEL_PATTERN regex.
* 🧹 fix: Remove space from MODEL_PATTERN, trim input, clean up nits
- Remove space character from MODEL_PATTERN regex — no real model ID
uses spaces; prevents spurious violation logs from whitespace artifacts
- Add model.trim() before validation to handle accidental whitespace
- Remove redundant filterUniquePlugins call on already-deduplicated output
- Add comment documenting intentional whitelist+blacklist composition
- Add getUserKeyValues.mockReset() in loadConfigModels.spec beforeEach
- Remove narrating JSDoc from getModelsConfig one-liner
- Add 2 tests: trim whitespace handling, reject spaces in model ID
* fix: Match startup tool loader semantics — includedTools takes precedence over filteredTools
The startup tool loader (loadAndFormatTools) explicitly ignores
filteredTools when includedTools is set, with a warning log. The
PluginController was applying both independently, creating inconsistent
behavior where the same config produced different results at startup
vs plugin listing time.
Restored mutually exclusive semantics: when includedTools is non-empty,
filteredTools is not evaluated.
* 🧹 chore: Simplify validateModel flow, note auth requirement on endpoints route
- Separate missing-model from invalid-model checks cleanly: type+presence
guard first, then trim+format guard (reviewer NIT)
- Add route comment noting auth is required for role/tenant scoping
* fix: Write trimmed model back to req.body.model for downstream consumers
2026-03-30 16:49:48 -04:00
userRole ,
2024-05-02 08:48:26 +02:00
) {
try {
🌿 feat: Fork Messages/Conversations (#2617)
* typedef for ImportBatchBuilder
* feat: first pass, fork conversations
* feat: fork - getMessagesUpToTargetLevel
* fix: additional tests and fix getAllMessagesUpToParent
* chore: arrow function return
* refactor: fork 3 options
* chore: remove unused genbuttons
* chore: remove unused hover buttons code
* feat: fork first pass
* wip: fork remember setting
* style: user icon
* chore: move clear chats to data tab
* WIP: fork UI options
* feat: data-provider fork types/services/vars and use generic MutationOptions
* refactor: use single param for fork option, use enum, fix mongo errors, use Date.now(), add records flag for testing, use endpoint from original convo and messages, pass originalConvo to finishConversation
* feat: add fork mutation hook and consolidate type imports
* refactor: use enum
* feat: first pass, fork mutation
* chore: add enum for target level fork option
* chore: add enum for target level fork option
* show toast when checking remember selection
* feat: splitAtTarget
* feat: split at target option
* feat: navigate to new fork, show toasts, set result query data
* feat: hover info for all fork options
* refactor: add Messages settings tab
* fix(Fork): remember text info
* ci: test for single message and is target edge case
* feat: additional tests for getAllMessagesUpToParent
* ci: additional tests and cycle detection for getMessagesUpToTargetLevel
* feat: circular dependency checks for getAllMessagesUpToParent
* fix: getMessagesUpToTargetLevel circular dep. check
* ci: more tests for getMessagesForConversation
* style: hover text for checkbox fork items
* refactor: add statefulness to conversation import
2024-05-05 11:48:20 -04:00
/** @type {ImportBatchBuilder} */
2024-05-02 08:48:26 +02:00
const importBatchBuilder = builderFactory ( requestUserId ) ;
2024-05-29 09:15:05 -04:00
const options = jsonData . options || { } ;
/* Endpoint configuration */
let endpoint = jsonData . endpoint ? ? options . endpoint ? ? EModelEndpoint . openAI ;
🏗️ refactor: Remove Redundant Caching, Migrate Config Services to TypeScript (#12466)
* ♻️ refactor: Remove redundant scopedCacheKey caching, support user-provided key model fetching
Remove redundant cache layers that used `scopedCacheKey()` (tenant-only scoping)
on top of `getAppConfig()` which already caches per-principal (role+user+tenant).
This caused config overrides for different principals within the same tenant to
be invisible due to stale cached data.
Changes:
- Add `requireJwtAuth` to `/api/endpoints` route for proper user context
- Remove ENDPOINT_CONFIG, STARTUP_CONFIG, PLUGINS, TOOLS, and MODELS_CONFIG
cache layers — all derive from `getAppConfig()` with cheap computation
- Enhance MODEL_QUERIES cache: hash(baseURL+apiKey) keys, 2-minute TTL,
caching centralized in `fetchModels()` base function
- Support fetching models with user-provided API keys in `loadConfigModels`
via `getUserKeyValues` lookup (no caching for user keys)
- Update all affected tests
Closes #1028
* ♻️ refactor: Migrate config services to TypeScript in packages/api
Move core config logic from CJS /api wrappers to typed TypeScript in
packages/api using dependency injection factories:
- `createEndpointsConfigService` — endpoint config merging + checkCapability
- `createLoadConfigModels` — custom endpoint model loading with user key support
- `createMCPToolCacheService` — MCP tool cache operations (update, merge, cache)
/api files become thin wrappers that wire dependencies (getAppConfig,
loadDefaultEndpointsConfig, getUserKeyValues, getCachedTools, etc.)
into the typed factories.
Also moves existing `endpoints/config.ts` → `endpoints/config/providers.ts`
to accommodate the new `config/` directory structure.
* 🔄 fix: Invalidate models query when user API key is set or revoked
Without this, users had to refresh the page after entering their API key
to see the updated model list fetched with their credentials.
- Invalidate QueryKeys.models in useUpdateUserKeysMutation onSuccess
- Invalidate QueryKeys.models in useRevokeUserKeyMutation onSuccess
- Invalidate QueryKeys.models in useRevokeAllUserKeysMutation onSuccess
* 🗺️ fix: Remap YAML-level override keys to AppConfig equivalents in mergeConfigOverrides
Config overrides stored in the DB use YAML-level keys (TCustomConfig),
but they're merged into the already-processed AppConfig where some fields
have been renamed by AppService. This caused mcpServers overrides to land
on a nonexistent key instead of mcpConfig, so config-override MCP servers
never appeared in the UI.
- Add OVERRIDE_KEY_MAP to remap mcpServers→mcpConfig, interface→interfaceConfig
- Apply remapping before deep merge in mergeConfigOverrides
- Add test for YAML-level key remapping behavior
- Update existing tests to use AppConfig field names in assertions
* 🧪 test: Update service.spec to use AppConfig field names after override key remapping
* 🛡️ fix: Address code review findings — reliability, types, tests, and performance
- Pass tenant context (getTenantId) in importers.js getEndpointsConfig call
- Add 5 tests for user-provided API key model fetching (key found, no key,
DB error, missing userId, apiKey-only with fixed baseURL)
- Distinguish NO_USER_KEY (debug) from infrastructure errors (warn) in catch
- Switch fetchPromisesMap from Promise.all to Promise.allSettled so one
failing provider doesn't kill the entire model config
- Parallelize getUserKeyValues DB lookups via batched Promise.allSettled
instead of sequential awaits in the loop
- Hoist standardCache instance in fetchModels to avoid double instantiation
- Replace Record<string, unknown> types with Partial<TConfig>-based types;
remove as unknown as T double-cast in endpoints config
- Narrow Bedrock availableRegions to typed destructure
- Narrow version field from string|number|undefined to string|undefined
- Fix import ordering in mcp/tools.ts and config/models.ts per AGENTS.md
- Add JSDoc to getModelsConfig alias clarifying caching semantics
* fix: Guard against null getCachedTools in mergeAppTools
* 🔍 fix: Address follow-up review — deduplicate extractEnvVariable, fix error discrimination, add log-level tests
- Deduplicate extractEnvVariable calls: resolve apiKey/baseURL once, reuse
for both the entry and isUserProvided checks (Finding A)
- Move ResolvedEndpoint interface from function closure to module scope (Finding B)
- Replace fragile msg.includes('NO_USER_KEY') with ErrorTypes.NO_USER_KEY
enum check against actual error message format (Finding C). Also handle
ErrorTypes.INVALID_USER_KEY as an expected "no key" case.
- Add test asserting logger.warn is called for infra errors (not debug)
- Add test asserting logger.debug is called for NO_USER_KEY errors (not warn)
* fix: Preserve numeric assistants version via String() coercion
* 🐛 fix: Address secondary review — Ollama cache bypass, cache tests, type safety
- Fix Ollama success path bypassing cache write in fetchModels (CRITICAL):
store result before returning so Ollama models benefit from 2-minute TTL
- Add 4 fetchModels cache behavior tests: cache write with TTL, cache hit
short-circuits HTTP, skipCache bypasses read+write, empty results not cached
- Type-safe OVERRIDE_KEY_MAP: Partial<Record<keyof TCustomConfig, keyof AppConfig>>
so compiler catches future field rename mismatches
- Fix import ordering in config/models.ts (package types longest→shortest)
- Rename ToolCacheDeps → MCPToolCacheDeps for naming consistency
- Expand getModelsConfig JSDoc to explain caching granularity
* fix: Narrow OVERRIDE_KEY_MAP index to satisfy strict tsconfig
* 🧩 fix: Add allowedProviders to TConfig, remove Record<string, unknown> from PartialEndpointEntry
The agents endpoint config includes allowedProviders (used by the frontend
AgentPanel to filter available providers), but it was missing from TConfig.
This forced PartialEndpointEntry to use & Record<string, unknown> as an
escape hatch, violating AGENTS.md type policy.
- Add allowedProviders?: (string | EModelEndpoint)[] to TConfig
- Remove Record<string, unknown> from PartialEndpointEntry — now just Partial<TConfig>
* 🛡️ fix: Isolate Ollama cache write from fetch try-catch, add Ollama cache tests
- Separate Ollama fetch and cache write into distinct scopes so a cache
failure (e.g., Redis down) doesn't misattribute the error as an Ollama
API failure and fall through to the OpenAI-compatible path (Issue A)
- Add 2 Ollama-specific cache tests: models written with TTL on fetch,
cached models returned without hitting server (Issue B)
- Replace hardcoded 120000 with Time.TWO_MINUTES constant in cache TTL
test assertion (Issue C)
- Fix OVERRIDE_KEY_MAP JSDoc to accurately describe runtime vs compile-time
type enforcement (Issue D)
- Add global beforeEach for cache mock reset to prevent cross-test leakage
* 🧪 fix: Address third review — DI consistency, cache key width, MCP tests
- Inject loadCustomEndpointsConfig via EndpointsConfigDeps with default
fallback, matching loadDefaultEndpointsConfig DI pattern (Finding 3)
- Widen modelsCacheKey from 64-bit (.slice(0,16)) to 128-bit (.slice(0,32))
for collision-sensitive cross-credential cache key (Finding 4)
- Add fetchModels.mockReset() in loadConfigModels.spec beforeEach to
prevent mock implementation leaks across tests (Finding 5)
- Add 11 unit tests for createMCPToolCacheService covering all three
functions: null/empty input, successful ops, error propagation,
cold-cache merge (Finding 2)
- Simplify getModelsConfig JSDoc to @see reference (Finding 10)
* ♻️ refactor: Address remaining follow-ups from reviews
OVERRIDE_KEY_MAP completeness:
- Add missing turnstile→turnstileConfig mapping
- Add exhaustiveness test verifying all three renamed keys are remapped
and original YAML keys don't leak through
Import role context:
- Pass userRole through importConversations job → importLibreChatConvo
so role-based endpoint overrides are honored during conversation import
- Update convos.js route to include req.user.role in the job payload
createEndpointsConfigService unit tests:
- Add 8 tests covering: default+custom merge, Azure/AzureAssistants/
Anthropic Vertex/Bedrock config enrichment, assistants version
coercion, agents allowedProviders, req.config bypass
Plugins/tools efficiency:
- Use Set for includedTools/filteredTools lookups (O(1) vs O(n) per plugin)
- Combine auth check + filter into single pass (eliminates intermediate array)
- Pre-compute toolDefKeys Set for O(1) tool definition lookups
* fix: Scope model query cache by user when userIdQuery is enabled
* fix: Skip model cache for userIdQuery endpoints, fix endpoints test types
- When userIdQuery is true, skip caching entirely (like user_provided keys)
to avoid cross-user model list leakage without duplicating cache data
- Fix AgentCapabilities type error in endpoints.spec.ts — use enum values
and appConfig() helper for partial mock typing
* 🐛 fix: Restore filteredTools+includedTools composition, add checkCapability tests
- Fix filteredTools regression: whitelist and blacklist are now applied
independently (two flat guards), matching original behavior where
includedTools=['a','b'] + filteredTools=['b'] produces ['a'] (Finding A)
- Fix Set spread in toolkit loop: pre-compute toolDefKeysList array once
alongside the Set, reuse for .some() without per-plugin allocation (Finding B)
- Add 2 filteredTools tests: blacklist-only path and combined
whitelist+blacklist composition (Finding C)
- Add 3 checkCapability tests: capability present, capability absent,
fallback to defaultAgentCapabilities for non-agents endpoints (Finding D)
* 🔑 fix: Include config-override MCP servers in filterAuthorizedTools
Config-override MCP servers (defined via admin config overrides for
roles/groups) were rejected by filterAuthorizedTools because it called
getAllServerConfigs(userId) without the configServers parameter. Only
YAML and DB-backed user servers were included in the access check.
- Add configServers parameter to filterAuthorizedTools
- Resolve config servers via resolveConfigServers(req) at all 4 callsites
(create, update, duplicate, revert) using parallel Promise.all
- Pass configServers through to getAllServerConfigs(userId, configServers)
so the registry merges config-source servers into the access check
- Update filterAuthorizedTools.spec.js mock for resolveConfigServers
* fix: Skip model cache for userIdQuery endpoints, fix endpoints test types
For user-provided key endpoints (userProvide: true), skip the full model
list re-fetch during message validation — the user already selected from
a list we served them, and re-fetching with skipCache:true on every
message send is both slow and fragile (5s provider timeout = rejected model).
Instead, validate the model string format only:
- Must be a string, max 256 chars
- Must match [a-zA-Z0-9][a-zA-Z0-9_.:\-/@+ ]* (covers all known provider
model ID formats while rejecting injection attempts)
System-configured endpoints still get full model list validation as before.
* 🧪 test: Add regression tests for filterAuthorizedTools configServers and validateModel
filterAuthorizedTools:
- Add test verifying configServers is passed to getAllServerConfigs and
config-override server tools are allowed through
- Guard resolveConfigServers in createAgentHandler to only run when
MCP tools are present (skip for tool-free agent creates)
validateModel (12 new tests):
- Format validation: missing model, non-string, length overflow, leading
special char, script injection, standard model ID acceptance
- userProvide early-return: next() called immediately, getModelsConfig
not invoked (regression guard for the exact bug this fixes)
- System endpoint list validation: reject unknown model, accept known
model, handle null/missing models config
Also fix unnecessary backslash escape in MODEL_PATTERN regex.
* 🧹 fix: Remove space from MODEL_PATTERN, trim input, clean up nits
- Remove space character from MODEL_PATTERN regex — no real model ID
uses spaces; prevents spurious violation logs from whitespace artifacts
- Add model.trim() before validation to handle accidental whitespace
- Remove redundant filterUniquePlugins call on already-deduplicated output
- Add comment documenting intentional whitelist+blacklist composition
- Add getUserKeyValues.mockReset() in loadConfigModels.spec beforeEach
- Remove narrating JSDoc from getModelsConfig one-liner
- Add 2 tests: trim whitespace handling, reject spaces in model ID
* fix: Match startup tool loader semantics — includedTools takes precedence over filteredTools
The startup tool loader (loadAndFormatTools) explicitly ignores
filteredTools when includedTools is set, with a warning log. The
PluginController was applying both independently, creating inconsistent
behavior where the same config produced different results at startup
vs plugin listing time.
Restored mutually exclusive semantics: when includedTools is non-empty,
filteredTools is not evaluated.
* 🧹 chore: Simplify validateModel flow, note auth requirement on endpoints route
- Separate missing-model from invalid-model checks cleanly: type+presence
guard first, then trim+format guard (reviewer NIT)
- Add route comment noting auth is required for role/tenant scoping
* fix: Write trimmed model back to req.body.model for downstream consumers
2026-03-30 16:49:48 -04:00
const endpointsConfig = await getEndpointsConfig ( {
user : { id : requestUserId , role : userRole , tenantId : getTenantId ( ) } ,
} ) ;
2024-05-29 09:15:05 -04:00
const endpointConfig = endpointsConfig ? . [ endpoint ] ;
if ( ! endpointConfig && endpointsConfig ) {
endpoint = Object . keys ( endpointsConfig ) [ 0 ] ;
} else if ( ! endpointConfig ) {
endpoint = EModelEndpoint . openAI ;
}
importBatchBuilder . startConversation ( endpoint ) ;
2024-05-02 08:48:26 +02:00
let firstMessageDate = null ;
2024-05-29 09:15:05 -04:00
const messagesToImport = jsonData . messagesTree || jsonData . messages ;
if ( jsonData . recursive ) {
/ * *
2025-07-05 12:44:19 -04:00
* Flatten the recursive message tree into a flat array
2024-05-29 09:15:05 -04:00
* @ param { TMessage [ ] } messages
* @ param { string } parentMessageId
2025-07-05 12:44:19 -04:00
* @ param { TMessage [ ] } flatMessages
2024-05-29 09:15:05 -04:00
* /
2025-07-05 12:44:19 -04:00
const flattenMessages = (
messages ,
parentMessageId = Constants . NO _PARENT ,
flatMessages = [ ] ,
) => {
2024-05-29 09:15:05 -04:00
for ( const message of messages ) {
2024-12-30 13:01:47 -05:00
if ( ! message . text && ! message . content ) {
2024-05-29 09:15:05 -04:00
continue ;
}
2025-07-05 12:44:19 -04:00
const flatMessage = {
... message ,
parentMessageId : parentMessageId ,
children : undefined , // Remove children from flat structure
} ;
flatMessages . push ( flatMessage ) ;
2024-05-02 08:48:26 +02:00
2024-06-07 21:06:47 +02:00
if ( ! firstMessageDate && message . createdAt ) {
2024-05-29 09:15:05 -04:00
firstMessageDate = new Date ( message . createdAt ) ;
}
if ( message . children && message . children . length > 0 ) {
2025-07-05 12:44:19 -04:00
flattenMessages ( message . children , message . messageId , flatMessages ) ;
2024-05-29 09:15:05 -04:00
}
2024-05-02 08:48:26 +02:00
}
2025-07-05 12:44:19 -04:00
return flatMessages ;
2024-05-29 09:15:05 -04:00
} ;
2025-07-05 12:44:19 -04:00
const flatMessages = flattenMessages ( messagesToImport ) ;
cloneMessagesWithTimestamps ( flatMessages , importBatchBuilder ) ;
2024-05-29 09:15:05 -04:00
} else if ( messagesToImport ) {
2025-07-05 12:44:19 -04:00
cloneMessagesWithTimestamps ( messagesToImport , importBatchBuilder ) ;
2024-05-29 09:15:05 -04:00
for ( const message of messagesToImport ) {
2024-06-07 21:06:47 +02:00
if ( ! firstMessageDate && message . createdAt ) {
2024-05-02 08:48:26 +02:00
firstMessageDate = new Date ( message . createdAt ) ;
}
2024-05-29 09:15:05 -04:00
}
} else {
throw new Error ( 'Invalid LibreChat file format' ) ;
}
2024-05-02 08:48:26 +02:00
2024-06-07 21:06:47 +02:00
if ( firstMessageDate === 'Invalid Date' ) {
firstMessageDate = null ;
}
2024-05-29 09:15:05 -04:00
importBatchBuilder . finishConversation ( jsonData . title , firstMessageDate ? ? new Date ( ) , options ) ;
2024-05-02 08:48:26 +02:00
await importBatchBuilder . saveBatch ( ) ;
logger . debug ( ` user: ${ requestUserId } | Conversation " ${ jsonData . title } " imported ` ) ;
} catch ( error ) {
logger . error ( ` user: ${ requestUserId } | Error creating conversation from LibreChat file ` , error ) ;
}
}
/ * *
* Imports ChatGPT conversations from provided JSON data .
* Initializes the import process by creating a batch builder and processing each conversation in the data .
*
* @ param { ChatGPTConvo [ ] } jsonData - Array of conversation objects to be imported .
* @ param { string } requestUserId - The ID of the user who initiated the import process .
* @ param { Function } builderFactory - Factory function to create a new import batch builder instance , defaults to createImportBatchBuilder .
* @ returns { Promise < void > } Promise that resolves when all conversations have been imported .
* /
async function importChatGptConvo (
jsonData ,
requestUserId ,
builderFactory = createImportBatchBuilder ,
) {
try {
const importBatchBuilder = builderFactory ( requestUserId ) ;
for ( const conv of jsonData ) {
processConversation ( conv , importBatchBuilder , requestUserId ) ;
}
await importBatchBuilder . saveBatch ( ) ;
} catch ( error ) {
logger . error ( ` user: ${ requestUserId } | Error creating conversation from imported file ` , error ) ;
}
}
/ * *
* Processes a single conversation , adding messages to the batch builder based on author roles and handling text content .
* It directly manages the addition of messages for different roles and handles citations for assistant messages .
*
* @ param { ChatGPTConvo } conv - A single conversation object that contains multiple messages and other details .
🌿 feat: Fork Messages/Conversations (#2617)
* typedef for ImportBatchBuilder
* feat: first pass, fork conversations
* feat: fork - getMessagesUpToTargetLevel
* fix: additional tests and fix getAllMessagesUpToParent
* chore: arrow function return
* refactor: fork 3 options
* chore: remove unused genbuttons
* chore: remove unused hover buttons code
* feat: fork first pass
* wip: fork remember setting
* style: user icon
* chore: move clear chats to data tab
* WIP: fork UI options
* feat: data-provider fork types/services/vars and use generic MutationOptions
* refactor: use single param for fork option, use enum, fix mongo errors, use Date.now(), add records flag for testing, use endpoint from original convo and messages, pass originalConvo to finishConversation
* feat: add fork mutation hook and consolidate type imports
* refactor: use enum
* feat: first pass, fork mutation
* chore: add enum for target level fork option
* chore: add enum for target level fork option
* show toast when checking remember selection
* feat: splitAtTarget
* feat: split at target option
* feat: navigate to new fork, show toasts, set result query data
* feat: hover info for all fork options
* refactor: add Messages settings tab
* fix(Fork): remember text info
* ci: test for single message and is target edge case
* feat: additional tests for getAllMessagesUpToParent
* ci: additional tests and cycle detection for getMessagesUpToTargetLevel
* feat: circular dependency checks for getAllMessagesUpToParent
* fix: getMessagesUpToTargetLevel circular dep. check
* ci: more tests for getMessagesForConversation
* style: hover text for checkbox fork items
* refactor: add statefulness to conversation import
2024-05-05 11:48:20 -04:00
* @ param { ImportBatchBuilder } importBatchBuilder - The batch builder instance used to manage and batch conversation data .
2024-05-02 08:48:26 +02:00
* @ param { string } requestUserId - The ID of the user who initiated the import process .
* @ returns { void }
* /
function processConversation ( conv , importBatchBuilder , requestUserId ) {
importBatchBuilder . startConversation ( EModelEndpoint . openAI ) ;
// Map all message IDs to new UUIDs
const messageMap = new Map ( ) ;
for ( const [ id , mapping ] of Object . entries ( conv . mapping ) ) {
if ( mapping . message && mapping . message . content . content _type ) {
const newMessageId = uuidv4 ( ) ;
messageMap . set ( id , newMessageId ) ;
}
}
2025-09-09 13:51:26 -04:00
/ * *
2026-03-19 17:15:12 -04:00
* Finds the nearest valid parent by traversing up through skippable messages
* ( system , reasoning _recap , thoughts ) . Uses iterative traversal to avoid
* stack overflow on deep chains of skippable messages .
*
* @ param { string } startId - The ID of the starting parent message .
2025-12-30 03:31:18 +01:00
* @ returns { string } The ID of the nearest valid parent message .
2025-09-09 13:51:26 -04:00
* /
2026-03-19 17:15:12 -04:00
const findValidParent = ( startId ) => {
const visited = new Set ( ) ;
let parentId = startId ;
2025-09-09 13:51:26 -04:00
2026-03-19 17:15:12 -04:00
while ( parentId ) {
if ( ! messageMap . has ( parentId ) || visited . has ( parentId ) ) {
return Constants . NO _PARENT ;
}
visited . add ( parentId ) ;
2025-09-09 13:51:26 -04:00
2026-03-19 17:15:12 -04:00
const parentMapping = conv . mapping [ parentId ] ;
if ( ! parentMapping ? . message ) {
return Constants . NO _PARENT ;
}
2025-12-30 03:31:18 +01:00
2026-03-19 17:15:12 -04:00
const contentType = parentMapping . message . content ? . content _type ;
const shouldSkip =
parentMapping . message . author ? . role === 'system' ||
contentType === 'reasoning_recap' ||
contentType === 'thoughts' ;
if ( ! shouldSkip ) {
return messageMap . get ( parentId ) ;
}
parentId = parentMapping . parent ;
2025-09-09 13:51:26 -04:00
}
2026-03-19 17:15:12 -04:00
return Constants . NO _PARENT ;
2025-09-09 13:51:26 -04:00
} ;
2025-12-30 03:31:18 +01:00
/ * *
* Helper function to find thinking content from parent chain ( thoughts messages )
* @ param { string } parentId - The ID of the parent message .
* @ param { Set } visited - Set of already - visited IDs to prevent cycles .
* @ returns { Array } The thinking content array ( empty if not found ) .
* /
const findThinkingContent = ( parentId , visited = new Set ( ) ) => {
// Guard against circular references in malformed imports
if ( ! parentId || visited . has ( parentId ) ) {
return [ ] ;
}
visited . add ( parentId ) ;
const parentMapping = conv . mapping [ parentId ] ;
if ( ! parentMapping ? . message ) {
return [ ] ;
}
const contentType = parentMapping . message . content ? . content _type ;
// If this is a thoughts message, extract the thinking content
if ( contentType === 'thoughts' ) {
const thoughts = parentMapping . message . content . thoughts || [ ] ;
const thinkingText = thoughts
. map ( ( t ) => t . content || t . summary || '' )
. filter ( Boolean )
. join ( '\n\n' ) ;
if ( thinkingText ) {
return [ { type : 'think' , think : thinkingText } ] ;
}
return [ ] ;
}
// If this is reasoning_recap, look at its parent for thoughts
if ( contentType === 'reasoning_recap' ) {
return findThinkingContent ( parentMapping . parent , visited ) ;
}
return [ ] ;
} ;
2024-05-02 08:48:26 +02:00
// Create and save messages using the mapped IDs.
// NOTE(review): assumes `messageMap` was pre-populated earlier with new UUIDs for every
// mapping entry, and that `findValidParent`/`findThinkingContent` are defined above — confirm.
const messages = [];
for (const [id, mapping] of Object.entries(conv.mapping)) {
  const role = mapping.message?.author?.role;
  if (!mapping.message) {
    // Entries without a message (e.g. root placeholders) cannot be parents; drop their mapping.
    messageMap.delete(id);
    continue;
  } else if (role === 'system') {
    // Skip system messages but keep their ID in messageMap for parent references
    continue;
  }
  const contentType = mapping.message.content?.content_type;
  // Skip thoughts messages - they will be merged into the response message
  if (contentType === 'thoughts') {
    continue;
  }
  // Skip reasoning_recap messages (just summaries like "Thought for 44s")
  if (contentType === 'reasoning_recap') {
    continue;
  }
  const newMessageId = messageMap.get(id);
  // Resolve to the nearest saved ancestor, since system/thoughts/recap parents are skipped.
  const parentMessageId = findValidParent(mapping.parent);
  const messageText = formatMessageText(mapping.message);
  const isCreatedByUser = role === 'user';
  let sender = isCreatedByUser ? 'user' : 'assistant';
  const model = mapping.message.metadata.model_slug || openAISettings.model.default;
  if (!isCreatedByUser) {
    /** Extracted model name from model slug, e.g. "gpt-4o" -> sender "GPT-4o" */
    const gptMatch = model.match(/gpt-(.+)/i);
    if (gptMatch) {
      sender = `GPT-${gptMatch[1]}`;
    } else {
      sender = model || 'assistant';
    }
  }
  // Use create_time from ChatGPT export to ensure proper message ordering
  // For null timestamps, use the conversation's create_time as fallback, or current time as last resort
  const messageTime = mapping.message.create_time || conv.create_time;
  // Export timestamps are Unix seconds; Date expects milliseconds.
  const createdAt = messageTime ? new Date(messageTime * 1000) : new Date();
  const message = {
    messageId: newMessageId,
    parentMessageId,
    text: messageText,
    sender,
    isCreatedByUser,
    model,
    user: requestUserId,
    endpoint: EModelEndpoint.openAI,
    createdAt,
  };
  // For assistant messages, check if there's thinking content in the parent chain
  if (!isCreatedByUser) {
    const thinkingContent = findThinkingContent(mapping.parent);
    if (thinkingContent.length > 0) {
      // Combine thinking content with the text response
      message.content = [...thinkingContent, { type: 'text', text: messageText }];
    }
  }
  messages.push(message);
}
// Nudge child timestamps past their parents so sort-by-createdAt yields a valid tree order;
// if that loop hit its pass cap, the parent graph is cyclic and must be severed before saving.
const cycleDetected = adjustTimestampsForOrdering(messages);
if (cycleDetected) {
  breakParentCycles(messages);
}
for (const message of messages) {
  importBatchBuilder.saveMessage(message);
}
importBatchBuilder.finishConversation(conv.title, new Date(conv.create_time * 1000));
}
/**
 * Processes text content of messages authored by an assistant, inserting citation links as required.
 * Uses citation start and end indices to place links at the correct positions.
 *
 * @param {ChatGPTMessage} messageData - The message data containing metadata about citations.
 * @param {string} messageText - The original text of the message which may be altered by inserting citation links.
 * @returns {string} - The updated message text after processing for citations.
 */
function processAssistantMessage(messageData, messageText) {
  if (!messageText) {
    return messageText;
  }
  const citations = messageData.metadata?.citations ?? [];
  // Process citations from last to first so each splice leaves earlier indices valid.
  const sortedCitations = [...citations].sort((a, b) => b.start_ix - a.start_ix);

  let result = messageText;
  for (const citation of sortedCitations) {
    if (
      !citation.metadata?.type ||
      citation.metadata.type !== 'webpage' ||
      typeof citation.start_ix !== 'number' ||
      typeof citation.end_ix !== 'number' ||
      citation.start_ix >= citation.end_ix ||
      // Guard against malformed exports: without title/url the link would render
      // as a literal "([undefined](undefined))" in the imported message.
      !citation.metadata.title ||
      !citation.metadata.url
    ) {
      continue;
    }
    const replacement = `([${citation.metadata.title}](${citation.metadata.url}))`;
    result = result.slice(0, citation.start_ix) + replacement + result.slice(citation.end_ix);
  }

  return result;
}
/**
 * Formats the text content of a message based on its content type and author role.
 * - `text`: joins the string parts with spaces and (for assistant messages) inserts citation links.
 * - `code` / `execution_output`: wraps the payload in a fenced block.
 * - other types with `parts`: concatenates string parts and renders object parts as JSON fences.
 * - anything else: dumps the whole content object as a JSON fence.
 *
 * @param {ChatGPTMessage} messageData - The message data.
 * @returns {string} - The formatted message text.
 */
function formatMessageText(messageData) {
  const contentType = messageData.content.content_type;
  const isText = contentType === 'text';
  let messageText = '';
  if (isText && messageData.content.parts) {
    messageText = messageData.content.parts.join(' ');
  } else if (contentType === 'code') {
    messageText = `\`\`\`${messageData.content.language}\n${messageData.content.text}\n\`\`\``;
  } else if (contentType === 'execution_output') {
    messageText = `Execution Output:\n> ${messageData.content.text}`;
  } else if (messageData.content.parts) {
    for (const part of messageData.content.parts) {
      if (typeof part === 'string') {
        messageText += part + ' ';
      } else if (typeof part === 'object') {
        // Bug fix: append (`+=`) instead of assigning (`=`), so an object part no longer
        // discards all previously accumulated string parts in mixed-content messages.
        messageText += `\`\`\`json\n${JSON.stringify(part, null, 2)}\n\`\`\`\n`;
      }
    }
    messageText = messageText.trim();
  } else {
    messageText = `\`\`\`json\n${JSON.stringify(messageData.content, null, 2)}\n\`\`\``;
  }
  if (isText && messageData.author.role !== 'user') {
    messageText = processAssistantMessage(messageData, messageText);
  }
  return messageText;
}
2025-12-30 03:31:18 +01:00
/**
 * Adjusts message timestamps so children always sort after their parents.
 * Messages are sorted by createdAt and buildTree expects parents to appear before children,
 * but ChatGPT exports can contain slight inversions (e.g., a tool-call result stamped a few
 * milliseconds before its parent). Repeats passes to propagate cascading bumps, capped at
 * N passes (N = message count) so cyclic parent graphs cannot loop forever.
 *
 * @param {Array} messages - Message objects with messageId, parentMessageId, and createdAt (Date).
 * @returns {boolean} True if cyclic parent relationships were detected.
 */
function adjustTimestampsForOrdering(messages) {
  if (messages.length === 0) {
    return false;
  }
  /** Latest known createdAt per messageId, updated as bumps are applied. */
  const latestById = new Map(messages.map((msg) => [msg.messageId, msg.createdAt]));
  let passesLeft = messages.length;
  let dirty = true;
  while (dirty && passesLeft > 0) {
    dirty = false;
    passesLeft -= 1;
    for (const msg of messages) {
      const { parentMessageId } = msg;
      if (!parentMessageId || parentMessageId === Constants.NO_PARENT) {
        continue;
      }
      const parentTime = latestById.get(parentMessageId);
      if (parentTime && msg.createdAt <= parentTime) {
        // Bump the child 1ms past its parent; later passes fix any cascade this causes.
        msg.createdAt = new Date(parentTime.getTime() + 1);
        latestById.set(msg.messageId, msg.createdAt);
        dirty = true;
      }
    }
  }
  // Still dirty after exhausting the pass budget means the parent graph has a cycle.
  const cycleDetected = passesLeft === 0 && dirty;
  if (cycleDetected) {
    logger.warn(
      '[importers] Detected cyclic parent relationships while adjusting import timestamps',
    );
  }
  return cycleDetected;
}
/**
 * Severs cyclic parentMessageId back-edges so saved messages form a valid tree.
 * Walks each message's parent chain; the first node revisited within a single walk has
 * its parentMessageId reset to NO_PARENT, breaking the cycle. IDs whose chains have
 * already been walked are memoized so each node is processed at most once overall.
 *
 * @param {Array} messages - Array of message objects with messageId and parentMessageId.
 */
function breakParentCycles(messages) {
  /** Fast lookup from messageId to its message object. */
  const byId = new Map(messages.map((msg) => [msg.messageId, msg]));
  /** IDs whose parent chains are already known to be cycle-free (or already severed). */
  const settled = new Set();
  for (const start of messages) {
    const seen = new Set();
    let node = start;
    while (node !== undefined && !settled.has(node.messageId)) {
      if (seen.has(node.messageId)) {
        // Revisited within this walk: cut the back-edge here.
        node.parentMessageId = Constants.NO_PARENT;
        break;
      }
      seen.add(node.messageId);
      const nextId = node.parentMessageId;
      if (!nextId || nextId === Constants.NO_PARENT) {
        break;
      }
      node = byId.get(nextId);
    }
    for (const id of seen) {
      settled.add(id);
    }
  }
}
2024-10-24 15:50:48 -04:00
module . exports = { getImporter , processAssistantMessage } ;