Merge branch 'main' into feat/hide-attach-files

This commit is contained in:
Fahleen Arif 2026-02-18 14:14:48 +05:00 committed by GitHub
commit 2bc8957689
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
53 changed files with 2352 additions and 393 deletions

View file

@ -47,6 +47,10 @@ TRUST_PROXY=1
# password policies.
# MIN_PASSWORD_LENGTH=8
# When enabled, the app will continue running after encountering uncaught exceptions
# instead of exiting the process. Not recommended for production unless necessary.
# CONTINUE_ON_UNCAUGHT_EXCEPTION=false
#===============#
# JSON Logging #
#===============#
@ -131,7 +135,7 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-opus-4-6,claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
# ANTHROPIC_MODELS=claude-sonnet-4-6,claude-opus-4-6,claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
# ANTHROPIC_REVERSE_PROXY=
# Set to true to use Anthropic models through Google Vertex AI instead of direct API
@ -166,8 +170,8 @@ ANTHROPIC_API_KEY=user_provided
# BEDROCK_AWS_SESSION_TOKEN=someSessionToken
# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
# BEDROCK_AWS_MODELS=anthropic.claude-opus-4-6-v1,anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
# Cross-region inference model IDs: us.anthropic.claude-opus-4-6-v1,global.anthropic.claude-opus-4-6-v1
# BEDROCK_AWS_MODELS=anthropic.claude-sonnet-4-6,anthropic.claude-opus-4-6-v1,anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
# Cross-region inference model IDs: us.anthropic.claude-sonnet-4-6,us.anthropic.claude-opus-4-6-v1,global.anthropic.claude-opus-4-6-v1
# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns

View file

@ -40,6 +40,10 @@ if (!cached) {
cached = global.mongoose = { conn: null, promise: null };
}
mongoose.connection.on('error', (err) => {
logger.error('[connectDb] MongoDB connection error:', err);
});
async function connectDb() {
if (cached.conn && cached.conn?._readyState === 1) {
return cached.conn;

View file

@ -176,6 +176,7 @@ const tokenValues = Object.assign(
'claude-opus-4-5': { prompt: 5, completion: 25 },
'claude-opus-4-6': { prompt: 5, completion: 25 },
'claude-sonnet-4': { prompt: 3, completion: 15 },
'claude-sonnet-4-6': { prompt: 3, completion: 15 },
'command-r': { prompt: 0.5, completion: 1.5 },
'command-r-plus': { prompt: 3, completion: 15 },
'command-text': { prompt: 1.5, completion: 2.0 },
@ -309,6 +310,7 @@ const cacheTokenValues = {
'claude-3-haiku': { write: 0.3, read: 0.03 },
'claude-haiku-4-5': { write: 1.25, read: 0.1 },
'claude-sonnet-4': { write: 3.75, read: 0.3 },
'claude-sonnet-4-6': { write: 3.75, read: 0.3 },
'claude-opus-4': { write: 18.75, read: 1.5 },
'claude-opus-4-5': { write: 6.25, read: 0.5 },
'claude-opus-4-6': { write: 6.25, read: 0.5 },
@ -337,6 +339,7 @@ const cacheTokenValues = {
*/
const premiumTokenValues = {
'claude-opus-4-6': { threshold: 200000, prompt: 10, completion: 37.5 },
'claude-sonnet-4-6': { threshold: 200000, prompt: 6, completion: 22.5 },
};
/**

View file

@ -44,7 +44,7 @@
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.1.43",
"@librechat/agents": "^3.1.50",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",

View file

@ -18,7 +18,6 @@ const {
findUser,
} = require('~/models');
const { getGraphApiToken } = require('~/server/services/GraphTokenService');
const { getOAuthReconnectionManager } = require('~/config');
const { getOpenIdConfig } = require('~/strategies');
const registrationController = async (req, res) => {
@ -79,7 +78,12 @@ const refreshController = async (req, res) => {
try {
const openIdConfig = getOpenIdConfig();
const tokenset = await openIdClient.refreshTokenGrant(openIdConfig, refreshToken);
const refreshParams = process.env.OPENID_SCOPE ? { scope: process.env.OPENID_SCOPE } : {};
const tokenset = await openIdClient.refreshTokenGrant(
openIdConfig,
refreshToken,
refreshParams,
);
const claims = tokenset.claims();
const { user, error, migration } = await findOpenIDUser({
findUser,
@ -161,17 +165,6 @@ const refreshController = async (req, res) => {
if (session && session.expiration > new Date()) {
const token = await setAuthTokens(userId, res, session);
// trigger OAuth MCP server reconnection asynchronously (best effort)
try {
void getOAuthReconnectionManager()
.reconnectServers(userId)
.catch((err) => {
logger.error('[refreshController] Error reconnecting OAuth MCP servers:', err);
});
} catch (err) {
logger.warn(`[refreshController] Cannot attempt OAuth MCP servers reconnection:`, err);
}
res.status(200).send({ token, user });
} else if (req?.query?.retry) {
// Retrying from a refresh token request that failed (401)

View file

@ -20,7 +20,6 @@ jest.mock('@librechat/agents', () => ({
getMessageId: jest.fn(),
ToolEndHandler: jest.fn(),
handleToolCalls: jest.fn(),
ChatModelStreamHandler: jest.fn(),
}));
jest.mock('~/server/services/Files/Citations', () => ({

View file

@ -30,9 +30,6 @@ jest.mock('@librechat/agents', () => ({
messages: [],
indexTokenCountMap: {},
}),
ChatModelStreamHandler: jest.fn().mockImplementation(() => ({
handle: jest.fn(),
})),
}));
jest.mock('@librechat/api', () => ({

View file

@ -34,9 +34,6 @@ jest.mock('@librechat/agents', () => ({
messages: [],
indexTokenCountMap: {},
}),
ChatModelStreamHandler: jest.fn().mockImplementation(() => ({
handle: jest.fn(),
})),
}));
jest.mock('@librechat/api', () => ({

View file

@ -1,22 +1,13 @@
const { nanoid } = require('nanoid');
const { Constants } = require('@librechat/agents');
const { logger } = require('@librechat/data-schemas');
const { Constants, EnvVar, GraphEvents, ToolEndHandler } = require('@librechat/agents');
const { Tools, StepTypes, FileContext, ErrorTypes } = require('librechat-data-provider');
const {
sendEvent,
GenerationJobManager,
writeAttachmentEvent,
createToolExecuteHandler,
} = require('@librechat/api');
const { Tools, StepTypes, FileContext, ErrorTypes } = require('librechat-data-provider');
const {
EnvVar,
Providers,
GraphEvents,
getMessageId,
ToolEndHandler,
handleToolCalls,
ChatModelStreamHandler,
} = require('@librechat/agents');
const { processFileCitations } = require('~/server/services/Files/Citations');
const { processCodeOutput } = require('~/server/services/Files/Code/process');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
@ -57,8 +48,6 @@ class ModelEndHandler {
let errorMessage;
try {
const agentContext = graph.getAgentContext(metadata);
const isGoogle = agentContext.provider === Providers.GOOGLE;
const streamingDisabled = !!agentContext.clientOptions?.disableStreaming;
if (data?.output?.additional_kwargs?.stop_reason === 'refusal') {
const info = { ...data.output.additional_kwargs };
errorMessage = JSON.stringify({
@ -73,21 +62,6 @@ class ModelEndHandler {
});
}
const toolCalls = data?.output?.tool_calls;
let hasUnprocessedToolCalls = false;
if (Array.isArray(toolCalls) && toolCalls.length > 0 && graph?.toolCallStepIds?.has) {
try {
hasUnprocessedToolCalls = toolCalls.some(
(tc) => tc?.id && !graph.toolCallStepIds.has(tc.id),
);
} catch {
hasUnprocessedToolCalls = false;
}
}
if (isGoogle || streamingDisabled || hasUnprocessedToolCalls) {
await handleToolCalls(toolCalls, metadata, graph);
}
const usage = data?.output?.usage_metadata;
if (!usage) {
return this.finalize(errorMessage);
@ -98,38 +72,6 @@ class ModelEndHandler {
}
this.collectedUsage.push(usage);
if (!streamingDisabled) {
return this.finalize(errorMessage);
}
if (!data.output.content) {
return this.finalize(errorMessage);
}
const stepKey = graph.getStepKey(metadata);
const message_id = getMessageId(stepKey, graph) ?? '';
if (message_id) {
await graph.dispatchRunStep(stepKey, {
type: StepTypes.MESSAGE_CREATION,
message_creation: {
message_id,
},
});
}
const stepId = graph.getStepIdByKey(stepKey);
const content = data.output.content;
if (typeof content === 'string') {
await graph.dispatchMessageDelta(stepId, {
content: [
{
type: 'text',
text: content,
},
],
});
} else if (content.every((c) => c.type?.startsWith('text'))) {
await graph.dispatchMessageDelta(stepId, {
content,
});
}
} catch (error) {
logger.error('Error handling model end event:', error);
return this.finalize(errorMessage);
@ -200,7 +142,6 @@ function getDefaultHandlers({
const handlers = {
[GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
[GraphEvents.TOOL_END]: new ToolEndHandler(toolEndCallback, logger),
[GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
[GraphEvents.ON_RUN_STEP]: {
/**
* Handle ON_RUN_STEP event.

View file

@ -1,12 +1,7 @@
const { nanoid } = require('nanoid');
const { logger } = require('@librechat/data-schemas');
const { Callback, ToolEndHandler, formatAgentMessages } = require('@librechat/agents');
const { EModelEndpoint, ResourceType, PermissionBits } = require('librechat-data-provider');
const {
Callback,
ToolEndHandler,
formatAgentMessages,
ChatModelStreamHandler,
} = require('@librechat/agents');
const {
writeSSE,
createRun,
@ -325,18 +320,8 @@ const OpenAIChatCompletionController = async (req, res) => {
}
};
// Built-in handler for processing raw model stream chunks
const chatModelStreamHandler = new ChatModelStreamHandler();
// Event handlers for OpenAI-compatible streaming
const handlers = {
// Process raw model chunks and dispatch message/reasoning deltas
on_chat_model_stream: {
handle: async (event, data, metadata, graph) => {
await chatModelStreamHandler.handle(event, data, metadata, graph);
},
},
// Text content streaming
on_message_delta: createHandler((data) => {
const content = data?.delta?.content;
@ -577,7 +562,14 @@ const OpenAIChatCompletionController = async (req, res) => {
writeSSE(res, '[DONE]');
res.end();
} else {
sendErrorResponse(res, 500, errorMessage, 'server_error');
// Forward upstream provider status codes (e.g., Anthropic 400s) instead of masking as 500
const statusCode =
typeof error?.status === 'number' && error.status >= 400 && error.status < 600
? error.status
: 500;
const errorType =
statusCode >= 400 && statusCode < 500 ? 'invalid_request_error' : 'server_error';
sendErrorResponse(res, statusCode, errorMessage, errorType);
}
}
};

View file

@ -1,13 +1,8 @@
const { nanoid } = require('nanoid');
const { v4: uuidv4 } = require('uuid');
const { logger } = require('@librechat/data-schemas');
const { Callback, ToolEndHandler, formatAgentMessages } = require('@librechat/agents');
const { EModelEndpoint, ResourceType, PermissionBits } = require('librechat-data-provider');
const {
Callback,
ToolEndHandler,
formatAgentMessages,
ChatModelStreamHandler,
} = require('@librechat/agents');
const {
createRun,
buildToolSet,
@ -410,9 +405,6 @@ const createResponse = async (req, res) => {
// Collect usage for balance tracking
const collectedUsage = [];
// Built-in handler for processing raw model stream chunks
const chatModelStreamHandler = new ChatModelStreamHandler();
// Artifact promises for processing tool outputs
/** @type {Promise<import('librechat-data-provider').TAttachment | null>[]} */
const artifactPromises = [];
@ -443,11 +435,6 @@ const createResponse = async (req, res) => {
// Combine handlers
const handlers = {
on_chat_model_stream: {
handle: async (event, data, metadata, graph) => {
await chatModelStreamHandler.handle(event, data, metadata, graph);
},
},
on_message_delta: responsesHandlers.on_message_delta,
on_reasoning_delta: responsesHandlers.on_reasoning_delta,
on_run_step: responsesHandlers.on_run_step,
@ -570,8 +557,6 @@ const createResponse = async (req, res) => {
} else {
const aggregatorHandlers = createAggregatorEventHandlers(aggregator);
const chatModelStreamHandler = new ChatModelStreamHandler();
// Collect usage for balance tracking
const collectedUsage = [];
@ -596,11 +581,6 @@ const createResponse = async (req, res) => {
};
const handlers = {
on_chat_model_stream: {
handle: async (event, data, metadata, graph) => {
await chatModelStreamHandler.handle(event, data, metadata, graph);
},
},
on_message_delta: aggregatorHandlers.on_message_delta,
on_reasoning_delta: aggregatorHandlers.on_reasoning_delta,
on_run_step: aggregatorHandlers.on_run_step,
@ -727,7 +707,13 @@ const createResponse = async (req, res) => {
writeDone(res);
res.end();
} else {
sendResponsesErrorResponse(res, 500, errorMessage, 'server_error');
// Forward upstream provider status codes (e.g., Anthropic 400s) instead of masking as 500
const statusCode =
typeof error?.status === 'number' && error.status >= 400 && error.status < 600
? error.status
: 500;
const errorType = statusCode >= 400 && statusCode < 500 ? 'invalid_request' : 'server_error';
sendResponsesErrorResponse(res, statusCode, errorMessage, errorType);
}
}
};

View file

@ -251,6 +251,15 @@ process.on('uncaughtException', (err) => {
return;
}
if (isEnabled(process.env.CONTINUE_ON_UNCAUGHT_EXCEPTION)) {
logger.error('Unhandled error encountered. The app will continue running.', {
name: err?.name,
message: err?.message,
stack: err?.stack,
});
return;
}
process.exit(1);
});

View file

@ -462,6 +462,22 @@ const setOpenIDAuthTokens = (tokenset, req, res, userId, existingRefreshToken) =
*/
const appAuthToken = tokenset.id_token || tokenset.access_token;
/**
* Always set refresh token cookie so it survives express session expiry.
* The session cookie maxAge (SESSION_EXPIRY, default 15 min) is typically shorter
* than the OIDC token lifetime (~1 hour). Without this cookie fallback, the refresh
* token stored only in the session is lost when the session expires, causing the user
* to be signed out on the next token refresh attempt.
* The refresh token is small (opaque string) so it doesn't hit the HTTP/2 header
* size limits that motivated session storage for the larger access_token/id_token.
*/
res.cookie('refreshToken', refreshToken, {
expires: expirationDate,
httpOnly: true,
secure: shouldUseSecureCookie(),
sameSite: 'strict',
});
/** Store tokens server-side in session to avoid large cookies */
if (req.session) {
req.session.openidTokens = {
@ -472,12 +488,6 @@ const setOpenIDAuthTokens = (tokenset, req, res, userId, existingRefreshToken) =
};
} else {
logger.warn('[setOpenIDAuthTokens] No session available, falling back to cookies');
res.cookie('refreshToken', refreshToken, {
expires: expirationDate,
httpOnly: true,
secure: shouldUseSecureCookie(),
sameSite: 'strict',
});
res.cookie('openid_access_token', tokenset.access_token, {
expires: expirationDate,
httpOnly: true,

View file

@ -1162,6 +1162,56 @@ describe('Claude Model Tests', () => {
expect(matchModelName(model, EModelEndpoint.anthropic)).toBe('claude-opus-4-6');
});
});
it('should return correct context length for Claude Sonnet 4.6 (1M)', () => {
expect(getModelMaxTokens('claude-sonnet-4-6', EModelEndpoint.anthropic)).toBe(
maxTokensMap[EModelEndpoint.anthropic]['claude-sonnet-4-6'],
);
expect(getModelMaxTokens('claude-sonnet-4-6')).toBe(
maxTokensMap[EModelEndpoint.anthropic]['claude-sonnet-4-6'],
);
});
it('should return correct max output tokens for Claude Sonnet 4.6 (64K)', () => {
const { getModelMaxOutputTokens } = require('@librechat/api');
expect(getModelMaxOutputTokens('claude-sonnet-4-6', EModelEndpoint.anthropic)).toBe(
maxOutputTokensMap[EModelEndpoint.anthropic]['claude-sonnet-4-6'],
);
});
it('should handle Claude Sonnet 4.6 model name variations', () => {
const modelVariations = [
'claude-sonnet-4-6',
'claude-sonnet-4-6-20260101',
'claude-sonnet-4-6-latest',
'anthropic/claude-sonnet-4-6',
'claude-sonnet-4-6/anthropic',
'claude-sonnet-4-6-preview',
];
modelVariations.forEach((model) => {
const modelKey = findMatchingPattern(model, maxTokensMap[EModelEndpoint.anthropic]);
expect(modelKey).toBe('claude-sonnet-4-6');
expect(getModelMaxTokens(model, EModelEndpoint.anthropic)).toBe(
maxTokensMap[EModelEndpoint.anthropic]['claude-sonnet-4-6'],
);
});
});
it('should match model names correctly for Claude Sonnet 4.6', () => {
const modelVariations = [
'claude-sonnet-4-6',
'claude-sonnet-4-6-20260101',
'claude-sonnet-4-6-latest',
'anthropic/claude-sonnet-4-6',
'claude-sonnet-4-6/anthropic',
'claude-sonnet-4-6-preview',
];
modelVariations.forEach((model) => {
expect(matchModelName(model, EModelEndpoint.anthropic)).toBe('claude-sonnet-4-6');
});
});
});
describe('Moonshot/Kimi Model Tests', () => {

View file

@ -80,7 +80,7 @@
"lodash": "^4.17.23",
"lucide-react": "^0.394.0",
"match-sorter": "^8.1.0",
"mermaid": "^11.12.2",
"mermaid": "^11.12.3",
"micromark-extension-llm-math": "^3.1.0",
"qrcode.react": "^4.2.0",
"rc-input-number": "^7.4.2",

View file

@ -41,7 +41,8 @@ const SubmitButton = React.memo(
const SendButton = React.memo(
forwardRef((props: SendButtonProps, ref: React.ForwardedRef<HTMLButtonElement>) => {
const data = useWatch({ control: props.control });
return <SubmitButton ref={ref} disabled={props.disabled || !data.text} />;
const content = data?.text?.trim();
return <SubmitButton ref={ref} disabled={props.disabled || !content} />;
}),
);

View file

@ -46,7 +46,7 @@ export default function MCPTools({
return null;
}
if (serverInfo.isConnected) {
if (serverInfo?.tools?.length && serverInfo.tools.length > 0) {
return (
<MCPTool key={`${serverInfo.serverName}-${agentId}`} serverInfo={serverInfo} />
);

View file

@ -96,17 +96,17 @@ function MCPToolSelectDialog({
await new Promise((resolve) => setTimeout(resolve, 500));
}
// Then initialize server if needed
// Only initialize if no cached tools exist; skip if tools are already available from DB
const serverInfo = mcpServersMap.get(serverName);
if (!serverInfo?.isConnected) {
if (!serverInfo?.tools?.length) {
const result = await initializeServer(serverName);
if (result?.success && result.oauthRequired && result.oauthUrl) {
if (result?.oauthRequired && result.oauthUrl) {
setIsInitializing(null);
return;
return; // OAuth flow must complete first
}
}
// Finally, add tools to form
// Add tools to form (refetches from backend's persisted cache)
await addToolsToForm(serverName);
setIsInitializing(null);
} catch (error) {

View file

@ -12,10 +12,10 @@ export const useMCPServersQuery = <TData = t.MCPServersListResponse>(
[QueryKeys.mcpServers],
() => dataService.getMCPServers(),
{
staleTime: 1000 * 60 * 5, // 5 minutes - data stays fresh longer
refetchOnWindowFocus: false,
staleTime: 30 * 1000, // 30 seconds — short enough to pick up servers that finish initializing after first load
refetchOnWindowFocus: true,
refetchOnReconnect: false,
refetchOnMount: false,
refetchOnMount: true,
retry: false,
...config,
},

View file

@ -97,6 +97,8 @@ export default function useChatFunctions({
) => {
setShowStopButton(false);
resetLatestMultiMessage();
text = text.trim();
if (!!isSubmitting || text === '') {
return;
}
@ -134,7 +136,6 @@ export default function useChatFunctions({
// construct the query message
// this is not a real messageId, it is used as placeholder before real messageId returned
text = text.trim();
const intermediateId = overrideUserMessageId ?? v4();
parentMessageId = parentMessageId ?? latestMessage?.messageId ?? Constants.NO_PARENT;

View file

@ -106,6 +106,9 @@ export default function useExportConversation({
// TEXT
const textPart = content[ContentTypes.TEXT];
const text = typeof textPart === 'string' ? textPart : (textPart?.value ?? '');
if (text.trim().length === 0) {
return [];
}
return [sender, text];
}

View file

@ -433,33 +433,6 @@ export function useMCPServerManager({
[startupConfig?.interface?.mcpServers?.placeholder, localize],
);
const batchToggleServers = useCallback(
(serverNames: string[]) => {
const connectedServers: string[] = [];
const disconnectedServers: string[] = [];
serverNames.forEach((serverName) => {
if (isInitializing(serverName)) {
return;
}
const serverStatus = connectionStatus?.[serverName];
if (serverStatus?.connectionState === 'connected') {
connectedServers.push(serverName);
} else {
disconnectedServers.push(serverName);
}
});
setMCPValues(connectedServers);
disconnectedServers.forEach((serverName) => {
initializeServer(serverName);
});
},
[connectionStatus, setMCPValues, initializeServer, isInitializing],
);
const toggleServerSelection = useCallback(
(serverName: string) => {
if (isInitializing(serverName)) {
@ -473,15 +446,10 @@ export function useMCPServerManager({
const filteredValues = currentValues.filter((name) => name !== serverName);
setMCPValues(filteredValues);
} else {
const serverStatus = connectionStatus?.[serverName];
if (serverStatus?.connectionState === 'connected') {
setMCPValues([...currentValues, serverName]);
} else {
initializeServer(serverName);
}
setMCPValues([...currentValues, serverName]);
}
},
[mcpValues, setMCPValues, connectionStatus, initializeServer, isInitializing],
[mcpValues, setMCPValues, isInitializing],
);
const handleConfigSave = useCallback(
@ -677,7 +645,6 @@ export function useMCPServerManager({
isPinned,
setIsPinned,
placeholderText,
batchToggleServers,
toggleServerSelection,
localize,

View file

@ -224,6 +224,7 @@
"com_endpoint_agent": "Aģents",
"com_endpoint_agent_placeholder": "Lūdzu, izvēlieties aģentu",
"com_endpoint_ai": "Mākslīgais intelekts",
"com_endpoint_anthropic_effort": "Kontrolē, cik lielu skaitļošanas piepūli piemēro Claude. Mazāka piepūle ietaupa tokenus un samazina aizkavi; lielāka piepūle nodrošina rūpīgākas atbildes. 'Max' ļauj veikt visdziļāko argumentāciju (tikai Opus 4.6).",
"com_endpoint_anthropic_maxoutputtokens": "Maksimālais atbildē ģenerējamo tokenu skaits. Norādiet zemāku vērtību īsākām atbildēm un augstāku vērtību garākām atbildēm. Piezīme: modeļi var apstāties pirms šī maksimālā skaita sasniegšanas.",
"com_endpoint_anthropic_prompt_cache": "Uzvednes kešatmiņa ļauj atkārtoti izmantot lielu kontekstu vai instrukcijas API izsaukumos, samazinot izmaksas un atbildes aizkavi.",
"com_endpoint_anthropic_temp": "Diapazons no 0 līdz 1. Analītiskiem/atbilžu variantiem izmantot temp vērtību tuvāk 0, bet radošiem un ģeneratīviem uzdevumiem — tuvāk 1. Iesakām mainīt šo vai Top P, bet ne abus.",
@ -265,6 +266,7 @@
"com_endpoint_default_with_num": "noklusējums: {{0}}",
"com_endpoint_disable_streaming": "Izslēgt atbilžu straumēšanu un saņemt visu atbildi uzreiz. Noderīgi tādiem modeļiem kā o3, kas pieprasa organizācijas pārbaudi straumēšanai.",
"com_endpoint_disable_streaming_label": "Atspējot straumēšanu",
"com_endpoint_effort": "Piepūle",
"com_endpoint_examples": "Iestatījumi",
"com_endpoint_export": "Eksportēt",
"com_endpoint_export_share": "Eksportēt/kopīgot",
@ -857,11 +859,13 @@
"com_ui_create_api_key": "Izveidot API atslēgu",
"com_ui_create_assistant": "Izveidot palīgu",
"com_ui_create_link": "Izveidot saiti",
"com_ui_create_mcp_server": "Izveidot MCP serveri",
"com_ui_create_memory": "Izveidot atmiņu",
"com_ui_create_new_agent": "Izveidot jaunu aģentu",
"com_ui_create_prompt": "Izveidot uzvedni",
"com_ui_create_prompt_page": "Jauna uzvedņu konfigurācijas lapa",
"com_ui_created": "Izveidots",
"com_ui_creating": "Notiek izveide...",
"com_ui_creating_image": "Attēla izveide. Var aizņemt brīdi.",
"com_ui_current": "Pašreizējais",
"com_ui_currently_production": "Pašlaik produkcijā",
@ -902,6 +906,8 @@
"com_ui_delete_confirm_strong": "Šis izdzēsīs <strong>{{title}}</strong>",
"com_ui_delete_conversation": "Dzēst sarunu?",
"com_ui_delete_conversation_tooltip": "Dzēst sarunu",
"com_ui_delete_mcp_server": "Vai dzēst MCP serveri?",
"com_ui_delete_mcp_server_name": "Dzēst MCP serveri {{0}}",
"com_ui_delete_memory": "Dzēst atmiņu",
"com_ui_delete_not_allowed": "Dzēšanas darbība nav atļauta",
"com_ui_delete_preset": "Vai dzēst iestatījumu?",
@ -914,6 +920,7 @@
"com_ui_delete_tool_confirm": "Vai tiešām vēlaties dzēst šo rīku?",
"com_ui_delete_tool_save_reminder": "Rīks noņemts. Saglabājiet aģentu, lai piemērotu izmaiņas.",
"com_ui_deleted": "Dzēsts",
"com_ui_deleting": "Dzēš...",
"com_ui_deleting_file": "Dzēšu failu...",
"com_ui_descending": "Dilstošs",
"com_ui_description": "Apraksts",
@ -1084,6 +1091,7 @@
"com_ui_manage": "Pārvaldīt",
"com_ui_marketplace": "Katalogs",
"com_ui_marketplace_allow_use": "Atļaut izmantot katalogu",
"com_ui_max": "Maksimums",
"com_ui_max_favorites_reached": "Sasniegts maksimālais piesprausto elementu skaits ({{0}}). Atvienojiet elementu, lai pievienotu citu.",
"com_ui_max_file_size": "PNG, JPG vai JPEG (maks. {{0}})",
"com_ui_max_tags": "Maksimālais atļautais skaits ir {{0}}, izmantojot jaunākās vērtības.",
@ -1437,6 +1445,8 @@
"com_ui_unset": "Neuzlikts",
"com_ui_untitled": "Bez nosaukuma",
"com_ui_update": "Atjauninājums",
"com_ui_update_mcp_server": "Atjaunināt MCP serveri",
"com_ui_updating": "Atjaunina...",
"com_ui_upload": "Augšupielādēt",
"com_ui_upload_agent_avatar": "Aģenta avatars veiksmīgi atjaunināts",
"com_ui_upload_agent_avatar_label": "Augšupielādēt aģenta avatāra attēlu",

View file

@ -3,11 +3,13 @@
"chat_direction_right_to_left": "Høyre til venstre",
"com_a11y_ai_composing": "KI-en skriver fortsatt.",
"com_a11y_end": "KI-en har fullført svaret sitt.",
"com_a11y_selected": "valgt",
"com_a11y_start": "KI-en har begynt å svare.",
"com_agents_agent_card_label": "{{name}}-agent. {{description}}",
"com_agents_all": "Alle agenter",
"com_agents_all_category": "Alle",
"com_agents_all_description": "Utforsk delte agenter på tvers av alle kategorier",
"com_agents_avatar_upload_error": "Kunne ikke laste opp agentavatar",
"com_agents_by_librechat": "av LibreChat",
"com_agents_category_aftersales": "Salgsoppfølging",
"com_agents_category_aftersales_description": "Agenter for kundeservice, support og oppfølging etter et gjennomført salg.",
@ -26,6 +28,7 @@
"com_agents_category_sales_description": "Agenter som bistår i salgsprosesser og med kundekontakt.",
"com_agents_category_tab_label": "Kategorien {{category}}, {{position}} av {{total}}",
"com_agents_category_tabs_label": "Agentkategorier",
"com_agents_chat_with": "Chat med {{name}}",
"com_agents_clear_search": "Tøm søket",
"com_agents_code_interpreter": "Når aktivert, kan agenten din bruke LibreChat Code Interpreter API for å kjøre generert kode sikkert, inkludert filbehandling. Krever en gyldig API-nøkkel.",
"com_agents_code_interpreter_title": "Code Interpreter API",
@ -33,6 +36,7 @@
"com_agents_copy_link": "Kopier lenke",
"com_agents_create_error": "Det oppstod en feil under oppretting av agenten.",
"com_agents_created_by": "av",
"com_agents_description_card": "Beskrivelse: {{description}}",
"com_agents_description_placeholder": "Valgfritt: Beskriv agenten din her.",
"com_agents_empty_state_heading": "Ingen agenter funnet",
"com_agents_enable_file_search": "Aktiver filsøk",
@ -59,7 +63,9 @@
"com_agents_error_timeout_suggestion": "Sjekk internettforbindelsen din og prøv igjen.",
"com_agents_error_timeout_title": "Tidsavbrudd for tilkobling",
"com_agents_error_title": "Noe gikk galt",
"com_agents_file_context_description": "Filer lastet opp som \"Kontekst\" er analysert som tekst for å supplere agenten sine instruksjoner. Dersom OCR er tilgjengelig, eller er konfigurert for den opplastede filtypen, vil prosessen bli brukt til å hente ut tekst. Dette er ideelt for dokumenter, bilder med tekst, eller PDFer som krever det fulle tekstinnholdet i en fil.",
"com_agents_file_context_disabled": "Agenten må være opprettet før du kan laste opp filer for filkontekst.",
"com_agents_file_context_label": "Filkontekst",
"com_agents_file_search_disabled": "Agenten må være opprettet før du kan laste opp filer for filsøk.",
"com_agents_file_search_info": "Når dette er aktivert, vil agenten bruke de eksakte filnavnene listet nedenfor for å hente relevant kontekst fra disse filene.",
"com_agents_grid_announcement": "Viser {{count}} agenter i kategorien {{category}}.",
@ -87,7 +93,7 @@
"com_agents_search_empty_heading": "Ingen søkeresultater",
"com_agents_search_info": "Når aktivert, kan agenten din søke på nettet for oppdatert informasjon. Krever en gyldig API-nøkkel.",
"com_agents_search_instructions": "Skriv for å søke etter agenter etter navn eller beskrivelse.",
"com_agents_search_name": "Søk agenter etter navn",
"com_agents_search_name": "Søk etter agenter ved navn",
"com_agents_search_no_results": "Ingen agenter funnet for «{{query}}».",
"com_agents_search_placeholder": "Søk agenter ...",
"com_agents_see_more": "Se mer",
@ -139,6 +145,7 @@
"com_assistants_update_actions_success": "Handlingen ble opprettet eller oppdatert.",
"com_assistants_update_error": "Det oppstod en feil under oppdatering av assistenten.",
"com_assistants_update_success": "Oppdatering fullført",
"com_assistants_update_success_name": "Oppdatering av {{name}} vellykket",
"com_auth_already_have_account": "Har du allerede en konto?",
"com_auth_apple_login": "Logg inn med Apple",
"com_auth_back_to_login": "Tilbake til innlogging",
@ -217,10 +224,11 @@
"com_endpoint_agent": "Agent",
"com_endpoint_agent_placeholder": "Velg en agent",
"com_endpoint_ai": "KI",
"com_endpoint_anthropic_effort": "Kontrollerer hvor mye innsats Claude legger i beregning. Lavere innsats sparer tokens og reduserer treghet, høyere innsats produserer grundigere responser. \"Maks\" gir den høyeste graden av resonnering (kun Opus 4.6).",
"com_endpoint_anthropic_maxoutputtokens": "Maksimalt antall tokens som kan genereres i svaret. Angi en lavere verdi for kortere svar og en høyere verdi for lengre svar. Merk: Modeller kan stoppe før de når dette maksimumet.",
"com_endpoint_anthropic_prompt_cache": "Prompt-mellomlagring gjør det mulig å gjenbruke stor kontekst eller instruksjoner på tvers av API-kall, noe som reduserer kostnader og ventetid.",
"com_endpoint_anthropic_temp": "Varierer fra 0 til 1. Bruk en temperatur nærmere 0 for analytiske oppgaver, og nærmere 1 for kreative og generative oppgaver. Vi anbefaler å endre enten denne eller Topp P, men ikke begge.",
"com_endpoint_anthropic_thinking": "Aktiverer intern resonnering for støttede Claude-modeller (f.eks. 3.7 Sonnet). Merk: Krever at \"Tenkebudsjett\" er satt og er lavere enn \"Maks utdata-tokens\".",
"com_endpoint_anthropic_thinking": "Aktiverer intern resonnering for støttede Claude-modeller. For nyere modeller (Opus 4.6+) brukes adaptiv tenkning kontrollert av Effort-parameteren. For eldre modeller kreves det at \"Thinking Budget\" er satt og lavere enn \"Max Output Tokens\".",
"com_endpoint_anthropic_thinking_budget": "Bestemmer det maksimale antallet tokens Claude kan bruke for sin interne resonneringsprosess. Et større budsjett kan forbedre svarkvaliteten for komplekse problemer. Denne verdien må være lavere enn \"Maks utdata-tokens\".",
"com_endpoint_anthropic_topk": "Top-k endrer hvordan modellen velger tokens for utdata. En top-k på 1 betyr at det valgte tokenet er det mest sannsynlige (grådig dekoding). En top-k på 3 betyr at det neste tokenet velges blant de 3 mest sannsynlige (ved hjelp av temperatur).",
"com_endpoint_anthropic_topp": "Top-p endrer hvordan modellen velger tokens for utdata. Tokens velges fra de mest sannsynlige til summen av sannsynlighetene deres er lik top-p-verdien.",
@ -258,6 +266,7 @@
"com_endpoint_default_with_num": "standard: {{0}}",
"com_endpoint_disable_streaming": "Deaktiver strømming av svar og motta hele svaret på en gang. Nyttig for modeller som krever organisasjonsverifisering for strømming.",
"com_endpoint_disable_streaming_label": "Deaktiver strømming",
"com_endpoint_effort": "Innsats",
"com_endpoint_examples": "Forhåndsinnstillinger",
"com_endpoint_export": "Eksporter",
"com_endpoint_export_share": "Eksporter/Del",
@ -274,7 +283,7 @@
"com_endpoint_instructions_assistants_placeholder": "Overstyrer assistentens instruksjoner. Nyttig for å endre atferden for en enkelt kjøring.",
"com_endpoint_max_output_tokens": "Maks utdata-tokens",
"com_endpoint_message": "Melding",
"com_endpoint_message_new": "Melding {{0}}",
"com_endpoint_message_new": "Send melding til {{0}}",
"com_endpoint_message_not_appendable": "Rediger meldingen din eller regenerer.",
"com_endpoint_my_preset": "Min forhåndsinnstilling",
"com_endpoint_no_presets": "Ingen forhåndsinnstillinger ennå. Bruk innstillingsknappen for å lage en.",
@ -308,6 +317,7 @@
"com_endpoint_preset_default_removed": "er ikke lenger standard forhåndsinnstilling.",
"com_endpoint_preset_delete_confirm": "Er du sikker på at du vil slette denne forhåndsinnstillingen?",
"com_endpoint_preset_delete_error": "Det oppstod en feil under sletting av forhåndsinnstillingen. Vennligst prøv igjen.",
"com_endpoint_preset_delete_success": "Sletting av forhåndsinnstilling vellykket",
"com_endpoint_preset_import": "Forhåndsinnstilling importert!",
"com_endpoint_preset_import_error": "Det oppstod en feil under importering av forhåndsinnstillingen. Vennligst prøv igjen.",
"com_endpoint_preset_name": "Navn på forhåndsinnstilling",
@ -348,6 +358,7 @@
"com_error_files_process": "Det oppstod en feil under behandling av filen.",
"com_error_files_upload": "Det oppstod en feil under opplasting av filen.",
"com_error_files_upload_canceled": "Forespørselen om filopplasting ble avbrutt. Merk: Filopplastingen kan fortsatt behandles og må slettes manuelt.",
"com_error_files_upload_too_large": "Filen er for stor. Vennligst last opp en fil som er mindre enn {{}} MB",
"com_error_files_validation": "Det oppstod en feil under validering av filen.",
"com_error_google_tool_conflict": "Bruk av innebygde Google-verktøy støttes ikke sammen med eksterne verktøy. Deaktiver enten de innebygde eller de eksterne verktøyene.",
"com_error_heic_conversion": "Konvertering av HEIC-bilde til JPEG mislyktes. Prøv å konvertere bildet manuelt eller bruk et annet format.",
@ -360,6 +371,7 @@
"com_error_moderation": "Innholdet du sendte inn ble flagget av vårt moderasjonssystem. Vi kan ikke fortsette med dette emnet. Rediger meldingen din eller start en ny samtale.",
"com_error_no_base_url": "Ingen base-URL funnet. Oppgi en og prøv igjen.",
"com_error_no_user_key": "Ingen nøkkel funnet. Oppgi en nøkkel og prøv igjen.",
"com_error_refusal": "Responsen ble avslått av sikkerhetsfiltere. Skriv om på meldingen din og prøv igjen. Dersom denne feilmeldingen forekommer ofte imens du bruker Claude Sonnet 4.5 eller Opus 4.1, kan du prøve Sonnet 4, som har andre bruksrestriksjoner.",
"com_file_pages": "Sider: {{pages}}",
"com_file_source": "Fil",
"com_file_unknown": "Ukjent fil",
@ -368,9 +380,12 @@
"com_files_download_progress": "{{0}} av {{1}} filer",
"com_files_downloading": "Laster ned filer",
"com_files_filter": "Filtrer filer ...",
"com_files_filter_by": "Filtrer filer etter...",
"com_files_no_results": "Ingen resultater.",
"com_files_number_selected": "{{0}} av {{1}} valgt",
"com_files_preparing_download": "Forbereder nedlasting ...",
"com_files_result_found": "{{count}} resultater funnet",
"com_files_results_found": "{{count}} resultater funnet",
"com_files_sharepoint_picker_title": "Velg filer",
"com_files_table": "Fil-tabell",
"com_files_upload_local_machine": "Fra lokal datamaskin",
@ -421,6 +436,7 @@
"com_nav_chat_commands": "Samtalekommandoer",
"com_nav_chat_commands_info": "Disse kommandoene aktiveres ved å skrive bestemte tegn i begynnelsen av meldingen din. Hver kommando utløses av sitt angitte prefiks. Du kan deaktivere dem hvis du ofte bruker disse tegnene til å starte meldinger.",
"com_nav_chat_direction": "Samtaleretning",
"com_nav_chat_direction_selected": "Chat retning: {{direction}}",
"com_nav_clear_all_chats": "Fjern alle samtaler",
"com_nav_clear_cache_confirm_message": "Er du sikker på at du vil tømme mellomlageret?",
"com_nav_clear_conversation": "Fjern samtaler",
@ -428,9 +444,11 @@
"com_nav_close_sidebar": "Lukk sidefelt",
"com_nav_commands": "Kommandoer",
"com_nav_confirm_clear": "Bekreft fjerning",
"com_nav_control_panel": "Kontrollpanel",
"com_nav_conversation_mode": "Samtalemodus",
"com_nav_convo_menu_options": "Samtalemenyvalg",
"com_nav_db_sensitivity": "Desibelfølsomhet",
"com_nav_default_temporary_chat": "Midlertidig Chat som standard",
"com_nav_delete_account": "Slett konto",
"com_nav_delete_account_button": "Slett kontoen min permanent",
"com_nav_delete_account_confirm": "Slett konto er du sikker?",
@ -464,6 +482,7 @@
"com_nav_info_code_artifacts": "Aktiverer visning av eksperimentelle kodeartefakter ved siden av samtalen.",
"com_nav_info_code_artifacts_agent": "Aktiverer bruk av kodeartefakter for denne agenten. Som standard legges det til tilleggsinstruksjoner for bruk av artefakter, med mindre \"Egendefinert prompt-modus\" er aktivert.",
"com_nav_info_custom_prompt_mode": "Når aktivert, vil standard systemprompt for artefakter ikke bli inkludert. Alle instruksjoner for å generere artefakter må gis manuelt i denne modusen.",
"com_nav_info_default_temporary_chat": "Når dette er påskrudd vil nye chatter starte med \"midlertidig chat\" som standard. Midlertidige chatter blir ikke lagret til historikken din.",
"com_nav_info_enter_to_send": "Når aktivert, vil et trykk på `ENTER` sende meldingen din. Når deaktivert, vil et trykk på Enter legge til en ny linje. Du må da trykke `CTRL + ENTER` / `⌘ + ENTER` for å sende.",
"com_nav_info_fork_change_default": "`Kun synlige meldinger` inkluderer bare den direkte stien til den valgte meldingen. `Inkluder relaterte grener` legger til grener langs stien. `Inkluder alt til/fra her` inkluderer alle tilknyttede meldinger og grener.",
"com_nav_info_fork_split_target_setting": "Når aktivert, vil forgreningen starte fra målmeldingen til den siste meldingen i samtalen, i henhold til den valgte atferden.",
@ -473,6 +492,7 @@
"com_nav_info_save_draft": "Når aktivert, vil teksten og vedleggene du skriver inn bli lagret lokalt som et utkast. Utkastet er tilgjengelig selv om du laster siden på nytt eller bytter samtale. Utkastet slettes når meldingen er sendt.",
"com_nav_info_show_thinking": "Når aktivert, vil tenke-nedtrekksmenyene vises som standard, slik at du kan se KI-ens resonnement i sanntid. Når deaktivert, vil de være lukket for et renere grensesnitt.",
"com_nav_info_user_name_display": "Når aktivert, vil brukernavnet ditt vises over hver melding du sender. Når deaktivert, vil du bare se \"Du\" over meldingene dine.",
"com_nav_keep_screen_awake": "Hold skjermen på gjennom generering av respons",
"com_nav_lang_arabic": "Arabisk (العربية)",
"com_nav_lang_armenian": "Armensk (Հայերեն)",
"com_nav_lang_auto": "Automatisk gjenkjenning",
@ -491,16 +511,20 @@
"com_nav_lang_german": "Tysk (Deutsch)",
"com_nav_lang_hebrew": "Hebraisk (עברית)",
"com_nav_lang_hungarian": "Ungarsk (Magyar)",
"com_nav_lang_icelandic": "Islandsk",
"com_nav_lang_indonesia": "Indonesisk (Indonesia)",
"com_nav_lang_italian": "Italiensk (Italiano)",
"com_nav_lang_japanese": "Japansk (日本語)",
"com_nav_lang_korean": "Koreansk (한국어)",
"com_nav_lang_latvian": "Latvisk (Latviski)",
"com_nav_lang_lithuanian": "Litauisk",
"com_nav_lang_norwegian_bokmal": "Norsk bokmål",
"com_nav_lang_norwegian_nynorsk": "Norsk nynorsk",
"com_nav_lang_persian": "Persisk (فارسی)",
"com_nav_lang_polish": "Polsk (Polski)",
"com_nav_lang_portuguese": "Portugisisk (Português)",
"com_nav_lang_russian": "Russisk (Русский)",
"com_nav_lang_slovak": "Slovensk",
"com_nav_lang_slovenian": "Slovensk",
"com_nav_lang_spanish": "Spansk (Español)",
"com_nav_lang_swedish": "Svensk (Svenska)",
@ -516,8 +540,18 @@
"com_nav_log_out": "Logg ut",
"com_nav_long_audio_warning": "Lengre tekster vil ta lengre tid å behandle.",
"com_nav_maximize_chat_space": "Maksimer samtaleplass",
"com_nav_mcp_access_revoked": "Tilbakekalling av MCP servertilgang vellykket.",
"com_nav_mcp_configure_server": "Konfigurer {{0}}",
"com_nav_mcp_connect": "Koble til",
"com_nav_mcp_connect_server": "Koble til {{0}}",
"com_nav_mcp_reconnect": "Koble til på nytt",
"com_nav_mcp_status_connected": "Tilkoblet",
"com_nav_mcp_status_connecting": "{{0}} - Kobler til",
"com_nav_mcp_status_disconnected": "Frakoblet",
"com_nav_mcp_status_error": "Feil",
"com_nav_mcp_status_initializing": "Starter",
"com_nav_mcp_status_needs_auth": "Trenger Auth",
"com_nav_mcp_status_unknown": "Ukjent",
"com_nav_mcp_vars_update_error": "Feil ved oppdatering av egendefinerte MCP-brukervariabler.",
"com_nav_mcp_vars_updated": "Egendefinerte MCP-brukervariabler ble oppdatert.",
"com_nav_modular_chat": "Aktiver bytte av endepunkter midt i en samtale",
@ -538,6 +572,7 @@
"com_nav_setting_balance": "Saldo",
"com_nav_setting_chat": "Samtale",
"com_nav_setting_data": "Datakontroll",
"com_nav_setting_delay": "Forsinkelse (s)",
"com_nav_setting_general": "Generelt",
"com_nav_setting_mcp": "MCP-innstillinger",
"com_nav_setting_personalization": "Personalisering",
@ -555,6 +590,7 @@
"com_nav_theme_dark": "Mørkt",
"com_nav_theme_light": "Lyst",
"com_nav_theme_system": "System",
"com_nav_toggle_sidebar": "Skru sidebar av/på",
"com_nav_tool_dialog": "Assistentverktøy",
"com_nav_tool_dialog_agents": "Agentverktøy",
"com_nav_tool_dialog_description": "Assistenten må lagres for at verktøyvalg skal vedvare.",
@ -605,17 +641,27 @@
"com_ui_action_button": "Handlingsknapp",
"com_ui_active": "Aktiv",
"com_ui_add": "Legg til",
"com_ui_add_code_interpreter_api_key": "Legg til kodetolk API nøkkel",
"com_ui_add_first_bookmark": "Klikk på en chat for å legge til",
"com_ui_add_first_mcp_server": "Lag din første MCP server for å komme i gang",
"com_ui_add_first_prompt": "Lag din første prompt for å komme i gang",
"com_ui_add_mcp": "Legg til MCP",
"com_ui_add_mcp_server": "Legg til MCP-server",
"com_ui_add_model_preset": "Legg til en modell eller forhåndsinnstilling for et ekstra svar.",
"com_ui_add_multi_conversation": "Legg til flersamtale",
"com_ui_add_special_variables": "Legg til spesialvariable",
"com_ui_add_web_search_api_keys": "Legg til nettsøk API-nøkler",
"com_ui_adding_details": "Legger til detaljer",
"com_ui_additional_details": "Flere detaljer",
"com_ui_admin": "Admin",
"com_ui_admin_access_warning": "Deaktivering av admin-tilgang til denne funksjonen kan forårsake uventede UI-problemer. Hvis lagret, kan dette kun tilbakestilles via konfigurasjonsfilen (librechat.yaml).",
"com_ui_admin_settings": "Admin-innstillinger",
"com_ui_admin_settings_section": "Admininnstillinger - {{section}}",
"com_ui_advanced": "Avansert",
"com_ui_advanced_settings": "Avanserte innstillinger",
"com_ui_agent": "Agent",
"com_ui_agent_api_keys": "Agent API-nøkler",
"com_ui_agent_api_keys_description": "Lag API-nøkler for å få tilgang til agenter via API",
"com_ui_agent_category_aftersales": "Ettersalg",
"com_ui_agent_category_finance": "Finans",
"com_ui_agent_category_general": "Generelt",
@ -631,6 +677,17 @@
"com_ui_agent_deleted": "Agenten ble slettet.",
"com_ui_agent_duplicate_error": "Det oppstod en feil under duplisering av agenten.",
"com_ui_agent_duplicated": "Agenten ble duplisert.",
"com_ui_agent_handoff_add": "Legg til overleveringsagent",
"com_ui_agent_handoff_description": "Beskrivelse av overlevering",
"com_ui_agent_handoff_description_placeholder": "f.eks., Overfør til dataanalytiker for statistisk analyse",
"com_ui_agent_handoff_info": "Konfigurer agenter som denne agenten kan overføre samtaler til når spesifikk ekspertise er nødvendig",
"com_ui_agent_handoff_info_2": "Hver overlevering lager et overføringsverktøy som tillater sømløs ruting til spesialistagenter med kontekst.",
"com_ui_agent_handoff_max": "Maksgrensen på {{0}} overleveringsagenter er nådd",
"com_ui_agent_handoff_prompt": "Gjennomføringsinnhold",
"com_ui_agent_handoff_prompt_key": "Innholdsparameter navn (standard: \"instruksjoner\")",
"com_ui_agent_handoff_prompt_key_placeholder": "Merk innholdet som er sendt (standard: \"instruksjoner\")",
"com_ui_agent_handoff_prompt_placeholder": "Fortell denne agenten hvilket innhold den skal generere og videreføre til overleveringsagenten. Du må legge til noe her for å skru på denne funksjonen.",
"com_ui_agent_handoffs": "Agentoverleveringer",
"com_ui_agent_name_is_required": "Agentnavn er påkrevd.",
"com_ui_agent_recursion_limit": "Maks agentsteg",
"com_ui_agent_recursion_limit_info": "Begrenser hvor mange steg agenten kan ta i en kjøring før den gir et endelig svar. Standard er 25 steg. Et steg er enten en API-forespørsel eller bruk av et verktøy.",
@ -652,12 +709,23 @@
"com_ui_agents": "Agenter",
"com_ui_agents_allow_create": "Tillat oppretting av agenter",
"com_ui_agents_allow_share": "Tillat deling av agenter",
"com_ui_agents_allow_share_public": "Tillat offentlig deling av agenter",
"com_ui_agents_allow_use": "Tillat bruk av agenter",
"com_ui_all": "alle",
"com_ui_all_proper": "Alle",
"com_ui_analyzing": "Analyserer",
"com_ui_analyzing_finished": "Ferdig med å analysere",
"com_ui_api_key": "API-nøkkel",
"com_ui_api_key_copied": "API-nøkler kopiert til utklippstavlen",
"com_ui_api_key_create_error": "Kunne ikke lage API-nøkkel",
"com_ui_api_key_created": "Oppretting av API-nøkkel vellykket",
"com_ui_api_key_delete_error": "Kunne ikke slette API-nøkkel",
"com_ui_api_key_deleted": "Sletting av API-nøkkel vellykket",
"com_ui_api_key_name": "Navn på nøkkel",
"com_ui_api_key_name_placeholder": "Min API-nøkkel",
"com_ui_api_key_name_required": "Navn på API-nøkkel påkrevd",
"com_ui_api_key_warning": "Husk å kopiere API-nøkkelen din nå, du vil ikke kunne se den igjen!",
"com_ui_api_keys_load_error": "Kunne ikke laste inn API-nøkler",
"com_ui_archive": "Arkiver",
"com_ui_archive_delete_error": "Sletting av arkivert samtale mislyktes.",
"com_ui_archive_error": "Arkivering av samtale mislyktes.",
@ -674,6 +742,7 @@
"com_ui_assistants_output": "Assistent-utdata",
"com_ui_at_least_one_owner_required": "Minst én eier er påkrevd.",
"com_ui_attach_error": "Kan ikke legge ved fil. Opprett eller velg en samtale, eller prøv å laste siden på nytt.",
"com_ui_attach_error_disabled": "FIlopplasting er deaktivert for dette endepunktet",
"com_ui_attach_error_openai": "Kan ikke legge ved assistentfiler til andre endepunkter.",
"com_ui_attach_error_size": "Filstørrelsesgrensen er overskredet for endepunktet:",
"com_ui_attach_error_type": "Filtypen støttes ikke for endepunktet:",
@ -690,6 +759,7 @@
"com_ui_azure": "Azure",
"com_ui_azure_ad": "Entra ID",
"com_ui_back": "Tilbake",
"com_ui_back_to_builder": "Tilbake til bygger",
"com_ui_back_to_chat": "Tilbake til samtale",
"com_ui_back_to_prompts": "Tilbake til prompter",
"com_ui_backup_code_number": "Kode #{{number}}",
@ -701,10 +771,12 @@
"com_ui_basic": "Grunnleggende",
"com_ui_basic_auth_header": "Grunnleggende autorisasjonshode",
"com_ui_bearer": "Bearer",
"com_ui_beta": "Beta",
"com_ui_bookmark_delete_confirm": "Er du sikker på at du vil slette dette bokmerket?",
"com_ui_bookmarks": "Bokmerker",
"com_ui_bookmarks_add": "Legg til bokmerker",
"com_ui_bookmarks_add_to_conversation": "Legg til i gjeldende samtale",
"com_ui_bookmarks_count_selected": "Bokmerker, {{count}} valgt",
"com_ui_bookmarks_create_error": "Det oppstod en feil under oppretting av bokmerket.",
"com_ui_bookmarks_create_exists": "Dette bokmerket finnes allerede.",
"com_ui_bookmarks_create_success": "Bokmerket ble opprettet.",
@ -719,52 +791,88 @@
"com_ui_bookmarks_title": "Tittel",
"com_ui_bookmarks_update_error": "Det oppstod en feil under oppdatering av bokmerket.",
"com_ui_bookmarks_update_success": "Bokmerket ble oppdatert.",
"com_ui_branch_created": "Oppretting av gren vellykket",
"com_ui_branch_error": "Kunne ikke opprette gren",
"com_ui_branch_message": "Lag en gren fra denne responsen",
"com_ui_by_author": "av {{0}}",
"com_ui_callback_url": "Tilbakekallings-URL",
"com_ui_cancel": "Avbryt",
"com_ui_cancelled": "Avbrutt",
"com_ui_category": "Kategori",
"com_ui_change_version": "Endre versjon",
"com_ui_chat": "Samtale",
"com_ui_chat_history": "Samtalehistorikk",
"com_ui_chats": "Samtaler",
"com_ui_check_internet": "Sjekk din internettforbindelse",
"com_ui_clear": "Fjern",
"com_ui_clear_all": "Fjern alle",
"com_ui_clear_browser_cache": "Tøm nettleserbufferen",
"com_ui_clear_presets": "Tøm forhåndsinnstillinger",
"com_ui_clear_search": "Tøm søk",
"com_ui_click_to_close": "Klikk her for å lukke",
"com_ui_click_to_view_var": "Klikk her for å se {{0}}",
"com_ui_client_id": "Klient-ID",
"com_ui_client_secret": "Klienthemmelighet",
"com_ui_close": "Lukk",
"com_ui_close_menu": "Lukk meny",
"com_ui_close_settings": "Lukk innstillinger",
"com_ui_close_var": "Lukk {{0}}",
"com_ui_close_window": "Lukk vindu",
"com_ui_code": "Kode",
"com_ui_collapse": "Skjul",
"com_ui_collapse_chat": "Skjul samtale",
"com_ui_collapse_thoughts": "Skjul tanker",
"com_ui_command_placeholder": "Valgfritt: Skriv inn en kommando for prompten, ellers vil navnet bli brukt.",
"com_ui_command_usage_placeholder": "Velg en prompt med kommando eller navn.",
"com_ui_complete_setup": "Fullfør oppsett",
"com_ui_concise": "Kortfattet",
"com_ui_configure": "Konfigurer",
"com_ui_configure_mcp_variables_for": "Konfigurer variabler for {{0}}",
"com_ui_confirm": "Bekreft",
"com_ui_confirm_action": "Bekreft handling",
"com_ui_confirm_admin_use_change": "Endring av denne innstillingen vil blokkere tilgang for administratorer, inkludert deg selv. Er du sikker på at du vil fortsette?",
"com_ui_confirm_change": "Bekreft endring",
"com_ui_connecting": "Kobler til",
"com_ui_contact_admin_if_issue_persists": "Kontakt en adiministrator dersom problemet vedvarer",
"com_ui_context": "Kontekst",
"com_ui_context_filter_sort": "Filtrer og sortér etter kontekst",
"com_ui_continue": "Fortsett",
"com_ui_continue_oauth": "Fortsett med OAuth",
"com_ui_control_bar": "Kontroller bar",
"com_ui_controls": "Kontroller",
"com_ui_conversation": "samtale",
"com_ui_conversation_label": "{{tittel}} samtale",
"com_ui_conversations": "samtaler",
"com_ui_convo_archived": "Samtale arkivert",
"com_ui_convo_delete_error": "Sletting av samtale mislyktes.",
"com_ui_convo_delete_success": "Sletting av samtale vellykket",
"com_ui_copied": "Kopiert!",
"com_ui_copied_to_clipboard": "Kopiert til utklippstavlen",
"com_ui_copy": "Kopier",
"com_ui_copy_code": "Kopier kode",
"com_ui_copy_link": "Kopier lenke",
"com_ui_copy_stack_trace": "Kopier stack trace",
"com_ui_copy_thoughts_to_clipboard": "Kopier tanker til utklippstavle",
"com_ui_copy_to_clipboard": "Kopier til utklippstavlen",
"com_ui_copy_url_to_clipboard": "Kopier URL til utklippstavlen",
"com_ui_create": "Opprett",
"com_ui_create_api_key": "Opprett API-nøkkel",
"com_ui_create_assistant": "Lag assistent",
"com_ui_create_link": "Opprett lenke",
"com_ui_create_mcp_server": "Lag MCP-server",
"com_ui_create_memory": "Opprett minne",
"com_ui_create_new_agent": "L",
"com_ui_create_prompt": "Opprett prompt",
"com_ui_create_prompt_page": "ag ",
"com_ui_created": "Opprettet",
"com_ui_creating": "Oppretter...",
"com_ui_creating_image": "Oppretter bilde. Dette kan ta et øyeblikk.",
"com_ui_current": "Gjeldende",
"com_ui_currently_production": "For øyeblikket i produksjon",
"com_ui_custom": "Egendefinert",
"com_ui_custom_header_name": "Egendefinert overskriftsnavn",
"com_ui_custom_prompt_mode": "Egendefinert prompt-modus",
"com_ui_dark_theme_enabled": "Mørkt tema aktivert",
"com_ui_dashboard": "Oversikt",
"com_ui_date": "Dato",
"com_ui_date_april": "April",
@ -781,6 +889,7 @@
"com_ui_date_previous_30_days": "Siste 30 dager",
"com_ui_date_previous_7_days": "Siste 7 dager",
"com_ui_date_september": "September",
"com_ui_date_sort": "Sorter etter dato",
"com_ui_date_today": "I dag",
"com_ui_date_yesterday": "I går",
"com_ui_decline": "Jeg godtar ikke",
@ -788,19 +897,30 @@
"com_ui_delete": "Slett",
"com_ui_delete_action": "Slett handling",
"com_ui_delete_action_confirm": "Er du sikker på at du vil slette denne handlingen?",
"com_ui_delete_agent": "Slett agent",
"com_ui_delete_agent_confirm": "Er du sikker på at du vil slette denne agenten?",
"com_ui_delete_assistant": "Slett assistent",
"com_ui_delete_assistant_confirm": "Er du sikker på at du vil slette denne assistenten? Dette kan ikke angres.",
"com_ui_delete_confirm": "Dette vil slette",
"com_ui_delete_confirm_prompt_version_var": "Dette vil slette den valgte versjonen for \"{{0}}\". Hvis ingen andre versjoner eksisterer, vil prompten bli slettet.",
"com_ui_delete_confirm_strong": "Dette vil slette <strong>{{title}}</strong>",
"com_ui_delete_conversation": "Slette samtalen?",
"com_ui_delete_conversation_tooltip": "Slett samtale",
"com_ui_delete_mcp_server": "Ønsker du å slette MCP-serveren?",
"com_ui_delete_mcp_server_name": "Slett MCP-server {{0}}",
"com_ui_delete_memory": "Slett minne",
"com_ui_delete_not_allowed": "Sletteoperasjon er ikke tillatt.",
"com_ui_delete_preset": "Ønsker du å slette forhåndsinnstillingen",
"com_ui_delete_prompt": "Slette prompten?",
"com_ui_delete_prompt_name": "Slett prompt - {{name}}",
"com_ui_delete_shared_link": "Slette delt lenke?",
"com_ui_delete_shared_link_heading": "Slett delt lenke",
"com_ui_delete_success": "Vellykket slettet",
"com_ui_delete_tool": "Slett verktøy",
"com_ui_delete_tool_confirm": "Er du sikker på at du vil slette dette verktøyet?",
"com_ui_delete_tool_save_reminder": "Verktøy fjernet. Lagre agenten for å ta i bruk endreinger.",
"com_ui_deleted": "Slettet",
"com_ui_deleting": "Sletter...",
"com_ui_deleting_file": "Sletter fil ...",
"com_ui_descending": "Synkende",
"com_ui_description": "Beskrivelse",
@ -808,37 +928,52 @@
"com_ui_deselect_all": "Fravelg alle",
"com_ui_detailed": "Detaljert",
"com_ui_disabling": "Deaktiverer ...",
"com_ui_done": "Ferdig",
"com_ui_download": "Last ned",
"com_ui_download_artifact": "Last ned artefakt",
"com_ui_download_backup": "Last ned reservekoder",
"com_ui_download_backup_tooltip": "Før du fortsetter, last ned reservekodene dine. Du vil trenge dem for å få tilgang igjen hvis du mister autentiseringsenheten din.",
"com_ui_download_error": "Feil ved nedlasting av fil. Filen kan ha blitt slettet.",
"com_ui_download_error_logs": "Last ned feillogger",
"com_ui_drag_drop": "Dra og slipp fil(er) her, eller klikk for å velge.",
"com_ui_dropdown_variables": "Nedtrekksvariabler:",
"com_ui_dropdown_variables_info": "Opprett egendefinerte nedtrekksmenyer for promptene dine: `{{variabelnavn:valg1|valg2|valg3}}`",
"com_ui_duplicate": "Dupliser",
"com_ui_duplicate_agent": "Dupliser Agent",
"com_ui_duplication_error": "Det oppstod en feil under duplisering av samtalen.",
"com_ui_duplication_processing": "Dupliserer samtale ...",
"com_ui_duplication_success": "Samtalen ble duplisert.",
"com_ui_edit": "Rediger",
"com_ui_edit_editing_image": "Redigerer bilde",
"com_ui_edit_mcp_server": "Rediger MCP-server",
"com_ui_edit_mcp_server_dialog_description": "Unik Serveridentifikator: {{serverName}}",
"com_ui_edit_memory": "Rediger minne",
"com_ui_edit_preset_title": "Rediger forhåndsinnstilling - {{title}}",
"com_ui_edit_prompt_page": "Rediger promptside",
"com_ui_editable_message": "Redigerbar melding",
"com_ui_editor_instructions": "Dra bildet for å flytte • Bruk zoom slider eller knapper for å justere størrelse",
"com_ui_empty_category": "-",
"com_ui_endpoint": "Endepunkt",
"com_ui_endpoint_menu": "LLM-endepunktmeny",
"com_ui_enter": "Enter",
"com_ui_enter_api_key": "Skriv inn API-nøkkel",
"com_ui_enter_description": "Angi beskrivelse (valgfritt)",
"com_ui_enter_key": "Skriv inn nøkkel",
"com_ui_enter_name": "Angi navn",
"com_ui_enter_openapi_schema": "Skriv inn ditt OpenAPI-skjema her.",
"com_ui_enter_value": "Skriv inn verdi",
"com_ui_error": "Feil",
"com_ui_error_connection": "Feil ved tilkobling til serveren, prøv å laste siden på nytt.",
"com_ui_error_message_prefix": "Feilmelding:",
"com_ui_error_save_admin_settings": "Det oppstod en feil under lagring av admin-innstillingene.",
"com_ui_error_try_following_prefix": "Vennligst prøv en av de følgende",
"com_ui_error_unexpected": "Oops! Noe uforventet skjedde",
"com_ui_error_updating_preferences": "Feil ved oppdatering av preferanser.",
"com_ui_everyone_permission_level": "Alles tillatelsesnivå",
"com_ui_examples": "Eksempler",
"com_ui_expand": "Utvid",
"com_ui_expand_chat": "Utvid samtale",
"com_ui_expand_thoughts": "Utvidede tanker",
"com_ui_export_convo_modal": "Eksporter samtale-modal",
"com_ui_feedback_more": "Mer ...",
"com_ui_feedback_more_information": "Gi ytterligere tilbakemelding",
@ -858,10 +993,12 @@
"com_ui_feedback_tag_unjustified_refusal": "Nektet uten grunn",
"com_ui_field_max_length": "{{field}} må inneholde mindre enn {{length}} tegn",
"com_ui_field_required": "Dette feltet er påkrevd.",
"com_ui_file_input_avatar_label": "Filinput for avatar",
"com_ui_file_size": "Filstørrelse",
"com_ui_file_token_limit": "Tokengrense for filer",
"com_ui_file_token_limit_desc": "Angir maksimalt antall tokens som kan benyttes for filhåndtering. En høyere grense kan øke behandlingstid og kostnader.",
"com_ui_files": "Filer",
"com_ui_filter_mcp_servers": "Filtrer MCP-servere etter navn",
"com_ui_filter_prompts": "Filtrer prompter",
"com_ui_filter_prompts_name": "Filtrer prompter etter navn",
"com_ui_final_touch": "Siste finpuss",
@ -885,6 +1022,7 @@
"com_ui_fork_info_visible": "Dette alternativet forgrener kun de synlige meldingene, altså den direkte stien til målmeldingen, uten noen grener.",
"com_ui_fork_more_details_about": "Se tilleggsinformasjon om forgrening-alternativet «{{0}}»",
"com_ui_fork_more_info_options": "Se detaljert forklaring av alle forgrening-alternativer.",
"com_ui_fork_open_menu": "Åpne forgreningsmeny",
"com_ui_fork_processing": "Forgrener samtale ...",
"com_ui_fork_remember": "Husk",
"com_ui_fork_remember_checked": "Ditt valg vil bli husket. Endre dette når som helst i innstillingene.",
@ -903,7 +1041,11 @@
"com_ui_good_evening": "God kveld",
"com_ui_good_morning": "God morgen",
"com_ui_group": "Gruppe",
"com_ui_handoff_instructions": "Overleveringsinstruksjoner",
"com_ui_happy_birthday": "Det er min første bursdag!",
"com_ui_header_format": "Overskriftsformat",
"com_ui_hide": "Skjul",
"com_ui_hide_code": "Skjul kode",
"com_ui_hide_image_details": "Skjul bildedetaljer",
"com_ui_hide_password": "Skjul passord",
"com_ui_hide_qr": "Skjul QR-kode",
@ -920,18 +1062,26 @@
"com_ui_import_conversation_file_type_error": "Importtypen støttes ikke.",
"com_ui_import_conversation_info": "Importer samtaler fra en JSON-fil.",
"com_ui_import_conversation_success": "Samtalene ble importert.",
"com_ui_import_conversation_upload_error": "Feil under opplasting av fil. Vennligst prøv igjen.",
"com_ui_importing": "Importerer",
"com_ui_include_shadcnui": "Inkluder instruksjoner for shadcn/ui-komponenter",
"com_ui_initializing": "Initialiserer...",
"com_ui_input": "Inndata",
"com_ui_instructions": "Instruksjoner",
"com_ui_key": "Nøkkel",
"com_ui_key_required": "API-nøkkel påkrevd",
"com_ui_last_used": "Sist brukt",
"com_ui_late_night": "God senkveld",
"com_ui_latest_footer": "Én KI for alle.",
"com_ui_latest_production_version": "Siste produksjonsversjon",
"com_ui_latest_version": "Siste versjon",
"com_ui_leave_blank_to_keep": "La stå tomt for å beholde eksisterende",
"com_ui_librechat_code_api_key": "Få din LibreChat Kodetolk API-nøkkel",
"com_ui_librechat_code_api_subtitle": "Sikker. Flerspråklig. Fil-input/output.",
"com_ui_librechat_code_api_title": "Kjør KI-kode",
"com_ui_light_theme_enabled": "Lyst tema aktivert",
"com_ui_link_copied": "Lenke kopiert",
"com_ui_link_refreshed": "Lenken er oppdatert",
"com_ui_loading": "Laster ...",
"com_ui_locked": "Låst",
"com_ui_logo": "{{0}}-logo",
@ -939,8 +1089,12 @@
"com_ui_manage": "Administrer",
"com_ui_marketplace": "Markedsplass",
"com_ui_marketplace_allow_use": "Tillat bruk av markedsplass",
"com_ui_max": "Maks",
"com_ui_max_favorites_reached": "Maksimalt antall festede gjenstander nådd ({{0}}). Fjern noen gjenstander for å legge til flere.",
"com_ui_max_file_size": "PNG, JPG eller JPEG (maks {{0}})",
"com_ui_max_tags": "Maksimalt antall er {{0}}. Bruker siste verdier.",
"com_ui_mcp_authenticated_success": "MCP-serveren '{{0}}' ble autentisert.",
"com_ui_mcp_click_to_defer": "Klikk for å utsette verktøyet vil være synlig via søk, men ikke lastet inn før det trengs",
"com_ui_mcp_configure_server": "Konfigurer {{0}}",
"com_ui_mcp_configure_server_description": "Konfigurer egendefinerte variabler for {{0}}",
"com_ui_mcp_enter_var": "Skriv inn verdi for {{0}}",

153
package-lock.json generated
View file

@ -59,7 +59,7 @@
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.1.43",
"@librechat/agents": "^3.1.50",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
@ -458,7 +458,7 @@
"lodash": "^4.17.23",
"lucide-react": "^0.394.0",
"match-sorter": "^8.1.0",
"mermaid": "^11.12.2",
"mermaid": "^11.12.3",
"micromark-extension-llm-math": "^3.1.0",
"qrcode.react": "^4.2.0",
"rc-input-number": "^7.4.2",
@ -6576,54 +6576,42 @@
"license": "MIT"
},
"node_modules/@chevrotain/cst-dts-gen": {
"version": "11.0.3",
"resolved": "https://registry.npmjs.org/@chevrotain/cst-dts-gen/-/cst-dts-gen-11.0.3.tgz",
"integrity": "sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==",
"version": "11.1.1",
"resolved": "https://registry.npmjs.org/@chevrotain/cst-dts-gen/-/cst-dts-gen-11.1.1.tgz",
"integrity": "sha512-fRHyv6/f542qQqiRGalrfJl/evD39mAvbJLCekPazhiextEatq1Jx1K/i9gSd5NNO0ds03ek0Cbo/4uVKmOBcw==",
"license": "Apache-2.0",
"dependencies": {
"@chevrotain/gast": "11.0.3",
"@chevrotain/types": "11.0.3",
"lodash-es": "4.17.21"
"@chevrotain/gast": "11.1.1",
"@chevrotain/types": "11.1.1",
"lodash-es": "4.17.23"
}
},
"node_modules/@chevrotain/cst-dts-gen/node_modules/lodash-es": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
"integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==",
"license": "MIT"
},
"node_modules/@chevrotain/gast": {
"version": "11.0.3",
"resolved": "https://registry.npmjs.org/@chevrotain/gast/-/gast-11.0.3.tgz",
"integrity": "sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==",
"version": "11.1.1",
"resolved": "https://registry.npmjs.org/@chevrotain/gast/-/gast-11.1.1.tgz",
"integrity": "sha512-Ko/5vPEYy1vn5CbCjjvnSO4U7GgxyGm+dfUZZJIWTlQFkXkyym0jFYrWEU10hyCjrA7rQtiHtBr0EaZqvHFZvg==",
"license": "Apache-2.0",
"dependencies": {
"@chevrotain/types": "11.0.3",
"lodash-es": "4.17.21"
"@chevrotain/types": "11.1.1",
"lodash-es": "4.17.23"
}
},
"node_modules/@chevrotain/gast/node_modules/lodash-es": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
"integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==",
"license": "MIT"
},
"node_modules/@chevrotain/regexp-to-ast": {
"version": "11.0.3",
"resolved": "https://registry.npmjs.org/@chevrotain/regexp-to-ast/-/regexp-to-ast-11.0.3.tgz",
"integrity": "sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==",
"version": "11.1.1",
"resolved": "https://registry.npmjs.org/@chevrotain/regexp-to-ast/-/regexp-to-ast-11.1.1.tgz",
"integrity": "sha512-ctRw1OKSXkOrR8VTvOxrQ5USEc4sNrfwXHa1NuTcR7wre4YbjPcKw+82C2uylg/TEwFRgwLmbhlln4qkmDyteg==",
"license": "Apache-2.0"
},
"node_modules/@chevrotain/types": {
"version": "11.0.3",
"resolved": "https://registry.npmjs.org/@chevrotain/types/-/types-11.0.3.tgz",
"integrity": "sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==",
"version": "11.1.1",
"resolved": "https://registry.npmjs.org/@chevrotain/types/-/types-11.1.1.tgz",
"integrity": "sha512-wb2ToxG8LkgPYnKe9FH8oGn3TMCBdnwiuNC5l5y+CtlaVRbCytU0kbVsk6CGrqTL4ZN4ksJa0TXOYbxpbthtqw==",
"license": "Apache-2.0"
},
"node_modules/@chevrotain/utils": {
"version": "11.0.3",
"resolved": "https://registry.npmjs.org/@chevrotain/utils/-/utils-11.0.3.tgz",
"integrity": "sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==",
"version": "11.1.1",
"resolved": "https://registry.npmjs.org/@chevrotain/utils/-/utils-11.1.1.tgz",
"integrity": "sha512-71eTYMzYXYSFPrbg/ZwftSaSDld7UYlS8OQa3lNnn9jzNtpFbaReRRyghzqS7rI3CDaorqpPJJcXGHK+FE1TVQ==",
"license": "Apache-2.0"
},
"node_modules/@codemirror/autocomplete": {
@ -11208,9 +11196,9 @@
}
},
"node_modules/@librechat/agents": {
"version": "3.1.43",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.1.43.tgz",
"integrity": "sha512-KtcBA7b/63RfYAQwVVt3TNAcZHAfmHe8wLNnSjF9NTEXI1EETBHCqh9yuGicjwdIMXVQhi64qmLPDqEFoFr7ww==",
"version": "3.1.50",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.1.50.tgz",
"integrity": "sha512-+gdfUJ7X3PJ20/c+8lETY68D6QpxFlCIlGUQBF4A8VKv+Po9J/TO5rWE+OmzmPByYpye7GrcxVCBLfRTvZKraw==",
"license": "MIT",
"dependencies": {
"@anthropic-ai/sdk": "^0.73.0",
@ -11289,12 +11277,12 @@
}
},
"node_modules/@mermaid-js/parser": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/@mermaid-js/parser/-/parser-0.6.3.tgz",
"integrity": "sha512-lnjOhe7zyHjc+If7yT4zoedx2vo4sHaTmtkl1+or8BRTnCtDmcTpAjpzDSfCZrshM5bCoz0GyidzadJAH1xobA==",
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/@mermaid-js/parser/-/parser-1.0.0.tgz",
"integrity": "sha512-vvK0Hi/VWndxoh03Mmz6wa1KDriSPjS2XMZL/1l19HFwygiObEEoEwSDxOqyLzzAI6J2PU3261JjTMTO7x+BPw==",
"license": "MIT",
"dependencies": {
"langium": "3.3.1"
"langium": "^4.0.0"
}
},
"node_modules/@microsoft/microsoft-graph-client": {
@ -22254,17 +22242,17 @@
}
},
"node_modules/chevrotain": {
"version": "11.0.3",
"resolved": "https://registry.npmjs.org/chevrotain/-/chevrotain-11.0.3.tgz",
"integrity": "sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==",
"version": "11.1.1",
"resolved": "https://registry.npmjs.org/chevrotain/-/chevrotain-11.1.1.tgz",
"integrity": "sha512-f0yv5CPKaFxfsPTBzX7vGuim4oIC1/gcS7LUGdBSwl2dU6+FON6LVUksdOo1qJjoUvXNn45urgh8C+0a24pACQ==",
"license": "Apache-2.0",
"dependencies": {
"@chevrotain/cst-dts-gen": "11.0.3",
"@chevrotain/gast": "11.0.3",
"@chevrotain/regexp-to-ast": "11.0.3",
"@chevrotain/types": "11.0.3",
"@chevrotain/utils": "11.0.3",
"lodash-es": "4.17.21"
"@chevrotain/cst-dts-gen": "11.1.1",
"@chevrotain/gast": "11.1.1",
"@chevrotain/regexp-to-ast": "11.1.1",
"@chevrotain/types": "11.1.1",
"@chevrotain/utils": "11.1.1",
"lodash-es": "4.17.23"
}
},
"node_modules/chevrotain-allstar": {
@ -22279,12 +22267,6 @@
"chevrotain": "^11.0.0"
}
},
"node_modules/chevrotain/node_modules/lodash-es": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
"integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==",
"license": "MIT"
},
"node_modules/chokidar": {
"version": "3.5.3",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
@ -25952,9 +25934,9 @@
"license": "BSD-3-Clause"
},
"node_modules/fast-xml-parser": {
"version": "5.3.4",
"resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-5.3.4.tgz",
"integrity": "sha512-EFd6afGmXlCx8H8WTZHhAoDaWaGyuIBoZJ2mknrNxug+aZKjkp0a0dlars9Izl+jF+7Gu1/5f/2h68cQpe0IiA==",
"version": "5.3.6",
"resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-5.3.6.tgz",
"integrity": "sha512-QNI3sAvSvaOiaMl8FYU4trnEzCwiRr8XMWgAHzlrWpTSj+QaCSvOf1h82OEP1s4hiAXhnbXSyFWCf4ldZzZRVA==",
"funding": [
{
"type": "github",
@ -30141,13 +30123,14 @@
}
},
"node_modules/katex": {
"version": "0.16.21",
"resolved": "https://registry.npmjs.org/katex/-/katex-0.16.21.tgz",
"integrity": "sha512-XvqR7FgOHtWupfMiigNzmh+MgUVmDGU2kXZm899ZkPfcuoPuFxyHmXsgATDpFZDAXCI8tvinaVcDo8PIIJSo4A==",
"version": "0.16.28",
"resolved": "https://registry.npmjs.org/katex/-/katex-0.16.28.tgz",
"integrity": "sha512-YHzO7721WbmAL6Ov1uzN/l5mY5WWWhJBSW+jq4tkfZfsxmo1hu6frS0EOswvjBUnWE6NtjEs48SFn5CQESRLZg==",
"funding": [
"https://opencollective.com/katex",
"https://github.com/sponsors/katex"
],
"license": "MIT",
"dependencies": {
"commander": "^8.3.0"
},
@ -30207,32 +30190,32 @@
"integrity": "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A=="
},
"node_modules/langium": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/langium/-/langium-3.3.1.tgz",
"integrity": "sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==",
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/langium/-/langium-4.2.1.tgz",
"integrity": "sha512-zu9QWmjpzJcomzdJQAHgDVhLGq5bLosVak1KVa40NzQHXfqr4eAHupvnPOVXEoLkg6Ocefvf/93d//SB7du4YQ==",
"license": "MIT",
"dependencies": {
"chevrotain": "~11.0.3",
"chevrotain-allstar": "~0.3.0",
"chevrotain": "~11.1.1",
"chevrotain-allstar": "~0.3.1",
"vscode-languageserver": "~9.0.1",
"vscode-languageserver-textdocument": "~1.0.11",
"vscode-uri": "~3.0.8"
"vscode-uri": "~3.1.0"
},
"engines": {
"node": ">=16.0.0"
"node": ">=20.10.0",
"npm": ">=10.2.3"
}
},
"node_modules/langsmith": {
"version": "0.3.67",
"resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.3.67.tgz",
"integrity": "sha512-l4y3RmJ9yWF5a29fLg3eWZQxn6Q6dxTOgLGgQHzPGZHF3NUynn+A+airYIe/Yt4rwjGbuVrABAPsXBkVu/Hi7g==",
"version": "0.4.12",
"resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.4.12.tgz",
"integrity": "sha512-YWt0jcGvKqjUgIvd78rd4QcdMss0lUkeUaqp0UpVRq7H2yNDx8H5jOUO/laWUmaPtWGgcip0qturykXe1g9Gqw==",
"license": "MIT",
"dependencies": {
"@types/uuid": "^10.0.0",
"chalk": "^4.1.2",
"console-table-printer": "^2.12.1",
"p-queue": "^6.6.2",
"p-retry": "4",
"semver": "^7.6.3",
"uuid": "^10.0.0"
},
@ -30722,9 +30705,9 @@
"license": "MIT"
},
"node_modules/lodash-es": {
"version": "4.17.22",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.22.tgz",
"integrity": "sha512-XEawp1t0gxSi9x01glktRZ5HDy0HXqrM0x5pXQM98EaI0NxO6jVM7omDOxsuEo5UIASAnm2bRp1Jt/e0a2XU8Q==",
"version": "4.17.23",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.23.tgz",
"integrity": "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg==",
"license": "MIT"
},
"node_modules/lodash.camelcase": {
@ -31907,14 +31890,14 @@
}
},
"node_modules/mermaid": {
"version": "11.12.2",
"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.12.2.tgz",
"integrity": "sha512-n34QPDPEKmaeCG4WDMGy0OT6PSyxKCfy2pJgShP+Qow2KLrvWjclwbc3yXfSIf4BanqWEhQEpngWwNp/XhZt6w==",
"version": "11.12.3",
"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.12.3.tgz",
"integrity": "sha512-wN5ZSgJQIC+CHJut9xaKWsknLxaFBwCPwPkGTSUYrTiHORWvpT8RxGk849HPnpUAQ+/9BPRqYb80jTpearrHzQ==",
"license": "MIT",
"dependencies": {
"@braintree/sanitize-url": "^7.1.1",
"@iconify/utils": "^3.0.1",
"@mermaid-js/parser": "^0.6.3",
"@mermaid-js/parser": "^1.0.0",
"@types/d3": "^7.4.3",
"cytoscape": "^3.29.3",
"cytoscape-cose-bilkent": "^4.1.0",
@ -31926,7 +31909,7 @@
"dompurify": "^3.2.5",
"katex": "^0.16.22",
"khroma": "^2.1.0",
"lodash-es": "^4.17.21",
"lodash-es": "^4.17.23",
"marked": "^16.2.1",
"roughjs": "^4.6.6",
"stylis": "^4.3.6",
@ -41121,9 +41104,9 @@
"license": "MIT"
},
"node_modules/vscode-uri": {
"version": "3.0.8",
"resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.0.8.tgz",
"integrity": "sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==",
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.1.0.tgz",
"integrity": "sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==",
"license": "MIT"
},
"node_modules/w3c-keyname": {
@ -42205,7 +42188,7 @@
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.1.43",
"@librechat/agents": "^3.1.50",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.26.0",
"@smithy/node-http-handler": "^4.4.5",

View file

@ -138,14 +138,14 @@
"@librechat/agents": {
"@langchain/anthropic": {
"@anthropic-ai/sdk": "0.73.0",
"fast-xml-parser": "5.3.4"
"fast-xml-parser": "5.3.6"
},
"@anthropic-ai/sdk": "0.73.0",
"fast-xml-parser": "5.3.4"
"fast-xml-parser": "5.3.6"
},
"axios": "1.12.1",
"elliptic": "^6.6.1",
"fast-xml-parser": "5.3.4",
"fast-xml-parser": "5.3.6",
"form-data": "^4.0.4",
"tslib": "^2.8.1",
"mdast-util-gfm-autolink-literal": "2.0.0",
@ -163,7 +163,8 @@
"micromark-extension-math": {
"katex": "^0.16.21"
}
}
},
"langsmith": "0.4.12"
},
"nodemonConfig": {
"ignore": [

View file

@ -10,6 +10,17 @@ export default {
],
coverageReporters: ['text', 'cobertura'],
testResultsProcessor: 'jest-junit',
transform: {
'\\.[jt]sx?$': [
'babel-jest',
{
presets: [
['@babel/preset-env', { targets: { node: 'current' } }],
'@babel/preset-typescript',
],
},
],
},
moduleNameMapper: {
'^@src/(.*)$': '<rootDir>/src/$1',
'~/(.*)': '<rootDir>/src/$1',

View file

@ -87,7 +87,7 @@
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.1.43",
"@librechat/agents": "^3.1.50",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.26.0",
"@smithy/node-http-handler": "^4.4.5",

View file

@ -0,0 +1,193 @@
#!/usr/bin/env bash
#
# Live integration tests for the Responses API endpoint.
# Sends curl requests to a running LibreChat server to verify
# multi-turn conversations with output_text / refusal blocks work.
#
# Usage:
# ./responses-api.live.test.sh <BASE_URL> <API_KEY> <AGENT_ID>
#
# Example:
# ./responses-api.live.test.sh http://localhost:3080 sk-abc123 agent_xyz
# Fail fast: exit on any error, on use of an unset variable, and on
# failures anywhere in a pipeline.
set -euo pipefail
# All three positional arguments are required; ${N:?...} aborts with
# the usage message when one is missing or empty.
BASE_URL="${1:?Usage: $0 <BASE_URL> <API_KEY> <AGENT_ID>}"
API_KEY="${2:?Usage: $0 <BASE_URL> <API_KEY> <AGENT_ID>}"
AGENT_ID="${3:?Usage: $0 <BASE_URL> <API_KEY> <AGENT_ID>}"
ENDPOINT="${BASE_URL}/v1/responses"
# Running pass/fail counters, reported by the summary at the bottom.
PASS=0
FAIL=0
# ── Helpers ───────────────────────────────────────────────────────────
# post_json <label> <json-body> [stream]
#
# POSTs <json-body> to ${ENDPOINT} and records pass/fail by HTTP status.
# All human-readable progress output goes to STDERR so that:
#   * callers using command substitution ($(post_json ...)) capture ONLY
#     the raw response body on stdout (previously the test banners were
#     captured into the "body" as well), and
#   * callers redirecting stdout to /dev/null still see progress output
#     (previously those tests printed nothing at all).
#
# NOTE(review): when invoked via $(...), the PASS/FAIL increments happen
# in a subshell and are lost to the parent shell, so the final summary
# undercounts command-substitution calls (Tests 1 and 5). Tracking
# results in a temp file would fix this — confirm whether exact counts
# matter before changing the summary contract.
post_json() {
  local label="$1"
  local body="$2"
  # The third argument is accepted for interface compatibility; the
  # original streaming and non-streaming branches issued the identical
  # curl command, so the duplication is collapsed into one request.
  local stream="${3:-false}"

  {
    echo "──────────────────────────────────────────────"
    echo "TEST: ${label}"
    echo "──────────────────────────────────────────────"
  } >&2

  # -w appends the HTTP status code on its own line so it can be split
  # off the body afterwards.
  local response
  response=$(curl -s -w "\n%{http_code}" \
    -X POST "${ENDPOINT}" \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer ${API_KEY}" \
    -d "${body}" \
    --max-time 60)

  local http_code
  http_code=$(echo "$response" | tail -1)
  local body_out
  body_out=$(echo "$response" | sed '$d')

  if [ "$http_code" = "200" ]; then
    echo "  ✓ HTTP 200" >&2
    PASS=$((PASS + 1))
  else
    {
      echo "  ✗ HTTP ${http_code}"
      echo "  Response: ${body_out}"
    } >&2
    FAIL=$((FAIL + 1))
  fi

  # Truncated preview for human inspection (stderr only).
  echo "  Response (first 300 chars): ${body_out:0:300}" >&2
  echo "" >&2

  # The raw body is the function's only stdout, for chaining.
  echo "$body_out"
}
# extract_response_id <json>
#
# Prints the value of the first "id" field found in the JSON payload,
# or nothing when no such field exists. Tolerates optional whitespace
# around the colon (e.g. pretty-printed `"id" : "resp_123"`), which the
# original compact-only pattern `"id":"..."` missed. cut -d'"' -f4
# still selects the value because quote positions are unchanged by the
# whitespace.
extract_response_id() {
  echo "$1" | grep -o '"id"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | cut -d'"' -f4
}
# ── Test 1: Basic single-turn request ─────────────────────────────────
# NOTE(review): $(...) runs post_json in a subshell, so PASS/FAIL
# increments made for this test do not propagate to the parent shell;
# the summary undercounts the captured tests (1 and 5).
RESP1=$(post_json "Basic single-turn request" "$(cat <<EOF
{
"model": "${AGENT_ID}",
"input": "Say hello in exactly 5 words.",
"stream": false
}
EOF
)")
# ── Test 2: Multi-turn with output_text assistant blocks ──────────────
# Regression check: assistant-history turns arrive as output_text blocks
# (with annotations/logprobs), which the server must accept as input.
post_json "Multi-turn with output_text blocks (the original bug)" "$(cat <<EOF
{
"model": "${AGENT_ID}",
"input": [
{
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": "What is 2+2?"}]
},
{
"type": "message",
"role": "assistant",
"content": [{"type": "output_text", "text": "2+2 equals 4.", "annotations": [], "logprobs": []}]
},
{
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": "And what is 3+3?"}]
}
],
"stream": false
}
EOF
)" > /dev/null
# ── Test 3: Multi-turn with refusal blocks ────────────────────────────
post_json "Multi-turn with refusal blocks" "$(cat <<EOF
{
"model": "${AGENT_ID}",
"input": [
{
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": "Do something bad"}]
},
{
"type": "message",
"role": "assistant",
"content": [{"type": "refusal", "refusal": "I cannot help with that."}]
},
{
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": "OK, just say hello then."}]
}
],
"stream": false
}
EOF
)" > /dev/null
# ── Test 4: Streaming request ─────────────────────────────────────────
# Streaming is requested both in the JSON body and via post_json's
# third argument.
post_json "Streaming single-turn request" "$(cat <<EOF
{
"model": "${AGENT_ID}",
"input": "Say hi in one word.",
"stream": true
}
EOF
)" "true" > /dev/null
# ── Test 5: Back-and-forth using previous_response_id ─────────────────
RESP5=$(post_json "First turn for previous_response_id chain" "$(cat <<EOF
{
"model": "${AGENT_ID}",
"input": "Remember this number: 42. Just confirm you got it.",
"stream": false
}
EOF
)")
# Chain a follow-up turn onto the response ID extracted from Test 5.
RESP5_ID=$(extract_response_id "$RESP5")
if [ -n "$RESP5_ID" ]; then
echo " Extracted response ID: ${RESP5_ID}"
post_json "Follow-up using previous_response_id" "$(cat <<EOF
{
"model": "${AGENT_ID}",
"input": "What number did I ask you to remember?",
"previous_response_id": "${RESP5_ID}",
"stream": false
}
EOF
)" > /dev/null
else
echo " ⚠ Could not extract response ID, skipping follow-up test"
FAIL=$((FAIL + 1))
fi
# ── Summary ───────────────────────────────────────────────────────────
echo "══════════════════════════════════════════════"
echo "RESULTS: ${PASS} passed, ${FAIL} failed"
echo "══════════════════════════════════════════════"
# Non-zero exit so CI marks the run as failed when any test failed.
if [ "$FAIL" -gt 0 ]; then
exit 1
fi

View file

@ -0,0 +1,333 @@
import { convertInputToMessages } from '../service';
import type { InputItem } from '../types';
/**
 * Unit tests for convertInputToMessages, which normalizes Responses API
 * `input` payloads (a plain string or an InputItem[] array) into the
 * internal chat-message format.
 *
 * Covered here: the string shorthand, role mapping (developer → system,
 * unknown → user), every supported content-block conversion
 * (input_text / output_text / refusal / input_image / input_file),
 * filtering of null and unknown blocks, function_call /
 * function_call_output round-trips, item_reference skipping, and the
 * original multi-turn output_text regression.
 */
describe('convertInputToMessages', () => {
  // ── String input shorthand ─────────────────────────────────────────
  it('converts a string input to a single user message', () => {
    const result = convertInputToMessages('Hello');
    expect(result).toEqual([{ role: 'user', content: 'Hello' }]);
  });
  // ── Empty input array ──────────────────────────────────────────────
  it('returns an empty array for empty input', () => {
    const result = convertInputToMessages([]);
    expect(result).toEqual([]);
  });
  // ── Role mapping ───────────────────────────────────────────────────
  it('maps developer role to system', () => {
    const input: InputItem[] = [
      { type: 'message', role: 'developer', content: 'You are helpful.' },
    ];
    expect(convertInputToMessages(input)).toEqual([
      { role: 'system', content: 'You are helpful.' },
    ]);
  });
  it('maps system role to system', () => {
    const input: InputItem[] = [{ type: 'message', role: 'system', content: 'System prompt.' }];
    expect(convertInputToMessages(input)).toEqual([{ role: 'system', content: 'System prompt.' }]);
  });
  it('maps user role to user', () => {
    const input: InputItem[] = [{ type: 'message', role: 'user', content: 'Hi' }];
    expect(convertInputToMessages(input)).toEqual([{ role: 'user', content: 'Hi' }]);
  });
  it('maps assistant role to assistant', () => {
    const input: InputItem[] = [{ type: 'message', role: 'assistant', content: 'Hello!' }];
    expect(convertInputToMessages(input)).toEqual([{ role: 'assistant', content: 'Hello!' }]);
  });
  it('defaults unknown roles to user', () => {
    // Cast through unknown: 'unknown_role' is deliberately outside the
    // InputItem role union to exercise the fallback path.
    const input = [
      { type: 'message', role: 'unknown_role', content: 'test' },
    ] as unknown as InputItem[];
    expect(convertInputToMessages(input)[0].role).toBe('user');
  });
  // ── input_text content blocks ──────────────────────────────────────
  it('converts input_text blocks to text blocks', () => {
    const input: InputItem[] = [
      {
        type: 'message',
        role: 'user',
        content: [{ type: 'input_text', text: 'Hello world' }],
      },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([{ role: 'user', content: [{ type: 'text', text: 'Hello world' }] }]);
  });
  // ── output_text content blocks (the original bug) ──────────────────
  it('converts output_text blocks to text blocks', () => {
    const input: InputItem[] = [
      {
        type: 'message',
        role: 'assistant',
        content: [{ type: 'output_text', text: 'I can help!', annotations: [], logprobs: [] }],
      },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([
      { role: 'assistant', content: [{ type: 'text', text: 'I can help!' }] },
    ]);
  });
  // ── refusal content blocks ─────────────────────────────────────────
  it('converts refusal blocks to text blocks', () => {
    const input: InputItem[] = [
      {
        type: 'message',
        role: 'assistant',
        content: [{ type: 'refusal', refusal: 'I cannot do that.' }],
      },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([
      { role: 'assistant', content: [{ type: 'text', text: 'I cannot do that.' }] },
    ]);
  });
  // ── input_image content blocks ─────────────────────────────────────
  it('converts input_image blocks to image_url blocks', () => {
    // The flat `image_url`/`detail` pair is re-nested under an
    // image_url object in the output.
    const input: InputItem[] = [
      {
        type: 'message',
        role: 'user',
        content: [
          { type: 'input_image', image_url: 'https://example.com/img.png', detail: 'high' },
        ],
      },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([
      {
        role: 'user',
        content: [
          {
            type: 'image_url',
            image_url: { url: 'https://example.com/img.png', detail: 'high' },
          },
        ],
      },
    ]);
  });
  // ── input_file content blocks ──────────────────────────────────────
  it('converts input_file blocks to text placeholders', () => {
    const input: InputItem[] = [
      {
        type: 'message',
        role: 'user',
        content: [{ type: 'input_file', filename: 'report.pdf', file_id: 'f_123' }],
      },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([
      { role: 'user', content: [{ type: 'text', text: '[File: report.pdf]' }] },
    ]);
  });
  it('uses "unknown" for input_file without filename', () => {
    const input: InputItem[] = [
      {
        type: 'message',
        role: 'user',
        content: [{ type: 'input_file', file_id: 'f_123' }],
      },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([
      { role: 'user', content: [{ type: 'text', text: '[File: unknown]' }] },
    ]);
  });
  // ── Null / undefined filtering ─────────────────────────────────────
  it('filters out null elements in content arrays', () => {
    const input = [
      {
        type: 'message',
        role: 'user',
        content: [null, { type: 'input_text', text: 'valid' }, undefined],
      },
    ] as unknown as InputItem[];
    const result = convertInputToMessages(input);
    expect(result).toEqual([{ role: 'user', content: [{ type: 'text', text: 'valid' }] }]);
  });
  // ── Missing text field defaults to empty string ────────────────────
  it('defaults to empty string when text field is missing on input_text', () => {
    const input = [
      {
        type: 'message',
        role: 'user',
        content: [{ type: 'input_text' }],
      },
    ] as unknown as InputItem[];
    const result = convertInputToMessages(input);
    expect(result).toEqual([{ role: 'user', content: [{ type: 'text', text: '' }] }]);
  });
  it('defaults to empty string when text field is missing on output_text', () => {
    const input = [
      {
        type: 'message',
        role: 'assistant',
        content: [{ type: 'output_text' }],
      },
    ] as unknown as InputItem[];
    const result = convertInputToMessages(input);
    expect(result).toEqual([{ role: 'assistant', content: [{ type: 'text', text: '' }] }]);
  });
  it('defaults to empty string when refusal field is missing on refusal block', () => {
    const input = [
      {
        type: 'message',
        role: 'assistant',
        content: [{ type: 'refusal' }],
      },
    ] as unknown as InputItem[];
    const result = convertInputToMessages(input);
    expect(result).toEqual([{ role: 'assistant', content: [{ type: 'text', text: '' }] }]);
  });
  // ── Unknown block types are filtered out ───────────────────────────
  it('filters out unknown content block types', () => {
    const input = [
      {
        type: 'message',
        role: 'user',
        content: [
          { type: 'input_text', text: 'keep me' },
          { type: 'some_future_type', data: 'ignore' },
        ],
      },
    ] as unknown as InputItem[];
    const result = convertInputToMessages(input);
    expect(result).toEqual([{ role: 'user', content: [{ type: 'text', text: 'keep me' }] }]);
  });
  // ── Mixed valid/invalid content in same array ──────────────────────
  it('handles mixed valid and invalid content blocks', () => {
    // Valid blocks survive in order; null and unknown types are dropped.
    const input = [
      {
        type: 'message',
        role: 'assistant',
        content: [
          { type: 'output_text', text: 'Hello', annotations: [], logprobs: [] },
          null,
          { type: 'unknown_type' },
          { type: 'refusal', refusal: 'No can do' },
        ],
      },
    ] as unknown as InputItem[];
    const result = convertInputToMessages(input);
    expect(result).toEqual([
      {
        role: 'assistant',
        content: [
          { type: 'text', text: 'Hello' },
          { type: 'text', text: 'No can do' },
        ],
      },
    ]);
  });
  // ── Non-array, non-string content defaults to empty string ─────────
  it('defaults to empty string for non-array non-string content', () => {
    const input = [{ type: 'message', role: 'user', content: 42 }] as unknown as InputItem[];
    const result = convertInputToMessages(input);
    expect(result).toEqual([{ role: 'user', content: '' }]);
  });
  // ── Function call items ────────────────────────────────────────────
  it('converts function_call items to assistant messages with tool_calls', () => {
    // Note: the tool_calls id comes from call_id, not the item's id.
    const input: InputItem[] = [
      {
        type: 'function_call',
        id: 'fc_1',
        call_id: 'call_abc',
        name: 'get_weather',
        arguments: '{"city":"NYC"}',
      },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([
      {
        role: 'assistant',
        content: '',
        tool_calls: [
          {
            id: 'call_abc',
            type: 'function',
            function: { name: 'get_weather', arguments: '{"city":"NYC"}' },
          },
        ],
      },
    ]);
  });
  // ── Function call output items ─────────────────────────────────────
  it('converts function_call_output items to tool messages', () => {
    const input: InputItem[] = [
      {
        type: 'function_call_output',
        call_id: 'call_abc',
        output: '{"temp":72}',
      },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([
      {
        role: 'tool',
        content: '{"temp":72}',
        tool_call_id: 'call_abc',
      },
    ]);
  });
  // ── Item references are skipped ────────────────────────────────────
  it('skips item_reference items', () => {
    const input: InputItem[] = [
      { type: 'item_reference', id: 'ref_123' },
      { type: 'message', role: 'user', content: 'Hello' },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([{ role: 'user', content: 'Hello' }]);
  });
  // ── Multi-turn conversation (the real-world scenario) ──────────────
  it('handles a full multi-turn conversation with output_text blocks', () => {
    const input: InputItem[] = [
      {
        type: 'message',
        role: 'developer',
        content: [{ type: 'input_text', text: 'You are a helpful assistant.' }],
      },
      {
        type: 'message',
        role: 'user',
        content: [{ type: 'input_text', text: 'What is 2+2?' }],
      },
      {
        type: 'message',
        role: 'assistant',
        content: [{ type: 'output_text', text: '2+2 is 4.', annotations: [], logprobs: [] }],
      },
      {
        type: 'message',
        role: 'user',
        content: [{ type: 'input_text', text: 'And 3+3?' }],
      },
    ];
    const result = convertInputToMessages(input);
    expect(result).toEqual([
      { role: 'system', content: [{ type: 'text', text: 'You are a helpful assistant.' }] },
      { role: 'user', content: [{ type: 'text', text: 'What is 2+2?' }] },
      { role: 'assistant', content: [{ type: 'text', text: '2+2 is 4.' }] },
      { role: 'user', content: [{ type: 'text', text: 'And 3+3?' }] },
    ]);
  });
});

View file

@ -6,11 +6,12 @@
*/
import type { Response as ServerResponse } from 'express';
import type {
ResponseRequest,
RequestValidationResult,
InputItem,
InputContent,
ResponseRequest,
ResponseContext,
InputContent,
ModelContent,
InputItem,
Response,
} from './types';
import {
@ -134,7 +135,7 @@ export function convertInputToMessages(input: string | InputItem[]): InternalMes
const messageItem = item as {
type: 'message';
role: string;
content: string | InputContent[];
content: string | (InputContent | ModelContent)[];
};
let content: InternalMessage['content'];
@ -142,21 +143,31 @@ export function convertInputToMessages(input: string | InputItem[]): InternalMes
if (typeof messageItem.content === 'string') {
content = messageItem.content;
} else if (Array.isArray(messageItem.content)) {
content = messageItem.content.map((part) => {
if (part.type === 'input_text') {
return { type: 'text', text: part.text };
}
if (part.type === 'input_image') {
return {
type: 'image_url',
image_url: {
url: (part as { image_url?: string }).image_url,
detail: (part as { detail?: string }).detail,
},
};
}
return { type: part.type };
});
content = messageItem.content
.filter((part): part is InputContent | ModelContent => part != null)
.map((part) => {
if (part.type === 'input_text' || part.type === 'output_text') {
return { type: 'text', text: (part as { text?: string }).text ?? '' };
}
if (part.type === 'refusal') {
return { type: 'text', text: (part as { refusal?: string }).refusal ?? '' };
}
if (part.type === 'input_image') {
return {
type: 'image_url',
image_url: {
url: (part as { image_url?: string }).image_url,
detail: (part as { detail?: string }).detail,
},
};
}
if (part.type === 'input_file') {
const filePart = part as { filename?: string };
return { type: 'text', text: `[File: ${filePart.filename ?? 'unknown'}]` };
}
return null;
})
.filter((part): part is NonNullable<typeof part> => part != null);
} else {
content = '';
}

View file

@ -120,7 +120,9 @@ export const limiterCache = (prefix: string): RedisStore | undefined => {
if (!cacheConfig.USE_REDIS) {
return undefined;
}
// TODO: The prefix is not actually applied. Also needs to account for global prefix.
// Note: The `prefix` is applied by RedisStore internally to its key operations.
// The global REDIS_KEY_PREFIX is applied by ioredisClient's keyPrefix setting.
// Combined key format: `{REDIS_KEY_PREFIX}::{prefix}{identifier}`
prefix = prefix.endsWith(':') ? prefix : `${prefix}:`;
try {

View file

@ -29,7 +29,9 @@ if (cacheConfig.USE_REDIS) {
);
return null;
}
const delay = Math.min(times * 50, cacheConfig.REDIS_RETRY_MAX_DELAY);
const base = Math.min(Math.pow(2, times) * 50, cacheConfig.REDIS_RETRY_MAX_DELAY);
const jitter = Math.floor(Math.random() * Math.min(base, 1000));
const delay = Math.min(base + jitter, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`ioredis reconnecting... attempt ${times}, delay ${delay}ms`);
return delay;
},
@ -71,7 +73,9 @@ if (cacheConfig.USE_REDIS) {
);
return null;
}
const delay = Math.min(times * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
const base = Math.min(Math.pow(2, times) * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
const jitter = Math.floor(Math.random() * Math.min(base, 1000));
const delay = Math.min(base + jitter, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`ioredis cluster reconnecting... attempt ${times}, delay ${delay}ms`);
return delay;
},
@ -149,7 +153,9 @@ if (cacheConfig.USE_REDIS) {
);
return new Error('Max reconnection attempts reached');
}
const delay = Math.min(retries * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
const base = Math.min(Math.pow(2, retries) * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
const jitter = Math.floor(Math.random() * Math.min(base, 1000));
const delay = Math.min(base + jitter, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`@keyv/redis reconnecting... attempt ${retries}, delay ${delay}ms`);
return delay;
},

View file

@ -65,7 +65,7 @@ function getClaudeHeaders(
/**
* Configures reasoning-related options for Claude models.
* Models supporting adaptive thinking (Opus 4.6+, Sonnet 5+) use effort control instead of manual budget_tokens.
* Models supporting adaptive thinking (Opus 4.6+, Sonnet 4.6+) use effort control instead of manual budget_tokens.
*/
function configureReasoning(
anthropicInput: AnthropicClientOptions & { max_tokens?: number },

View file

@ -121,6 +121,39 @@ describe('getLLMConfig', () => {
});
});
it('should add "context-1m" beta header for claude-sonnet-4-6 model', () => {
const modelOptions = {
model: 'claude-sonnet-4-6',
promptCache: true,
};
const result = getLLMConfig('test-key', { modelOptions });
const clientOptions = result.llmConfig.clientOptions;
expect(clientOptions?.defaultHeaders).toBeDefined();
expect(clientOptions?.defaultHeaders).toHaveProperty('anthropic-beta');
const defaultHeaders = clientOptions?.defaultHeaders as Record<string, string>;
expect(defaultHeaders['anthropic-beta']).toBe('context-1m-2025-08-07');
expect(result.llmConfig.promptCache).toBe(true);
});
it('should add "context-1m" beta header for claude-sonnet-4-6 model formats', () => {
const modelVariations = [
'claude-sonnet-4-6',
'claude-sonnet-4-6-20260101',
'anthropic/claude-sonnet-4-6',
];
modelVariations.forEach((model) => {
const modelOptions = { model, promptCache: true };
const result = getLLMConfig('test-key', { modelOptions });
const clientOptions = result.llmConfig.clientOptions;
expect(clientOptions?.defaultHeaders).toBeDefined();
expect(clientOptions?.defaultHeaders).toHaveProperty('anthropic-beta');
const defaultHeaders = clientOptions?.defaultHeaders as Record<string, string>;
expect(defaultHeaders['anthropic-beta']).toBe('context-1m-2025-08-07');
expect(result.llmConfig.promptCache).toBe(true);
});
});
it('should pass promptCache boolean for claude-opus-4-5 model (no beta header needed)', () => {
const modelOptions = {
model: 'claude-opus-4-5',
@ -963,6 +996,51 @@ describe('getLLMConfig', () => {
});
});
it('should use adaptive thinking for Sonnet 4.6 instead of enabled + budget_tokens', () => {
const result = getLLMConfig('test-key', {
modelOptions: {
model: 'claude-sonnet-4-6',
thinking: true,
thinkingBudget: 10000,
},
});
expect((result.llmConfig.thinking as unknown as { type: string }).type).toBe('adaptive');
expect(result.llmConfig.thinking).not.toHaveProperty('budget_tokens');
expect(result.llmConfig.maxTokens).toBe(64000);
});
it('should set effort via output_config for Sonnet 4.6', () => {
const result = getLLMConfig('test-key', {
modelOptions: {
model: 'claude-sonnet-4-6',
thinking: true,
effort: AnthropicEffort.high,
},
});
expect((result.llmConfig.thinking as unknown as { type: string }).type).toBe('adaptive');
expect(result.llmConfig.invocationKwargs).toHaveProperty('output_config');
expect(result.llmConfig.invocationKwargs?.output_config).toEqual({
effort: AnthropicEffort.high,
});
});
it('should exclude topP/topK for Sonnet 4.6 with adaptive thinking', () => {
const result = getLLMConfig('test-key', {
modelOptions: {
model: 'claude-sonnet-4-6',
thinking: true,
topP: 0.9,
topK: 40,
},
});
expect((result.llmConfig.thinking as unknown as { type: string }).type).toBe('adaptive');
expect(result.llmConfig).not.toHaveProperty('topP');
expect(result.llmConfig).not.toHaveProperty('topK');
});
it('should NOT set adaptive thinking or effort for non-adaptive models', () => {
const nonAdaptiveModels = [
'claude-opus-4-5',

View file

@ -4,6 +4,8 @@ import { MCPConnection } from './connection';
import { MCPServersRegistry } from '~/mcp/registry/MCPServersRegistry';
import type * as t from './types';
const CONNECT_CONCURRENCY = 3;
/**
* Manages MCP connections with lazy loading and reconnection.
* Maintains a pool of connections and handles connection lifecycle management.
@ -84,9 +86,17 @@ export class ConnectionsRepository {
/** Gets or creates connections for multiple servers concurrently */
async getMany(serverNames: string[]): Promise<Map<string, MCPConnection>> {
const connectionPromises = serverNames.map(async (name) => [name, await this.get(name)]);
const connections = await Promise.all(connectionPromises);
return new Map((connections as [string, MCPConnection][]).filter((v) => !!v[1]));
const results: [string, MCPConnection | null][] = [];
for (let i = 0; i < serverNames.length; i += CONNECT_CONCURRENCY) {
const batch = serverNames.slice(i, i + CONNECT_CONCURRENCY);
const batchResults = await Promise.all(
batch.map(
async (name): Promise<[string, MCPConnection | null]> => [name, await this.get(name)],
),
);
results.push(...batchResults);
}
return new Map(results.filter((v): v is [string, MCPConnection] => v[1] != null));
}
/** Returns all currently loaded connections without creating new ones */

View file

@ -559,7 +559,11 @@ export class MCPConnection extends EventEmitter {
}
this.isReconnecting = true;
const backoffDelay = (attempt: number) => Math.min(1000 * Math.pow(2, attempt), 30000);
const backoffDelay = (attempt: number) => {
const base = Math.min(1000 * Math.pow(2, attempt), 30000);
const jitter = Math.floor(Math.random() * 1000); // up to 1s of random jitter
return base + jitter;
};
try {
while (

View file

@ -336,6 +336,69 @@ describe('OAuthReconnectionManager', () => {
});
});
// Verifies that reconnectServers() spreads reconnection attempts over time
// (first server immediately, the rest via setTimeout) instead of firing them
// all at once. Uses fake timers so the stagger delays can be advanced manually.
describe('reconnection staggering', () => {
  let reconnectionTracker: OAuthReconnectionTracker;

  beforeEach(async () => {
    // Fake timers let the test control the setTimeout-based stagger schedule.
    jest.useFakeTimers();
    reconnectionTracker = new OAuthReconnectionTracker();
    reconnectionManager = await OAuthReconnectionManager.createInstance(
      flowManager,
      tokenMethods,
      reconnectionTracker,
    );
  });

  afterEach(() => {
    jest.useRealTimers();
  });

  it('should stagger reconnection attempts for multiple servers', async () => {
    const userId = 'user-123';
    const oauthServers = new Set(['server1', 'server2', 'server3']);
    (mockRegistryInstance.getOAuthServers as jest.Mock).mockResolvedValue(oauthServers);
    // All servers have valid tokens and are not connected
    tokenMethods.findToken.mockImplementation(async ({ identifier }) => {
      return {
        userId,
        identifier,
        expiresAt: new Date(Date.now() + 3600000),
      } as unknown as MCPOAuthTokens;
    });
    const mockNewConnection = {
      isConnected: jest.fn().mockResolvedValue(true),
      disconnect: jest.fn(),
    };
    mockMCPManager.getUserConnection.mockResolvedValue(
      mockNewConnection as unknown as MCPConnection,
    );
    (mockRegistryInstance.getServerConfig as jest.Mock).mockResolvedValue(
      {} as unknown as MCPOptions,
    );
    await reconnectionManager.reconnectServers(userId);
    // Only the first server should have been attempted immediately
    expect(mockMCPManager.getUserConnection).toHaveBeenCalledTimes(1);
    expect(mockMCPManager.getUserConnection).toHaveBeenCalledWith(
      expect.objectContaining({ serverName: 'server1' }),
    );
    // After advancing all timers, all servers should have been attempted
    await jest.runAllTimersAsync();
    expect(mockMCPManager.getUserConnection).toHaveBeenCalledTimes(3);
    expect(mockMCPManager.getUserConnection).toHaveBeenCalledWith(
      expect.objectContaining({ serverName: 'server2' }),
    );
    expect(mockMCPManager.getUserConnection).toHaveBeenCalledWith(
      expect.objectContaining({ serverName: 'server3' }),
    );
  });
});
describe('reconnection timeout behavior', () => {
let reconnectionTracker: OAuthReconnectionTracker;

View file

@ -7,6 +7,7 @@ import { MCPManager } from '~/mcp/MCPManager';
import { MCPServersRegistry } from '~/mcp/registry/MCPServersRegistry';
const DEFAULT_CONNECTION_TIMEOUT_MS = 10_000; // ms
const RECONNECT_STAGGER_MS = 500; // ms between each server reconnection
export class OAuthReconnectionManager {
private static instance: OAuthReconnectionManager | null = null;
@ -84,9 +85,14 @@ export class OAuthReconnectionManager {
this.reconnectionsTracker.setActive(userId, serverName);
}
// 3. attempt to reconnect the servers
for (const serverName of serversToReconnect) {
void this.tryReconnect(userId, serverName);
// 3. attempt to reconnect the servers with staggered delays to avoid connection storms
for (let i = 0; i < serversToReconnect.length; i++) {
const serverName = serversToReconnect[i];
if (i === 0) {
void this.tryReconnect(userId, serverName);
} else {
setTimeout(() => void this.tryReconnect(userId, serverName), i * RECONNECT_STAGGER_MS);
}
}
}

View file

@ -0,0 +1,258 @@
import type { Redis, Cluster } from 'ioredis';
/**
* Integration tests for concurrency middleware atomic Lua scripts.
*
* Tests that the Lua-based check-and-increment / decrement operations
* are truly atomic and eliminate the INCR+check+DECR race window.
*
* Run with: USE_REDIS=true npx jest --config packages/api/jest.config.js concurrency.cache_integration
*/
// Integration suite for the Lua-based concurrency counters. Requires a live
// Redis at REDIS_URI; every test guards on `ioredisClient` and becomes a no-op
// when Redis is unavailable. The limit is pinned to 2 via CONCURRENT_MESSAGE_MAX.
describe('Concurrency Middleware Integration Tests', () => {
  let originalEnv: NodeJS.ProcessEnv;
  let ioredisClient: Redis | Cluster | null = null;
  // Bound after dynamic import in beforeAll (module must load AFTER env vars are set).
  let checkAndIncrementPendingRequest: (
    userId: string,
  ) => Promise<{ allowed: boolean; pendingRequests: number; limit: number }>;
  let decrementPendingRequest: (userId: string) => Promise<void>;
  const testPrefix = 'Concurrency-Integration-Test';

  beforeAll(async () => {
    // Snapshot env so afterAll can restore it; configure Redis + concurrency knobs
    // before resetting the module registry so the fresh imports pick them up.
    originalEnv = { ...process.env };
    process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
    process.env.USE_REDIS_CLUSTER = process.env.USE_REDIS_CLUSTER ?? 'false';
    process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
    process.env.REDIS_KEY_PREFIX = testPrefix;
    process.env.REDIS_PING_INTERVAL = '0';
    process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
    process.env.LIMIT_CONCURRENT_MESSAGES = 'true';
    process.env.CONCURRENT_MESSAGE_MAX = '2';
    jest.resetModules();
    const { ioredisClient: client } = await import('../../cache/redisClients');
    ioredisClient = client;
    if (!ioredisClient) {
      console.warn('Redis not available, skipping integration tests');
      return;
    }
    // Import concurrency module after Redis client is available
    const concurrency = await import('../concurrency');
    checkAndIncrementPendingRequest = concurrency.checkAndIncrementPendingRequest;
    decrementPendingRequest = concurrency.decrementPendingRequest;
  });

  afterEach(async () => {
    if (!ioredisClient) {
      return;
    }
    // Delete every key written under the test prefix so tests stay independent.
    try {
      const keys = await ioredisClient.keys(`${testPrefix}*`);
      if (keys.length > 0) {
        await Promise.all(keys.map((key) => ioredisClient!.del(key)));
      }
    } catch (error) {
      console.warn('Error cleaning up test keys:', error);
    }
  });

  afterAll(async () => {
    if (ioredisClient) {
      // Prefer a graceful quit; fall back to a hard disconnect if quit fails.
      try {
        await ioredisClient.quit();
      } catch {
        try {
          ioredisClient.disconnect();
        } catch {
          // Ignore
        }
      }
    }
    process.env = originalEnv;
  });

  describe('Atomic Check and Increment', () => {
    test('should allow requests within the concurrency limit', async () => {
      if (!ioredisClient) {
        return;
      }
      const userId = `user-allow-${Date.now()}`;
      // First request - should be allowed (count = 1, limit = 2)
      const result1 = await checkAndIncrementPendingRequest(userId);
      expect(result1.allowed).toBe(true);
      expect(result1.pendingRequests).toBe(1);
      expect(result1.limit).toBe(2);
      // Second request - should be allowed (count = 2, limit = 2)
      const result2 = await checkAndIncrementPendingRequest(userId);
      expect(result2.allowed).toBe(true);
      expect(result2.pendingRequests).toBe(2);
    });

    test('should reject requests over the concurrency limit', async () => {
      if (!ioredisClient) {
        return;
      }
      const userId = `user-reject-${Date.now()}`;
      // Fill up to the limit
      await checkAndIncrementPendingRequest(userId);
      await checkAndIncrementPendingRequest(userId);
      // Third request - should be rejected (count would be 3, limit = 2)
      const result = await checkAndIncrementPendingRequest(userId);
      expect(result.allowed).toBe(false);
      expect(result.pendingRequests).toBe(3); // Reports the count that was over-limit
    });

    test('should not leave stale counter after rejection (atomic rollback)', async () => {
      if (!ioredisClient) {
        return;
      }
      const userId = `user-rollback-${Date.now()}`;
      // Fill up to the limit
      await checkAndIncrementPendingRequest(userId);
      await checkAndIncrementPendingRequest(userId);
      // Attempt over-limit (should be rejected and atomically rolled back)
      const rejected = await checkAndIncrementPendingRequest(userId);
      expect(rejected.allowed).toBe(false);
      // The key value should still be 2, not 3 — verify the Lua script decremented back
      const key = `PENDING_REQ:${userId}`;
      const rawValue = await ioredisClient.get(key);
      expect(rawValue).toBe('2');
    });

    test('should handle concurrent requests atomically (no over-admission)', async () => {
      if (!ioredisClient) {
        return;
      }
      const userId = `user-concurrent-${Date.now()}`;
      // Fire 20 concurrent requests for the same user (limit = 2)
      const results = await Promise.all(
        Array.from({ length: 20 }, () => checkAndIncrementPendingRequest(userId)),
      );
      const allowed = results.filter((r) => r.allowed);
      const rejected = results.filter((r) => !r.allowed);
      // Exactly 2 should be allowed (the concurrency limit)
      expect(allowed.length).toBe(2);
      expect(rejected.length).toBe(18);
      // The key value should be exactly 2 after all atomic operations
      const key = `PENDING_REQ:${userId}`;
      const rawValue = await ioredisClient.get(key);
      expect(rawValue).toBe('2');
      // Clean up
      await decrementPendingRequest(userId);
      await decrementPendingRequest(userId);
    });
  });

  describe('Atomic Decrement', () => {
    test('should decrement pending requests', async () => {
      if (!ioredisClient) {
        return;
      }
      const userId = `user-decrement-${Date.now()}`;
      await checkAndIncrementPendingRequest(userId);
      await checkAndIncrementPendingRequest(userId);
      // Decrement once
      await decrementPendingRequest(userId);
      const key = `PENDING_REQ:${userId}`;
      const rawValue = await ioredisClient.get(key);
      expect(rawValue).toBe('1');
    });

    test('should clean up key when count reaches zero', async () => {
      if (!ioredisClient) {
        return;
      }
      const userId = `user-cleanup-${Date.now()}`;
      await checkAndIncrementPendingRequest(userId);
      await decrementPendingRequest(userId);
      // Key should be deleted (not left as "0")
      const key = `PENDING_REQ:${userId}`;
      const exists = await ioredisClient.exists(key);
      expect(exists).toBe(0);
    });

    test('should clean up key on double-decrement (negative protection)', async () => {
      if (!ioredisClient) {
        return;
      }
      const userId = `user-double-decr-${Date.now()}`;
      await checkAndIncrementPendingRequest(userId);
      await decrementPendingRequest(userId);
      await decrementPendingRequest(userId); // Double-decrement
      // Key should be deleted, not negative
      const key = `PENDING_REQ:${userId}`;
      const exists = await ioredisClient.exists(key);
      expect(exists).toBe(0);
    });

    test('should allow new requests after decrement frees a slot', async () => {
      if (!ioredisClient) {
        return;
      }
      const userId = `user-free-slot-${Date.now()}`;
      // Fill to limit
      await checkAndIncrementPendingRequest(userId);
      await checkAndIncrementPendingRequest(userId);
      // Verify at limit
      const atLimit = await checkAndIncrementPendingRequest(userId);
      expect(atLimit.allowed).toBe(false);
      // Free a slot
      await decrementPendingRequest(userId);
      // Should now be allowed again
      const allowed = await checkAndIncrementPendingRequest(userId);
      expect(allowed.allowed).toBe(true);
      expect(allowed.pendingRequests).toBe(2);
    });
  });

  describe('TTL Behavior', () => {
    test('should set TTL on the concurrency key', async () => {
      if (!ioredisClient) {
        return;
      }
      const userId = `user-ttl-${Date.now()}`;
      await checkAndIncrementPendingRequest(userId);
      const key = `PENDING_REQ:${userId}`;
      const ttl = await ioredisClient.ttl(key);
      // The script sets EXPIRE with a 60-second TTL (see the Lua script's ARGV[2]).
      expect(ttl).toBeGreaterThan(0);
      expect(ttl).toBeLessThanOrEqual(60);
    });
  });
});

View file

@ -9,6 +9,40 @@ const LIMIT_CONCURRENT_MESSAGES = process.env.LIMIT_CONCURRENT_MESSAGES;
const CONCURRENT_MESSAGE_MAX = math(process.env.CONCURRENT_MESSAGE_MAX, 2);
const CONCURRENT_VIOLATION_SCORE = math(process.env.CONCURRENT_VIOLATION_SCORE, 1);
/**
 * Lua script for atomic check-and-increment.
 * Increments the key, refreshes its TTL, and — if the new count exceeds the
 * limit — decrements back within the same script execution.
 * Returns the positive count if allowed, or the negated count if rejected.
 * Runs in a single round-trip and is fully atomic, eliminating the
 * INCR/check/DECR race window.
 *
 * KEYS[1] = counter key; ARGV[1] = concurrency limit; ARGV[2] = TTL in seconds.
 */
const CHECK_AND_INCREMENT_SCRIPT = `
local key = KEYS[1]
local limit = tonumber(ARGV[1])
local ttl = tonumber(ARGV[2])
local current = redis.call('INCR', key)
redis.call('EXPIRE', key, ttl)
if current > limit then
redis.call('DECR', key)
return -current
end
return current
`;
/**
 * Lua script for atomic decrement-and-cleanup.
 * Decrements the key and deletes it if the count reaches zero or below,
 * so zero-valued or negative counters never linger in Redis.
 * Eliminates the DECR-then-DEL race window.
 *
 * KEYS[1] = counter key. Returns the new count (0 when the key was deleted).
 */
const DECREMENT_SCRIPT = `
local key = KEYS[1]
local current = redis.call('DECR', key)
if current <= 0 then
redis.call('DEL', key)
return 0
end
return current
`;
/** Lazily initialized cache for pending requests (used only for in-memory fallback) */
let pendingReqCache: ReturnType<typeof standardCache> | null = null;
@ -80,36 +114,28 @@ export async function checkAndIncrementPendingRequest(
return { allowed: true, pendingRequests: 0, limit };
}
// Use atomic Redis INCR when available to prevent race conditions
// Use atomic Lua script when Redis is available to prevent race conditions.
// A single EVAL round-trip atomically increments, checks, and decrements if over-limit.
if (USE_REDIS && ioredisClient) {
const key = buildKey(userId);
try {
// Pipeline ensures INCR and EXPIRE execute atomically in one round-trip
// This prevents edge cases where crash between operations leaves key without TTL
const pipeline = ioredisClient.pipeline();
pipeline.incr(key);
pipeline.expire(key, 60);
const results = await pipeline.exec();
const result = (await ioredisClient.eval(
CHECK_AND_INCREMENT_SCRIPT,
1,
key,
limit,
60,
)) as number;
if (!results || results[0][0]) {
throw new Error('Pipeline execution failed');
if (result < 0) {
// Negative return means over-limit (absolute value is the count before decrement)
const count = -result;
logger.debug(`[concurrency] User ${userId} exceeded concurrent limit: ${count}/${limit}`);
return { allowed: false, pendingRequests: count, limit };
}
const newCount = results[0][1] as number;
if (newCount > limit) {
// Over limit - decrement back and reject
await ioredisClient.decr(key);
logger.debug(
`[concurrency] User ${userId} exceeded concurrent limit: ${newCount}/${limit}`,
);
return { allowed: false, pendingRequests: newCount, limit };
}
logger.debug(
`[concurrency] User ${userId} incremented pending requests: ${newCount}/${limit}`,
);
return { allowed: true, pendingRequests: newCount, limit };
logger.debug(`[concurrency] User ${userId} incremented pending requests: ${result}/${limit}`);
return { allowed: true, pendingRequests: result, limit };
} catch (error) {
logger.error('[concurrency] Redis atomic increment failed:', error);
// On Redis error, allow the request to proceed (fail-open)
@ -164,18 +190,12 @@ export async function decrementPendingRequest(userId: string): Promise<void> {
return;
}
// Use atomic Redis DECR when available
// Use atomic Lua script to decrement and clean up zero/negative keys in one round-trip
if (USE_REDIS && ioredisClient) {
const key = buildKey(userId);
try {
const newCount = await ioredisClient.decr(key);
if (newCount < 0) {
// Counter went negative - reset to 0 and delete
await ioredisClient.del(key);
logger.debug(`[concurrency] User ${userId} pending requests cleared (was negative)`);
} else if (newCount === 0) {
// Clean up zero-value keys
await ioredisClient.del(key);
const newCount = (await ioredisClient.eval(DECREMENT_SCRIPT, 1, key)) as number;
if (newCount === 0) {
logger.debug(`[concurrency] User ${userId} pending requests cleared`);
} else {
logger.debug(`[concurrency] User ${userId} decremented pending requests: ${newCount}`);

View file

@ -19,8 +19,11 @@ describe('RedisEventTransport Integration Tests', () => {
originalEnv = { ...process.env };
process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
process.env.USE_REDIS_CLUSTER = process.env.USE_REDIS_CLUSTER ?? 'false';
process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
process.env.REDIS_KEY_PREFIX = testPrefix;
process.env.REDIS_PING_INTERVAL = '0';
process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
jest.resetModules();
@ -890,4 +893,121 @@ describe('RedisEventTransport Integration Tests', () => {
subscriber.disconnect();
});
});
// Pins the error-propagation contract of RedisEventTransport's emitters:
// emitChunk swallows publish failures (fire-and-forget callers), while
// emitDone and emitError rethrow so callers can react to a lost publish.
describe('Publish Error Propagation', () => {
  test('should swallow emitChunk publish errors (callers fire-and-forget)', async () => {
    const { RedisEventTransport } = await import('../implementations/RedisEventTransport');
    // Publisher whose publish() always rejects, simulating a dropped connection.
    const mockPublisher = {
      publish: jest.fn().mockRejectedValue(new Error('Redis connection lost')),
    };
    const mockSubscriber = {
      on: jest.fn(),
      subscribe: jest.fn().mockResolvedValue(undefined),
      unsubscribe: jest.fn().mockResolvedValue(undefined),
    };
    const transport = new RedisEventTransport(
      mockPublisher as unknown as Redis,
      mockSubscriber as unknown as Redis,
    );
    const streamId = `error-prop-chunk-${Date.now()}`;
    // emitChunk swallows errors because callers often fire-and-forget (no await).
    // Throwing would cause unhandled promise rejections.
    await expect(transport.emitChunk(streamId, { data: 'test' })).resolves.toBeUndefined();
    transport.destroy();
  });

  test('should throw when emitDone publish fails', async () => {
    const { RedisEventTransport } = await import('../implementations/RedisEventTransport');
    const mockPublisher = {
      publish: jest.fn().mockRejectedValue(new Error('Redis connection lost')),
    };
    const mockSubscriber = {
      on: jest.fn(),
      subscribe: jest.fn().mockResolvedValue(undefined),
      unsubscribe: jest.fn().mockResolvedValue(undefined),
    };
    const transport = new RedisEventTransport(
      mockPublisher as unknown as Redis,
      mockSubscriber as unknown as Redis,
    );
    const streamId = `error-prop-done-${Date.now()}`;
    // Done events are awaited by callers, so the failure must surface.
    await expect(transport.emitDone(streamId, { finished: true })).rejects.toThrow(
      'Redis connection lost',
    );
    transport.destroy();
  });

  test('should throw when emitError publish fails', async () => {
    const { RedisEventTransport } = await import('../implementations/RedisEventTransport');
    const mockPublisher = {
      publish: jest.fn().mockRejectedValue(new Error('Redis connection lost')),
    };
    const mockSubscriber = {
      on: jest.fn(),
      subscribe: jest.fn().mockResolvedValue(undefined),
      unsubscribe: jest.fn().mockResolvedValue(undefined),
    };
    const transport = new RedisEventTransport(
      mockPublisher as unknown as Redis,
      mockSubscriber as unknown as Redis,
    );
    const streamId = `error-prop-error-${Date.now()}`;
    await expect(transport.emitError(streamId, 'some error')).rejects.toThrow(
      'Redis connection lost',
    );
    transport.destroy();
  });

  test('should still deliver events successfully when publish succeeds', async () => {
    if (!ioredisClient) {
      console.warn('Redis not available, skipping test');
      return;
    }
    const { RedisEventTransport } = await import('../implementations/RedisEventTransport');
    // Happy-path check against a real Redis: a duplicated connection subscribes
    // while the shared client publishes.
    const subscriber = (ioredisClient as Redis).duplicate();
    const transport = new RedisEventTransport(ioredisClient, subscriber);
    const streamId = `error-prop-success-${Date.now()}`;
    const receivedChunks: unknown[] = [];
    let doneEvent: unknown = null;
    transport.subscribe(streamId, {
      onChunk: (event) => receivedChunks.push(event),
      onDone: (event) => {
        doneEvent = event;
      },
    });
    // Give the pub/sub subscription time to establish before publishing.
    await new Promise((resolve) => setTimeout(resolve, 200));
    // These should NOT throw
    await transport.emitChunk(streamId, { text: 'hello' });
    await transport.emitDone(streamId, { finished: true });
    await new Promise((resolve) => setTimeout(resolve, 200));
    expect(receivedChunks.length).toBe(1);
    expect(doneEvent).toEqual({ finished: true });
    transport.destroy();
    subscriber.disconnect();
  });
});
});

View file

@ -24,8 +24,11 @@ describe('RedisJobStore Integration Tests', () => {
// Set up test environment
process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
process.env.USE_REDIS_CLUSTER = process.env.USE_REDIS_CLUSTER ?? 'false';
process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
process.env.REDIS_KEY_PREFIX = testPrefix;
process.env.REDIS_PING_INTERVAL = '0';
process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
jest.resetModules();
@ -1033,4 +1036,196 @@ describe('RedisJobStore Integration Tests', () => {
await instance2.destroy();
});
});
// Exercises RedisJobStore.cleanup() with more stale jobs than a single batch
// (75 jobs vs. a batch size of 50 — see the inline comment) and verifies that
// valid running jobs survive the sweep.
describe('Batched Cleanup', () => {
  test('should clean up many stale jobs in parallel batches', async () => {
    if (!ioredisClient) {
      return;
    }
    const { RedisJobStore } = await import('../implementations/RedisJobStore');
    // Very short TTL so jobs are immediately stale
    const store = new RedisJobStore(ioredisClient, { runningTtl: 1 });
    await store.initialize();
    const jobCount = 75; // More than one batch of 50
    const veryOldTimestamp = Date.now() - 10000; // 10 seconds ago
    // Create many stale jobs directly in Redis
    for (let i = 0; i < jobCount; i++) {
      const streamId = `batch-cleanup-${Date.now()}-${i}`;
      const jobKey = `stream:{${streamId}}:job`;
      await ioredisClient.hmset(jobKey, {
        streamId,
        userId: 'batch-user',
        status: 'running',
        createdAt: veryOldTimestamp.toString(),
        syncSent: '0',
      });
      await ioredisClient.sadd('stream:running', streamId);
    }
    // Verify jobs are in the running set
    const runningBefore = await ioredisClient.scard('stream:running');
    expect(runningBefore).toBeGreaterThanOrEqual(jobCount);
    // Run cleanup - should process in batches of 50
    const cleaned = await store.cleanup();
    expect(cleaned).toBeGreaterThanOrEqual(jobCount);
    await store.destroy();
  });

  test('should not clean up valid running jobs during batch cleanup', async () => {
    if (!ioredisClient) {
      return;
    }
    const { RedisJobStore } = await import('../implementations/RedisJobStore');
    const store = new RedisJobStore(ioredisClient, { runningTtl: 1200 });
    await store.initialize();
    // Create a mix of valid and stale jobs
    const validStreamId = `valid-job-${Date.now()}`;
    await store.createJob(validStreamId, 'user-1', validStreamId);
    // Stale job is written directly to Redis with a very old createdAt so it
    // exceeds the runningTtl and qualifies for cleanup.
    const staleStreamId = `stale-job-${Date.now()}`;
    const jobKey = `stream:{${staleStreamId}}:job`;
    await ioredisClient.hmset(jobKey, {
      streamId: staleStreamId,
      userId: 'user-1',
      status: 'running',
      createdAt: (Date.now() - 2000000).toString(), // Very old
      syncSent: '0',
    });
    await ioredisClient.sadd('stream:running', staleStreamId);
    const cleaned = await store.cleanup();
    expect(cleaned).toBeGreaterThanOrEqual(1);
    // Valid job should still exist
    const validJob = await store.getJob(validStreamId);
    expect(validJob).not.toBeNull();
    expect(validJob?.status).toBe('running');
    await store.destroy();
  });
});
// Verifies that appendChunk() both sets a TTL on the chunk stream key and
// refreshes it on every subsequent append (not only the first), and that
// chunks written through the pipeline are stored and reconstructable.
describe('appendChunk TTL Refresh', () => {
  test('should set TTL on the chunk stream', async () => {
    if (!ioredisClient) {
      return;
    }
    const { RedisJobStore } = await import('../implementations/RedisJobStore');
    const store = new RedisJobStore(ioredisClient, { runningTtl: 120 });
    await store.initialize();
    const streamId = `append-ttl-${Date.now()}`;
    await store.createJob(streamId, 'user-1', streamId);
    await store.appendChunk(streamId, {
      event: 'on_message_delta',
      data: { id: 'step-1', type: 'text', text: 'first' },
    });
    // The first append should leave a TTL bounded by runningTtl (120s).
    const chunkKey = `stream:{${streamId}}:chunks`;
    const ttl = await ioredisClient.ttl(chunkKey);
    expect(ttl).toBeGreaterThan(0);
    expect(ttl).toBeLessThanOrEqual(120);
    await store.destroy();
  });

  test('should refresh TTL on subsequent chunks (not just first)', async () => {
    if (!ioredisClient) {
      return;
    }
    const { RedisJobStore } = await import('../implementations/RedisJobStore');
    const store = new RedisJobStore(ioredisClient, { runningTtl: 120 });
    await store.initialize();
    const streamId = `append-refresh-${Date.now()}`;
    await store.createJob(streamId, 'user-1', streamId);
    // Append first chunk
    await store.appendChunk(streamId, {
      event: 'on_message_delta',
      data: { id: 'step-1', type: 'text', text: 'first' },
    });
    const chunkKey = `stream:{${streamId}}:chunks`;
    const ttl1 = await ioredisClient.ttl(chunkKey);
    expect(ttl1).toBeGreaterThan(0);
    // Manually reduce TTL to simulate time passing
    await ioredisClient.expire(chunkKey, 30);
    const reducedTtl = await ioredisClient.ttl(chunkKey);
    expect(reducedTtl).toBeLessThanOrEqual(30);
    // Append another chunk - TTL should be refreshed back to running TTL
    await store.appendChunk(streamId, {
      event: 'on_message_delta',
      data: { id: 'step-1', type: 'text', text: 'second' },
    });
    const ttl2 = await ioredisClient.ttl(chunkKey);
    // Should be refreshed to ~120, not still ~30
    expect(ttl2).toBeGreaterThan(30);
    expect(ttl2).toBeLessThanOrEqual(120);
    await store.destroy();
  });

  test('should store chunks correctly via pipeline', async () => {
    if (!ioredisClient) {
      return;
    }
    const { RedisJobStore } = await import('../implementations/RedisJobStore');
    const store = new RedisJobStore(ioredisClient);
    await store.initialize();
    const streamId = `append-pipeline-${Date.now()}`;
    await store.createJob(streamId, 'user-1', streamId);
    // One run-step event followed by two text deltas for the same step.
    const chunks = [
      {
        event: 'on_run_step',
        data: {
          id: 'step-1',
          runId: 'run-1',
          index: 0,
          stepDetails: { type: 'message_creation' },
        },
      },
      {
        event: 'on_message_delta',
        data: { id: 'step-1', delta: { content: { type: 'text', text: 'Hello ' } } },
      },
      {
        event: 'on_message_delta',
        data: { id: 'step-1', delta: { content: { type: 'text', text: 'world!' } } },
      },
    ];
    for (const chunk of chunks) {
      await store.appendChunk(streamId, chunk);
    }
    // Verify all chunks were stored
    const chunkKey = `stream:{${streamId}}:chunks`;
    const len = await ioredisClient.xlen(chunkKey);
    expect(len).toBe(3);
    // Verify content can be reconstructed
    const content = await store.getContentParts(streamId);
    expect(content).not.toBeNull();
    expect(content!.content.length).toBeGreaterThan(0);
    await store.destroy();
  });
});
});

View file

@ -0,0 +1,450 @@
import type { Redis, Cluster } from 'ioredis';
import { RedisEventTransport } from '~/stream/implementations/RedisEventTransport';
import { GenerationJobManagerClass } from '~/stream/GenerationJobManager';
import { createStreamServices } from '~/stream/createStreamServices';
import {
ioredisClient as staticRedisClient,
keyvRedisClient as staticKeyvClient,
keyvRedisClientReady,
} from '~/cache/redisClients';
/**
* Regression tests for the reconnect reorder buffer desync bug.
*
* Bug: When a user disconnects and reconnects to a stream multiple times,
* the second+ reconnect lost chunks because the transport deleted stream state
* on last unsubscribe, destroying the allSubscribersLeftCallbacks registered
* by createJob(). This prevented hasSubscriber from being reset, which in turn
* prevented syncReorderBuffer from being called on reconnect.
*
* Fix: Preserve stream state (callbacks, abort handlers) across reconnect cycles
* instead of deleting it. The state is fully cleaned up by cleanup() when the
* job completes.
*
* Run with: USE_REDIS=true npx jest reconnect-reorder-desync
*/
describe('Reconnect Reorder Buffer Desync (Regression)', () => {
// Unit-level regression tests: the transport must keep per-stream callbacks
// (allSubscribersLeft, abort) registered across repeated subscribe/unsubscribe
// cycles, instead of discarding them on the first "last subscriber left".
describe('Callback preservation across reconnect cycles (Unit)', () => {
  test('allSubscribersLeft callback fires on every disconnect, not just the first', () => {
    const mockPublisher = {
      publish: jest.fn().mockResolvedValue(1),
    };
    const mockSubscriber = {
      on: jest.fn(),
      subscribe: jest.fn().mockResolvedValue(undefined),
      unsubscribe: jest.fn().mockResolvedValue(undefined),
    };
    const transport = new RedisEventTransport(
      mockPublisher as unknown as Redis,
      mockSubscriber as unknown as Redis,
    );
    const streamId = 'callback-persist-test';
    let callbackFireCount = 0;
    // Register callback (simulates what createJob does)
    transport.onAllSubscribersLeft(streamId, () => {
      callbackFireCount++;
    });
    // First subscribe/unsubscribe cycle
    const sub1 = transport.subscribe(streamId, { onChunk: () => {} });
    sub1.unsubscribe();
    expect(callbackFireCount).toBe(1);
    // Second subscribe/unsubscribe cycle — callback must still fire
    const sub2 = transport.subscribe(streamId, { onChunk: () => {} });
    sub2.unsubscribe();
    expect(callbackFireCount).toBe(2);
    // Third cycle — continues to work
    const sub3 = transport.subscribe(streamId, { onChunk: () => {} });
    sub3.unsubscribe();
    expect(callbackFireCount).toBe(3);
    transport.destroy();
  });

  test('abort callback survives across reconnect cycles', () => {
    const mockPublisher = {
      publish: jest.fn().mockResolvedValue(1),
    };
    const mockSubscriber = {
      on: jest.fn(),
      subscribe: jest.fn().mockResolvedValue(undefined),
      unsubscribe: jest.fn().mockResolvedValue(undefined),
    };
    const transport = new RedisEventTransport(
      mockPublisher as unknown as Redis,
      mockSubscriber as unknown as Redis,
    );
    const streamId = 'abort-callback-persist-test';
    let abortCallbackFired = false;
    // Register abort callback (simulates what createJob does)
    transport.onAbort(streamId, () => {
      abortCallbackFired = true;
    });
    // Subscribe/unsubscribe cycle
    const sub1 = transport.subscribe(streamId, { onChunk: () => {} });
    sub1.unsubscribe();
    // Re-subscribe and receive an abort signal
    const sub2 = transport.subscribe(streamId, { onChunk: () => {} });
    // Pull the 'message' handler the transport registered on the mock
    // subscriber so we can inject a pub/sub message directly.
    const messageHandler = mockSubscriber.on.mock.calls.find(
      (call) => call[0] === 'message',
    )?.[1] as (channel: string, message: string) => void;
    const channel = `stream:{${streamId}}:events`;
    messageHandler(channel, JSON.stringify({ type: 'abort' }));
    // Abort callback should fire — it was preserved across the reconnect
    expect(abortCallbackFired).toBe(true);
    sub2.unsubscribe();
    transport.destroy();
  });
});
describe('Reorder buffer sync on reconnect (Unit)', () => {
  /**
   * After the fix, the allSubscribersLeft callback fires on every disconnect,
   * which resets hasSubscriber. GenerationJobManager.subscribe() then enters
   * the if (!runtime.hasSubscriber) block and calls syncReorderBuffer.
   *
   * This test verifies at the transport level that when syncReorderBuffer IS
   * called (as it now will be on every reconnect), messages are delivered
   * immediately regardless of how many reconnect cycles have occurred.
   */
  test('syncReorderBuffer works correctly on third+ reconnect', async () => {
    const mockPublisher = {
      publish: jest.fn().mockResolvedValue(1),
    };
    const mockSubscriber = {
      on: jest.fn(),
      subscribe: jest.fn().mockResolvedValue(undefined),
      unsubscribe: jest.fn().mockResolvedValue(undefined),
    };
    const transport = new RedisEventTransport(
      mockPublisher as unknown as Redis,
      mockSubscriber as unknown as Redis,
    );
    const streamId = 'reorder-multi-reconnect-test';
    transport.onAllSubscribersLeft(streamId, () => {
      // Simulates the callback from createJob
    });
    // Grab the transport's pub/sub 'message' handler so messages can be
    // injected without a real Redis connection.
    const messageHandler = mockSubscriber.on.mock.calls.find(
      (call) => call[0] === 'message',
    )?.[1] as (channel: string, message: string) => void;
    const channel = `stream:{${streamId}}:events`;
    // Run 3 full subscribe/emit/unsubscribe cycles
    for (let cycle = 0; cycle < 3; cycle++) {
      const chunks: unknown[] = [];
      const sub = transport.subscribe(streamId, {
        onChunk: (event) => chunks.push(event),
      });
      // Sync reorder buffer (as GenerationJobManager.subscribe does)
      transport.syncReorderBuffer(streamId);
      const baseSeq = cycle * 10;
      // Emit 10 chunks (advances publisher sequence)
      for (let i = 0; i < 10; i++) {
        await transport.emitChunk(streamId, { index: baseSeq + i });
      }
      // Deliver messages via pub/sub handler
      for (let i = 0; i < 10; i++) {
        messageHandler(
          channel,
          JSON.stringify({ type: 'chunk', seq: baseSeq + i, data: { index: baseSeq + i } }),
        );
      }
      // Messages should be delivered immediately on every cycle
      expect(chunks.length).toBe(10);
      expect(chunks.map((c) => (c as { index: number }).index)).toEqual(
        Array.from({ length: 10 }, (_, i) => baseSeq + i),
      );
      sub.unsubscribe();
    }
    transport.destroy();
  });

  test('reorder buffer works correctly when syncReorderBuffer IS called', async () => {
    const mockPublisher = {
      publish: jest.fn().mockResolvedValue(1),
    };
    const mockSubscriber = {
      on: jest.fn(),
      subscribe: jest.fn().mockResolvedValue(undefined),
      unsubscribe: jest.fn().mockResolvedValue(undefined),
    };
    const transport = new RedisEventTransport(
      mockPublisher as unknown as Redis,
      mockSubscriber as unknown as Redis,
    );
    const streamId = 'reorder-sync-test';
    // Emit 20 chunks to advance publisher sequence
    for (let i = 0; i < 20; i++) {
      await transport.emitChunk(streamId, { index: i });
    }
    // Subscribe and sync the reorder buffer
    const chunks: unknown[] = [];
    const sub = transport.subscribe(streamId, {
      onChunk: (event) => chunks.push(event),
    });
    // This is the critical call - sync nextSeq to match publisher
    transport.syncReorderBuffer(streamId);
    // Deliver messages starting at seq 20
    const messageHandler = mockSubscriber.on.mock.calls.find(
      (call) => call[0] === 'message',
    )?.[1] as (channel: string, message: string) => void;
    const channel = `stream:{${streamId}}:events`;
    for (let i = 20; i < 25; i++) {
      messageHandler(channel, JSON.stringify({ type: 'chunk', seq: i, data: { index: i } }));
    }
    // Messages should be delivered IMMEDIATELY (no 500ms wait)
    // because nextSeq was synced to 20
    expect(chunks.length).toBe(5);
    expect(chunks.map((c) => (c as { index: number }).index)).toEqual([20, 21, 22, 23, 24]);
    sub.unsubscribe();
    transport.destroy();
  });
});
describe('End-to-end reconnect with GenerationJobManager (Integration)', () => {
  const testPrefix = 'ReconnectDesync-Test';
  let savedEnv: NodeJS.ProcessEnv;
  let ioredisClient: Redis | Cluster | null = null;
  let dynamicKeyvClient: unknown = null;
  let dynamicKeyvReady: Promise<unknown> | null = null;

  /** Pause the test for the given number of milliseconds. */
  const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

  beforeAll(async () => {
    // Snapshot the environment so afterAll can restore it untouched.
    savedEnv = { ...process.env };
    process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
    process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
    process.env.REDIS_KEY_PREFIX = testPrefix;
    jest.resetModules();
    // Re-import after resetModules so the clients pick up the test env vars.
    const redisModule = await import('~/cache/redisClients');
    ioredisClient = redisModule.ioredisClient;
    dynamicKeyvClient = redisModule.keyvRedisClient;
    dynamicKeyvReady = redisModule.keyvRedisClientReady;
  });

  afterEach(async () => {
    jest.resetModules();
    if (!ioredisClient) {
      return;
    }
    try {
      // Remove both prefixed test keys and any stream keys left behind.
      const prefixedKeys = await ioredisClient.keys(`${testPrefix}*`);
      const streamKeys = await ioredisClient.keys('stream:*');
      await Promise.all(
        [...prefixedKeys, ...streamKeys].map((key) => ioredisClient!.del(key)),
      );
    } catch {
      // Ignore cleanup errors
    }
  });

  afterAll(async () => {
    // Let any in-flight client initialization settle before disconnecting.
    for (const ready of [keyvRedisClientReady, dynamicKeyvReady]) {
      if (ready) {
        await ready.catch(() => {});
      }
    }
    for (const client of [ioredisClient, staticRedisClient, staticKeyvClient, dynamicKeyvClient]) {
      if (!client) {
        continue;
      }
      try {
        await (client as { disconnect: () => void | Promise<void> }).disconnect();
      } catch {
        /* ignore */
      }
    }
    process.env = savedEnv;
  });

  /**
   * Verifies that all reconnect cycles deliver chunks immediately
   * not just the first reconnect.
   */
  test('chunks are delivered immediately on every reconnect cycle', async () => {
    if (!ioredisClient) {
      console.warn('Redis not available, skipping test');
      return;
    }
    const jobManager = new GenerationJobManagerClass();
    jobManager.configure(
      createStreamServices({
        useRedis: true,
        redisClient: ioredisClient,
      }),
    );
    jobManager.initialize();
    const streamId = `reconnect-fixed-${Date.now()}`;
    await jobManager.createJob(streamId, 'user-1');
    // Three full subscribe → emit → unsubscribe cycles.
    for (let cycle = 0; cycle < 3; cycle++) {
      const received: unknown[] = [];
      const subscription = await jobManager.subscribe(streamId, (event) => {
        received.push(event);
      });
      await sleep(100);
      // Emit 10 chunks
      for (let i = 0; i < 10; i++) {
        await jobManager.emitChunk(streamId, {
          event: 'on_message_delta',
          data: {
            delta: { content: { type: 'text', text: `c${cycle}-${i}` } },
            index: cycle * 10 + i,
          },
        });
      }
      // Chunks should arrive within 200ms (well under the 500ms force-flush timeout)
      await sleep(200);
      expect(received.length).toBe(10);
      subscription!.unsubscribe();
      await sleep(100);
    }
    await jobManager.destroy();
  });

  /**
   * Verifies that syncSent is correctly reset on every disconnect,
   * proving the onAllSubscribersLeft callback survives reconnect cycles.
   */
  test('onAllSubscribersLeft callback resets state on every disconnect', async () => {
    if (!ioredisClient) {
      console.warn('Redis not available, skipping test');
      return;
    }
    const jobManager = new GenerationJobManagerClass();
    jobManager.configure(
      createStreamServices({
        useRedis: true,
        redisClient: ioredisClient,
      }),
    );
    jobManager.initialize();
    const streamId = `callback-persist-integ-${Date.now()}`;
    await jobManager.createJob(streamId, 'user-1');
    for (let cycle = 0; cycle < 3; cycle++) {
      const subscription = await jobManager.subscribe(streamId, () => {});
      await sleep(50);
      // Mark sync as sent
      jobManager.markSyncSent(streamId);
      await sleep(50);
      expect(await jobManager.wasSyncSent(streamId)).toBe(true);
      // Disconnect
      subscription!.unsubscribe();
      await sleep(100);
      // Callback should reset syncSent on every disconnect
      expect(await jobManager.wasSyncSent(streamId)).toBe(false);
    }
    await jobManager.destroy();
  });

  /**
   * Verifies all reconnect cycles deliver chunks immediately with no
   * increasing gap pattern.
   */
  test('no increasing gap pattern across reconnect cycles', async () => {
    if (!ioredisClient) {
      console.warn('Redis not available, skipping test');
      return;
    }
    const jobManager = new GenerationJobManagerClass();
    jobManager.configure(
      createStreamServices({
        useRedis: true,
        redisClient: ioredisClient,
      }),
    );
    jobManager.initialize();
    const streamId = `no-gaps-${Date.now()}`;
    await jobManager.createJob(streamId, 'user-1');
    const chunksPerCycle = 15;
    for (let cycle = 0; cycle < 4; cycle++) {
      const received: unknown[] = [];
      const subscription = await jobManager.subscribe(streamId, (event) => {
        received.push(event);
      });
      await sleep(100);
      // Emit chunks
      for (let i = 0; i < chunksPerCycle; i++) {
        await jobManager.emitChunk(streamId, {
          event: 'on_message_delta',
          data: {
            delta: { content: { type: 'text', text: `c${cycle}-${i}` } },
            index: cycle * chunksPerCycle + i,
          },
        });
      }
      // All chunks should arrive within 200ms on every cycle
      await sleep(200);
      expect(received.length).toBe(chunksPerCycle);
      subscription!.unsubscribe();
      await sleep(100);
    }
    await jobManager.destroy();
  });
});
});

View file

@ -58,9 +58,11 @@ export class InMemoryEventTransport implements IEventTransport {
// Check if all subscribers left - cleanup and notify
if (currentState.emitter.listenerCount('chunk') === 0) {
currentState.allSubscribersLeftCallback?.();
// Auto-cleanup the stream entry when no subscribers remain
/* Remove all EventEmitter listeners but preserve stream state
* (including allSubscribersLeftCallback) for reconnection.
* State is fully cleaned up by cleanup() when the job completes.
*/
currentState.emitter.removeAllListeners();
this.streams.delete(streamId);
}
}
},

View file

@ -425,8 +425,15 @@ export class RedisEventTransport implements IEventTransport {
logger.error(`[RedisEventTransport] Error in allSubscribersLeft callback:`, err);
}
}
this.streams.delete(streamId);
/**
* Preserve stream state (callbacks, abort handlers) for reconnection.
* Previously this deleted the entire state, which lost the
* allSubscribersLeftCallbacks and abortCallbacks registered by
* GenerationJobManager.createJob(). On the next subscribe() call,
* fresh state was created without those callbacks, causing
* hasSubscriber to never reset and syncReorderBuffer to be skipped.
* State is fully cleaned up by cleanup() when the job completes.
*/
}
},
};
@ -461,6 +468,7 @@ export class RedisEventTransport implements IEventTransport {
await this.publisher.publish(channel, JSON.stringify(message));
} catch (err) {
logger.error(`[RedisEventTransport] Failed to publish done:`, err);
throw err;
}
}
@ -477,6 +485,7 @@ export class RedisEventTransport implements IEventTransport {
await this.publisher.publish(channel, JSON.stringify(message));
} catch (err) {
logger.error(`[RedisEventTransport] Failed to publish error:`, err);
throw err;
}
}

View file

@ -302,32 +302,46 @@ export class RedisJobStore implements IJobStore {
}
}
for (const streamId of streamIds) {
const job = await this.getJob(streamId);
// Process in batches of 50 to avoid sequential per-job round-trips
const BATCH_SIZE = 50;
for (let i = 0; i < streamIds.length; i += BATCH_SIZE) {
const batch = streamIds.slice(i, i + BATCH_SIZE);
const results = await Promise.allSettled(
batch.map(async (streamId) => {
const job = await this.getJob(streamId);
// Job no longer exists (TTL expired) - remove from set
if (!job) {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
cleaned++;
continue;
}
// Job no longer exists (TTL expired) - remove from set
if (!job) {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
return 1;
}
// Job completed but still in running set (shouldn't happen, but handle it)
if (job.status !== 'running') {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
cleaned++;
continue;
}
// Job completed but still in running set (shouldn't happen, but handle it)
if (job.status !== 'running') {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
return 1;
}
// Stale running job (failsafe - running for > configured TTL)
if (now - job.createdAt > this.ttl.running * 1000) {
logger.warn(`[RedisJobStore] Cleaning up stale job: ${streamId}`);
await this.deleteJob(streamId);
cleaned++;
// Stale running job (failsafe - running for > configured TTL)
if (now - job.createdAt > this.ttl.running * 1000) {
logger.warn(`[RedisJobStore] Cleaning up stale job: ${streamId}`);
await this.deleteJob(streamId);
return 1;
}
return 0;
}),
);
for (const result of results) {
if (result.status === 'fulfilled') {
cleaned += result.value;
} else {
logger.warn(`[RedisJobStore] Cleanup failed for a job:`, result.reason);
}
}
}
@ -592,16 +606,14 @@ export class RedisJobStore implements IJobStore {
*/
async appendChunk(streamId: string, event: unknown): Promise<void> {
const key = KEYS.chunks(streamId);
const added = await this.redis.xadd(key, '*', 'event', JSON.stringify(event));
// Set TTL on first chunk (when stream is created)
// Subsequent chunks inherit the stream's TTL
if (added) {
const len = await this.redis.xlen(key);
if (len === 1) {
await this.redis.expire(key, this.ttl.running);
}
}
// Pipeline XADD + EXPIRE in a single round-trip.
// EXPIRE is O(1) and idempotent — refreshing TTL on every chunk is better than
// only setting it once, since the original approach could let the TTL expire
// during long-running streams.
const pipeline = this.redis.pipeline();
pipeline.xadd(key, '*', 'event', JSON.stringify(event));
pipeline.expire(key, this.ttl.running);
await pipeline.exec();
}
/**

View file

@ -148,6 +148,7 @@ const anthropicModels = {
'claude-3.5-sonnet-latest': 200000,
'claude-haiku-4-5': 200000,
'claude-sonnet-4': 1000000,
'claude-sonnet-4-6': 1000000,
'claude-4': 200000,
'claude-opus-4': 200000,
'claude-opus-4-5': 200000,
@ -401,6 +402,7 @@ const anthropicMaxOutputs = {
'claude-3-opus': 4096,
'claude-haiku-4-5': 64000,
'claude-sonnet-4': 64000,
'claude-sonnet-4-6': 64000,
'claude-opus-4': 32000,
'claude-opus-4-5': 64000,
'claude-opus-4-6': 128000,

View file

@ -6,7 +6,7 @@
"": {
"name": "librechat-data-provider/react-query",
"dependencies": {
"axios": "^1.12.1"
"axios": "^1.13.5"
}
},
"node_modules/asynckit": {
@ -16,13 +16,13 @@
"license": "MIT"
},
"node_modules/axios": {
"version": "1.12.1",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.12.1.tgz",
"integrity": "sha512-Kn4kbSXpkFHCGE6rBFNwIv0GQs4AvDT80jlveJDKFxjbTYMUeB4QtsdPCv6H8Cm19Je7IU6VFtRl2zWZI0rudQ==",
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.13.5.tgz",
"integrity": "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==",
"license": "MIT",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.4",
"follow-redirects": "^1.15.11",
"form-data": "^4.0.5",
"proxy-from-env": "^1.1.0"
}
},
@ -140,9 +140,9 @@
}
},
"node_modules/form-data": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
"integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",

View file

@ -5,6 +5,6 @@
"module": "./index.es.js",
"types": "../dist/types/react-query/index.d.ts",
"dependencies": {
"axios": "^1.12.1"
"axios": "^1.13.5"
}
}

View file

@ -46,6 +46,30 @@ describe('supportsAdaptiveThinking', () => {
expect(supportsAdaptiveThinking('claude-opus-4-0')).toBe(false);
});
test('should return true for claude-sonnet-4-6', () => {
expect(supportsAdaptiveThinking('claude-sonnet-4-6')).toBe(true);
});
test('should return true for claude-sonnet-4.6', () => {
expect(supportsAdaptiveThinking('claude-sonnet-4.6')).toBe(true);
});
test('should return true for claude-sonnet-4-7 (future)', () => {
expect(supportsAdaptiveThinking('claude-sonnet-4-7')).toBe(true);
});
test('should return true for anthropic.claude-sonnet-4-6 (Bedrock)', () => {
expect(supportsAdaptiveThinking('anthropic.claude-sonnet-4-6')).toBe(true);
});
test('should return true for us.anthropic.claude-sonnet-4-6 (cross-region Bedrock)', () => {
expect(supportsAdaptiveThinking('us.anthropic.claude-sonnet-4-6')).toBe(true);
});
test('should return true for claude-4-6-sonnet (alternate naming)', () => {
expect(supportsAdaptiveThinking('claude-4-6-sonnet')).toBe(true);
});
test('should return false for claude-sonnet-4-5', () => {
expect(supportsAdaptiveThinking('claude-sonnet-4-5')).toBe(false);
});
@ -104,6 +128,14 @@ describe('supportsContext1m', () => {
expect(supportsContext1m('claude-sonnet-4-5')).toBe(true);
});
test('should return true for claude-sonnet-4-6', () => {
expect(supportsContext1m('claude-sonnet-4-6')).toBe(true);
});
test('should return true for anthropic.claude-sonnet-4-6 (Bedrock)', () => {
expect(supportsContext1m('anthropic.claude-sonnet-4-6')).toBe(true);
});
test('should return true for claude-sonnet-5 (future)', () => {
expect(supportsContext1m('claude-sonnet-5')).toBe(true);
});
@ -237,14 +269,42 @@ describe('bedrockInputParser', () => {
]);
});
test('should match anthropic.claude-4-7-sonnet model with 1M context header', () => {
test('should match anthropic.claude-sonnet-4-6 with adaptive thinking and 1M context header', () => {
const input = {
model: 'anthropic.claude-sonnet-4-6',
};
const result = bedrockInputParser.parse(input) as Record<string, unknown>;
const additionalFields = result.additionalModelRequestFields as Record<string, unknown>;
expect(additionalFields.thinking).toEqual({ type: 'adaptive' });
expect(additionalFields.thinkingBudget).toBeUndefined();
expect(additionalFields.anthropic_beta).toEqual([
'output-128k-2025-02-19',
'context-1m-2025-08-07',
]);
});
test('should match us.anthropic.claude-sonnet-4-6 with adaptive thinking and 1M context header', () => {
const input = {
model: 'us.anthropic.claude-sonnet-4-6',
};
const result = bedrockInputParser.parse(input) as Record<string, unknown>;
const additionalFields = result.additionalModelRequestFields as Record<string, unknown>;
expect(additionalFields.thinking).toEqual({ type: 'adaptive' });
expect(additionalFields.thinkingBudget).toBeUndefined();
expect(additionalFields.anthropic_beta).toEqual([
'output-128k-2025-02-19',
'context-1m-2025-08-07',
]);
});
test('should match anthropic.claude-4-7-sonnet model with adaptive thinking and 1M context header', () => {
const input = {
model: 'anthropic.claude-4-7-sonnet',
};
const result = bedrockInputParser.parse(input) as Record<string, unknown>;
const additionalFields = result.additionalModelRequestFields as Record<string, unknown>;
expect(additionalFields.thinking).toBe(true);
expect(additionalFields.thinkingBudget).toBe(2000);
expect(additionalFields.thinking).toEqual({ type: 'adaptive' });
expect(additionalFields.thinkingBudget).toBeUndefined();
expect(additionalFields.anthropic_beta).toEqual([
'output-128k-2025-02-19',
'context-1m-2025-08-07',

View file

@ -35,27 +35,34 @@ function parseOpusVersion(model: string): { major: number; minor: number } | nul
return null;
}
/** Extracts sonnet major version from both naming formats */
function parseSonnetVersion(model: string): number | null {
const nameFirst = model.match(/claude-sonnet[-.]?(\d+)/);
/** Extracts sonnet major/minor version from both naming formats.
* Uses single-digit minor capture to avoid matching date suffixes (e.g., -20250514). */
function parseSonnetVersion(model: string): { major: number; minor: number } | null {
const nameFirst = model.match(/claude-sonnet[-.]?(\d+)(?:[-.](\d)(?!\d))?/);
if (nameFirst) {
return parseInt(nameFirst[1], 10);
return {
major: parseInt(nameFirst[1], 10),
minor: nameFirst[2] != null ? parseInt(nameFirst[2], 10) : 0,
};
}
const numFirst = model.match(/claude-(\d+)(?:[-.]?\d+)?-sonnet/);
const numFirst = model.match(/claude-(\d+)(?:[-.](\d)(?!\d))?-sonnet/);
if (numFirst) {
return parseInt(numFirst[1], 10);
return {
major: parseInt(numFirst[1], 10),
minor: numFirst[2] != null ? parseInt(numFirst[2], 10) : 0,
};
}
return null;
}
/** Checks if a model supports adaptive thinking (Opus 4.6+, Sonnet 5+) */
/** Checks if a model supports adaptive thinking (Opus 4.6+, Sonnet 4.6+) */
export function supportsAdaptiveThinking(model: string): boolean {
const opus = parseOpusVersion(model);
if (opus && (opus.major > 4 || (opus.major === 4 && opus.minor >= 6))) {
return true;
}
const sonnet = parseSonnetVersion(model);
if (sonnet != null && sonnet >= 5) {
if (sonnet != null && (sonnet.major > 4 || (sonnet.major === 4 && sonnet.minor >= 6))) {
return true;
}
return false;
@ -64,7 +71,7 @@ export function supportsAdaptiveThinking(model: string): boolean {
/** Checks if a model qualifies for the context-1m beta header (Sonnet 4+, Opus 4.6+, Opus 5+) */
export function supportsContext1m(model: string): boolean {
const sonnet = parseSonnetVersion(model);
if (sonnet != null && sonnet >= 4) {
if (sonnet != null && sonnet.major >= 4) {
return true;
}
const opus = parseOpusVersion(model);

View file

@ -1135,6 +1135,7 @@ const sharedOpenAIModels = [
];
const sharedAnthropicModels = [
'claude-sonnet-4-6',
'claude-opus-4-6',
'claude-sonnet-4-5',
'claude-sonnet-4-5-20250929',
@ -1156,6 +1157,7 @@ const sharedAnthropicModels = [
];
export const bedrockModels = [
'anthropic.claude-sonnet-4-6',
'anthropic.claude-opus-4-6-v1',
'anthropic.claude-sonnet-4-5-20250929-v1:0',
'anthropic.claude-haiku-4-5-20251001-v1:0',