Merge branch 'main' into feat/model-spec-group-icons

Odrec 2025-12-14 03:27:05 +01:00 committed by GitHub
commit 6023ec64f5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
37 changed files with 1324 additions and 131 deletions

View file

@@ -13,9 +13,14 @@ on:
required: false
default: 'Manual publish requested'
permissions:
id-token: write # Required for OIDC trusted publishing
contents: read
jobs:
build-and-publish:
runs-on: ubuntu-latest
environment: publish # Must match npm trusted publisher config
steps:
- uses: actions/checkout@v4
@@ -23,6 +28,10 @@ jobs:
uses: actions/setup-node@v4
with:
node-version: '20.x'
registry-url: 'https://registry.npmjs.org'
- name: Update npm for OIDC support
run: npm install -g npm@latest # Must be 11.5.1+ for provenance
- name: Install client dependencies
run: cd packages/client && npm ci
@@ -30,9 +39,6 @@ jobs:
- name: Build client
run: cd packages/client && npm run build
- name: Set up npm authentication
run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc
- name: Check version change
id: check
working-directory: packages/client
@@ -55,4 +61,4 @@ jobs:
- name: Publish
if: steps.check.outputs.skip != 'true'
working-directory: packages/client
run: npm publish *.tgz --access public
run: npm publish *.tgz --access public --provenance

View file

@@ -13,6 +13,10 @@ on:
required: false
default: 'Manual publish requested'
permissions:
id-token: write # Required for OIDC trusted publishing
contents: read
jobs:
build:
runs-on: ubuntu-latest
@@ -27,14 +31,17 @@ jobs:
publish-npm:
needs: build
runs-on: ubuntu-latest
environment: publish # Must match npm trusted publisher config
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
registry-url: 'https://registry.npmjs.org'
- name: Update npm for OIDC support
run: npm install -g npm@latest # Must be 11.5.1+ for provenance
- run: cd packages/data-provider && npm ci
- run: cd packages/data-provider && npm run build
- run: cd packages/data-provider && npm publish
env:
NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
- run: cd packages/data-provider && npm publish --provenance

View file

@@ -13,9 +13,14 @@ on:
required: false
default: 'Manual publish requested'
permissions:
id-token: write # Required for OIDC trusted publishing
contents: read
jobs:
build-and-publish:
runs-on: ubuntu-latest
environment: publish # Must match npm trusted publisher config
steps:
- uses: actions/checkout@v4
@@ -23,6 +28,10 @@ jobs:
uses: actions/setup-node@v4
with:
node-version: '20.x'
registry-url: 'https://registry.npmjs.org'
- name: Update npm for OIDC support
run: npm install -g npm@latest # Must be 11.5.1+ for provenance
- name: Install dependencies
run: cd packages/data-schemas && npm ci
@@ -30,9 +39,6 @@ jobs:
- name: Build
run: cd packages/data-schemas && npm run build
- name: Set up npm authentication
run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc
- name: Check version change
id: check
working-directory: packages/data-schemas
@@ -55,4 +61,4 @@ jobs:
- name: Publish
if: steps.check.outputs.skip != 'true'
working-directory: packages/data-schemas
run: npm publish *.tgz --access public
run: npm publish *.tgz --access public --provenance

View file

@@ -1,4 +1,4 @@
# v0.8.1-rc2
# v0.8.1
# Base node image
FROM node:20-alpine AS node

View file

@@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.8.1-rc2
# v0.8.1
# Base for all builds
FROM node:20-alpine AS base-min

View file

@@ -86,7 +86,6 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
}
/**
*
* @param {import('librechat-data-provider').TFile} file
* @returns {{ file_id: string, query: string, k: number, entity_id?: string }}
*/
@@ -135,11 +134,16 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
page: docInfo.metadata.page || null,
})),
)
// TODO: results should be sorted by relevance, not distance
.sort((a, b) => a.distance - b.distance)
// TODO: make this configurable
.slice(0, 10);
if (formattedResults.length === 0) {
return [
'No content found in the files. The files may not have been processed correctly or you may need to refine your query.',
undefined,
];
}
const formattedString = formattedResults
.map(
(result, index) =>
@@ -169,11 +173,12 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
? `
**CITE FILE SEARCH RESULTS:**
Use anchor markers immediately after statements derived from file content. Reference the filename in your text:
Use the EXACT anchor markers shown below (copy them verbatim) immediately after statements derived from file content. Reference the filename in your text:
- File citation: "The document.pdf states that... \\ue202turn0file0"
- Page reference: "According to report.docx... \\ue202turn0file1"
- Multi-file: "Multiple sources confirm... \\ue200\\ue202turn0file0\\ue202turn0file1\\ue201"
**CRITICAL:** Output these escape sequences EXACTLY as shown (e.g., \\ue202turn0file0). Do NOT substitute them with other characters or similar symbols.
**ALWAYS mention the filename in your text before the citation marker. NEVER use markdown links or footnotes.**`
: ''
}`,

View file

@@ -320,19 +320,19 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
**Execute immediately without preface.** After search, provide a brief summary addressing the query directly, then structure your response with clear Markdown formatting (## headers, lists, tables). Cite sources properly, tailor tone to query type, and provide comprehensive details.
**CITATION FORMAT - INVISIBLE UNICODE ANCHORS ONLY:**
Use these Unicode characters: \\ue202 (before each anchor), \\ue200 (group start), \\ue201 (group end), \\ue203 (highlight start), \\ue204 (highlight end)
**CITATION FORMAT - UNICODE ESCAPE SEQUENCES ONLY:**
Use these EXACT escape sequences (copy verbatim): \\ue202 (before each anchor), \\ue200 (group start), \\ue201 (group end), \\ue203 (highlight start), \\ue204 (highlight end)
Anchor pattern: turn{N}{type}{index} where N=turn number, type=search|news|image|ref, index=0,1,2...
Anchor pattern: \\ue202turn{N}{type}{index} where N=turn number, type=search|news|image|ref, index=0,1,2...
**Examples:**
**Examples (copy these exactly):**
- Single: "Statement.\\ue202turn0search0"
- Multiple: "Statement.\\ue202turn0search0\\ue202turn0news1"
- Group: "Statement. \\ue200\\ue202turn0search0\\ue202turn0news1\\ue201"
- Highlight: "\\ue203Cited text.\\ue204\\ue202turn0search0"
- Image: "See photo\\ue202turn0image0."
**CRITICAL:** Place anchors AFTER punctuation. Cite every non-obvious fact/quote. NEVER use markdown links, [1], footnotes, or HTML tags.`.trim();
**CRITICAL:** Output escape sequences EXACTLY as shown. Do NOT substitute them with other symbols. Place anchors AFTER punctuation. Cite every non-obvious fact/quote. NEVER use markdown links, [1], footnotes, or HTML tags.`.trim();
return createSearchTool({
...result.authResult,
onSearchResults,

View file

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.8.1-rc2",
"version": "v0.8.1",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",

View file

@@ -429,13 +429,23 @@ router.get('/connection/status', requireJwtAuth, async (req, res) => {
const connectionStatus = {};
for (const [serverName] of Object.entries(mcpConfig)) {
connectionStatus[serverName] = await getServerConnectionStatus(
user.id,
serverName,
appConnections,
userConnections,
oauthServers,
);
try {
connectionStatus[serverName] = await getServerConnectionStatus(
user.id,
serverName,
appConnections,
userConnections,
oauthServers,
);
} catch (error) {
const message = `Failed to get status for server "${serverName}"`;
logger.error(`[MCP Connection Status] ${message},`, error);
connectionStatus[serverName] = {
connectionState: 'error',
requiresOAuth: oauthServers.has(serverName),
error: message,
};
}
}
res.json({

View file

@@ -169,14 +169,24 @@ function extractFirebaseFilePath(urlString) {
const deleteFirebaseFile = async (req, file) => {
if (file.embedded && process.env.RAG_API_URL) {
const jwtToken = req.headers.authorization.split(' ')[1];
axios.delete(`${process.env.RAG_API_URL}/documents`, {
headers: {
Authorization: `Bearer ${jwtToken}`,
'Content-Type': 'application/json',
accept: 'application/json',
},
data: [file.file_id],
});
try {
await axios.delete(`${process.env.RAG_API_URL}/documents`, {
headers: {
Authorization: `Bearer ${jwtToken}`,
'Content-Type': 'application/json',
accept: 'application/json',
},
data: [file.file_id],
});
} catch (error) {
if (error.response?.status === 404) {
logger.warn(
`[deleteFirebaseFile] Document ${file.file_id} not found in RAG API, may have been deleted already`,
);
} else {
logger.error('[deleteFirebaseFile] Error deleting document from RAG API:', error);
}
}
}
const fileName = extractFirebaseFilePath(file.filepath);

View file

@@ -210,14 +210,24 @@ const deleteLocalFile = async (req, file) => {
if (file.embedded && process.env.RAG_API_URL) {
const jwtToken = generateShortLivedToken(req.user.id);
axios.delete(`${process.env.RAG_API_URL}/documents`, {
headers: {
Authorization: `Bearer ${jwtToken}`,
'Content-Type': 'application/json',
accept: 'application/json',
},
data: [file.file_id],
});
try {
await axios.delete(`${process.env.RAG_API_URL}/documents`, {
headers: {
Authorization: `Bearer ${jwtToken}`,
'Content-Type': 'application/json',
accept: 'application/json',
},
data: [file.file_id],
});
} catch (error) {
if (error.response?.status === 404) {
logger.warn(
`[deleteLocalFile] Document ${file.file_id} not found in RAG API, may have been deleted already`,
);
} else {
logger.error('[deleteLocalFile] Error deleting document from RAG API:', error);
}
}
}
if (cleanFilepath.startsWith(`/uploads/${req.user.id}`)) {

View file

@@ -259,7 +259,7 @@
},
"packages/api": {
"name": "@librechat/api",
"version": "1.6.0",
"version": "1.7.0",
"devDependencies": {
"@babel/preset-env": "^7.21.5",
"@babel/preset-react": "^7.18.6",
@@ -326,7 +326,7 @@
},
"packages/client": {
"name": "@librechat/client",
"version": "0.4.0",
"version": "0.4.1",
"devDependencies": {
"@rollup/plugin-alias": "^5.1.0",
"@rollup/plugin-commonjs": "^29.0.0",
@@ -402,7 +402,7 @@
},
"packages/data-provider": {
"name": "librechat-data-provider",
"version": "0.8.100",
"version": "0.8.200",
"dependencies": {
"axios": "^1.12.1",
"dayjs": "^1.11.13",
@@ -440,7 +440,7 @@
},
"packages/data-schemas": {
"name": "@librechat/data-schemas",
"version": "0.0.30",
"version": "0.0.31",
"devDependencies": {
"@rollup/plugin-alias": "^5.1.0",
"@rollup/plugin-commonjs": "^29.0.0",

View file

@@ -1,4 +1,4 @@
/** v0.8.1-rc2 */
/** v0.8.1 */
module.exports = {
roots: ['<rootDir>/src'],
testEnvironment: 'jsdom',

View file

@@ -1,6 +1,6 @@
{
"name": "@librechat/frontend",
"version": "v0.8.1-rc2",
"version": "v0.8.1",
"description": "",
"type": "module",
"scripts": {

View file

@@ -4,13 +4,29 @@ import type { Citation, CitationNode } from './types';
import { SPAN_REGEX, STANDALONE_PATTERN, CLEANUP_REGEX, COMPOSITE_REGEX } from '~/utils/citations';
/**
* Checks if a standalone marker is truly standalone (not inside a composite block)
* Checks if a standalone marker is truly standalone (not inside a composite block).
* A marker is inside a composite if there's an opening \ue200 without a closing \ue201 after it.
*
* Handles both literal text format ("\ue200") and actual Unicode (U+E200) by checking
* for both and using the rightmost occurrence. This correctly handles:
* - Pure literal format: "\ue200...\ue201"
* - Pure Unicode format: "..."
* - Mixed formats: "\ue200..." (different formats for open/close)
*/
function isStandaloneMarker(text: string, position: number): boolean {
const beforeText = text.substring(0, position);
const lastUe200 = beforeText.lastIndexOf('\\ue200');
const lastUe201 = beforeText.lastIndexOf('\\ue201');
// Find rightmost composite block start (either format)
const lastUe200Literal = beforeText.lastIndexOf('\\ue200');
const lastUe200Char = beforeText.lastIndexOf('\ue200');
const lastUe200 = Math.max(lastUe200Literal, lastUe200Char);
// Find rightmost composite block end (either format)
const lastUe201Literal = beforeText.lastIndexOf('\\ue201');
const lastUe201Char = beforeText.lastIndexOf('\ue201');
const lastUe201 = Math.max(lastUe201Literal, lastUe201Char);
// Standalone if: no opening marker OR closing marker appears after opening
return lastUe200 === -1 || (lastUe201 !== -1 && lastUe201 > lastUe200);
}
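
A minimal usage sketch of the helper above (hedged: it assumes isStandaloneMarker is callable from this module, and the sample strings use the diff's own literal escape-sequence format):

// Marker inside an unclosed composite: \ue200 opened, no \ue201 yet → not standalone.
const inComposite = 'Stmt. \\ue200\\ue202turn0search0';
isStandaloneMarker(inComposite, inComposite.indexOf('\\ue202')); // false

// Once the composite is closed, a later marker is standalone again.
const closed = 'A. \\ue200\\ue202turn0search0\\ue201 B. \\ue202turn0news1';
isStandaloneMarker(closed, closed.lastIndexOf('\\ue202')); // true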

View file

@@ -129,7 +129,7 @@ function processCitations(text: string, searchResults: { [key: string]: SearchRe
// Step 1: Process highlighted text first (simplify by just making it bold in markdown)
formattedText = formattedText.replace(SPAN_REGEX, (match) => {
const text = match.replace(/\\ue203|\\ue204/g, '');
const text = match.replace(/\\ue203|\\ue204|\ue203|\ue204/g, '');
return `**${text}**`;
});

View file

@@ -493,6 +493,7 @@
"com_nav_info_save_draft": "Ja šī opcija ir iespējota, sarunas veidlapā ievadītais teksts un pielikumi tiks automātiski saglabāti lokāli kā melnraksti. Šie melnraksti būs pieejami pat tad, ja atkārtoti ielādēsiet lapu vai pārslēgsieties uz citu sarunu. Melnraksti tiek saglabāti lokāli jūsu ierīcē un tiek dzēsti, tiklīdz ziņa ir nosūtīts.",
"com_nav_info_show_thinking": "Ja šī opcija ir iespējota, sarunas pēc noklusējuma tiks atvērtas domāšanas nolaižamās izvēlnes, ļaujot reāllaikā skatīt mākslīgā intelekta spriešanu. Ja šī opcija ir atspējota, domāšanas nolaižamās izvēlnes pēc noklusējuma paliks aizvērtas, lai saskarne būtu tīrāka un vienkāršāka.",
"com_nav_info_user_name_display": "Ja šī opcija ir iespējota, sūtītāja lietotājvārds tiks rādīts virs katra jūsu nosūtītās ziņas. Ja šī opcija ir atspējota, virs ziņām redzēsiet tikai vārdu \"Jūs\".",
"com_nav_keep_screen_awake": "Atbildes ģenerēšanas laikā atstājiet ekrānu nomodā",
"com_nav_lang_arabic": "العربية",
"com_nav_lang_armenian": "Հայերեն",
"com_nav_lang_auto": "Automātiska noteikšana",
@@ -798,6 +799,7 @@
"com_ui_continue_oauth": "Turpināt ar OAuth",
"com_ui_controls": "Pārvaldība",
"com_ui_convo_delete_error": "Neizdevās izdzēst sarunu",
"com_ui_convo_delete_success": "Saruna veiksmīgi dzēsta",
"com_ui_copied": "Nokopēts!",
"com_ui_copied_to_clipboard": "Kopēts starpliktuvē",
"com_ui_copy_code": "Kopēt kodu",

View file

@@ -0,0 +1,558 @@
import {
SPAN_REGEX,
COMPOSITE_REGEX,
STANDALONE_PATTERN,
CLEANUP_REGEX,
INVALID_CITATION_REGEX,
} from '../citations';
describe('Citation Regex Patterns', () => {
beforeEach(() => {
// Reset regex lastIndex for global patterns
SPAN_REGEX.lastIndex = 0;
COMPOSITE_REGEX.lastIndex = 0;
STANDALONE_PATTERN.lastIndex = 0;
CLEANUP_REGEX.lastIndex = 0;
INVALID_CITATION_REGEX.lastIndex = 0;
});
describe('STANDALONE_PATTERN', () => {
describe('literal text format (\\ue202)', () => {
it('should match literal text search citation', () => {
const text = 'Some fact \\ue202turn0search0 here';
STANDALONE_PATTERN.lastIndex = 0;
const match = STANDALONE_PATTERN.exec(text);
expect(match).not.toBeNull();
expect(match?.[1]).toBe('0'); // turn number
expect(match?.[2]).toBe('search'); // type
expect(match?.[3]).toBe('0'); // index
});
it('should match literal text file citation', () => {
const text = 'Document says \\ue202turn0file0 (doc.pdf)';
STANDALONE_PATTERN.lastIndex = 0;
const match = STANDALONE_PATTERN.exec(text);
expect(match).not.toBeNull();
expect(match?.[1]).toBe('0');
expect(match?.[2]).toBe('file');
expect(match?.[3]).toBe('0');
});
it('should match literal text news citation', () => {
const text = 'Breaking news \\ue202turn0news1';
STANDALONE_PATTERN.lastIndex = 0;
const match = STANDALONE_PATTERN.exec(text);
expect(match).not.toBeNull();
expect(match?.[1]).toBe('0');
expect(match?.[2]).toBe('news');
expect(match?.[3]).toBe('1');
});
it('should match multiple literal text citations', () => {
const text = 'Fact one \\ue202turn0search0 and fact two \\ue202turn0file1';
const matches: RegExpExecArray[] = [];
let match: RegExpExecArray | null;
STANDALONE_PATTERN.lastIndex = 0;
while ((match = STANDALONE_PATTERN.exec(text)) !== null) {
matches.push(match);
}
expect(matches).toHaveLength(2);
expect(matches[0][2]).toBe('search');
expect(matches[1][2]).toBe('file');
});
it('should match all supported types in literal text format', () => {
const types = ['search', 'image', 'news', 'video', 'ref', 'file'];
for (const type of types) {
const text = `Test \\ue202turn0${type}0`;
STANDALONE_PATTERN.lastIndex = 0;
const match = STANDALONE_PATTERN.exec(text);
expect(match).not.toBeNull();
expect(match?.[2]).toBe(type);
}
});
});
describe('actual Unicode character format (U+E202)', () => {
it('should match actual Unicode search citation', () => {
const text = 'Some fact \ue202turn0search0 here';
STANDALONE_PATTERN.lastIndex = 0;
const match = STANDALONE_PATTERN.exec(text);
expect(match).not.toBeNull();
expect(match?.[1]).toBe('0');
expect(match?.[2]).toBe('search');
expect(match?.[3]).toBe('0');
});
it('should match actual Unicode file citation', () => {
const text = 'Document says \ue202turn0file0 (doc.pdf)';
STANDALONE_PATTERN.lastIndex = 0;
const match = STANDALONE_PATTERN.exec(text);
expect(match).not.toBeNull();
expect(match?.[1]).toBe('0');
expect(match?.[2]).toBe('file');
expect(match?.[3]).toBe('0');
});
it('should match all supported types in actual Unicode format', () => {
const types = ['search', 'image', 'news', 'video', 'ref', 'file'];
for (const type of types) {
const text = `Test \ue202turn0${type}0`;
STANDALONE_PATTERN.lastIndex = 0;
const match = STANDALONE_PATTERN.exec(text);
expect(match).not.toBeNull();
expect(match?.[2]).toBe(type);
}
});
});
describe('mixed format handling', () => {
it('should match both formats in the same text', () => {
const text = 'Literal \\ue202turn0search0 and Unicode \ue202turn0file1';
const matches: RegExpExecArray[] = [];
let match: RegExpExecArray | null;
STANDALONE_PATTERN.lastIndex = 0;
while ((match = STANDALONE_PATTERN.exec(text)) !== null) {
matches.push(match);
}
expect(matches).toHaveLength(2);
expect(matches[0][2]).toBe('search');
expect(matches[1][2]).toBe('file');
});
});
});
describe('SPAN_REGEX', () => {
it('should match literal text span markers', () => {
const text = 'Before \\ue203highlighted text\\ue204 after';
SPAN_REGEX.lastIndex = 0;
const match = SPAN_REGEX.exec(text);
expect(match).not.toBeNull();
expect(match?.[0]).toContain('highlighted text');
});
it('should match actual Unicode span markers', () => {
const text = 'Before \ue203highlighted text\ue204 after';
SPAN_REGEX.lastIndex = 0;
const match = SPAN_REGEX.exec(text);
expect(match).not.toBeNull();
expect(match?.[0]).toContain('highlighted text');
});
});
describe('COMPOSITE_REGEX', () => {
it('should match literal text composite markers', () => {
const text = 'Statement \\ue200\\ue202turn0search0\\ue202turn0news0\\ue201';
COMPOSITE_REGEX.lastIndex = 0;
const match = COMPOSITE_REGEX.exec(text);
expect(match).not.toBeNull();
});
it('should match actual Unicode composite markers', () => {
const text = 'Statement \ue200\ue202turn0search0\ue202turn0news0\ue201';
COMPOSITE_REGEX.lastIndex = 0;
const match = COMPOSITE_REGEX.exec(text);
expect(match).not.toBeNull();
});
});
describe('CLEANUP_REGEX', () => {
it('should clean up literal text markers', () => {
const text = '\\ue200\\ue201\\ue202\\ue203\\ue204\\ue206';
const cleaned = text.replace(CLEANUP_REGEX, '');
expect(cleaned).toBe('');
});
it('should clean up actual Unicode markers', () => {
const text = '\ue200\ue201\ue202\ue203\ue204\ue206';
const cleaned = text.replace(CLEANUP_REGEX, '');
expect(cleaned).toBe('');
});
it('should preserve normal text while cleaning markers', () => {
const text = 'Hello \\ue202turn0search0 world';
const cleaned = text.replace(CLEANUP_REGEX, '');
expect(cleaned).toBe('Hello turn0search0 world');
});
});
describe('INVALID_CITATION_REGEX', () => {
it('should match invalid literal text citations with leading whitespace', () => {
const text = 'Text \\ue202turn0search5';
INVALID_CITATION_REGEX.lastIndex = 0;
const match = INVALID_CITATION_REGEX.exec(text);
expect(match).not.toBeNull();
});
it('should match invalid actual Unicode citations with leading whitespace', () => {
const text = 'Text \ue202turn0search5';
INVALID_CITATION_REGEX.lastIndex = 0;
const match = INVALID_CITATION_REGEX.exec(text);
expect(match).not.toBeNull();
});
});
describe('Integration: Full Citation Processing Flow', () => {
/**
* Simulates the citation processing flow used in the markdown plugin and copy-to-clipboard
*/
const processFullCitationFlow = (text: string) => {
// Step 1: Extract highlighted spans
const spans: Array<{ content: string; position: number }> = [];
let spanMatch;
const spanRegex = new RegExp(SPAN_REGEX.source, 'g');
while ((spanMatch = spanRegex.exec(text)) !== null) {
const content = spanMatch[0].replace(/\\ue203|\\ue204|\ue203|\ue204/g, '');
spans.push({ content, position: spanMatch.index });
}
// Step 2: Extract composite blocks
const composites: Array<{ citations: string[]; position: number }> = [];
let compMatch;
const compRegex = new RegExp(COMPOSITE_REGEX.source, 'g');
while ((compMatch = compRegex.exec(text)) !== null) {
const block = compMatch[0];
const citations: string[] = [];
let citMatch;
const citRegex = new RegExp(STANDALONE_PATTERN.source, 'g');
while ((citMatch = citRegex.exec(block)) !== null) {
citations.push(`turn${citMatch[1]}${citMatch[2]}${citMatch[3]}`);
}
composites.push({ citations, position: compMatch.index });
}
// Step 3: Extract standalone citations (not in composites)
const standalones: Array<{ citation: string; position: number }> = [];
let standMatch;
const standRegex = new RegExp(STANDALONE_PATTERN.source, 'g');
while ((standMatch = standRegex.exec(text)) !== null) {
// Check if this position is inside a composite
const isInComposite = composites.some(
(c) => standMatch && standMatch.index >= c.position && standMatch.index < c.position + 50,
);
if (!isInComposite) {
standalones.push({
citation: `turn${standMatch[1]}${standMatch[2]}${standMatch[3]}`,
position: standMatch.index,
});
}
}
// Step 4: Clean up text
const cleanedText = text.replace(INVALID_CITATION_REGEX, '').replace(CLEANUP_REGEX, '');
return { spans, composites, standalones, cleanedText };
};
describe('literal text format integration', () => {
it('should process complex LLM response with multiple citation types', () => {
const llmResponse = `Here's what I found about the topic.
\\ue203This is an important quote from the source.\\ue204 \\ue202turn0search0
The data shows several key findings \\ue202turn0search1 including:
- First finding \\ue202turn0news0
- Second finding \\ue200\\ue202turn0search2\\ue202turn0file0\\ue201
For more details, see the attached document \\ue202turn0file1.`;
const result = processFullCitationFlow(llmResponse);
expect(result.spans).toHaveLength(1);
expect(result.spans[0].content).toBe('This is an important quote from the source.');
expect(result.composites).toHaveLength(1);
expect(result.composites[0].citations).toEqual(['turn0search2', 'turn0file0']);
expect(result.standalones.length).toBeGreaterThanOrEqual(3);
expect(result.cleanedText).not.toContain('\\ue202');
expect(result.cleanedText).not.toContain('\\ue200');
});
it('should handle file citations from document search', () => {
const fileSearchResponse = `Based on the document medical-anthem-blue-cross.pdf:
- **Annual deductible:** $3,300 per person \\ue202turn0file0
- **Out-of-pocket maximum:** $4,000 per person \\ue202turn0file0
- **Network:** Prudent Buyer PPO \\ue202turn0file1
Multiple sources confirm these details. \\ue200\\ue202turn0file0\\ue202turn0file1\\ue202turn0file2\\ue201`;
const result = processFullCitationFlow(fileSearchResponse);
expect(result.composites).toHaveLength(1);
expect(result.composites[0].citations).toHaveLength(3);
// Should find standalone file citations
const fileCitations = result.standalones.filter((s) => s.citation.includes('file'));
expect(fileCitations.length).toBeGreaterThanOrEqual(2);
});
});
describe('actual Unicode format integration', () => {
it('should process response with actual Unicode characters', () => {
const llmResponse = `Research findings indicate the following:
\ue203Key insight from the study.\ue204 \ue202turn0search0
Additional context \ue202turn0news0 supports this conclusion \ue200\ue202turn0search1\ue202turn0ref0\ue201.`;
const result = processFullCitationFlow(llmResponse);
expect(result.spans).toHaveLength(1);
expect(result.composites).toHaveLength(1);
expect(result.standalones.length).toBeGreaterThanOrEqual(1);
expect(result.cleanedText).not.toContain('\ue202');
});
});
describe('mixed format integration', () => {
it('should handle mixed literal and Unicode formats in same response', () => {
const mixedResponse = `First citation uses literal \\ue202turn0search0 format.
Second citation uses Unicode \ue202turn0search1 format.
Composite with mixed: \\ue200\\ue202turn0file0\ue202turn0file1\\ue201`;
const result = processFullCitationFlow(mixedResponse);
// Should find citations from both formats
expect(result.standalones.length).toBeGreaterThanOrEqual(2);
expect(result.composites).toHaveLength(1);
expect(result.composites[0].citations).toHaveLength(2);
});
});
});
describe('Performance: Regex Benchmarks', () => {
/**
* Generates a realistic citation-heavy text with specified number of citations
*/
const generateCitationHeavyText = (citationCount: number, format: 'literal' | 'unicode') => {
const marker = format === 'literal' ? '\\ue202' : '\ue202';
const spanStart = format === 'literal' ? '\\ue203' : '\ue203';
const spanEnd = format === 'literal' ? '\\ue204' : '\ue204';
const compStart = format === 'literal' ? '\\ue200' : '\ue200';
const compEnd = format === 'literal' ? '\\ue201' : '\ue201';
const types = ['search', 'news', 'file', 'ref', 'image', 'video'];
let text = '';
for (let i = 0; i < citationCount; i++) {
const type = types[i % types.length];
const turn = Math.floor(i / 10);
const index = i % 10;
if (i % 5 === 0) {
// Add highlighted text every 5th citation
text += `${spanStart}Important fact number ${i}.${spanEnd} ${marker}turn${turn}${type}${index} `;
} else if (i % 7 === 0) {
// Add composite every 7th citation
text += `Multiple sources ${compStart}${marker}turn${turn}${type}${index}${marker}turn${turn}${types[(i + 1) % types.length]}${(index + 1) % 10}${compEnd} confirm this. `;
} else {
text += `This is fact ${i} ${marker}turn${turn}${type}${index} from the research. `;
}
}
return text;
};
it('should process 100 literal citations in reasonable time (<100ms)', () => {
const text = generateCitationHeavyText(100, 'literal');
const start = performance.now();
// Run all regex operations
const results = { spans: 0, composites: 0, standalones: 0, cleaned: '' };
SPAN_REGEX.lastIndex = 0;
while (SPAN_REGEX.exec(text) !== null) {
results.spans++;
}
COMPOSITE_REGEX.lastIndex = 0;
while (COMPOSITE_REGEX.exec(text) !== null) {
results.composites++;
}
STANDALONE_PATTERN.lastIndex = 0;
while (STANDALONE_PATTERN.exec(text) !== null) {
results.standalones++;
}
results.cleaned = text.replace(CLEANUP_REGEX, '');
const duration = performance.now() - start;
expect(duration).toBeLessThan(100);
expect(results.standalones).toBeGreaterThan(80); // Most should be standalone
expect(results.spans).toBeGreaterThan(10); // Some highlighted
expect(results.composites).toBeGreaterThan(5); // Some composites
});
it('should process 100 Unicode citations in reasonable time (<100ms)', () => {
const text = generateCitationHeavyText(100, 'unicode');
const start = performance.now();
const results = { spans: 0, composites: 0, standalones: 0, cleaned: '' };
SPAN_REGEX.lastIndex = 0;
while (SPAN_REGEX.exec(text) !== null) {
results.spans++;
}
COMPOSITE_REGEX.lastIndex = 0;
while (COMPOSITE_REGEX.exec(text) !== null) {
results.composites++;
}
STANDALONE_PATTERN.lastIndex = 0;
while (STANDALONE_PATTERN.exec(text) !== null) {
results.standalones++;
}
results.cleaned = text.replace(CLEANUP_REGEX, '');
const duration = performance.now() - start;
expect(duration).toBeLessThan(100);
expect(results.standalones).toBeGreaterThan(80);
});
it('should process 500 citations without timeout (<500ms)', () => {
const text = generateCitationHeavyText(500, 'literal');
const start = performance.now();
let count = 0;
STANDALONE_PATTERN.lastIndex = 0;
while (STANDALONE_PATTERN.exec(text) !== null) {
count++;
}
const cleaned = text.replace(CLEANUP_REGEX, '');
const duration = performance.now() - start;
expect(duration).toBeLessThan(500);
expect(count).toBeGreaterThan(400);
expect(cleaned.length).toBeLessThan(text.length);
});
it('should handle mixed formats efficiently (<100ms for 100 citations)', () => {
// Generate text with alternating formats
const literalText = generateCitationHeavyText(50, 'literal');
const unicodeText = generateCitationHeavyText(50, 'unicode');
const mixedText = literalText + '\n\n' + unicodeText;
const start = performance.now();
let count = 0;
STANDALONE_PATTERN.lastIndex = 0;
while (STANDALONE_PATTERN.exec(mixedText) !== null) {
count++;
}
const duration = performance.now() - start;
expect(duration).toBeLessThan(100);
expect(count).toBeGreaterThan(80); // Should find citations from both halves
});
it('should handle repeated execution during streaming simulation (<1000ms cumulative)', () => {
/**
* Simulates the markdown plugin running repeatedly during LLM streaming.
* Each "token" adds ~10 characters, plugin runs on every update.
*/
const fullText = generateCitationHeavyText(50, 'literal');
const tokens: string[] = [];
// Simulate streaming: break text into ~100 incremental chunks
const chunkSize = Math.ceil(fullText.length / 100);
for (let i = 0; i < fullText.length; i += chunkSize) {
tokens.push(fullText.slice(0, i + chunkSize));
}
const start = performance.now();
let totalMatches = 0;
let spanCount = 0;
let compositeCount = 0;
// Simulate plugin running on each streaming update
for (const partialText of tokens) {
// Run all regex operations (simulating unicodeCitation plugin)
SPAN_REGEX.lastIndex = 0;
while (SPAN_REGEX.exec(partialText) !== null) {
spanCount++;
}
COMPOSITE_REGEX.lastIndex = 0;
while (COMPOSITE_REGEX.exec(partialText) !== null) {
compositeCount++;
}
STANDALONE_PATTERN.lastIndex = 0;
while (STANDALONE_PATTERN.exec(partialText) !== null) {
totalMatches++;
}
// Cleanup would also run
void partialText.replace(CLEANUP_REGEX, '');
}
const duration = performance.now() - start;
// 100 streaming updates processing up to 50 citations each
// Should complete in under 1 second cumulative
expect(duration).toBeLessThan(1000);
expect(totalMatches).toBeGreaterThan(1000); // Many matches across all iterations
expect(spanCount).toBeGreaterThan(0);
expect(compositeCount).toBeGreaterThan(0);
});
it('should handle rapid repeated execution (300 renders with 20 citations)', () => {
/**
* Realistic streaming scenario: 300 token updates, final text has ~20 citations
*/
const fullText = generateCitationHeavyText(20, 'literal');
const renderCount = 300;
const start = performance.now();
let totalOps = 0;
// Simulate 300 renders, each processing progressively more text
for (let i = 0; i < renderCount; i++) {
const progress = Math.min(1, (i + 1) / renderCount);
const partialText = fullText.slice(0, Math.floor(fullText.length * progress));
SPAN_REGEX.lastIndex = 0;
while (SPAN_REGEX.exec(partialText) !== null) {
totalOps++;
}
COMPOSITE_REGEX.lastIndex = 0;
while (COMPOSITE_REGEX.exec(partialText) !== null) {
totalOps++;
}
STANDALONE_PATTERN.lastIndex = 0;
while (STANDALONE_PATTERN.exec(partialText) !== null) {
totalOps++;
}
void partialText.replace(CLEANUP_REGEX, '');
}
const duration = performance.now() - start;
const avgPerRender = duration / renderCount;
// Should complete all 300 renders in under 500ms total
// Average per render should be under 2ms
expect(duration).toBeLessThan(500);
expect(avgPerRender).toBeLessThan(2);
expect(totalOps).toBeGreaterThan(0);
});
});
});

View file

@@ -1,5 +1,46 @@
export const SPAN_REGEX = /(\\ue203.*?\\ue204)/g;
export const COMPOSITE_REGEX = /(\\ue200.*?\\ue201)/g;
export const STANDALONE_PATTERN = /\\ue202turn(\d+)(search|image|news|video|ref|file)(\d+)/g;
export const CLEANUP_REGEX = /\\ue200|\\ue201|\\ue202|\\ue203|\\ue204|\\ue206/g;
export const INVALID_CITATION_REGEX = /\s*\\ue202turn\d+(search|news|image|video|ref|file)\d+/g;
/**
* Citation Regex Patterns
*
* These patterns handle two formats that LLMs may output:
* 1. Literal escape sequences: "\ue202turn0search0" (backslash + "ue202" = 6 chars)
* 2. Actual Unicode characters: "turn0search0" (U+E202 = 1 char, private use area)
*
* The system instructs LLMs to output literal escape sequences, but some models
* may convert them to actual Unicode characters during text generation. These
* dual-format patterns ensure robust citation handling regardless of output format.
*
* Citation Format:
* - \ue202 / U+E202: Standalone citation marker (before each anchor)
* - \ue200 / U+E200: Composite group start
* - \ue201 / U+E201: Composite group end
* - \ue203 / U+E203: Highlight span start
* - \ue204 / U+E204: Highlight span end
*
* Anchor Pattern: turn{N}{type}{index}
* - N: Turn number (0-based)
* - type: search|image|news|video|ref|file
* - index: Result index within that type (0-based)
*
* Examples:
* - Standalone: "Statement.\ue202turn0search0"
* - Composite: "\ue200\ue202turn0search0\ue202turn0news1\ue201"
* - Highlighted: "\ue203Cited text.\ue204\ue202turn0search0"
*/
/** Matches highlighted text spans in both literal and Unicode formats */
export const SPAN_REGEX = /((?:\\ue203|\ue203).*?(?:\\ue204|\ue204))/g;
/** Matches composite citation blocks (multiple citations grouped together) */
export const COMPOSITE_REGEX = /((?:\\ue200|\ue200).*?(?:\\ue201|\ue201))/g;
/** Matches standalone citation anchors with turn, type, and index capture groups */
export const STANDALONE_PATTERN =
/(?:\\ue202|\ue202)turn(\d+)(search|image|news|video|ref|file)(\d+)/g;
/** Removes all citation marker characters from text for clean display */
export const CLEANUP_REGEX =
/\\ue200|\\ue201|\\ue202|\\ue203|\\ue204|\\ue206|\ue200|\ue201|\ue202|\ue203|\ue204|\ue206/g;
/** Matches invalid/orphaned citations (with leading whitespace) for removal */
export const INVALID_CITATION_REGEX =
/\s*(?:\\ue202|\ue202)turn\d+(search|news|image|video|ref|file)\d+/g;
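
For illustration, a short TypeScript sketch of the dual-format behavior these exports provide (mirroring the tests above; the import path is the one used at the top of the earlier plugin diff, and the pattern is reset before reuse because it is a global regex):

import { STANDALONE_PATTERN, CLEANUP_REGEX } from '~/utils/citations';

// Literal escape-sequence form (six characters: backslash + "ue202").
STANDALONE_PATTERN.lastIndex = 0;
console.log(STANDALONE_PATTERN.test('Fact.\\ue202turn0search0')); // true

// Actual private-use character form (one character: U+E202).
STANDALONE_PATTERN.lastIndex = 0;
console.log(STANDALONE_PATTERN.test('Fact.\ue202turn0search0')); // true

// CLEANUP_REGEX strips the marker characters but keeps the anchor text.
console.log('Hi \\ue202turn0search0'.replace(CLEANUP_REGEX, '')); // "Hi turn0search0"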

View file

@@ -1,3 +1,3 @@
// v0.8.1-rc2
// v0.8.1
// See .env.test.example for an example of the '.env.test' file.
require('dotenv').config({ path: './e2e/.env.test' });

View file

@@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.9.3
version: 1.9.4
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -23,7 +23,7 @@ version: 1.9.3
# It is recommended to use it with quotes.
# renovate: image=ghcr.io/danny-avila/librechat
appVersion: "v0.8.1-rc2"
appVersion: "v0.8.1"
home: https://www.librechat.ai

package-lock.json (generated, 61 lines changed)
View file

@@ -1,12 +1,12 @@
{
"name": "LibreChat",
"version": "v0.8.1-rc2",
"version": "v0.8.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "LibreChat",
"version": "v0.8.1-rc2",
"version": "v0.8.1",
"license": "ISC",
"workspaces": [
"api",
@@ -45,7 +45,7 @@
},
"api": {
"name": "@librechat/backend",
"version": "v0.8.1-rc2",
"version": "v0.8.1",
"license": "ISC",
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0",
@@ -446,7 +446,7 @@
},
"client": {
"name": "@librechat/frontend",
"version": "v0.8.1-rc2",
"version": "v0.8.1",
"license": "ISC",
"dependencies": {
"@ariakit/react": "^0.4.15",
@@ -26567,7 +26567,8 @@
"node_modules/buffer-equal-constant-time": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
"integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="
"integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==",
"license": "BSD-3-Clause"
},
"node_modules/buffer-from": {
"version": "1.1.2",
@@ -28028,9 +28029,9 @@
"license": "MIT"
},
"node_modules/debug": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz",
"integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==",
"version": "4.4.3",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
"integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
@@ -34392,21 +34393,23 @@
}
},
"node_modules/jsonwebtoken/node_modules/jwa": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz",
"integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==",
"version": "1.4.2",
"resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.2.tgz",
"integrity": "sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw==",
"license": "MIT",
"dependencies": {
"buffer-equal-constant-time": "1.0.1",
"buffer-equal-constant-time": "^1.0.1",
"ecdsa-sig-formatter": "1.0.11",
"safe-buffer": "^5.0.1"
}
},
"node_modules/jsonwebtoken/node_modules/jws": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz",
"integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==",
"version": "3.2.3",
"resolved": "https://registry.npmjs.org/jws/-/jws-3.2.3.tgz",
"integrity": "sha512-byiJ0FLRdLdSVSReO/U4E7RoEyOCKnEnEPMjq3HxWtvzLsV08/i5RQKsFVNkCldrCaPr2vDNAOMsfs8T/Hze7g==",
"license": "MIT",
"dependencies": {
"jwa": "^1.4.1",
"jwa": "^1.4.2",
"safe-buffer": "^5.0.1"
}
},
@@ -34426,11 +34429,12 @@
}
},
"node_modules/jwa": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz",
"integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz",
"integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==",
"license": "MIT",
"dependencies": {
"buffer-equal-constant-time": "1.0.1",
"buffer-equal-constant-time": "^1.0.1",
"ecdsa-sig-formatter": "1.0.11",
"safe-buffer": "^5.0.1"
}
@@ -34477,11 +34481,12 @@
}
},
"node_modules/jws": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz",
"integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==",
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/jws/-/jws-4.0.1.tgz",
"integrity": "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA==",
"license": "MIT",
"dependencies": {
"jwa": "^2.0.0",
"jwa": "^2.0.1",
"safe-buffer": "^5.0.1"
}
},
@@ -46236,7 +46241,7 @@
},
"packages/api": {
"name": "@librechat/api",
"version": "1.6.0",
"version": "1.7.0",
"license": "ISC",
"devDependencies": {
"@babel/preset-env": "^7.21.5",
@@ -46342,7 +46347,7 @@
},
"packages/client": {
"name": "@librechat/client",
"version": "0.4.0",
"version": "0.4.1",
"devDependencies": {
"@rollup/plugin-alias": "^5.1.0",
"@rollup/plugin-commonjs": "^29.0.0",
@@ -46585,7 +46590,7 @@
},
"packages/data-provider": {
"name": "librechat-data-provider",
"version": "0.8.100",
"version": "0.8.200",
"license": "ISC",
"dependencies": {
"axios": "^1.12.1",
@@ -46644,7 +46649,7 @@
},
"packages/data-schemas": {
"name": "@librechat/data-schemas",
"version": "0.0.30",
"version": "0.0.31",
"license": "MIT",
"devDependencies": {
"@rollup/plugin-alias": "^5.1.0",

View file

@@ -1,6 +1,6 @@
{
"name": "LibreChat",
"version": "v0.8.1-rc2",
"version": "v0.8.1",
"description": "",
"workspaces": [
"api",

View file

@@ -1,6 +1,6 @@
{
"name": "@librechat/api",
"version": "1.6.0",
"version": "1.7.0",
"type": "commonjs",
"description": "MCP services for LibreChat",
"main": "dist/index.js",

View file

@@ -5,6 +5,7 @@ import { mcpServersRegistry as serversRegistry } from '~/mcp/registry/MCPServers
import { MCPConnection } from './connection';
import type * as t from './types';
import { ConnectionsRepository } from '~/mcp/ConnectionsRepository';
import { mcpConfig } from './mcpConfig';
/**
* Abstract base class for managing user-specific MCP connections with lifecycle management.
@@ -20,7 +21,6 @@ export abstract class UserConnectionManager {
protected userConnections: Map<string, Map<string, MCPConnection>> = new Map();
/** Last activity timestamp for users (not per server) */
protected userLastActivity: Map<string, number> = new Map();
protected readonly USER_CONNECTION_IDLE_TIMEOUT = 15 * 60 * 1000; // 15 minutes (TODO: make configurable)
/** Updates the last activity timestamp for a user */
protected updateUserLastActivity(userId: string): void {
@@ -67,7 +67,7 @@ export abstract class UserConnectionManager {
// Check if user is idle
const lastActivity = this.userLastActivity.get(userId);
if (lastActivity && now - lastActivity > this.USER_CONNECTION_IDLE_TIMEOUT) {
if (lastActivity && now - lastActivity > mcpConfig.USER_CONNECTION_IDLE_TIMEOUT) {
logger.info(`[MCP][User: ${userId}] User idle for too long. Disconnecting all connections.`);
// Disconnect all user connections
try {
@@ -217,7 +217,7 @@ export abstract class UserConnectionManager {
if (currentUserId && currentUserId === userId) {
continue;
}
if (now - lastActivity > this.USER_CONNECTION_IDLE_TIMEOUT) {
if (now - lastActivity > mcpConfig.USER_CONNECTION_IDLE_TIMEOUT) {
logger.info(
`[MCP][User: ${userId}] User idle for too long. Disconnecting all connections...`,
);

View file

@@ -25,7 +25,7 @@ describe('OAuth Detection Integration Tests', () => {
name: 'GitHub Copilot MCP Server',
url: 'https://api.githubcopilot.com/mcp',
expectedOAuth: true,
expectedMethod: '401-challenge-metadata',
expectedMethod: 'protected-resource-metadata',
withMeta: true,
},
{
@@ -42,6 +42,13 @@ describe('OAuth Detection Integration Tests', () => {
expectedMethod: 'protected-resource-metadata',
withMeta: true,
},
{
name: 'StackOverflow MCP (HEAD=405, POST=401+Bearer)',
url: 'https://mcp.stackoverflow.com',
expectedOAuth: true,
expectedMethod: '401-challenge-metadata',
withMeta: false,
},
{
name: 'HTTPBin (Non-OAuth)',
url: 'https://httpbin.org',

View file

@@ -992,4 +992,147 @@ describe('MCPOAuthHandler - Configurable OAuth Metadata', () => {
expect(headers.get('foo')).toBe('bar');
});
});
describe('Fallback OAuth Metadata (Legacy Server Support)', () => {
const originalFetch = global.fetch;
const mockFetch = jest.fn();
beforeEach(() => {
jest.clearAllMocks();
global.fetch = mockFetch as unknown as typeof fetch;
});
afterAll(() => {
global.fetch = originalFetch;
});
it('should use fallback metadata when discoverAuthorizationServerMetadata returns undefined', async () => {
// Mock resource metadata discovery to fail
mockDiscoverOAuthProtectedResourceMetadata.mockRejectedValueOnce(
new Error('No resource metadata'),
);
// Mock authorization server metadata discovery to return undefined (no .well-known)
mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce(undefined);
// Mock client registration to succeed
mockRegisterClient.mockResolvedValueOnce({
client_id: 'dynamic-client-id',
client_secret: 'dynamic-client-secret',
redirect_uris: ['http://localhost:3080/api/mcp/test-server/oauth/callback'],
});
// Mock startAuthorization to return a successful response
mockStartAuthorization.mockResolvedValueOnce({
authorizationUrl: new URL('https://mcp.example.com/authorize?client_id=dynamic-client-id'),
codeVerifier: 'test-code-verifier',
});
await MCPOAuthHandler.initiateOAuthFlow(
'test-server',
'https://mcp.example.com',
'user-123',
{},
undefined,
);
// Verify registerClient was called with fallback metadata
expect(mockRegisterClient).toHaveBeenCalledWith(
'https://mcp.example.com/',
expect.objectContaining({
metadata: expect.objectContaining({
issuer: 'https://mcp.example.com/',
authorization_endpoint: 'https://mcp.example.com/authorize',
token_endpoint: 'https://mcp.example.com/token',
registration_endpoint: 'https://mcp.example.com/register',
response_types_supported: ['code'],
grant_types_supported: ['authorization_code', 'refresh_token'],
code_challenge_methods_supported: ['S256', 'plain'],
token_endpoint_auth_methods_supported: [
'client_secret_basic',
'client_secret_post',
'none',
],
}),
}),
);
});
it('should use fallback /token endpoint for refresh when metadata discovery fails', async () => {
const metadata = {
serverName: 'test-server',
serverUrl: 'https://mcp.example.com',
clientInfo: {
client_id: 'test-client-id',
client_secret: 'test-client-secret',
},
};
// Mock metadata discovery to return undefined (no .well-known)
mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce(undefined);
// Mock successful token refresh
mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
}),
} as Response);
const result = await MCPOAuthHandler.refreshOAuthTokens(
'test-refresh-token',
metadata,
{},
{},
);
// Verify fetch was called with fallback /token endpoint
expect(mockFetch).toHaveBeenCalledWith(
'https://mcp.example.com/token',
expect.objectContaining({
method: 'POST',
}),
);
expect(result.access_token).toBe('new-access-token');
});
it('should use fallback auth methods when metadata discovery fails during refresh', async () => {
const metadata = {
serverName: 'test-server',
serverUrl: 'https://mcp.example.com',
clientInfo: {
client_id: 'test-client-id',
client_secret: 'test-client-secret',
},
};
// Mock metadata discovery to return undefined
mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce(undefined);
// Mock successful token refresh
mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
expires_in: 3600,
}),
} as Response);
await MCPOAuthHandler.refreshOAuthTokens('test-refresh-token', metadata, {}, {});
// Verify it uses client_secret_basic (first in fallback auth methods)
const expectedAuth = `Basic ${Buffer.from('test-client-id:test-client-secret').toString('base64')}`;
expect(mockFetch).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
headers: expect.objectContaining({
Authorization: expectedAuth,
}),
}),
);
});
});
});

View file

@@ -8,4 +8,6 @@ export const mcpConfig = {
OAUTH_ON_AUTH_ERROR: isEnabled(process.env.MCP_OAUTH_ON_AUTH_ERROR ?? true),
OAUTH_DETECTION_TIMEOUT: math(process.env.MCP_OAUTH_DETECTION_TIMEOUT ?? 5000),
CONNECTION_CHECK_TTL: math(process.env.MCP_CONNECTION_CHECK_TTL ?? 60000),
/** Idle timeout (ms) after which user connections are disconnected. Default: 15 minutes */
USER_CONNECTION_IDLE_TIMEOUT: math(process.env.MCP_USER_CONNECTION_IDLE_TIMEOUT ?? 15 * 60 * 1000),
};
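
As a usage note (a sketch assuming the relative import matches the UserConnectionManager diff above), the timeout now resolves from the environment at module load, with the 15-minute default otherwise:

import { mcpConfig } from './mcpConfig';

// With MCP_USER_CONNECTION_IDLE_TIMEOUT unset, the default applies:
console.log(mcpConfig.USER_CONNECTION_IDLE_TIMEOUT); // 900000 (15 minutes)

// Deployments can override it without code changes, e.g.:
// MCP_USER_CONNECTION_IDLE_TIMEOUT=300000 → idle users disconnect after 5 minutes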

View file

@@ -0,0 +1,267 @@
import { detectOAuthRequirement } from './detectOAuth';
jest.mock('@modelcontextprotocol/sdk/client/auth.js', () => ({
discoverOAuthProtectedResourceMetadata: jest.fn(),
}));
import { discoverOAuthProtectedResourceMetadata } from '@modelcontextprotocol/sdk/client/auth.js';
const mockDiscoverOAuthProtectedResourceMetadata =
discoverOAuthProtectedResourceMetadata as jest.MockedFunction<
typeof discoverOAuthProtectedResourceMetadata
>;
describe('detectOAuthRequirement', () => {
const originalFetch = global.fetch;
const mockFetch = jest.fn() as unknown as jest.MockedFunction<typeof fetch>;
beforeEach(() => {
jest.clearAllMocks();
global.fetch = mockFetch;
mockDiscoverOAuthProtectedResourceMetadata.mockRejectedValue(
new Error('No protected resource metadata'),
);
});
afterAll(() => {
global.fetch = originalFetch;
});
describe('POST fallback when HEAD fails', () => {
it('should try POST when HEAD returns 405 Method Not Allowed', async () => {
// HEAD returns 405 (Method Not Allowed)
mockFetch.mockResolvedValueOnce({
status: 405,
headers: new Headers(),
} as Response);
// POST returns 401 with Bearer
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Bearer' }),
} as Response);
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(true);
expect(result.method).toBe('401-challenge-metadata');
expect(mockFetch).toHaveBeenCalledTimes(2);
// Verify HEAD was called first
expect(mockFetch.mock.calls[0][1]).toEqual(expect.objectContaining({ method: 'HEAD' }));
// Verify POST was called second with proper headers and body
expect(mockFetch.mock.calls[1][1]).toEqual(
expect.objectContaining({
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({}),
}),
);
});
it('should try POST when HEAD returns non-401 status', async () => {
// HEAD returns 200 OK (no auth required for HEAD)
mockFetch.mockResolvedValueOnce({
status: 200,
headers: new Headers(),
} as Response);
// POST returns 401 with Bearer
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Bearer' }),
} as Response);
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(true);
expect(mockFetch).toHaveBeenCalledTimes(2);
});
it('should not try POST if HEAD returns 401', async () => {
// HEAD returns 401 with Bearer
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Bearer' }),
} as Response);
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(true);
// Only HEAD should be called since it returned 401
expect(mockFetch).toHaveBeenCalledTimes(1);
});
});
describe('Bearer detection without resource_metadata URL', () => {
it('should detect OAuth when 401 has WWW-Authenticate: Bearer (case insensitive)', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'bearer' }),
} as Response);
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(true);
expect(result.method).toBe('401-challenge-metadata');
expect(result.metadata).toBeNull();
});
it('should detect OAuth when 401 has WWW-Authenticate: BEARER (uppercase)', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'BEARER' }),
} as Response);
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(true);
expect(result.method).toBe('401-challenge-metadata');
});
it('should detect OAuth when Bearer is part of a larger header value', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Bearer realm="api"' }),
} as Response);
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(true);
});
it('should not detect OAuth when 401 has no WWW-Authenticate header', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers(),
} as Response);
// POST also returns 401 without header
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers(),
} as Response);
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(false);
expect(result.method).toBe('no-metadata-found');
});
it('should not detect OAuth when 401 has non-Bearer auth scheme', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Basic realm="api"' }),
} as Response);
// POST also returns 401 with Basic
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Basic realm="api"' }),
} as Response);
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(false);
});
});
describe('resource_metadata URL in WWW-Authenticate', () => {
it('should prefer resource_metadata URL when provided with Bearer', async () => {
const metadataUrl = 'https://auth.example.com/.well-known/oauth-protected-resource';
mockFetch
// HEAD request - 401 with resource_metadata URL
.mockResolvedValueOnce({
status: 401,
headers: new Headers({
'www-authenticate': `Bearer resource_metadata="${metadataUrl}"`,
}),
} as Response)
// Metadata fetch
.mockResolvedValueOnce({
ok: true,
json: async () => ({
authorization_servers: ['https://auth.example.com'],
}),
} as Response);
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(true);
expect(result.method).toBe('401-challenge-metadata');
expect(result.metadata).toEqual({
authorization_servers: ['https://auth.example.com'],
});
});
it('should fall back to Bearer detection if metadata fetch fails', async () => {
const metadataUrl = 'https://auth.example.com/.well-known/oauth-protected-resource';
mockFetch
// HEAD request - 401 with resource_metadata URL
.mockResolvedValueOnce({
status: 401,
headers: new Headers({
'www-authenticate': `Bearer resource_metadata="${metadataUrl}"`,
}),
} as Response)
// Metadata fetch fails
.mockRejectedValueOnce(new Error('Network error'));
const result = await detectOAuthRequirement('https://mcp.example.com');
// Should still detect OAuth via Bearer
expect(result.requiresOAuth).toBe(true);
expect(result.metadata).toBeNull();
});
});
describe('StackOverflow-like server behavior', () => {
it('should detect OAuth for servers that return 405 for HEAD and 401+Bearer for POST', async () => {
// This mimics StackOverflow's actual behavior:
// HEAD -> 405 Method Not Allowed
// POST -> 401 with WWW-Authenticate: Bearer
mockFetch
// HEAD returns 405
.mockResolvedValueOnce({
status: 405,
headers: new Headers(),
} as Response)
// POST returns 401 with Bearer
.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Bearer' }),
} as Response);
const result = await detectOAuthRequirement('https://mcp.stackoverflow.com');
expect(result.requiresOAuth).toBe(true);
expect(result.method).toBe('401-challenge-metadata');
expect(result.metadata).toBeNull();
});
});
describe('error handling', () => {
it('should return no OAuth required when all checks fail', async () => {
mockFetch.mockRejectedValue(new Error('Network error'));
const result = await detectOAuthRequirement('https://unreachable.example.com');
expect(result.requiresOAuth).toBe(false);
expect(result.method).toBe('no-metadata-found');
});
it('should handle timeout gracefully', async () => {
mockFetch.mockImplementation(
() => new Promise((_, reject) => setTimeout(() => reject(new Error('Timeout')), 100)),
);
const result = await detectOAuthRequirement('https://slow.example.com');
expect(result.requiresOAuth).toBe(false);
});
});
});

View file

@@ -66,32 +66,81 @@ async function checkProtectedResourceMetadata(
}
}
// Checks for OAuth using 401 challenge with resource metadata URL
/**
* Checks for OAuth using 401 challenge with resource metadata URL or Bearer token.
* Tries HEAD first, then falls back to POST if HEAD doesn't return 401.
* Some servers (like StackOverflow) only return 401 for POST requests.
*/
async function check401ChallengeMetadata(serverUrl: string): Promise<OAuthDetectionResult | null> {
// Try HEAD first (lighter weight)
const headResult = await check401WithMethod(serverUrl, 'HEAD');
if (headResult) return headResult;
// Fall back to POST if HEAD didn't return 401 (some servers don't support HEAD)
const postResult = await check401WithMethod(serverUrl, 'POST');
if (postResult) return postResult;
return null;
}
async function check401WithMethod(
serverUrl: string,
method: 'HEAD' | 'POST',
): Promise<OAuthDetectionResult | null> {
try {
const response = await fetch(serverUrl, {
method: 'HEAD',
const fetchOptions: RequestInit = {
method,
signal: AbortSignal.timeout(mcpConfig.OAUTH_DETECTION_TIMEOUT),
});
};
// POST requests need headers and body for MCP servers
if (method === 'POST') {
fetchOptions.headers = { 'Content-Type': 'application/json' };
fetchOptions.body = JSON.stringify({});
}
const response = await fetch(serverUrl, fetchOptions);
if (response.status !== 401) return null;
const wwwAuth = response.headers.get('www-authenticate');
const metadataUrl = wwwAuth?.match(/resource_metadata="([^"]+)"/)?.[1];
if (!metadataUrl) return null;
const metadataResponse = await fetch(metadataUrl, {
signal: AbortSignal.timeout(mcpConfig.OAUTH_DETECTION_TIMEOUT),
});
const metadata = await metadataResponse.json();
if (metadataUrl) {
try {
// Try to fetch resource metadata from the provided URL
const metadataResponse = await fetch(metadataUrl, {
signal: AbortSignal.timeout(mcpConfig.OAUTH_DETECTION_TIMEOUT),
});
const metadata = await metadataResponse.json();
if (!metadata?.authorization_servers?.length) return null;
if (metadata?.authorization_servers?.length) {
return {
requiresOAuth: true,
method: '401-challenge-metadata',
metadata,
};
}
} catch {
// Metadata fetch failed, continue to Bearer check below
}
}
return {
requiresOAuth: true,
method: '401-challenge-metadata',
metadata,
};
/**
* If we got a 401 with WWW-Authenticate containing "Bearer" (case-insensitive),
* the server requires OAuth authentication even without discovery metadata.
* This handles "legacy" OAuth servers (like StackOverflow's MCP) that use standard
* OAuth endpoints (/authorize, /token, /register) without .well-known metadata.
*/
if (wwwAuth && /bearer/i.test(wwwAuth)) {
return {
requiresOAuth: true,
method: '401-challenge-metadata',
metadata: null,
};
}
return null;
} catch {
return null;
}
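
For context, a hedged sketch of the detection order this function enables (the entry point and result shapes match the tests earlier in this commit):

// 1. Protected-resource metadata (.well-known) → 'protected-resource-metadata'
// 2. HEAD request expecting 401 + WWW-Authenticate → '401-challenge-metadata'
// 3. POST fallback for servers that reject HEAD (e.g., 405) → '401-challenge-metadata'
// 4. Nothing found → { requiresOAuth: false, method: 'no-metadata-found' }
const result = await detectOAuthRequirement('https://mcp.stackoverflow.com');
// → { requiresOAuth: true, method: '401-challenge-metadata', metadata: null }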

View file

@@ -93,10 +93,37 @@ export class MCPOAuthHandler {
});
if (!rawMetadata) {
logger.error(
`[MCPOAuth] Failed to discover OAuth metadata from ${sanitizeUrlForLogging(authServerUrl)}`,
/**
* No metadata discovered - create fallback metadata using default OAuth endpoint paths.
* This mirrors the MCP SDK's behavior where it falls back to /authorize, /token, /register
* when metadata discovery fails (e.g., servers without .well-known endpoints).
* See: https://github.com/modelcontextprotocol/sdk/blob/main/src/client/auth.ts
*/
logger.warn(
`[MCPOAuth] No OAuth metadata discovered from ${sanitizeUrlForLogging(authServerUrl)}, using legacy fallback endpoints`,
);
throw new Error('Failed to discover OAuth metadata');
const fallbackMetadata: OAuthMetadata = {
issuer: authServerUrl.toString(),
authorization_endpoint: new URL('/authorize', authServerUrl).toString(),
token_endpoint: new URL('/token', authServerUrl).toString(),
registration_endpoint: new URL('/register', authServerUrl).toString(),
response_types_supported: ['code'],
grant_types_supported: ['authorization_code', 'refresh_token'],
code_challenge_methods_supported: ['S256', 'plain'],
token_endpoint_auth_methods_supported: [
'client_secret_basic',
'client_secret_post',
'none',
],
};
logger.debug(`[MCPOAuth] Using fallback metadata:`, fallbackMetadata);
return {
metadata: fallbackMetadata,
resourceMetadata,
authServerUrl,
};
}
logger.debug(`[MCPOAuth] OAuth metadata discovered successfully`);
@@ -562,13 +589,21 @@ export class MCPOAuthHandler {
fetchFn: this.createOAuthFetch(oauthHeaders),
});
if (!oauthMetadata) {
throw new Error('Failed to discover OAuth metadata for token refresh');
}
if (!oauthMetadata.token_endpoint) {
/**
* No metadata discovered - use fallback /token endpoint.
* This mirrors the MCP SDK's behavior for legacy servers without .well-known endpoints.
*/
logger.warn(
`[MCPOAuth] No OAuth metadata discovered for token refresh, using fallback /token endpoint`,
);
tokenUrl = new URL('/token', metadata.serverUrl).toString();
authMethods = ['client_secret_basic', 'client_secret_post', 'none'];
} else if (!oauthMetadata.token_endpoint) {
throw new Error('No token endpoint found in OAuth metadata');
} else {
tokenUrl = oauthMetadata.token_endpoint;
authMethods = oauthMetadata.token_endpoint_auth_methods_supported;
}
tokenUrl = oauthMetadata.token_endpoint;
authMethods = oauthMetadata.token_endpoint_auth_methods_supported;
}
const body = new URLSearchParams({
@@ -741,12 +776,20 @@ export class MCPOAuthHandler {
fetchFn: this.createOAuthFetch(oauthHeaders),
});
let tokenUrl: URL;
if (!oauthMetadata?.token_endpoint) {
throw new Error('No token endpoint found in OAuth metadata');
/**
* No metadata or token_endpoint discovered - use fallback /token endpoint.
* This mirrors the MCP SDK's behavior for legacy servers without .well-known endpoints.
*/
logger.warn(
`[MCPOAuth] No OAuth metadata or token endpoint found, using fallback /token endpoint`,
);
tokenUrl = new URL('/token', metadata.serverUrl);
} else {
tokenUrl = new URL(oauthMetadata.token_endpoint);
}
const tokenUrl = new URL(oauthMetadata.token_endpoint);
const body = new URLSearchParams({
grant_type: 'refresh_token',
refresh_token: refreshToken,

View file

@@ -18,6 +18,8 @@ export interface OAuthMetadata {
token_endpoint_auth_methods_supported?: string[];
/** Code challenge methods supported */
code_challenge_methods_supported?: string[];
/** Dynamic client registration endpoint (RFC 7591) */
registration_endpoint?: string;
/** Revocation endpoint */
revocation_endpoint?: string;
/** Revocation endpoint auth methods supported */

View file

@@ -1,7 +1,11 @@
{
"name": "@librechat/client",
"version": "0.4.0",
"version": "0.4.1",
"description": "React components for LibreChat",
"repository": {
"type": "git",
"url": "https://github.com/danny-avila/LibreChat"
},
"main": "dist/index.js",
"module": "dist/index.es.js",
"types": "dist/types/index.d.ts",

View file

@@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.8.100",
"version": "0.8.200",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",
@@ -30,7 +30,7 @@
},
"repository": {
"type": "git",
"url": "git+https://github.com/danny-avila/LibreChat.git"
"url": "https://github.com/danny-avila/LibreChat"
},
"author": "",
"license": "ISC",

View file

@@ -1586,7 +1586,7 @@ export enum TTSProviders {
/** Enum for app-wide constants */
export enum Constants {
/** Key for the app's version. */
VERSION = 'v0.8.1-rc2',
VERSION = 'v0.8.1',
/** Key for the Custom Config's version (librechat.yaml). */
CONFIG_VERSION = '1.3.1',
/** Standard value for the first message's `parentMessageId` value, to indicate no parent exists. */

View file

@@ -185,8 +185,8 @@ export interface MCPConnectionStatusResponse {
export interface MCPServerConnectionStatusResponse {
success: boolean;
serverName: string;
connectionStatus: string;
requiresOAuth: boolean;
connectionStatus: 'disconnected' | 'connecting' | 'connected' | 'error';
}
export interface MCPAuthValuesResponse {

View file

@@ -1,6 +1,6 @@
{
"name": "@librechat/data-schemas",
"version": "0.0.30",
"version": "0.0.31",
"description": "Mongoose schemas and models for LibreChat",
"type": "module",
"main": "dist/index.cjs",
@@ -28,7 +28,7 @@
},
"repository": {
"type": "git",
"url": "git+https://github.com/danny-avila/LibreChat.git"
"url": "https://github.com/danny-avila/LibreChat"
},
"author": "",
"license": "MIT",