Mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-02-09 02:54:23 +01:00)

Merge branch 'main' into feature/entra-id-azure-integration

Commit a7cf1ae27b: 241 changed files with 25653 additions and 3303 deletions
@@ -2,6 +2,7 @@ const crypto = require('crypto');
const fetch = require('node-fetch');
const { logger } = require('@librechat/data-schemas');
const {
countTokens,
getBalanceConfig,
extractFileContext,
encodeAndFormatAudios,

@@ -23,7 +24,6 @@ const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { checkBalance } = require('~/models/balanceMethods');
const { truncateToolCallOutputs } = require('./prompts');
const countTokens = require('~/server/utils/countTokens');
const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');

@@ -81,6 +81,7 @@ class BaseClient {
throw new Error("Method 'getCompletion' must be implemented.");
}
/** @type {sendCompletion} */
async sendCompletion() {
throw new Error("Method 'sendCompletion' must be implemented.");
}

@@ -689,8 +690,7 @@ class BaseClient {
});
}

/** @type {string|string[]|undefined} */
const completion = await this.sendCompletion(payload, opts);
const { completion, metadata } = await this.sendCompletion(payload, opts);
if (this.abortController) {
this.abortController.requestCompleted = true;
}

@@ -708,6 +708,7 @@ class BaseClient {
iconURL: this.options.iconURL,
endpoint: this.options.endpoint,
...(this.metadata ?? {}),
metadata,
};

if (typeof completion === 'string') {
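These hunks change the `sendCompletion` contract: instead of returning a bare string, a client now returns `{ completion, metadata }`, and `metadata` is merged into the saved response message. A minimal sketch of a conforming subclass, assuming that contract (`ExampleClient`, `callProvider`, the require path, and the `usage` field are illustrative, not taken from the diff):

```js
const BaseClient = require('~/app/clients/BaseClient'); // path is illustrative

// Stand-in for a real provider call; only the return shape matters for this sketch.
async function callProvider(payload, opts) {
  return { text: 'Hello from the provider', usage: { total_tokens: 42 } };
}

class ExampleClient extends BaseClient {
  /** @type {sendCompletion} */
  async sendCompletion(payload, opts) {
    const result = await callProvider(payload, opts);
    return {
      completion: result.text, // string (or string[]) with the model output, as before
      metadata: { usage: result.usage }, // optional; merged onto the saved response message
    };
  }
}
```

The FakeClient mock later in this diff follows the same shape (`{ completion: 'Mock response text', metadata: undefined }`).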
@@ -1212,8 +1213,8 @@ class BaseClient {
this.options.req,
attachments,
{
provider: this.options.agent?.provider,
endpoint: this.options.agent?.endpoint,
provider: this.options.agent?.provider ?? this.options.endpoint,
endpoint: this.options.agent?.endpoint ?? this.options.endpoint,
useResponsesApi: this.options.agent?.model_parameters?.useResponsesApi,
},
getStrategyFunctions,

@@ -1230,8 +1231,8 @@ class BaseClient {
this.options.req,
attachments,
{
provider: this.options.agent?.provider,
endpoint: this.options.agent?.endpoint,
provider: this.options.agent?.provider ?? this.options.endpoint,
endpoint: this.options.agent?.endpoint ?? this.options.endpoint,
},
getStrategyFunctions,
);

@@ -1245,8 +1246,8 @@ class BaseClient {
this.options.req,
attachments,
{
provider: this.options.agent?.provider,
endpoint: this.options.agent?.endpoint,
provider: this.options.agent?.provider ?? this.options.endpoint,
endpoint: this.options.agent?.endpoint ?? this.options.endpoint,
},
getStrategyFunctions,
);
@@ -1,3 +1,4 @@
const { getBasePath } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');

/**

@@ -32,6 +33,8 @@ function addImages(intermediateSteps, responseMessage) {
return;
}

const basePath = getBasePath();

// Correct any erroneous URLs in the responseMessage.text first
intermediateSteps.forEach((step) => {
const { observation } = step;

@@ -44,12 +47,14 @@ function addImages(intermediateSteps, responseMessage) {
return;
}
const essentialImagePath = match[0];
const fullImagePath = `${basePath}${essentialImagePath}`;

const regex = /!\[.*?\]\((.*?)\)/g;
let matchErroneous;
while ((matchErroneous = regex.exec(responseMessage.text)) !== null) {
if (matchErroneous[1] && !matchErroneous[1].startsWith('/images/')) {
responseMessage.text = responseMessage.text.replace(matchErroneous[1], essentialImagePath);
if (matchErroneous[1] && !matchErroneous[1].startsWith(`${basePath}/images/`)) {
// Replace with the full path including base path
responseMessage.text = responseMessage.text.replace(matchErroneous[1], fullImagePath);
}
}
});

@@ -61,9 +66,23 @@ function addImages(intermediateSteps, responseMessage) {
return;
}
const observedImagePath = observation.match(/!\[[^(]*\]\([^)]*\)/g);
if (observedImagePath && !responseMessage.text.includes(observedImagePath[0])) {
responseMessage.text += '\n' + observedImagePath[0];
logger.debug('[addImages] added image from intermediateSteps:', observedImagePath[0]);
if (observedImagePath) {
// Fix the image path to include base path if it doesn't already
let imageMarkdown = observedImagePath[0];
const urlMatch = imageMarkdown.match(/\(([^)]+)\)/);
if (
urlMatch &&
urlMatch[1] &&
!urlMatch[1].startsWith(`${basePath}/images/`) &&
urlMatch[1].startsWith('/images/')
) {
imageMarkdown = imageMarkdown.replace(urlMatch[1], `${basePath}${urlMatch[1]}`);
}

if (!responseMessage.text.includes(imageMarkdown)) {
responseMessage.text += '\n' + imageMarkdown;
logger.debug('[addImages] added image from intermediateSteps:', imageMarkdown);
}
}
});
}
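The tests that follow set DOMAIN_CLIENT values such as http://localhost:3080/librechat, so `getBasePath()` from `@librechat/api` appears to return the path portion of DOMAIN_CLIENT ('' for a root deployment, '/librechat' for a sub-path deployment); that reading is inferred from the tests, not stated in this hunk. A small sketch of the prefixing rule the new code applies (`withBasePath` and the sample URLs are illustrative):

```js
const { getBasePath } = require('@librechat/api');

function withBasePath(imageUrl) {
  const basePath = getBasePath();
  // Only relative /images/ URLs that are not already prefixed get the base path prepended.
  if (imageUrl.startsWith('/images/') && !imageUrl.startsWith(`${basePath}/images/`)) {
    return `${basePath}${imageUrl}`;
  }
  return imageUrl;
}

// With a '/librechat' base path: '/images/agent/img.png' -> '/librechat/images/agent/img.png'
// Absolute URLs (e.g. 'https://example.com/pic.png') and data URLs are left unchanged.
```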
@@ -74,7 +74,7 @@ describe('addImages', () => {
it('should append correctly from a real scenario', () => {
responseMessage.text =
'Here is the generated image based on your request. It depicts a surreal landscape filled with floating musical notes. The style is impressionistic, with vibrant sunset hues dominating the scene. At the center, there\'s a silhouette of a grand piano, adding a dreamy emotion to the overall image. This could serve as a unique and creative music album cover. Would you like to make any changes or generate another image?';
"Here is the generated image based on your request. It depicts a surreal landscape filled with floating musical notes. The style is impressionistic, with vibrant sunset hues dominating the scene. At the center, there's a silhouette of a grand piano, adding a dreamy emotion to the overall image. This could serve as a unique and creative music album cover. Would you like to make any changes or generate another image?";
const originalText = responseMessage.text;
const imageMarkdown = '';
intermediateSteps.push({ observation: imageMarkdown });

@@ -139,4 +139,108 @@ describe('addImages', () => {
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

describe('basePath functionality', () => {
let originalDomainClient;

beforeEach(() => {
originalDomainClient = process.env.DOMAIN_CLIENT;
});

afterEach(() => {
process.env.DOMAIN_CLIENT = originalDomainClient;
});

it('should prepend base path to image URLs when DOMAIN_CLIENT is set', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
intermediateSteps.push({ observation: '' });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should not prepend base path when image URL already has base path', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
intermediateSteps.push({ observation: '' });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should correct erroneous URLs with base path', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
responseMessage.text = '';
intermediateSteps.push({ observation: '' });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('');
});

it('should handle empty base path (root deployment)', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/';
intermediateSteps.push({ observation: '' });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should handle missing DOMAIN_CLIENT', () => {
delete process.env.DOMAIN_CLIENT;
intermediateSteps.push({ observation: '' });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should handle observation without image path match', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
intermediateSteps.push({ observation: '' });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should handle nested subdirectories in base path', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/apps/librechat';
intermediateSteps.push({ observation: '' });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should handle multiple observations with mixed base path scenarios', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
intermediateSteps.push({ observation: '' });
intermediateSteps.push({ observation: '' });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe(
'\n\n',
);
});

it('should handle complex markdown with base path', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
const complexMarkdown = `
# Document Title

Some text between images

`;
intermediateSteps.push({ observation: complexMarkdown });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should handle URLs that are already absolute', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
intermediateSteps.push({ observation: '' });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should handle data URLs', () => {
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
intermediateSteps.push({
observation:
'',
});
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe(
'\n',
);
});
});
});
@@ -130,7 +130,7 @@ describe('formatAgentMessages', () => {
content: [
{
type: ContentTypes.TEXT,
[ContentTypes.TEXT]: 'I\'ll search for that information.',
[ContentTypes.TEXT]: "I'll search for that information.",
tool_call_ids: ['search_1'],
},
{

@@ -144,7 +144,7 @@ describe('formatAgentMessages', () => {
},
{
type: ContentTypes.TEXT,
[ContentTypes.TEXT]: 'Now, I\'ll convert the temperature.',
[ContentTypes.TEXT]: "Now, I'll convert the temperature.",
tool_call_ids: ['convert_1'],
},
{

@@ -156,7 +156,7 @@ describe('formatAgentMessages', () => {
output: '23.89°C',
},
},
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here\'s your answer.' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: "Here's your answer." },
],
},
];

@@ -171,7 +171,7 @@ describe('formatAgentMessages', () => {
expect(result[4]).toBeInstanceOf(AIMessage);

// Check first AIMessage
expect(result[0].content).toBe('I\'ll search for that information.');
expect(result[0].content).toBe("I'll search for that information.");
expect(result[0].tool_calls).toHaveLength(1);
expect(result[0].tool_calls[0]).toEqual({
id: 'search_1',

@@ -187,7 +187,7 @@ describe('formatAgentMessages', () => {
);

// Check second AIMessage
expect(result[2].content).toBe('Now, I\'ll convert the temperature.');
expect(result[2].content).toBe("Now, I'll convert the temperature.");
expect(result[2].tool_calls).toHaveLength(1);
expect(result[2].tool_calls[0]).toEqual({
id: 'convert_1',

@@ -202,7 +202,7 @@ describe('formatAgentMessages', () => {

// Check final AIMessage
expect(result[4].content).toStrictEqual([
{ [ContentTypes.TEXT]: 'Here\'s your answer.', type: ContentTypes.TEXT },
{ [ContentTypes.TEXT]: "Here's your answer.", type: ContentTypes.TEXT },
]);
});

@@ -217,7 +217,7 @@ describe('formatAgentMessages', () => {
role: 'assistant',
content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'How can I help you?' }],
},
{ role: 'user', content: 'What\'s the weather?' },
{ role: 'user', content: "What's the weather?" },
{
role: 'assistant',
content: [

@@ -240,7 +240,7 @@ describe('formatAgentMessages', () => {
{
role: 'assistant',
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here\'s the weather information.' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: "Here's the weather information." },
],
},
];

@@ -265,12 +265,12 @@ describe('formatAgentMessages', () => {
{ [ContentTypes.TEXT]: 'How can I help you?', type: ContentTypes.TEXT },
]);
expect(result[2].content).toStrictEqual([
{ [ContentTypes.TEXT]: 'What\'s the weather?', type: ContentTypes.TEXT },
{ [ContentTypes.TEXT]: "What's the weather?", type: ContentTypes.TEXT },
]);
expect(result[3].content).toBe('Let me check that for you.');
expect(result[4].content).toBe('Sunny, 75°F');
expect(result[5].content).toStrictEqual([
{ [ContentTypes.TEXT]: 'Here\'s the weather information.', type: ContentTypes.TEXT },
{ [ContentTypes.TEXT]: "Here's the weather information.", type: ContentTypes.TEXT },
]);

// Check that there are no consecutive AIMessages
@@ -82,7 +82,10 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
});

TestClient.sendCompletion = jest.fn(async () => {
return 'Mock response text';
return {
completion: 'Mock response text',
metadata: undefined,
};
});

TestClient.getCompletion = jest.fn().mockImplementation(async (..._args) => {
@@ -8,6 +8,7 @@ const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { getBasePath } = require('@librechat/api');
const paths = require('~/config/paths');

const displayMessage =

@@ -36,7 +37,7 @@ class StableDiffusionAPI extends Tool {
this.description_for_model = `// Generate images and visuals using text.
// Guidelines:
// - ALWAYS use {{"prompt": "7+ detailed keywords", "negative_prompt": "7+ detailed keywords"}} structure for queries.
// - ALWAYS include the markdown url in your final response to show the user:
// - ALWAYS include the markdown url in your final response to show the user: }/images/id.png)
// - Visually describe the moods, details, structures, styles, and/or proportions of the image. Remember, the focus is on visual attributes.
// - Craft your input by "showing" and not "telling" the imagery. Think in terms of what you'd want to see in a photograph or a painting.
// - Here's an example for generating a realistic portrait photo of a man:
@@ -1,4 +1,5 @@
const { z } = require('zod');
const { ProxyAgent, fetch } = require('undici');
const { tool } = require('@langchain/core/tools');
const { getApiKey } = require('./credentials');

@@ -19,13 +20,19 @@ function createTavilySearchTool(fields = {}) {
...kwargs,
};

const response = await fetch('https://api.tavily.com/search', {
const fetchOptions = {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(requestBody),
});
};

if (process.env.PROXY) {
fetchOptions.dispatcher = new ProxyAgent(process.env.PROXY);
}

const response = await fetch('https://api.tavily.com/search', fetchOptions);

const json = await response.json();
if (!response.ok) {
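Both Tavily tools adopt the same proxy pattern: build the fetch options first, then attach an undici ProxyAgent as the `dispatcher` when a PROXY environment variable is set. A standalone sketch of that pattern (the `postJson` helper and its arguments are illustrative):

```js
const { fetch, ProxyAgent } = require('undici');

async function postJson(url, body) {
  const fetchOptions = {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  };
  if (process.env.PROXY) {
    // Route the request through the configured HTTP(S) proxy.
    fetchOptions.dispatcher = new ProxyAgent(process.env.PROXY);
  }
  return fetch(url, fetchOptions);
}
```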
@@ -1,4 +1,5 @@
const { z } = require('zod');
const { ProxyAgent, fetch } = require('undici');
const { Tool } = require('@langchain/core/tools');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');

@@ -102,13 +103,19 @@ class TavilySearchResults extends Tool {
...this.kwargs,
};

const response = await fetch('https://api.tavily.com/search', {
const fetchOptions = {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(requestBody),
});
};

if (process.env.PROXY) {
fetchOptions.dispatcher = new ProxyAgent(process.env.PROXY);
}

const response = await fetch('https://api.tavily.com/search', fetchOptions);

const json = await response.json();
if (!response.ok) {
@@ -1,6 +1,7 @@
const { fetch, ProxyAgent } = require('undici');
const TavilySearchResults = require('../TavilySearchResults');

jest.mock('node-fetch');
jest.mock('undici');
jest.mock('@langchain/core/utils/env');

describe('TavilySearchResults', () => {

@@ -13,6 +14,7 @@ describe('TavilySearchResults', () => {

beforeEach(() => {
jest.resetModules();
jest.clearAllMocks();
process.env = {
...originalEnv,
TAVILY_API_KEY: mockApiKey,

@@ -20,7 +22,6 @@ describe('TavilySearchResults', () => {
});

afterEach(() => {
jest.clearAllMocks();
process.env = originalEnv;
});

@@ -35,4 +36,49 @@ describe('TavilySearchResults', () => {
});
expect(instance.apiKey).toBe(mockApiKey);
});

describe('proxy support', () => {
const mockResponse = {
ok: true,
json: jest.fn().mockResolvedValue({ results: [] }),
};

beforeEach(() => {
fetch.mockResolvedValue(mockResponse);
});

it('should use ProxyAgent when PROXY env var is set', async () => {
const proxyUrl = 'http://proxy.example.com:8080';
process.env.PROXY = proxyUrl;

const mockProxyAgent = { type: 'proxy-agent' };
ProxyAgent.mockImplementation(() => mockProxyAgent);

const instance = new TavilySearchResults({ TAVILY_API_KEY: mockApiKey });
await instance._call({ query: 'test query' });

expect(ProxyAgent).toHaveBeenCalledWith(proxyUrl);
expect(fetch).toHaveBeenCalledWith(
'https://api.tavily.com/search',
expect.objectContaining({
dispatcher: mockProxyAgent,
}),
);
});

it('should not use ProxyAgent when PROXY env var is not set', async () => {
delete process.env.PROXY;

const instance = new TavilySearchResults({ TAVILY_API_KEY: mockApiKey });
await instance._call({ query: 'test query' });

expect(ProxyAgent).not.toHaveBeenCalled();
expect(fetch).toHaveBeenCalledWith(
'https://api.tavily.com/search',
expect.not.objectContaining({
dispatcher: expect.anything(),
}),
);
});
});
});
@@ -78,15 +78,14 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
return tool(
async ({ query }) => {
if (files.length === 0) {
return 'No files to search. Instruct the user to add files for the search.';
return ['No files to search. Instruct the user to add files for the search.', undefined];
}
const jwtToken = generateShortLivedToken(userId);
if (!jwtToken) {
return 'There was an error authenticating the file search request.';
return ['There was an error authenticating the file search request.', undefined];
}

/**
*
* @param {import('librechat-data-provider').TFile} file
* @returns {{ file_id: string, query: string, k: number, entity_id?: string }}
*/

@@ -122,7 +121,7 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
const validResults = results.filter((result) => result !== null);

if (validResults.length === 0) {
return 'No results found or errors occurred while searching the files.';
return ['No results found or errors occurred while searching the files.', undefined];
}

const formattedResults = validResults

@@ -135,11 +134,16 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
page: docInfo.metadata.page || null,
})),
)
// TODO: results should be sorted by relevance, not distance
.sort((a, b) => a.distance - b.distance)
// TODO: make this configurable
.slice(0, 10);

if (formattedResults.length === 0) {
return [
'No content found in the files. The files may not have been processed correctly or you may need to refine your query.',
undefined,
];
}

const formattedString = formattedResults
.map(
(result, index) =>

@@ -169,11 +173,12 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
? `

**CITE FILE SEARCH RESULTS:**
Use anchor markers immediately after statements derived from file content. Reference the filename in your text:
Use the EXACT anchor markers shown below (copy them verbatim) immediately after statements derived from file content. Reference the filename in your text:
- File citation: "The document.pdf states that... \\ue202turn0file0"
- Page reference: "According to report.docx... \\ue202turn0file1"
- Multi-file: "Multiple sources confirm... \\ue200\\ue202turn0file0\\ue202turn0file1\\ue201"

**CRITICAL:** Output these escape sequences EXACTLY as shown (e.g., \\ue202turn0file0). Do NOT substitute with other characters like † or similar symbols.
**ALWAYS mention the filename in your text before the citation marker. NEVER use markdown links or footnotes.**`
: ''
}`,
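The returns in these hunks change from bare strings to two-element arrays. This matches LangChain's `content_and_artifact` tool response format, where a tool returns `[content, artifact]` and the artifact can carry structured data such as citation sources; whether this tool sets `responseFormat` that way is not visible in the hunk, so treat this as the presumed intent. A self-contained sketch of that pattern (`exampleSearch`, `runSearch`, and `formatForModel` are illustrative):

```js
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');

// Stand-ins for real search and formatting logic; only the return shape matters here.
const runSearch = async (query) => [{ filename: 'example.txt', content: `Results for ${query}` }];
const formatForModel = (results) =>
  results.map((r) => `File: ${r.filename}\n${r.content}`).join('\n\n');

const exampleSearch = tool(
  async ({ query }) => {
    const results = await runSearch(query);
    if (results.length === 0) {
      // Same two-element shape as the error returns in the hunks above.
      return ['No results found.', undefined];
    }
    // Text for the model plus a structured artifact (e.g., citation sources).
    return [formatForModel(results), { sources: results }];
  },
  {
    name: 'example_search',
    description: 'Illustrative search tool returning content and an artifact',
    schema: z.object({ query: z.string() }),
    responseFormat: 'content_and_artifact',
  },
);
```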
@@ -317,14 +317,22 @@ const loadTools = async ({
requestedTools[tool] = async () => {
toolContextMap[tool] = `# \`${tool}\`:
Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
1. **Execute immediately without preface** when using \`${tool}\`.
2. **After the search, begin with a brief summary** that directly addresses the query without headers or explaining your process.
3. **Structure your response clearly** using Markdown formatting (Level 2 headers for sections, lists for multiple points, tables for comparisons).
4. **Cite sources properly** according to the citation anchor format, utilizing group anchors when appropriate.
5. **Tailor your approach to the query type** (academic, news, coding, etc.) while maintaining an expert, journalistic, unbiased tone.
6. **Provide comprehensive information** with specific details, examples, and as much relevant context as possible from search results.
7. **Avoid moralizing language.**
`.trim();

**Execute immediately without preface.** After search, provide a brief summary addressing the query directly, then structure your response with clear Markdown formatting (## headers, lists, tables). Cite sources properly, tailor tone to query type, and provide comprehensive details.

**CITATION FORMAT - UNICODE ESCAPE SEQUENCES ONLY:**
Use these EXACT escape sequences (copy verbatim): \\ue202 (before each anchor), \\ue200 (group start), \\ue201 (group end), \\ue203 (highlight start), \\ue204 (highlight end)

Anchor pattern: \\ue202turn{N}{type}{index} where N=turn number, type=search|news|image|ref, index=0,1,2...

**Examples (copy these exactly):**
- Single: "Statement.\\ue202turn0search0"
- Multiple: "Statement.\\ue202turn0search0\\ue202turn0news1"
- Group: "Statement. \\ue200\\ue202turn0search0\\ue202turn0news1\\ue201"
- Highlight: "\\ue203Cited text.\\ue204\\ue202turn0search0"
- Image: "See photo\\ue202turn0image0."

**CRITICAL:** Output escape sequences EXACTLY as shown. Do NOT substitute with † or other symbols. Place anchors AFTER punctuation. Cite every non-obvious fact/quote. NEVER use markdown links, [1], footnotes, or HTML tags.`.trim();
return createSearchTool({
...result.authResult,
onSearchResults,