Merge branch 'main' into feat/E2EE

Commit 0cc0e5d287 by Ruben Talstra, 2025-02-16 10:23:22 +01:00, committed by GitHub.
16 changed files with 757 additions and 618 deletions

View file

@@ -389,7 +389,7 @@ FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
 GITHUB_CLIENT_ID=
 GITHUB_CLIENT_SECRET=
 GITHUB_CALLBACK_URL=/oauth/github/callback
-# GitHub Eenterprise
+# GitHub Enterprise
 # GITHUB_ENTERPRISE_BASE_URL=
 # GITHUB_ENTERPRISE_USER_AGENT=
@@ -527,4 +527,4 @@ HELP_AND_FAQ_URL=https://librechat.ai
 #=====================================================#
 #                     OpenWeather                     #
 #=====================================================#
 OPENWEATHER_API_KEY=

View file

@@ -2,7 +2,7 @@ const { z } = require('zod');
 const axios = require('axios');
 const { Ollama } = require('ollama');
 const { Constants } = require('librechat-data-provider');
-const { deriveBaseURL } = require('~/utils');
+const { deriveBaseURL, logAxiosError } = require('~/utils');
 const { sleep } = require('~/server/utils');
 const { logger } = require('~/config');
@@ -68,7 +68,7 @@ class OllamaClient {
     } catch (error) {
       const logMessage =
         'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).';
-      logger.error(logMessage, error);
+      logAxiosError({ message: logMessage, error });
       return [];
     }
   }
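
The switch from a bare logger.error to logAxiosError means HTTP failures are logged with their axios specifics rather than as an opaque error object. A minimal sketch of what such a helper can look like, assuming the standard AxiosError type; the actual implementation lives in ~/utils and may differ:

import { AxiosError } from 'axios';

// Sketch of a logAxiosError-style helper (hypothetical; see ~/utils for the real one).
function logAxiosError({ message, error }: { message: string; error: unknown }): void {
  if (error instanceof AxiosError) {
    // Surface the HTTP details a plain logger.error(message, error) would bury.
    console.error(message, {
      status: error.response?.status,
      data: error.response?.data,
      url: error.config?.url,
    });
    return;
  }
  console.error(message, error);
}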

View file

@@ -7,6 +7,7 @@ const {
   ImageDetail,
   EModelEndpoint,
   resolveHeaders,
+  KnownEndpoints,
   openAISettings,
   ImageDetailCost,
   CohereConstants,
@@ -116,11 +117,7 @@ class OpenAIClient extends BaseClient {
     const { reverseProxyUrl: reverseProxy } = this.options;
-    if (
-      !this.useOpenRouter &&
-      reverseProxy &&
-      reverseProxy.includes('https://openrouter.ai/api/v1')
-    ) {
+    if (!this.useOpenRouter && reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) {
       this.useOpenRouter = true;
     }
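
Replacing the hard-coded 'https://openrouter.ai/api/v1' comparison with includes(KnownEndpoints.openrouter) makes the detection match any proxy URL that contains the endpoint name. A small sketch of the behavioral difference, assuming KnownEndpoints.openrouter is the lowercase string 'openrouter':

// Assumed enum value; the real constant comes from librechat-data-provider.
const openrouter = 'openrouter';

const exactMatch = (url: string): boolean => url === 'https://openrouter.ai/api/v1';
const substringMatch = (url: string): boolean => url.includes(openrouter);

// The old check missed proxied or re-rooted OpenRouter URLs; the new one catches them.
console.log(exactMatch('https://gateway.example.com/openrouter/v1')); // false
console.log(substringMatch('https://gateway.example.com/openrouter/v1')); // true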

View file

@@ -282,4 +282,47 @@ describe('formatAgentMessages', () => {
     // Additional check to ensure the consecutive assistant messages were combined
     expect(result[1].content).toHaveLength(2);
   });
+
+  it('should skip THINK type content parts', () => {
+    const payload = [
+      {
+        role: 'assistant',
+        content: [
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Initial response' },
+          { type: ContentTypes.THINK, [ContentTypes.THINK]: 'Reasoning about the problem...' },
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
+        ],
+      },
+    ];
+
+    const result = formatAgentMessages(payload);
+
+    expect(result).toHaveLength(1);
+    expect(result[0]).toBeInstanceOf(AIMessage);
+    expect(result[0].content).toEqual('Initial response\nFinal answer');
+  });
+
+  it('should join TEXT content as string when THINK content type is present', () => {
+    const payload = [
+      {
+        role: 'assistant',
+        content: [
+          { type: ContentTypes.THINK, [ContentTypes.THINK]: 'Analyzing the problem...' },
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'First part of response' },
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Second part of response' },
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final part of response' },
+        ],
+      },
+    ];
+
+    const result = formatAgentMessages(payload);
+
+    expect(result).toHaveLength(1);
+    expect(result[0]).toBeInstanceOf(AIMessage);
+    expect(typeof result[0].content).toBe('string');
+    expect(result[0].content).toBe(
+      'First part of response\nSecond part of response\nFinal part of response',
+    );
+    expect(result[0].content).not.toContain('Analyzing the problem...');
+  });
 });

View file

@@ -153,6 +153,7 @@ const formatAgentMessages = (payload) => {
     let currentContent = [];
     let lastAIMessage = null;
+    let hasReasoning = false;

     for (const part of message.content) {
       if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
         /*
@@ -207,11 +208,25 @@ const formatAgentMessages = (payload) => {
             content: output || '',
           }),
         );
+      } else if (part.type === ContentTypes.THINK) {
+        hasReasoning = true;
+        continue;
       } else {
         currentContent.push(part);
       }
     }

+    if (hasReasoning) {
+      currentContent = currentContent
+        .reduce((acc, curr) => {
+          if (curr.type === ContentTypes.TEXT) {
+            return `${acc}${curr[ContentTypes.TEXT]}\n`;
+          }
+          return acc;
+        }, '')
+        .trim();
+    }
+
     if (currentContent.length > 0) {
       messages.push(new AIMessage({ content: currentContent }));
     }
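
Note the shape change: when reasoning was present, currentContent becomes a single joined string rather than an array of parts, which the new tests above assert; the trailing length check still works because strings also have a .length property. A standalone sketch of the flattening, assuming the ContentTypes values are the lowercase strings 'text' and 'think':

type ContentPart =
  | { type: 'text'; text: string }
  | { type: 'think'; think: string };

// Mirrors the reduce in the diff: keep only TEXT parts, join with newlines, trim.
function flattenWhenReasoning(parts: ContentPart[]): string {
  return parts
    .reduce((acc, curr) => (curr.type === 'text' ? `${acc}${curr.text}\n` : acc), '')
    .trim();
}

// Matches the expectation in the second test above:
flattenWhenReasoning([
  { type: 'think', think: 'Analyzing the problem...' },
  { type: 'text', text: 'First part of response' },
  { type: 'text', text: 'Second part of response' },
]); // => 'First part of response\nSecond part of response'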

View file

@@ -45,7 +45,7 @@
     "@langchain/google-genai": "^0.1.7",
     "@langchain/google-vertexai": "^0.1.8",
     "@langchain/textsplitters": "^0.1.0",
-    "@librechat/agents": "^2.0.5",
+    "@librechat/agents": "^2.1.2",
     "@waylaidwanderer/fetch-event-source": "^3.0.1",
     "axios": "1.7.8",
     "bcryptjs": "^2.4.3",
@@ -65,6 +65,7 @@
     "firebase": "^11.0.2",
     "googleapis": "^126.0.1",
     "handlebars": "^4.7.7",
+    "https-proxy-agent": "^7.0.6",
     "ioredis": "^5.3.2",
     "js-yaml": "^4.1.0",
     "jsonwebtoken": "^9.0.0",

View file

@@ -199,6 +199,22 @@ function getDefaultHandlers({ res, aggregateContent, toolEndCallback, collectedU
       aggregateContent({ event, data });
     },
   },
+  [GraphEvents.ON_REASONING_DELTA]: {
+    /**
+     * Handle ON_REASONING_DELTA event.
+     * @param {string} event - The event name.
+     * @param {StreamEventData} data - The event data.
+     * @param {GraphRunnableConfig['configurable']} [metadata] The runnable metadata.
+     */
+    handle: (event, data, metadata) => {
+      if (metadata?.last_agent_index === metadata?.agent_index) {
+        sendEvent(res, { event, data });
+      } else if (!metadata?.hide_sequential_outputs) {
+        sendEvent(res, { event, data });
+      }
+      aggregateContent({ event, data });
+    },
+  },
 };

 return handlers;
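
The new handler forwards reasoning deltas to the client when they come from the last agent in a sequence, or when intermediate outputs are not hidden; aggregation happens unconditionally. The if/else-if pair reduces to a single predicate, sketched here with an assumed metadata shape:

interface RunMetadata {
  agent_index?: number;
  last_agent_index?: number;
  hide_sequential_outputs?: boolean;
}

// Equivalent gate: send iff this is the final agent, or sequential outputs are visible.
function shouldSendToClient(metadata?: RunMetadata): boolean {
  return (
    metadata?.last_agent_index === metadata?.agent_index ||
    !metadata?.hide_sequential_outputs
  );
}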

View file

@@ -20,11 +20,6 @@ const {
   bedrockOutputParser,
   removeNullishValues,
 } = require('librechat-data-provider');
-const {
-  extractBaseURL,
-  // constructAzureURL,
-  // genAzureChatCompletion,
-} = require('~/utils');
 const {
   formatMessage,
   formatAgentMessages,
@@ -477,19 +472,6 @@ class AgentClient extends BaseClient {
       abortController = new AbortController();
     }

-    const baseURL = extractBaseURL(this.completionsUrl);
-    logger.debug('[api/server/controllers/agents/client.js] chatCompletion', {
-      baseURL,
-      payload,
-    });
-
-    // if (this.useOpenRouter) {
-    //   opts.defaultHeaders = {
-    //     'HTTP-Referer': 'https://librechat.ai',
-    //     'X-Title': 'LibreChat',
-    //   };
-    // }
-
     // if (this.options.headers) {
     //   opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers };
     // }
@@ -626,7 +608,7 @@ class AgentClient extends BaseClient {
       let systemContent = [
         systemMessage,
         agent.instructions ?? '',
-        i !== 0 ? agent.additional_instructions ?? '' : '',
+        i !== 0 ? (agent.additional_instructions ?? '') : '',
       ]
         .join('\n')
         .trim();

View file

@@ -1,5 +1,5 @@
 const { Run, Providers } = require('@librechat/agents');
-const { providerEndpointMap } = require('librechat-data-provider');
+const { providerEndpointMap, KnownEndpoints } = require('librechat-data-provider');

 /**
  * @typedef {import('@librechat/agents').t} t
@@ -7,6 +7,7 @@ const { providerEndpointMap } = require('librechat-data-provider');
  * @typedef {import('@librechat/agents').StreamEventData} StreamEventData
  * @typedef {import('@librechat/agents').EventHandler} EventHandler
  * @typedef {import('@librechat/agents').GraphEvents} GraphEvents
+ * @typedef {import('@librechat/agents').LLMConfig} LLMConfig
  * @typedef {import('@librechat/agents').IState} IState
  */
@@ -32,6 +33,7 @@ async function createRun({
   streamUsage = true,
 }) {
   const provider = providerEndpointMap[agent.provider] ?? agent.provider;
+  /** @type {LLMConfig} */
   const llmConfig = Object.assign(
     {
       provider,
@@ -41,6 +43,11 @@ async function createRun({
     agent.model_parameters,
   );

+  /** @type {'reasoning_content' | 'reasoning'} */
+  let reasoningKey;
+  if (llmConfig.configuration?.baseURL.includes(KnownEndpoints.openrouter)) {
+    reasoningKey = 'reasoning';
+  }
   if (/o1(?!-(?:mini|preview)).*$/.test(llmConfig.model)) {
     llmConfig.streaming = false;
     llmConfig.disableStreaming = true;
@@ -50,6 +57,7 @@ async function createRun({
   const graphConfig = {
     signal,
     llmConfig,
+    reasoningKey,
     tools: agent.tools,
     instructions: agent.instructions,
     additional_instructions: agent.additional_instructions,
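
reasoningKey tells @librechat/agents which field of a streamed delta carries reasoning tokens: OpenRouter emits them under 'reasoning', while the JSDoc union suggests 'reasoning_content' is the default. A sketch of the selection, again assuming KnownEndpoints.openrouter === 'openrouter':

type ReasoningKey = 'reasoning_content' | 'reasoning';

// OpenRouter streams reasoning under a different key than the default.
function selectReasoningKey(baseURL?: string): ReasoningKey | undefined {
  if (baseURL?.includes('openrouter')) {
    return 'reasoning';
  }
  return undefined; // leave unset so the library falls back to 'reasoning_content'
}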

View file

@@ -22,12 +22,14 @@ const { getAgent } = require('~/models/Agent');
 const { logger } = require('~/config');

 const providerConfigMap = {
+  [Providers.OLLAMA]: initCustom,
+  [Providers.DEEPSEEK]: initCustom,
+  [Providers.OPENROUTER]: initCustom,
   [EModelEndpoint.openAI]: initOpenAI,
+  [EModelEndpoint.google]: initGoogle,
   [EModelEndpoint.azureOpenAI]: initOpenAI,
   [EModelEndpoint.anthropic]: initAnthropic,
   [EModelEndpoint.bedrock]: getBedrockOptions,
-  [EModelEndpoint.google]: initGoogle,
-  [Providers.OLLAMA]: initCustom,
 };

 /**
@@ -100,8 +102,10 @@ const initializeAgentOptions = async ({
   const provider = agent.provider;
   let getOptions = providerConfigMap[provider];
-  if (!getOptions) {
+  if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
+    agent.provider = provider.toLowerCase();
+    getOptions = providerConfigMap[agent.provider];
+  } else if (!getOptions) {
     const customEndpointConfig = await getCustomEndpointConfig(provider);
     if (!customEndpointConfig) {
       throw new Error(`Provider ${provider} not supported`);
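
The added branch lets mixed-case provider names such as 'OpenRouter' resolve to the lowercase keys now registered in providerConfigMap, normalizing agent.provider along the way, before the custom-endpoint fallback runs. A minimal sketch of that lookup order:

type InitFn = (...args: unknown[]) => unknown;

function resolveProviderInit(
  configMap: Record<string, InitFn>,
  agent: { provider: string },
): InitFn | undefined {
  let getOptions = configMap[agent.provider];
  const lowered = agent.provider.toLowerCase();
  if (!getOptions && configMap[lowered] != null) {
    agent.provider = lowered; // normalize so downstream code sees the canonical key
    getOptions = configMap[lowered];
  }
  return getOptions; // undefined falls through to getCustomEndpointConfig
}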

View file

@@ -1,4 +1,5 @@
 const { HttpsProxyAgent } = require('https-proxy-agent');
+const { KnownEndpoints } = require('librechat-data-provider');
 const { sanitizeModelName, constructAzureURL } = require('~/utils');
 const { isEnabled } = require('~/server/utils');
@@ -57,10 +58,9 @@ function getLLMConfig(apiKey, options = {}) {
   /** @type {OpenAIClientOptions['configuration']} */
   const configOptions = {};

-  // Handle OpenRouter or custom reverse proxy
-  if (useOpenRouter || reverseProxyUrl === 'https://openrouter.ai/api/v1') {
-    configOptions.baseURL = 'https://openrouter.ai/api/v1';
+  if (useOpenRouter || reverseProxyUrl.includes(KnownEndpoints.openrouter)) {
+    llmConfig.include_reasoning = true;
+    configOptions.baseURL = reverseProxyUrl;
     configOptions.defaultHeaders = Object.assign(
       {
         'HTTP-Referer': 'https://librechat.ai',

View file

@@ -1,4 +1,5 @@
 const axios = require('axios');
+const { Providers } = require('@librechat/agents');
 const { HttpsProxyAgent } = require('https-proxy-agent');
 const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
 const { inputSchema, logAxiosError, extractBaseURL, processModelData } = require('~/utils');
@@ -57,7 +58,7 @@ const fetchModels = async ({
     return models;
   }

-  if (name && name.toLowerCase().startsWith('ollama')) {
+  if (name && name.toLowerCase().startsWith(Providers.OLLAMA)) {
     return await OllamaClient.fetchModels(baseURL);
   }
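
Using Providers.OLLAMA instead of the string literal keeps the prefix check in sync with the enum. Assuming the enum value is 'ollama', the check behaves like this:

const OLLAMA = 'ollama'; // assumed value of Providers.OLLAMA

const isOllamaEndpoint = (name?: string): boolean =>
  Boolean(name && name.toLowerCase().startsWith(OLLAMA));

isOllamaEndpoint('Ollama-Local'); // true
isOllamaEndpoint('OpenRouter'); // false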

View file

@@ -6,7 +6,7 @@ const getProfileDetails = ({ profile }) => ({
   id: profile.id,
   avatarUrl: profile.photos[0].value,
   username: profile.name.givenName,
-  name: `${profile.name.givenName} ${profile.name.familyName}`,
+  name: `${profile.name.givenName}${profile.name.familyName ? ` ${profile.name.familyName}` : ''}`,
   emailVerified: profile.emails[0].verified,
 });
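
The old template literal produced names like 'Jane undefined' when an OAuth profile had no family name; the conditional now appends it only when present. A quick before/after sketch with a hypothetical profile shape:

interface ProfileName {
  givenName: string;
  familyName?: string;
}

const fullName = ({ givenName, familyName }: ProfileName): string =>
  `${givenName}${familyName ? ` ${familyName}` : ''}`;

fullName({ givenName: 'Jane', familyName: 'Doe' }); // 'Jane Doe'
fullName({ givenName: 'Jane' }); // 'Jane' (previously 'Jane undefined')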

package-lock.json (generated): 1224 changes; file diff suppressed because it is too large.

View file

@@ -620,6 +620,7 @@ export const alternateName = {
   [EModelEndpoint.custom]: 'Custom',
   [EModelEndpoint.bedrock]: 'AWS Bedrock',
   [KnownEndpoints.ollama]: 'Ollama',
+  [KnownEndpoints.deepseek]: 'DeepSeek',
   [KnownEndpoints.xai]: 'xAI',
 };

View file

@@ -149,6 +149,9 @@ export const codeTypeMapping: { [key: string]: string } = {
   ts: 'application/typescript',
   tar: 'application/x-tar',
   zip: 'application/zip',
+  yml: 'application/x-yaml',
+  yaml: 'application/x-yaml',
+  log: 'text/plain',
 };

 export const retrievalMimeTypes = [