mirror of https://github.com/danny-avila/LibreChat.git
synced 2025-12-23 11:50:14 +01:00

Merge branch 'main' into feat/E2EE
commit 0cc0e5d287

16 changed files with 757 additions and 618 deletions
@@ -389,7 +389,7 @@ FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
 GITHUB_CLIENT_ID=
 GITHUB_CLIENT_SECRET=
 GITHUB_CALLBACK_URL=/oauth/github/callback
-# GitHub Eenterprise
+# GitHub Enterprise
 # GITHUB_ENTERPRISE_BASE_URL=
 # GITHUB_ENTERPRISE_USER_AGENT=
 
@@ -2,7 +2,7 @@ const { z } = require('zod');
 const axios = require('axios');
 const { Ollama } = require('ollama');
 const { Constants } = require('librechat-data-provider');
-const { deriveBaseURL } = require('~/utils');
+const { deriveBaseURL, logAxiosError } = require('~/utils');
 const { sleep } = require('~/server/utils');
 const { logger } = require('~/config');
@@ -68,7 +68,7 @@ class OllamaClient {
     } catch (error) {
       const logMessage =
         'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).';
-      logger.error(logMessage, error);
+      logAxiosError({ message: logMessage, error });
       return [];
     }
   }
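Annotation: the swap above trades a raw `logger.error(logMessage, error)` dump of the full Axios error object for the condensed `logAxiosError` helper imported from `~/utils` in the previous hunk. As a hypothetical sketch of the idea only (the real helper's implementation and signature in `~/utils` may differ):

// Hypothetical sketch, not the actual ~/utils implementation.
// `logger` is assumed to be the same logger exported from ~/config.
function logAxiosError({ message, error }) {
  if (error?.response) {
    // Axios attaches the HTTP response on failure; log status and body
    // instead of the whole request/response object graph.
    logger.error(`${message} | status: ${error.response.status}`, error.response.data);
  } else {
    logger.error(message, error?.message ?? error);
  }
}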
@@ -7,6 +7,7 @@ const {
   ImageDetail,
   EModelEndpoint,
   resolveHeaders,
+  KnownEndpoints,
   openAISettings,
   ImageDetailCost,
   CohereConstants,
@@ -116,11 +117,7 @@ class OpenAIClient extends BaseClient {
 
     const { reverseProxyUrl: reverseProxy } = this.options;
 
-    if (
-      !this.useOpenRouter &&
-      reverseProxy &&
-      reverseProxy.includes('https://openrouter.ai/api/v1')
-    ) {
+    if (!this.useOpenRouter && reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) {
       this.useOpenRouter = true;
     }
 
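Annotation: besides collapsing the condition to one line, this broadens the match. Instead of requiring the exact base URL `https://openrouter.ai/api/v1`, any reverse-proxy URL containing the `KnownEndpoints.openrouter` substring now enables OpenRouter mode. A quick illustration, assuming `KnownEndpoints.openrouter === 'openrouter'`:

// Assumes KnownEndpoints.openrouter === 'openrouter' (substring match).
'https://openrouter.ai/api/v1'.includes('openrouter');               // true (old exact match still covered)
'https://my-proxy.example.com/openrouter/v1'.includes('openrouter'); // true (newly matched)
'https://api.openai.com/v1'.includes('openrouter');                  // false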
@@ -282,4 +282,47 @@ describe('formatAgentMessages', () => {
     // Additional check to ensure the consecutive assistant messages were combined
     expect(result[1].content).toHaveLength(2);
   });
+
+  it('should skip THINK type content parts', () => {
+    const payload = [
+      {
+        role: 'assistant',
+        content: [
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Initial response' },
+          { type: ContentTypes.THINK, [ContentTypes.THINK]: 'Reasoning about the problem...' },
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
+        ],
+      },
+    ];
+
+    const result = formatAgentMessages(payload);
+
+    expect(result).toHaveLength(1);
+    expect(result[0]).toBeInstanceOf(AIMessage);
+    expect(result[0].content).toEqual('Initial response\nFinal answer');
+  });
+
+  it('should join TEXT content as string when THINK content type is present', () => {
+    const payload = [
+      {
+        role: 'assistant',
+        content: [
+          { type: ContentTypes.THINK, [ContentTypes.THINK]: 'Analyzing the problem...' },
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'First part of response' },
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Second part of response' },
+          { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final part of response' },
+        ],
+      },
+    ];
+
+    const result = formatAgentMessages(payload);
+
+    expect(result).toHaveLength(1);
+    expect(result[0]).toBeInstanceOf(AIMessage);
+    expect(typeof result[0].content).toBe('string');
+    expect(result[0].content).toBe(
+      'First part of response\nSecond part of response\nFinal part of response',
+    );
+    expect(result[0].content).not.toContain('Analyzing the problem...');
+  });
 });
@@ -153,6 +153,7 @@ const formatAgentMessages = (payload) => {
     let currentContent = [];
     let lastAIMessage = null;
 
+    let hasReasoning = false;
     for (const part of message.content) {
       if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
         /*
@@ -207,11 +208,25 @@ const formatAgentMessages = (payload) => {
             content: output || '',
           }),
         );
+      } else if (part.type === ContentTypes.THINK) {
+        hasReasoning = true;
+        continue;
       } else {
        currentContent.push(part);
      }
    }

+    if (hasReasoning) {
+      currentContent = currentContent
+        .reduce((acc, curr) => {
+          if (curr.type === ContentTypes.TEXT) {
+            return `${acc}${curr[ContentTypes.TEXT]}\n`;
+          }
+          return acc;
+        }, '')
+        .trim();
+    }
+
     if (currentContent.length > 0) {
       messages.push(new AIMessage({ content: currentContent }));
     }
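Annotation: the effect of the `hasReasoning` branch above is that whenever a THINK part appears in a message, the resulting AIMessage content is flattened from an array of parts to one joined string of the TEXT parts, with the reasoning dropped (THINK parts never reach `currentContent` because of the `continue`). A minimal standalone sketch of the flattening step, assuming `ContentTypes.TEXT === 'text'` and `ContentTypes.THINK === 'think'` as the tests above suggest:

// Standalone sketch of the reduce used when reasoning is present.
const parts = [
  { type: 'text', text: 'Initial response' },
  { type: 'text', text: 'Final answer' },
];
const flattened = parts
  .reduce((acc, curr) => (curr.type === 'text' ? `${acc}${curr.text}\n` : acc), '')
  .trim();
// flattened === 'Initial response\nFinal answer'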
@@ -45,7 +45,7 @@
     "@langchain/google-genai": "^0.1.7",
     "@langchain/google-vertexai": "^0.1.8",
     "@langchain/textsplitters": "^0.1.0",
-    "@librechat/agents": "^2.0.5",
+    "@librechat/agents": "^2.1.2",
     "@waylaidwanderer/fetch-event-source": "^3.0.1",
     "axios": "1.7.8",
     "bcryptjs": "^2.4.3",
@@ -65,6 +65,7 @@
     "firebase": "^11.0.2",
     "googleapis": "^126.0.1",
     "handlebars": "^4.7.7",
+    "https-proxy-agent": "^7.0.6",
     "ioredis": "^5.3.2",
     "js-yaml": "^4.1.0",
     "jsonwebtoken": "^9.0.0",
@@ -199,6 +199,22 @@ function getDefaultHandlers({ res, aggregateContent, toolEndCallback, collectedU
       aggregateContent({ event, data });
     },
   },
+  [GraphEvents.ON_REASONING_DELTA]: {
+    /**
+     * Handle ON_REASONING_DELTA event.
+     * @param {string} event - The event name.
+     * @param {StreamEventData} data - The event data.
+     * @param {GraphRunnableConfig['configurable']} [metadata] The runnable metadata.
+     */
+    handle: (event, data, metadata) => {
+      if (metadata?.last_agent_index === metadata?.agent_index) {
+        sendEvent(res, { event, data });
+      } else if (!metadata?.hide_sequential_outputs) {
+        sendEvent(res, { event, data });
+      }
+      aggregateContent({ event, data });
+    },
+  },
   };
 
   return handlers;
@@ -20,11 +20,6 @@ const {
   bedrockOutputParser,
   removeNullishValues,
 } = require('librechat-data-provider');
-const {
-  extractBaseURL,
-  // constructAzureURL,
-  // genAzureChatCompletion,
-} = require('~/utils');
 const {
   formatMessage,
   formatAgentMessages,
@@ -477,19 +472,6 @@ class AgentClient extends BaseClient {
       abortController = new AbortController();
     }
 
-    const baseURL = extractBaseURL(this.completionsUrl);
-    logger.debug('[api/server/controllers/agents/client.js] chatCompletion', {
-      baseURL,
-      payload,
-    });
-
-    // if (this.useOpenRouter) {
-    //   opts.defaultHeaders = {
-    //     'HTTP-Referer': 'https://librechat.ai',
-    //     'X-Title': 'LibreChat',
-    //   };
-    // }
-
     // if (this.options.headers) {
     //   opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers };
     // }
|
|||
let systemContent = [
|
||||
systemMessage,
|
||||
agent.instructions ?? '',
|
||||
i !== 0 ? agent.additional_instructions ?? '' : '',
|
||||
i !== 0 ? (agent.additional_instructions ?? '') : '',
|
||||
]
|
||||
.join('\n')
|
||||
.trim();
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,5 @@
 const { Run, Providers } = require('@librechat/agents');
-const { providerEndpointMap } = require('librechat-data-provider');
+const { providerEndpointMap, KnownEndpoints } = require('librechat-data-provider');
 
 /**
  * @typedef {import('@librechat/agents').t} t
@@ -7,6 +7,7 @@ const { providerEndpointMap } = require('librechat-data-provider');
  * @typedef {import('@librechat/agents').StreamEventData} StreamEventData
  * @typedef {import('@librechat/agents').EventHandler} EventHandler
  * @typedef {import('@librechat/agents').GraphEvents} GraphEvents
+ * @typedef {import('@librechat/agents').LLMConfig} LLMConfig
  * @typedef {import('@librechat/agents').IState} IState
  */
 
@@ -32,6 +33,7 @@ async function createRun({
   streamUsage = true,
 }) {
   const provider = providerEndpointMap[agent.provider] ?? agent.provider;
+  /** @type {LLMConfig} */
   const llmConfig = Object.assign(
     {
       provider,
@@ -41,6 +43,11 @@ async function createRun({
     agent.model_parameters,
   );
 
+  /** @type {'reasoning_content' | 'reasoning'} */
+  let reasoningKey;
+  if (llmConfig.configuration?.baseURL.includes(KnownEndpoints.openrouter)) {
+    reasoningKey = 'reasoning';
+  }
   if (/o1(?!-(?:mini|preview)).*$/.test(llmConfig.model)) {
     llmConfig.streaming = false;
     llmConfig.disableStreaming = true;
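Annotation: providers disagree on the field name under which reasoning tokens are streamed, and `reasoningKey` selects it per run. Note that `llmConfig.configuration?.baseURL.includes(...)` assumes `baseURL` is defined whenever `configuration` exists; a fully defensive check would be `configuration?.baseURL?.includes(...)`. The sketch below isolates the selection logic; that the library falls back to `'reasoning_content'` when `reasoningKey` is undefined is an assumption based on the type annotation above, not confirmed here.

// Isolated sketch; assumes KnownEndpoints.openrouter === 'openrouter' and that
// @librechat/agents defaults to 'reasoning_content' when reasoningKey is undefined.
function pickReasoningKey(baseURL) {
  if (baseURL?.includes('openrouter')) {
    return 'reasoning'; // OpenRouter streams reasoning deltas under `reasoning`
  }
  return undefined; // fall back to the library default ('reasoning_content')
}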
@@ -50,6 +57,7 @@ async function createRun({
   const graphConfig = {
     signal,
     llmConfig,
+    reasoningKey,
     tools: agent.tools,
     instructions: agent.instructions,
     additional_instructions: agent.additional_instructions,
@@ -22,12 +22,14 @@ const { getAgent } = require('~/models/Agent');
 const { logger } = require('~/config');
 
 const providerConfigMap = {
+  [Providers.OLLAMA]: initCustom,
+  [Providers.DEEPSEEK]: initCustom,
+  [Providers.OPENROUTER]: initCustom,
   [EModelEndpoint.openAI]: initOpenAI,
+  [EModelEndpoint.google]: initGoogle,
   [EModelEndpoint.azureOpenAI]: initOpenAI,
   [EModelEndpoint.anthropic]: initAnthropic,
   [EModelEndpoint.bedrock]: getBedrockOptions,
-  [EModelEndpoint.google]: initGoogle,
-  [Providers.OLLAMA]: initCustom,
 };
 
 /**
@@ -100,8 +102,10 @@ const initializeAgentOptions = async ({
 
   const provider = agent.provider;
   let getOptions = providerConfigMap[provider];
-
-  if (!getOptions) {
+  if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
+    agent.provider = provider.toLowerCase();
+    getOptions = providerConfigMap[agent.provider];
+  } else if (!getOptions) {
     const customEndpointConfig = await getCustomEndpointConfig(provider);
     if (!customEndpointConfig) {
       throw new Error(`Provider ${provider} not supported`);
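Annotation: the new branch makes the provider lookup tolerant of casing before falling through to custom-endpoint resolution. A self-contained sketch, assuming the map's keys are lowercase (mirroring `Providers.OLLAMA` / `Providers.OPENROUTER` as `'ollama'` / `'openrouter'`, an assumption):

// Self-contained sketch; initCustom stands in for the real initializer.
const initCustom = () => ({});
const providerConfigMap = { ollama: initCustom, openrouter: initCustom };

function resolveGetOptions(provider) {
  let getOptions = providerConfigMap[provider];
  if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
    // An agent saved with provider 'OpenRouter' still resolves to initCustom
    getOptions = providerConfigMap[provider.toLowerCase()];
  }
  return getOptions; // undefined falls through to getCustomEndpointConfig(provider)
}

resolveGetOptions('OpenRouter'); // initCustom
resolveGetOptions('my-endpoint'); // undefined, so the custom endpoint path runs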
@@ -1,4 +1,5 @@
 const { HttpsProxyAgent } = require('https-proxy-agent');
+const { KnownEndpoints } = require('librechat-data-provider');
 const { sanitizeModelName, constructAzureURL } = require('~/utils');
 const { isEnabled } = require('~/server/utils');
 
@@ -57,10 +58,9 @@ function getLLMConfig(apiKey, options = {}) {
 
   /** @type {OpenAIClientOptions['configuration']} */
   const configOptions = {};
-
-  // Handle OpenRouter or custom reverse proxy
-  if (useOpenRouter || reverseProxyUrl === 'https://openrouter.ai/api/v1') {
-    configOptions.baseURL = 'https://openrouter.ai/api/v1';
+  if (useOpenRouter || reverseProxyUrl.includes(KnownEndpoints.openrouter)) {
+    llmConfig.include_reasoning = true;
+    configOptions.baseURL = reverseProxyUrl;
     configOptions.defaultHeaders = Object.assign(
       {
         'HTTP-Referer': 'https://librechat.ai',
@@ -1,4 +1,5 @@
 const axios = require('axios');
+const { Providers } = require('@librechat/agents');
 const { HttpsProxyAgent } = require('https-proxy-agent');
 const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
 const { inputSchema, logAxiosError, extractBaseURL, processModelData } = require('~/utils');
@@ -57,7 +58,7 @@ const fetchModels = async ({
     return models;
   }
 
-  if (name && name.toLowerCase().startsWith('ollama')) {
+  if (name && name.toLowerCase().startsWith(Providers.OLLAMA)) {
     return await OllamaClient.fetchModels(baseURL);
   }
 
@@ -6,7 +6,7 @@ const getProfileDetails = ({ profile }) => ({
   id: profile.id,
   avatarUrl: profile.photos[0].value,
   username: profile.name.givenName,
-  name: `${profile.name.givenName} ${profile.name.familyName}`,
+  name: `${profile.name.givenName}${profile.name.familyName ? ` ${profile.name.familyName}` : ''}`,
   emailVerified: profile.emails[0].verified,
 });
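Annotation: the template-literal change fixes the display name for OAuth profiles that carry no family name, where the old template interpolated `undefined`. Illustrated with hypothetical profile objects:

// Hypothetical profiles illustrating the fixed template:
const full = { name: { givenName: 'Ada', familyName: 'Lovelace' } };
const solo = { name: { givenName: 'Ada' } };
const fmt = (p) => `${p.name.givenName}${p.name.familyName ? ` ${p.name.familyName}` : ''}`;
fmt(full); // 'Ada Lovelace'
fmt(solo); // 'Ada' (previously 'Ada undefined')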
package-lock.json (generated): 1224 lines changed
File diff suppressed because it is too large
@@ -620,6 +620,7 @@ export const alternateName = {
   [EModelEndpoint.custom]: 'Custom',
   [EModelEndpoint.bedrock]: 'AWS Bedrock',
   [KnownEndpoints.ollama]: 'Ollama',
+  [KnownEndpoints.deepseek]: 'DeepSeek',
   [KnownEndpoints.xai]: 'xAI',
 };
 
@@ -149,6 +149,9 @@ export const codeTypeMapping: { [key: string]: string } = {
   ts: 'application/typescript',
   tar: 'application/x-tar',
   zip: 'application/zip',
+  yml: 'application/x-yaml',
+  yaml: 'application/x-yaml',
+  log: 'text/plain',
 };
 
 export const retrievalMimeTypes = [