Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-18 01:10:14 +01:00)
🆕 feat: Enhanced Title Generation Config Options (#8580)
* 🏗️ refactor: Extract the reasoning key logic into a separate function
* refactor: Ensure `overrideProvider` is always defined in the `getProviderConfig` result, and only use it in `initializeAgent` if it differs from `agent.provider`
* feat: new title configuration options across services (a hedged config sketch follows this list):
- titlePrompt
- titleEndpoint
- titlePromptTemplate
- new "completion" titleMethod (new default)
* chore: update @librechat/agents and align the openai package version to prevent SDK errors
* chore: add the form-data package as a dependency and override it to v4.0.4 to address CVE-2025-7783
* feat: add support for 'all' endpoint configuration in AppService and corresponding tests
* refactor: replace HttpsProxyAgent with ProxyAgent from undici for improved proxy handling in assistant initialization (a generic usage sketch follows this list)
* chore: update frontend review workflow to limit package paths to data-provider
* chore: update backend review workflow to include all package paths
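The four title options above are only named in the commit message; the sketch below is a guess at how they might fit together, written as a TypeScript shape for illustration. The TitleConfig name, the sample values, and the placement of these options are assumptions, not taken from this commit.

// Sketch only: the TitleConfig shape and the sample values are illustrative;
// only the four option names come from the commit message above.
type TitleConfig = {
  titleMethod?: 'completion' | string; // 'completion' is described as the new default
  titlePrompt?: string;                // prompt used when generating the conversation title
  titlePromptTemplate?: string;        // template applied to the conversation content
  titleEndpoint?: string;              // endpoint/provider to use for title generation
};

// Hypothetical usage: route title generation to a specific endpoint with a custom prompt.
const titleConfig: TitleConfig = {
  titleMethod: 'completion',
  titleEndpoint: 'openAI',
  titlePrompt: 'Write a concise, five-word title for this conversation.',
};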
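For the undici ProxyAgent change above, the snippet below is only a generic sketch of the ProxyAgent/dispatcher pattern, not the commit's assistant-initialization code; the PROXY environment variable and the target URL are illustrative.

// Generic undici ProxyAgent usage (illustrative, not this commit's code):
// outbound requests are routed through an HTTP(S) proxy via the `dispatcher` option.
import { ProxyAgent, fetch } from 'undici';

async function main() {
  const proxyUrl = process.env.PROXY; // e.g. 'http://127.0.0.1:8080' (illustrative)
  const dispatcher = proxyUrl ? new ProxyAgent(proxyUrl) : undefined;

  const res = await fetch('https://api.openai.com/v1/models', { dispatcher });
  console.log(res.status);
}

main().catch(console.error);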
Parent: aec1777a90
Commit: 14660d75ae
21 changed files with 2666 additions and 196 deletions
package.json (@librechat/api):

@@ -1,6 +1,6 @@
 {
   "name": "@librechat/api",
-  "version": "1.2.7",
+  "version": "1.2.8",
   "type": "commonjs",
   "description": "MCP services for LibreChat",
   "main": "dist/index.js",
@@ -70,7 +70,7 @@
   },
   "peerDependencies": {
     "@langchain/core": "^0.3.62",
-    "@librechat/agents": "^2.4.63",
+    "@librechat/agents": "^2.4.67",
     "@librechat/data-schemas": "*",
     "@modelcontextprotocol/sdk": "^1.13.3",
     "axios": "^1.8.2",
createRun module:

@@ -18,6 +18,28 @@ const customProviders = new Set([
   Providers.OPENROUTER,
 ]);
 
+export function getReasoningKey(
+  provider: Providers,
+  llmConfig: t.RunLLMConfig,
+  agentEndpoint?: string | null,
+): 'reasoning_content' | 'reasoning' {
+  let reasoningKey: 'reasoning_content' | 'reasoning' = 'reasoning_content';
+  if (provider === Providers.GOOGLE) {
+    reasoningKey = 'reasoning';
+  } else if (
+    llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
+    (agentEndpoint && agentEndpoint.toLowerCase().includes(KnownEndpoints.openrouter))
+  ) {
+    reasoningKey = 'reasoning';
+  } else if (
+    (llmConfig as OpenAIClientOptions).useResponsesApi === true &&
+    (provider === Providers.OPENAI || provider === Providers.AZURE)
+  ) {
+    reasoningKey = 'reasoning';
+  }
+  return reasoningKey;
+}
+
 /**
  * Creates a new Run instance with custom handlers and configuration.
  *
@@ -69,21 +91,7 @@ export async function createRun({
     llmConfig.usage = true;
   }
 
-  let reasoningKey: 'reasoning_content' | 'reasoning' = 'reasoning_content';
-  if (provider === Providers.GOOGLE) {
-    reasoningKey = 'reasoning';
-  } else if (
-    llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
-    (agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
-  ) {
-    reasoningKey = 'reasoning';
-  } else if (
-    (llmConfig as OpenAIClientOptions).useResponsesApi === true &&
-    (provider === Providers.OPENAI || provider === Providers.AZURE)
-  ) {
-    reasoningKey = 'reasoning';
-  }
-
+  const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
   const graphConfig: StandardGraphConfig = {
     signal,
     llmConfig,
(Diffs for the remaining changed files did not load and are not shown.)
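For illustration, the calls below trace the branches of getReasoningKey as shown in the diff above. The import paths, the cfg helper, and the simplified config objects are assumptions; only the branching behavior comes from the code.

// Illustrative calls only: import paths are guesses and the config objects are
// simplified casts rather than full RunLLMConfig values.
import { Providers } from '@librechat/agents';
import { getReasoningKey } from './run';

type AnyLLMConfig = Parameters<typeof getReasoningKey>[1];

// Small helper so simplified literals type-check as the expected config parameter.
const cfg = (c: object) => c as AnyLLMConfig;

// Google streams reasoning tokens under the `reasoning` key.
getReasoningKey(Providers.GOOGLE, cfg({})); // 'reasoning'

// OpenRouter is detected from the baseURL or from the agent's endpoint name.
getReasoningKey(
  Providers.OPENAI,
  cfg({ configuration: { baseURL: 'https://openrouter.ai/api/v1' } }),
); // 'reasoning'

// OpenAI/Azure switch keys only when the Responses API is enabled.
getReasoningKey(Providers.OPENAI, cfg({ useResponsesApi: true })); // 'reasoning'

// Any other combination falls back to `reasoning_content`.
getReasoningKey(Providers.OPENAI, cfg({}), 'custom'); // 'reasoning_content'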