🆕 feat: Enhanced Title Generation Config Options (#8580)

* 🏗️ refactor: Extract reasoning key logic into separate function

* refactor: Ensure `overrideProvider` is always defined in `getProviderConfig` result, and only used in `initializeAgent` if different from `agent.provider`

* feat: new title configuration options across services

- titlePrompt
- titleEndpoint
- titlePromptTemplate
- new "completion" titleMethod (new default)

* chore: update @librechat/agents and conform openai version to prevent SDK errors

* chore: add form-data package as a dependency and override to v4.0.4 to address CVE-2025-7783

* feat: add support for 'all' endpoint configuration in AppService and corresponding tests

* refactor: replace HttpsProxyAgent with ProxyAgent from undici for improved proxy handling in assistant initialization

* chore: update frontend review workflow to limit package paths to data-provider

* chore: update backend review workflow to include all package paths
This commit is contained in:
Danny Avila 2025-07-21 17:37:37 -04:00 committed by GitHub
parent aec1777a90
commit 14660d75ae
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 2666 additions and 196 deletions

View file

@@ -1,6 +1,6 @@
{
"name": "@librechat/api",
"version": "1.2.7",
"version": "1.2.8",
"type": "commonjs",
"description": "MCP services for LibreChat",
"main": "dist/index.js",
@@ -70,7 +70,7 @@
},
"peerDependencies": {
"@langchain/core": "^0.3.62",
"@librechat/agents": "^2.4.63",
"@librechat/agents": "^2.4.67",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.13.3",
"axios": "^1.8.2",

View file

@@ -18,6 +18,28 @@ const customProviders = new Set([
Providers.OPENROUTER,
]);
/**
 * Determines which message key carries a model's reasoning output.
 *
 * Returns `'reasoning'` for Google models, for OpenRouter-backed
 * configurations (detected via the LLM config's `baseURL` or the agent's
 * endpoint name), and for OpenAI/Azure when the Responses API is enabled;
 * every other case uses the default `'reasoning_content'` key.
 *
 * @param provider - The LLM provider for the run.
 * @param llmConfig - The run's LLM configuration.
 * @param agentEndpoint - Optional agent endpoint name, matched case-insensitively.
 * @returns The reasoning key to read from streamed messages.
 */
export function getReasoningKey(
  provider: Providers,
  llmConfig: t.RunLLMConfig,
  agentEndpoint?: string | null,
): 'reasoning_content' | 'reasoning' {
  if (provider === Providers.GOOGLE) {
    return 'reasoning';
  }
  // OpenRouter exposes reasoning under the `reasoning` key regardless of provider.
  const viaOpenRouter =
    llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) === true ||
    (agentEndpoint != null && agentEndpoint.toLowerCase().includes(KnownEndpoints.openrouter));
  if (viaOpenRouter) {
    return 'reasoning';
  }
  // The OpenAI Responses API (OpenAI/Azure only) also uses the `reasoning` key.
  const viaResponsesApi =
    (llmConfig as OpenAIClientOptions).useResponsesApi === true &&
    (provider === Providers.OPENAI || provider === Providers.AZURE);
  return viaResponsesApi ? 'reasoning' : 'reasoning_content';
}
/**
* Creates a new Run instance with custom handlers and configuration.
*
@@ -69,21 +91,7 @@ export async function createRun({
llmConfig.usage = true;
}
let reasoningKey: 'reasoning_content' | 'reasoning' = 'reasoning_content';
if (provider === Providers.GOOGLE) {
reasoningKey = 'reasoning';
} else if (
llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
(agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
reasoningKey = 'reasoning';
} else if (
(llmConfig as OpenAIClientOptions).useResponsesApi === true &&
(provider === Providers.OPENAI || provider === Providers.AZURE)
) {
reasoningKey = 'reasoning';
}
const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
const graphConfig: StandardGraphConfig = {
signal,
llmConfig,

View file

@@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.7.900",
"version": "0.7.901",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",
@@ -62,7 +62,6 @@
"@types/winston": "^2.4.4",
"jest": "^29.5.0",
"jest-junit": "^16.0.0",
"openai": "^4.76.3",
"openapi-types": "^12.1.3",
"rimraf": "^5.0.1",
"rollup": "^4.22.4",

View file

@@ -185,6 +185,12 @@ export const baseEndpointSchema = z.object({
baseURL: z.string().optional(),
titlePrompt: z.string().optional(),
titleModel: z.string().optional(),
titleConvo: z.boolean().optional(),
titleMethod: z
.union([z.literal('completion'), z.literal('functions'), z.literal('structured')])
.optional(),
titleEndpoint: z.string().optional(),
titlePromptTemplate: z.string().optional(),
});
export type TBaseEndpoint = z.infer<typeof baseEndpointSchema>;
@@ -225,8 +231,6 @@ export const assistantEndpointSchema = baseEndpointSchema.merge(
userIdQuery: z.boolean().optional(),
})
.optional(),
titleConvo: z.boolean().optional(),
titleMethod: z.union([z.literal('completion'), z.literal('functions')]).optional(),
headers: z.record(z.any()).optional(),
}),
);
@@ -279,8 +283,6 @@ export const endpointSchema = baseEndpointSchema.merge(
fetch: z.boolean().optional(),
userIdQuery: z.boolean().optional(),
}),
titleConvo: z.boolean().optional(),
titleMethod: z.union([z.literal('completion'), z.literal('functions')]).optional(),
summarize: z.boolean().optional(),
summaryModel: z.string().optional(),
forcePrompt: z.boolean().optional(),
@@ -315,6 +317,8 @@ export const azureEndpointSchema = z
titleConvo: true,
titleMethod: true,
titleModel: true,
titlePrompt: true,
titlePromptTemplate: true,
summarize: true,
summaryModel: true,
customOrder: true,

View file

@@ -1,4 +1,3 @@
import type OpenAI from 'openai';
import type { InfiniteData } from '@tanstack/react-query';
import type {
TBanner,
@@ -14,8 +13,6 @@ import type { SettingDefinition } from './generate';
import type { TMinimalFeedback } from './feedback';
import type { Agent } from './types/assistants';
export type TOpenAIMessage = OpenAI.Chat.ChatCompletionMessageParam;
export * from './schemas';
export type TMessages = TMessage[];