mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-01-08 19:48:51 +01:00)
refactor: parameter extraction and organization in agent services, minimize redundancy of shared fields across objects, make clear distinction of parameters processed uniquely by LibreChat vs LLM Provider Configs
This commit is contained in:
parent
6bc0bbeebb
commit
2797aff423
7 changed files with 257 additions and 21 deletions
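
Note: the core of this refactor is the new extractLibreChatParams helper in packages/api, which splits a combined options object into LibreChat-managed settings (resendFiles, promptPrefix, maxContextTokens, modelLabel) and the remaining provider model options. A minimal caller-side sketch, assuming only what the diff below shows; the option values are illustrative, not from the commit:

    import { extractLibreChatParams } from '@librechat/api';

    const combined = {
      // processed uniquely by LibreChat
      resendFiles: false,
      maxContextTokens: 8192,
      // passed through to the LLM provider config
      model: 'gpt-4',
      temperature: 0.7,
    };

    const { resendFiles, maxContextTokens, modelOptions } = extractLibreChatParams(combined);
    // modelOptions => { model: 'gpt-4', temperature: 0.7 }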
@@ -1,5 +1,9 @@
 const { Providers } = require('@librechat/agents');
-const { primeResources, optionalChainWithEmptyCheck } = require('@librechat/api');
+const {
+  primeResources,
+  extractLibreChatParams,
+  optionalChainWithEmptyCheck,
+} = require('@librechat/api');
 const {
   ErrorTypes,
   EModelEndpoint,
@@ -15,10 +19,9 @@ const initGoogle = require('~/server/services/Endpoints/google/initialize');
 const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
 const { getCustomEndpointConfig } = require('~/server/services/Config');
 const { processFiles } = require('~/server/services/Files/process');
+const { getFiles, getToolFilesByIds } = require('~/models/File');
 const { getConvoFiles } = require('~/models/Conversation');
-const { getToolFilesByIds } = require('~/models/File');
 const { getModelMaxTokens } = require('~/utils');
-const { getFiles } = require('~/models/File');

 const providerConfigMap = {
   [Providers.XAI]: initCustom,
@@ -71,7 +74,7 @@ const initializeAgent = async ({
     ),
   );

-  const { resendFiles = true, ...modelOptions } = _modelOptions;
+  const { resendFiles, maxContextTokens, modelOptions } = extractLibreChatParams(_modelOptions);

   if (isInitialAgent && conversationId != null && resendFiles) {
     const fileIds = (await getConvoFiles(conversationId)) ?? [];
@@ -145,9 +148,8 @@ const initializeAgent = async ({
     modelOptions.maxTokens,
     0,
   );
-  const maxContextTokens = optionalChainWithEmptyCheck(
-    modelOptions.maxContextTokens,
-    modelOptions.max_context_tokens,
+  const agentMaxContextTokens = optionalChainWithEmptyCheck(
+    maxContextTokens,
     getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
     4096,
   );
@@ -189,7 +191,7 @@ const initializeAgent = async ({
     attachments,
     resendFiles,
     toolContextMap,
-    maxContextTokens: (maxContextTokens - maxTokens) * 0.9,
+    maxContextTokens: (agentMaxContextTokens - maxTokens) * 0.9,
   };
 };

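Note: since maxContextTokens is now handed in by extractLibreChatParams, the old probing of modelOptions.maxContextTokens / modelOptions.max_context_tokens is gone, and the derived value is renamed agentMaxContextTokens so it no longer shadows the extracted param. The final budget, (agentMaxContextTokens - maxTokens) * 0.9, reserves room for the response's maxTokens plus a 10% safety margin. Assuming optionalChainWithEmptyCheck returns its first non-empty, non-nullish argument (its implementation is not part of this diff), the fallback chain behaves roughly like:

    // sketch of the fallback order, under the assumption above
    const agentMaxContextTokens =
      maxContextTokens ??                                              // extracted LibreChat param
      getModelMaxTokens(tokensModel, providerEndpointMap[provider]) ?? // per-model lookup
      4096;                                                            // conservative floor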
@@ -1,10 +1,9 @@
-const { isAgentsEndpoint, Constants } = require('librechat-data-provider');
+const { isAgentsEndpoint, removeNullishValues, Constants } = require('librechat-data-provider');
 const { loadAgent } = require('~/models/Agent');
 const { logger } = require('~/config');

 const buildOptions = (req, endpoint, parsedBody, endpointType) => {
-  const { spec, iconURL, agent_id, instructions, maxContextTokens, ...model_parameters } =
-    parsedBody;
+  const { spec, iconURL, agent_id, instructions, ...model_parameters } = parsedBody;
   const agentPromise = loadAgent({
     req,
     agent_id: isAgentsEndpoint(endpoint) ? agent_id : Constants.EPHEMERAL_AGENT_ID,
@@ -15,19 +14,16 @@ const buildOptions = (req, endpoint, parsedBody, endpointType) => {
     return undefined;
   });

-  const endpointOption = {
+  return removeNullishValues({
     spec,
     iconURL,
     endpoint,
     agent_id,
     endpointType,
     instructions,
-    maxContextTokens,
     model_parameters,
     agent: agentPromise,
-  };
-
-  return endpointOption;
+  });
 };

 module.exports = { buildOptions };
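Note: buildOptions previously materialized every field on an intermediate endpointOption object, so absent settings traveled downstream as undefined entries; maxContextTokens is also no longer plucked out here, leaving it inside model_parameters for extractLibreChatParams to claim later. removeNullishValues comes from librechat-data-provider; a hand-rolled sketch of what it is expected to do at this call site:

    // sketch: drop null/undefined entries before returning the options object
    function removeNullishValues<T extends Record<string, unknown>>(obj: T): Partial<T> {
      return Object.fromEntries(
        Object.entries(obj).filter(([, value]) => value != null),
      ) as Partial<T>;
    }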
@@ -61,6 +61,7 @@ const initializeClient = async ({ req, res, endpointOption }) => {
   }

   const primaryAgent = await endpointOption.agent;
+  delete endpointOption.agent;
   if (!primaryAgent) {
     throw new Error('Agent not found');
   }
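Note: endpointOption.agent holds the pending loadAgent promise. Once it is awaited into primaryAgent, deleting the field keeps the now-redundant promise from riding along wherever endpointOption is passed later, in line with the commit's stated goal of minimizing redundant shared fields across objects (that reading of intent is inferred from the commit message, not stated in the diff).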
@@ -4,5 +4,6 @@ export * from './common';
 export * from './events';
 export * from './files';
 export * from './generators';
+export * from './llm';
 export * from './openid';
 export { default as Tokenizer } from './tokenizer';

packages/api/src/utils/llm.test.ts (new file, 189 additions)
@@ -0,0 +1,189 @@
+import { extractLibreChatParams } from './llm';
+
+describe('extractLibreChatParams', () => {
+  it('should return defaults when options is undefined', () => {
+    const result = extractLibreChatParams(undefined);
+
+    expect(result.resendFiles).toBe(true);
+    expect(result.promptPrefix).toBeUndefined();
+    expect(result.maxContextTokens).toBeUndefined();
+    expect(result.modelLabel).toBeUndefined();
+    expect(result.modelOptions).toEqual({});
+  });
+
+  it('should return defaults when options is null', () => {
+    const result = extractLibreChatParams();
+
+    expect(result.resendFiles).toBe(true);
+    expect(result.promptPrefix).toBeUndefined();
+    expect(result.maxContextTokens).toBeUndefined();
+    expect(result.modelLabel).toBeUndefined();
+    expect(result.modelOptions).toEqual({});
+  });
+
+  it('should extract all LibreChat params and leave model options', () => {
+    const options = {
+      resendFiles: false,
+      promptPrefix: 'You are a helpful assistant',
+      maxContextTokens: 4096,
+      modelLabel: 'GPT-4',
+      model: 'gpt-4',
+      temperature: 0.7,
+      max_tokens: 1000,
+    };
+
+    const result = extractLibreChatParams(options);
+
+    expect(result.resendFiles).toBe(false);
+    expect(result.promptPrefix).toBe('You are a helpful assistant');
+    expect(result.maxContextTokens).toBe(4096);
+    expect(result.modelLabel).toBe('GPT-4');
+    expect(result.modelOptions).toEqual({
+      model: 'gpt-4',
+      temperature: 0.7,
+      max_tokens: 1000,
+    });
+  });
+
+  it('should handle null values for LibreChat params', () => {
+    const options = {
+      resendFiles: true,
+      promptPrefix: null,
+      maxContextTokens: 2048,
+      modelLabel: null,
+      model: 'claude-3',
+    };
+
+    const result = extractLibreChatParams(options);
+
+    expect(result.resendFiles).toBe(true);
+    expect(result.promptPrefix).toBeNull();
+    expect(result.maxContextTokens).toBe(2048);
+    expect(result.modelLabel).toBeNull();
+    expect(result.modelOptions).toEqual({
+      model: 'claude-3',
+    });
+  });
+
+  it('should use default for resendFiles when not provided', () => {
+    const options = {
+      promptPrefix: 'Test prefix',
+      model: 'gpt-3.5-turbo',
+      temperature: 0.5,
+    };
+
+    const result = extractLibreChatParams(options);
+
+    expect(result.resendFiles).toBe(true); // Should use default
+    expect(result.promptPrefix).toBe('Test prefix');
+    expect(result.maxContextTokens).toBeUndefined();
+    expect(result.modelLabel).toBeUndefined();
+    expect(result.modelOptions).toEqual({
+      model: 'gpt-3.5-turbo',
+      temperature: 0.5,
+    });
+  });
+
+  it('should handle empty options object', () => {
+    const result = extractLibreChatParams({});
+
+    expect(result.resendFiles).toBe(true); // Should use default
+    expect(result.promptPrefix).toBeUndefined();
+    expect(result.maxContextTokens).toBeUndefined();
+    expect(result.modelLabel).toBeUndefined();
+    expect(result.modelOptions).toEqual({});
+  });
+
+  it('should only extract known LibreChat params', () => {
+    const options = {
+      resendFiles: false,
+      promptPrefix: 'Custom prompt',
+      maxContextTokens: 8192,
+      modelLabel: 'Custom Model',
+      // Model options
+      model: 'gpt-4',
+      temperature: 0.9,
+      top_p: 0.95,
+      frequency_penalty: 0.5,
+      presence_penalty: 0.5,
+      // Unknown params should stay in modelOptions
+      unknownParam: 'should remain',
+      customSetting: 123,
+    };
+
+    const result = extractLibreChatParams(options);
+
+    // LibreChat params extracted
+    expect(result.resendFiles).toBe(false);
+    expect(result.promptPrefix).toBe('Custom prompt');
+    expect(result.maxContextTokens).toBe(8192);
+    expect(result.modelLabel).toBe('Custom Model');
+
+    // Model options should include everything else
+    expect(result.modelOptions).toEqual({
+      model: 'gpt-4',
+      temperature: 0.9,
+      top_p: 0.95,
+      frequency_penalty: 0.5,
+      presence_penalty: 0.5,
+      unknownParam: 'should remain',
+      customSetting: 123,
+    });
+  });
+
+  it('should not mutate the original options object', () => {
+    const options = {
+      resendFiles: false,
+      promptPrefix: 'Test',
+      model: 'gpt-4',
+      temperature: 0.7,
+    };
+    const originalOptions = { ...options };
+
+    extractLibreChatParams(options);
+
+    // Original object should remain unchanged
+    expect(options).toEqual(originalOptions);
+  });
+
+  it('should handle undefined values for optional LibreChat params', () => {
+    const options = {
+      resendFiles: false,
+      promptPrefix: undefined,
+      maxContextTokens: undefined,
+      modelLabel: undefined,
+      model: 'claude-2',
+    };
+
+    const result = extractLibreChatParams(options);
+
+    expect(result.resendFiles).toBe(false);
+    expect(result.promptPrefix).toBeUndefined();
+    expect(result.maxContextTokens).toBeUndefined();
+    expect(result.modelLabel).toBeUndefined();
+    expect(result.modelOptions).toEqual({
+      model: 'claude-2',
+    });
+  });
+
+  it('should handle mixed null and undefined values', () => {
+    const options = {
+      promptPrefix: null,
+      maxContextTokens: undefined,
+      modelLabel: null,
+      model: 'gpt-3.5-turbo',
+      stop: ['\n', '\n\n'],
+    };
+
+    const result = extractLibreChatParams(options);
+
+    expect(result.resendFiles).toBe(true); // default
+    expect(result.promptPrefix).toBeNull();
+    expect(result.maxContextTokens).toBeUndefined();
+    expect(result.modelLabel).toBeNull();
+    expect(result.modelOptions).toEqual({
+      model: 'gpt-3.5-turbo',
+      stop: ['\n', '\n\n'],
+    });
+  });
+});

packages/api/src/utils/llm.ts (new file, 47 additions)
@@ -0,0 +1,47 @@
+import { librechat } from 'librechat-data-provider';
+import type { DynamicSettingProps } from 'librechat-data-provider';
+
+type LibreChatKeys = keyof typeof librechat;
+
+type LibreChatParams = {
+  modelOptions: Omit<NonNullable<DynamicSettingProps['conversation']>, LibreChatKeys>;
+  resendFiles: boolean;
+  promptPrefix?: string | null;
+  maxContextTokens?: number;
+  modelLabel?: string | null;
+};
+
+/**
+ * Separates LibreChat-specific parameters from model options
+ * @param options - The combined options object
+ */
+export function extractLibreChatParams(
+  options?: DynamicSettingProps['conversation'],
+): LibreChatParams {
+  if (!options) {
+    return {
+      modelOptions: {} as Omit<NonNullable<DynamicSettingProps['conversation']>, LibreChatKeys>,
+      resendFiles: librechat.resendFiles.default as boolean,
+    };
+  }
+
+  const modelOptions = { ...options };
+
+  const resendFiles =
+    (delete modelOptions.resendFiles, options.resendFiles) ??
+    (librechat.resendFiles.default as boolean);
+  const promptPrefix = (delete modelOptions.promptPrefix, options.promptPrefix);
+  const maxContextTokens = (delete modelOptions.maxContextTokens, options.maxContextTokens);
+  const modelLabel = (delete modelOptions.modelLabel, options.modelLabel);
+
+  return {
+    modelOptions: modelOptions as Omit<
+      NonNullable<DynamicSettingProps['conversation']>,
+      LibreChatKeys
+    >,
+    maxContextTokens,
+    promptPrefix,
+    resendFiles,
+    modelLabel,
+  };
+}
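
Note: the (delete copy.key, original.key) expressions lean on the comma operator: the left operand removes the key from the modelOptions copy, and the right operand reads the value from the untouched input, which is what the whole expression evaluates to. A more verbose equivalent:

    // equivalent to: const promptPrefix = (delete modelOptions.promptPrefix, options.promptPrefix);
    const promptPrefix = options.promptPrefix; // read from the original
    delete modelOptions.promptPrefix;          // strip from the copy that becomes modelOptions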
@@ -83,7 +83,7 @@ const createDefinition = (
   return { ...base, ...overrides } as SettingDefinition;
 };

-const librechat: Record<string, SettingDefinition> = {
+export const librechat = {
   modelLabel: {
     key: 'modelLabel',
     label: 'com_endpoint_custom_name',
@@ -94,7 +94,7 @@ const librechat: Record<string, SettingDefinition> = {
     placeholder: 'com_endpoint_openai_custom_name_placeholder',
     placeholderCode: true,
     optionType: 'conversation',
-  },
+  } as const,
   maxContextTokens: {
     key: 'maxContextTokens',
     label: 'com_endpoint_context_tokens',
@@ -107,7 +107,7 @@ const librechat: Record<string, SettingDefinition> = {
     descriptionCode: true,
     optionType: 'model',
     columnSpan: 2,
-  },
+  } as const,
   resendFiles: {
     key: 'resendFiles',
     label: 'com_endpoint_plug_resend_files',
@@ -120,7 +120,7 @@ const librechat: Record<string, SettingDefinition> = {
     optionType: 'conversation',
     showDefault: false,
     columnSpan: 2,
-  },
+  } as const,
   promptPrefix: {
     key: 'promptPrefix',
     label: 'com_endpoint_prompt_prefix',
@@ -131,7 +131,7 @@ const librechat: Record<string, SettingDefinition> = {
     placeholder: 'com_endpoint_openai_prompt_prefix_placeholder',
     placeholderCode: true,
     optionType: 'model',
-  },
+  } as const,
 };

 const openAIParams: Record<string, SettingDefinition> = {
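Note: exporting librechat and dropping the Record<string, SettingDefinition> annotation (with each entry marked as const) lets TypeScript retain the literal key set instead of widening it to string, which is exactly what the new llm.ts relies on:

    // from packages/api/src/utils/llm.ts above
    type LibreChatKeys = keyof typeof librechat;
    // => 'modelLabel' | 'maxContextTokens' | 'resendFiles' | 'promptPrefix'
    // and librechat.resendFiles.default stays reachable as the runtime fallback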