Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-22 11:20:15 +01:00)

refactor: update appConfig access to use endpoints structure across various services

Commit 240e3bd59e (parent 89fb9c7e1c)
36 changed files with 591 additions and 510 deletions
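Every hunk below applies the same mechanical change: endpoint settings that used to sit directly on the top level of `appConfig` are now read from a nested `endpoints` object. A minimal sketch of the access pattern, for orientation only. `readAzureEndpointConfig` is a hypothetical helper, not code from this commit; `EModelEndpoint` is the real enum exported by librechat-data-provider.

const { EModelEndpoint } = require('librechat-data-provider');

// Hypothetical helper illustrating the before/after access pattern.
function readAzureEndpointConfig(appConfig) {
  // Old shape (before this commit): endpoint config keyed directly on appConfig.
  // const azureConfig = appConfig?.[EModelEndpoint.azureOpenAI];

  // New shape (after this commit): endpoint config nested under appConfig.endpoints.
  const azureConfig = appConfig?.endpoints?.[EModelEndpoint.azureOpenAI];
  return azureConfig;
}

The same substitution, plus the matching `endpoints: { ... }` wrapper in test fixtures, repeats across the services, controllers, and tests that follow.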
@@ -718,8 +718,7 @@ class OpenAIClient extends BaseClient {
       max_tokens: 16,
     };
 
-    /** @type {TAzureConfig | undefined} */
-    const azureConfig = appConfig?.[EModelEndpoint.azureOpenAI];
+    const azureConfig = appConfig?.endpoints?.[EModelEndpoint.azureOpenAI];
 
     const resetTitleOptions = !!(
       (this.azure && azureConfig) ||

@@ -1154,8 +1153,7 @@ ${convo}
       opts.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy);
     }
 
-    /** @type {TAzureConfig | undefined} */
-    const azureConfig = appConfig?.[EModelEndpoint.azureOpenAI];
+    const azureConfig = appConfig?.endpoints?.[EModelEndpoint.azureOpenAI];
 
     if (
       (this.azure && this.isVisionModel && azureConfig) ||
@@ -464,7 +464,9 @@ class AgentClient extends BaseClient {
 
     /** @type {Agent} */
     let prelimAgent;
-    const allowedProviders = new Set(appConfig?.[EModelEndpoint.agents]?.allowedProviders);
+    const allowedProviders = new Set(
+      appConfig?.endpoints?.[EModelEndpoint.agents]?.allowedProviders,
+    );
     try {
       if (memoryConfig.agent?.id != null && memoryConfig.agent.id !== this.options.agent.id) {
         prelimAgent = await loadAgent({

@@ -770,8 +772,8 @@ class AgentClient extends BaseClient {
     }
 
     const appConfig = await getAppConfig({ role: this.options.req.user?.role });
-    /** @type {TCustomConfig['endpoints']['agents']} */
-    const agentsEConfig = appConfig[EModelEndpoint.agents];
+    /** @type {AppConfig['endpoints']['agents']} */
+    const agentsEConfig = appConfig.endpoints?.[EModelEndpoint.agents];
 
     config = {
       configurable: {

@@ -1104,7 +1106,9 @@ class AgentClient extends BaseClient {
 
     /** @type {TEndpoint | undefined} */
     const endpointConfig =
-      appConfig.all ?? appConfig[endpoint] ?? titleProviderConfig.customEndpointConfig;
+      appConfig.endpoints?.all ??
+      appConfig.endpoints?.[endpoint] ??
+      titleProviderConfig.customEndpointConfig;
     if (!endpointConfig) {
       logger.warn(
         '[api/server/controllers/agents/client.js #titleConvo] Error getting endpoint config',
@@ -46,12 +46,14 @@ describe('AgentClient - titleConvo', () => {
 
      // Mock getAppConfig to return endpoint configurations
      getAppConfig.mockResolvedValue({
+        endpoints: {
          [EModelEndpoint.openAI]: {
            // Match the agent endpoint
            titleModel: 'gpt-3.5-turbo',
            titlePrompt: 'Custom title prompt',
            titleMethod: 'structured',
            titlePromptTemplate: 'Template: {{content}}',
          },
+        },
      });
 

@@ -148,7 +150,7 @@ describe('AgentClient - titleConvo', () => {
 
    it('should handle missing endpoint config gracefully', async () => {
      // Remove endpoint config
-      getAppConfig.mockResolvedValue({});
+      getAppConfig.mockResolvedValue({ endpoints: {} });
 
      const text = 'Test conversation text';
      const abortController = new AbortController();

@@ -167,11 +169,13 @@ describe('AgentClient - titleConvo', () => {
    it('should use agent model when titleModel is not provided', async () => {
      // Remove titleModel from config
      getAppConfig.mockResolvedValue({
+        endpoints: {
          [EModelEndpoint.openAI]: {
            titlePrompt: 'Custom title prompt',
            titleMethod: 'structured',
            titlePromptTemplate: 'Template: {{content}}',
            // titleModel is omitted
          },
+        },
      });
 

@@ -186,11 +190,13 @@ describe('AgentClient - titleConvo', () => {
 
    it('should not use titleModel when it equals CURRENT_MODEL constant', async () => {
      getAppConfig.mockResolvedValue({
+        endpoints: {
          [EModelEndpoint.openAI]: {
            titleModel: Constants.CURRENT_MODEL,
            titlePrompt: 'Custom title prompt',
            titleMethod: 'structured',
            titlePromptTemplate: 'Template: {{content}}',
          },
+        },
      });
 

@@ -265,12 +271,14 @@ describe('AgentClient - titleConvo', () => {
 
      // Add titleEndpoint to the config
      getAppConfig.mockResolvedValue({
+        endpoints: {
          [EModelEndpoint.openAI]: {
            titleModel: 'gpt-3.5-turbo',
            titleEndpoint: EModelEndpoint.anthropic,
            titleMethod: 'structured',
            titlePrompt: 'Custom title prompt',
            titlePromptTemplate: 'Custom template',
          },
+        },
      });
 

@@ -300,11 +308,13 @@ describe('AgentClient - titleConvo', () => {
    it('should use all config when endpoint config is missing', async () => {
      // Set 'all' config without endpoint-specific config
      getAppConfig.mockResolvedValue({
+        endpoints: {
          all: {
            titleModel: 'gpt-4o-mini',
            titlePrompt: 'All config title prompt',
            titleMethod: 'completion',
            titlePromptTemplate: 'All config template: {{content}}',
          },
+        },
      });
 

@@ -330,17 +340,19 @@ describe('AgentClient - titleConvo', () => {
    it('should prioritize all config over endpoint config for title settings', async () => {
      // Set both endpoint and 'all' config
      getAppConfig.mockResolvedValue({
+        endpoints: {
          [EModelEndpoint.openAI]: {
            titleModel: 'gpt-3.5-turbo',
            titlePrompt: 'Endpoint title prompt',
            titleMethod: 'structured',
            // titlePromptTemplate is omitted to test fallback
          },
          all: {
            titleModel: 'gpt-4o-mini',
            titlePrompt: 'All config title prompt',
            titleMethod: 'completion',
            titlePromptTemplate: 'All config template',
          },
+        },
      });
 

@@ -370,13 +382,15 @@ describe('AgentClient - titleConvo', () => {
 
      // Set comprehensive 'all' config with all new title options
      getAppConfig.mockResolvedValue({
+        endpoints: {
          all: {
            titleConvo: true,
            titleModel: 'claude-3-haiku-20240307',
            titleMethod: 'completion', // Testing the new default method
            titlePrompt: 'Generate a concise, descriptive title for this conversation',
            titlePromptTemplate: 'Conversation summary: {{content}}',
            titleEndpoint: EModelEndpoint.anthropic, // Should switch provider to Anthropic
          },
+        },
      });
 

@@ -425,11 +439,13 @@ describe('AgentClient - titleConvo', () => {
 
      // Set 'all' config with specific titleMethod
      getAppConfig.mockResolvedValue({
+        endpoints: {
          all: {
            titleModel: 'gpt-4o-mini',
            titleMethod: method,
            titlePrompt: `Testing ${method} method`,
            titlePromptTemplate: `Template for ${method}: {{content}}`,
          },
+        },
      });
 
@@ -476,27 +492,29 @@ describe('AgentClient - titleConvo', () => {
      mockAgent.endpoint = EModelEndpoint.azureOpenAI;
      mockAgent.provider = EModelEndpoint.azureOpenAI;
      getAppConfig.mockResolvedValue({
+        endpoints: {
          [EModelEndpoint.azureOpenAI]: {
            titleConvo: true,
            titleModel: 'grok-3',
            titleMethod: 'completion',
            titlePrompt: 'Azure serverless title prompt',
            streamRate: 35,
            modelGroupMap: {
              'grok-3': {
                group: 'Azure AI Foundry',
                deploymentName: 'grok-3',
              },
            },
            groupMap: {
              'Azure AI Foundry': {
                apiKey: '${AZURE_API_KEY}',
                baseURL: 'https://test.services.ai.azure.com/models',
                version: '2024-05-01-preview',
                serverless: true,
                models: {
                  'grok-3': {
                    deploymentName: 'grok-3',
                  },
                },
              },
+            },

@@ -526,26 +544,28 @@ describe('AgentClient - titleConvo', () => {
      mockAgent.endpoint = EModelEndpoint.azureOpenAI;
      mockAgent.provider = EModelEndpoint.azureOpenAI;
      getAppConfig.mockResolvedValue({
+        endpoints: {
          [EModelEndpoint.azureOpenAI]: {
            titleConvo: true,
            titleModel: 'gpt-4o',
            titleMethod: 'structured',
            titlePrompt: 'Azure instance title prompt',
            streamRate: 35,
            modelGroupMap: {
              'gpt-4o': {
                group: 'eastus',
                deploymentName: 'gpt-4o',
              },
            },
            groupMap: {
              eastus: {
                apiKey: '${EASTUS_API_KEY}',
                instanceName: 'region-instance',
                version: '2024-02-15-preview',
                models: {
                  'gpt-4o': {
                    deploymentName: 'gpt-4o',
                  },
                },
              },
+            },

@@ -576,27 +596,29 @@ describe('AgentClient - titleConvo', () => {
      mockAgent.provider = EModelEndpoint.azureOpenAI;
      mockAgent.model_parameters.model = 'gpt-4o-latest';
      getAppConfig.mockResolvedValue({
+        endpoints: {
          [EModelEndpoint.azureOpenAI]: {
            titleConvo: true,
            titleModel: Constants.CURRENT_MODEL,
            titleMethod: 'functions',
            streamRate: 35,
            modelGroupMap: {
              'gpt-4o-latest': {
                group: 'region-eastus',
                deploymentName: 'gpt-4o-mini',
                version: '2024-02-15-preview',
              },
            },
            groupMap: {
              'region-eastus': {
                apiKey: '${EASTUS2_API_KEY}',
                instanceName: 'test-instance',
                version: '2024-12-01-preview',
                models: {
                  'gpt-4o-latest': {
                    deploymentName: 'gpt-4o-mini',
                    version: '2024-02-15-preview',
                  },
                },
              },
+            },
@@ -625,54 +647,56 @@ describe('AgentClient - titleConvo', () => {
      mockAgent.endpoint = EModelEndpoint.azureOpenAI;
      mockAgent.provider = EModelEndpoint.azureOpenAI;
      getAppConfig.mockResolvedValue({
+        endpoints: {
          [EModelEndpoint.azureOpenAI]: {
            titleConvo: true,
            titleModel: 'o1-mini',
            titleMethod: 'completion',
            streamRate: 35,
            modelGroupMap: {
              'gpt-4o': {
                group: 'eastus',
                deploymentName: 'gpt-4o',
              },
              'o1-mini': {
                group: 'region-eastus',
                deploymentName: 'o1-mini',
              },
              'codex-mini': {
                group: 'codex-mini',
                deploymentName: 'codex-mini',
              },
            },
            groupMap: {
              eastus: {
                apiKey: '${EASTUS_API_KEY}',
                instanceName: 'region-eastus',
                version: '2024-02-15-preview',
                models: {
                  'gpt-4o': {
                    deploymentName: 'gpt-4o',
                  },
                },
              },
              'region-eastus': {
                apiKey: '${EASTUS2_API_KEY}',
                instanceName: 'region-eastus2',
                version: '2024-12-01-preview',
                models: {
                  'o1-mini': {
                    deploymentName: 'o1-mini',
                  },
                },
              },
              'codex-mini': {
                apiKey: '${AZURE_API_KEY}',
                baseURL: 'https://example.cognitiveservices.azure.com/openai/',
                version: '2025-04-01-preview',
                serverless: true,
                models: {
                  'codex-mini': {
                    deploymentName: 'codex-mini',
                  },
                },
              },
+            },
@@ -709,27 +733,29 @@ describe('AgentClient - titleConvo', () => {
 
      // Set 'all' config as fallback with a serverless Azure config
      getAppConfig.mockResolvedValue({
+        endpoints: {
          all: {
            titleConvo: true,
            titleModel: 'gpt-4',
            titleMethod: 'structured',
            titlePrompt: 'Fallback title prompt from all config',
            titlePromptTemplate: 'Template: {{content}}',
            modelGroupMap: {
              'gpt-4': {
                group: 'default-group',
                deploymentName: 'gpt-4',
              },
            },
            groupMap: {
              'default-group': {
                apiKey: '${AZURE_API_KEY}',
                baseURL: 'https://default.openai.azure.com/',
                version: '2024-02-15-preview',
                serverless: true,
                models: {
                  'gpt-4': {
                    deploymentName: 'gpt-4',
                  },
                },
              },
+            },
@@ -376,9 +376,9 @@ const chatV2 = async (req, res) => {
  };
 
  /** @type {undefined | TAssistantEndpoint} */
-  const config = appConfig[endpoint] ?? {};
+  const config = appConfig.endpoints?.[endpoint] ?? {};
  /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
 
  const streamRunManager = new StreamRunManager({
    req,
@@ -231,20 +231,20 @@ const fetchAssistants = async ({ req, res, overrideEndpoint }) => {
  if (endpoint === EModelEndpoint.assistants) {
    ({ body } = await listAllAssistants({ req, res, version, query }));
  } else if (endpoint === EModelEndpoint.azureAssistants) {
-    const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
+    const azureConfig = appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
    body = await listAssistantsForAzure({ req, res, version, azureConfig, query });
  }
 
  if (req.user.role === SystemRoles.ADMIN) {
    return body;
-  } else if (!appConfig[endpoint]) {
+  } else if (!appConfig.endpoints?.[endpoint]) {
    return body;
  }
 
  body.data = filterAssistants({
    userId: req.user.id,
    assistants: body.data,
-    assistantsConfig: appConfig[endpoint],
+    assistantsConfig: appConfig.endpoints?.[endpoint],
  });
  return body;
};
@@ -260,7 +260,7 @@ const getAssistantDocuments = async (req, res) => {
  try {
    const appConfig = await getAppConfig({ role: req.user?.role });
    const endpoint = req.query;
-    const assistantsConfig = appConfig[endpoint];
+    const assistantsConfig = appConfig.endpoints?.[endpoint];
    const documents = await getAssistants(
      {},
      {
@@ -15,7 +15,7 @@ const validateAssistant = async (req, res, next) => {
 
  const appConfig = await getAppConfig({ role: req.user?.role });
  /** @type {Partial<TAssistantEndpoint>} */
-  const assistantsConfig = appConfig?.[endpoint];
+  const assistantsConfig = appConfig.endpoints?.[endpoint];
  if (!assistantsConfig) {
    return next();
  }
@@ -23,7 +23,7 @@ const validateAuthor = async ({ req, openai, overrideEndpoint, overrideAssistant
 
  const appConfig = await getAppConfig({ role: req.user?.role });
  /** @type {Partial<TAssistantEndpoint>} */
-  const assistantsConfig = appConfig?.[endpoint];
+  const assistantsConfig = appConfig.endpoints?.[endpoint];
  if (!assistantsConfig) {
    return;
  }
@@ -130,7 +130,7 @@ router.post('/:assistant_id', async (req, res) => {
  }
 
  /* Map Azure OpenAI model to the assistant as defined by config */
-  if (appConfig[EModelEndpoint.azureOpenAI]?.assistants) {
+  if (appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
    updatedAssistant = {
      ...updatedAssistant,
      model: req.body.model,
@@ -111,7 +111,9 @@ const AppService = async () => {
  if (!Object.keys(config).length) {
    const appConfig = {
      ...defaultConfig,
+      endpoints: {
        [EModelEndpoint.agents]: agentsDefaults,
+      },
    };
    await setAppConfig(appConfig);
    return;

@@ -126,7 +128,7 @@ const AppService = async () => {
    fileConfig: config?.fileConfig,
    secureImageLinks: config?.secureImageLinks,
    modelSpecs: processModelSpecs(config?.endpoints, config.modelSpecs, interfaceConfig),
-    ...loadedEndpoints,
+    endpoints: loadedEndpoints,
  };
 
  await setAppConfig(appConfig);
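The two AppService hunks above are the producer side of this refactor: the default agents config and the loaded endpoint configs are now stored under a dedicated `endpoints` key instead of being spread onto the top level. A rough, hedged illustration of the stored shape; the property names inside each endpoint object are examples, not a schema:

// Illustration only; real values come from librechat.yaml and AppService.
const appConfig = {
  // ...other top-level settings (fileConfig, secureImageLinks, modelSpecs, ...)
  endpoints: {
    agents: { disableBuilder: false /* , capabilities, ... */ },
    openAI: { titleConvo: true /* , titleModel, titlePrompt, ... */ },
  },
};

Consumers then read `appConfig.endpoints?.[endpoint]` rather than `appConfig[endpoint]`, which is exactly what the remaining hunks change.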
@@ -172,12 +172,14 @@ describe('AppService', () => {
          searxngInstanceUrl: '${SEARXNG_INSTANCE_URL}',
        }),
        memory: undefined,
+        endpoints: expect.objectContaining({
          agents: expect.objectContaining({
            disableBuilder: false,
            capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
            maxCitations: 30,
            maxCitationsPerFile: 7,
            minRelevanceScore: 0.45,
          }),
+        }),
      }),
    );

@@ -328,12 +330,14 @@ describe('AppService', () => {
 
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          [EModelEndpoint.assistants]: expect.objectContaining({
            disableBuilder: true,
            pollIntervalMs: 5000,
            timeoutMs: 30000,
            supportedIds: expect.arrayContaining(['id1', 'id2']),
            privateAssistants: false,
          }),
+        }),
      }),
    );

@@ -358,15 +362,17 @@ describe('AppService', () => {
 
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          [EModelEndpoint.agents]: expect.objectContaining({
            disableBuilder: true,
            recursionLimit: 10,
            maxRecursionLimit: 20,
            allowedProviders: expect.arrayContaining(['openai', 'anthropic']),
            capabilities: expect.arrayContaining([
              AgentCapabilities.tools,
              AgentCapabilities.actions,
            ]),
          }),
+        }),
      }),
    );

@@ -379,9 +385,11 @@ describe('AppService', () => {
 
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          [EModelEndpoint.agents]: expect.objectContaining({
            disableBuilder: false,
            capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
          }),
+        }),
      }),
    );

@@ -402,12 +410,14 @@ describe('AppService', () => {
 
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          [EModelEndpoint.agents]: expect.objectContaining({
            disableBuilder: false,
            capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
          }),
          [EModelEndpoint.openAI]: expect.objectContaining({
            titleConvo: true,
          }),
+        }),
      }),
    );

@@ -432,12 +442,14 @@ describe('AppService', () => {
    await AppService();
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          [EModelEndpoint.azureAssistants]: expect.objectContaining({
            capabilities: expect.arrayContaining([
              expect.any(String),
              expect.any(String),
              expect.any(String),
            ]),
          }),
+        }),
      }),
    );

@@ -462,10 +474,12 @@ describe('AppService', () => {
    const { modelNames, modelGroupMap, groupMap } = validateAzureGroups(azureGroups);
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          [EModelEndpoint.azureOpenAI]: expect.objectContaining({
            modelNames,
            modelGroupMap,
            groupMap,
          }),
+        }),
      }),
    );
@@ -619,27 +633,29 @@ describe('AppService', () => {
 
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          // Check OpenAI endpoint configuration
          [EModelEndpoint.openAI]: expect.objectContaining({
            titleConvo: true,
            titleModel: 'gpt-3.5-turbo',
            titleMethod: 'structured',
            titlePrompt: 'Custom title prompt for conversation',
            titlePromptTemplate: 'Summarize this conversation: {{conversation}}',
          }),
          // Check Assistants endpoint configuration
          [EModelEndpoint.assistants]: expect.objectContaining({
            titleMethod: 'functions',
            titlePrompt: 'Generate a title for this assistant conversation',
            titlePromptTemplate: 'Assistant conversation template: {{messages}}',
          }),
          // Check Azure OpenAI endpoint configuration
          [EModelEndpoint.azureOpenAI]: expect.objectContaining({
            titleConvo: true,
            titleMethod: 'completion',
            titleModel: 'gpt-4',
            titlePrompt: 'Azure title prompt',
            titlePromptTemplate: 'Azure conversation: {{context}}',
          }),
+        }),
      }),
    );

@@ -667,18 +683,20 @@ describe('AppService', () => {
 
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          [EModelEndpoint.agents]: expect.objectContaining({
            disableBuilder: false,
            titleConvo: true,
            titleModel: 'gpt-4',
            titleMethod: 'structured',
            titlePrompt: 'Generate a descriptive title for this agent conversation',
            titlePromptTemplate: 'Agent conversation summary: {{content}}',
            recursionLimit: 15,
            capabilities: expect.arrayContaining([
              AgentCapabilities.tools,
              AgentCapabilities.actions,
            ]),
          }),
+        }),
      }),
    );

@@ -700,17 +718,19 @@ describe('AppService', () => {
 
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          [EModelEndpoint.openAI]: expect.objectContaining({
            titleConvo: true,
          }),
+        }),
      }),
    );
 
    // Verify that optional fields are not set when not provided
    const initCall = setAppConfig.mock.calls[0][0];
-    expect(initCall[EModelEndpoint.openAI].titlePrompt).toBeUndefined();
-    expect(initCall[EModelEndpoint.openAI].titlePromptTemplate).toBeUndefined();
-    expect(initCall[EModelEndpoint.openAI].titleMethod).toBeUndefined();
+    expect(initCall.endpoints[EModelEndpoint.openAI].titlePrompt).toBeUndefined();
+    expect(initCall.endpoints[EModelEndpoint.openAI].titlePromptTemplate).toBeUndefined();
+    expect(initCall.endpoints[EModelEndpoint.openAI].titleMethod).toBeUndefined();
  });
 
  it('should correctly configure titleEndpoint when specified', async () => {
@@ -777,19 +799,21 @@ describe('AppService', () => {
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
        // Check that 'all' endpoint config is loaded
+        endpoints: expect.objectContaining({
          all: expect.objectContaining({
            titleConvo: true,
            titleModel: 'gpt-4o-mini',
            titleMethod: 'structured',
            titlePrompt: 'Default title prompt for all endpoints',
            titlePromptTemplate: 'Default template: {{conversation}}',
            titleEndpoint: EModelEndpoint.anthropic,
            streamRate: 50,
          }),
          // Check that OpenAI endpoint has its own config
          [EModelEndpoint.openAI]: expect.objectContaining({
            titleConvo: true,
            titleModel: 'gpt-3.5-turbo',
          }),
+        }),
      }),
    );
@@ -883,18 +907,20 @@ describe('AppService updating app config and issuing warnings', () => {
 
    expect(setAppConfig).toHaveBeenCalledWith(
      expect.objectContaining({
+        endpoints: expect.objectContaining({
          assistants: expect.objectContaining({
            disableBuilder: true,
            pollIntervalMs: 5000,
            timeoutMs: 30000,
            supportedIds: ['id1', 'id2'],
          }),
+        }),
      }),
    );
 
    // Verify excludedIds is undefined when not provided
    const initCall = setAppConfig.mock.calls[0][0];
-    expect(initCall.assistants.excludedIds).toBeUndefined();
+    expect(initCall.endpoints.assistants.excludedIds).toBeUndefined();
  });
 
  it('should log a warning when both supportedIds and excludedIds are provided', async () => {
@@ -398,8 +398,8 @@ async function runAssistant({
  });
 
  const { endpoint = EModelEndpoint.azureAssistants } = openai.req.body;
-  /** @type {TCustomConfig.endpoints.assistants} */
-  const assistantsEndpointConfig = appConfig?.[endpoint] ?? {};
+  /** @type {AppConfig['endpoints']['assistants']} */
+  const assistantsEndpointConfig = appConfig.endpoints?.[endpoint] ?? {};
  const { pollIntervalMs, timeoutMs } = assistantsEndpointConfig;
 
  const run = await waitForRun({
@@ -36,7 +36,7 @@ const getCustomEndpointConfig = async (endpoint) => {
    throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
  }
 
-  const customEndpoints = appConfig[EModelEndpoint.custom] ?? [];
+  const customEndpoints = appConfig.endpoints?.[EModelEndpoint.custom] ?? [];
  return customEndpoints.find(
    (endpointConfig) => normalizeEndpointName(endpointConfig.name) === endpoint,
  );
@@ -28,9 +28,12 @@ async function getEndpointsConfig(req) {
 
  /** @type {TEndpointsConfig} */
  const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
-  if (mergedConfig[EModelEndpoint.assistants] && appConfig?.[EModelEndpoint.assistants]) {
+  if (
+    mergedConfig[EModelEndpoint.assistants] &&
+    appConfig?.endpoints?.[EModelEndpoint.assistants]
+  ) {
    const { disableBuilder, retrievalModels, capabilities, version, ..._rest } =
-      appConfig[EModelEndpoint.assistants];
+      appConfig.endpoints[EModelEndpoint.assistants];
 
    mergedConfig[EModelEndpoint.assistants] = {
      ...mergedConfig[EModelEndpoint.assistants],

@@ -40,9 +43,9 @@ async function getEndpointsConfig(req) {
      capabilities,
    };
  }
-  if (mergedConfig[EModelEndpoint.agents] && appConfig?.[EModelEndpoint.agents]) {
+  if (mergedConfig[EModelEndpoint.agents] && appConfig?.endpoints?.[EModelEndpoint.agents]) {
    const { disableBuilder, capabilities, allowedProviders, ..._rest } =
-      appConfig[EModelEndpoint.agents];
+      appConfig.endpoints[EModelEndpoint.agents];
 
    mergedConfig[EModelEndpoint.agents] = {
      ...mergedConfig[EModelEndpoint.agents],

@@ -52,9 +55,12 @@ async function getEndpointsConfig(req) {
    };
  }
 
-  if (mergedConfig[EModelEndpoint.azureAssistants] && appConfig?.[EModelEndpoint.azureAssistants]) {
+  if (
+    mergedConfig[EModelEndpoint.azureAssistants] &&
+    appConfig?.endpoints?.[EModelEndpoint.azureAssistants]
+  ) {
    const { disableBuilder, retrievalModels, capabilities, version, ..._rest } =
-      appConfig[EModelEndpoint.azureAssistants];
+      appConfig.endpoints[EModelEndpoint.azureAssistants];
 
    mergedConfig[EModelEndpoint.azureAssistants] = {
      ...mergedConfig[EModelEndpoint.azureAssistants],

@@ -65,8 +71,8 @@ async function getEndpointsConfig(req) {
    };
  }
 
-  if (mergedConfig[EModelEndpoint.bedrock] && appConfig?.[EModelEndpoint.bedrock]) {
-    const { availableRegions } = appConfig[EModelEndpoint.bedrock];
+  if (mergedConfig[EModelEndpoint.bedrock] && appConfig?.endpoints?.[EModelEndpoint.bedrock]) {
+    const { availableRegions } = appConfig.endpoints[EModelEndpoint.bedrock];
    mergedConfig[EModelEndpoint.bedrock] = {
      ...mergedConfig[EModelEndpoint.bedrock],
      availableRegions,
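The four getEndpointsConfig hunks above share one guard: an endpoint's overrides are merged only when both the default endpoint entry and the nested `appConfig.endpoints` entry exist. A hedged sketch of that pattern; the helper name is made up, and the real code copies specific fields such as `disableBuilder` and `capabilities` rather than spreading the whole object:

// Sketch only, not the project's implementation.
function applyEndpointOverride(mergedConfig, appConfig, endpointKey) {
  const override = appConfig?.endpoints?.[endpointKey];
  if (!mergedConfig[endpointKey] || !override) {
    return mergedConfig;
  }
  return {
    ...mergedConfig,
    [endpointKey]: { ...mergedConfig[endpointKey], ...override },
  };
}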
@@ -1,6 +1,7 @@
 const appConfig = require('./app');
 const { config } = require('./EndpointService');
 const getCachedTools = require('./getCachedTools');
+const getCustomConfig = require('./getCustomConfig');
 const loadCustomConfig = require('./loadCustomConfig');
 const loadConfigModels = require('./loadConfigModels');
 const loadDefaultModels = require('./loadDefaultModels');

@@ -17,5 +18,6 @@ module.exports = {
   loadAsyncEndpoints,
   ...appConfig,
   ...getCachedTools,
+  ...getCustomConfig,
   ...getEndpointsConfig,
 };
@@ -36,7 +36,7 @@ async function loadAsyncEndpoints(req) {
 
  const google = serviceKey || isGoogleKeyProvided ? { userProvide: googleUserProvides } : false;
 
-  const useAzure = appConfig[EModelEndpoint.azureOpenAI]?.plugins;
+  const useAzure = appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.plugins;
  const gptPlugins =
    useAzure || openAIApiKey || azureOpenAIApiKey
      ? {
@@ -15,8 +15,8 @@ async function loadConfigEndpoints(req) {
 
  const endpointsConfig = {};
 
-  if (Array.isArray(appConfig[EModelEndpoint.custom])) {
-    const customEndpoints = appConfig[EModelEndpoint.custom].filter(
+  if (Array.isArray(appConfig.endpoints?.[EModelEndpoint.custom])) {
+    const customEndpoints = appConfig.endpoints[EModelEndpoint.custom].filter(
      (endpoint) =>
        endpoint.baseURL &&
        endpoint.apiKey &&

@@ -51,14 +51,14 @@ async function loadConfigEndpoints(req) {
      }
    }
 
-  if (appConfig[EModelEndpoint.azureOpenAI]) {
+  if (appConfig.endpoints?.[EModelEndpoint.azureOpenAI]) {
    /** @type {Omit<TConfig, 'order'>} */
    endpointsConfig[EModelEndpoint.azureOpenAI] = {
      userProvide: false,
    };
  }
 
-  if (appConfig[EModelEndpoint.azureOpenAI]?.assistants) {
+  if (appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
    /** @type {Omit<TConfig, 'order'>} */
    endpointsConfig[EModelEndpoint.azureAssistants] = {
      userProvide: false,
@@ -14,7 +14,7 @@ async function loadConfigModels(req) {
    return {};
  }
  const modelsConfig = {};
-  const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
  const { modelNames } = azureConfig ?? {};
 
  if (modelNames && azureConfig) {

@@ -29,11 +29,11 @@ async function loadConfigModels(req) {
    modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels;
  }
 
-  if (!Array.isArray(appConfig[EModelEndpoint.custom])) {
+  if (!Array.isArray(appConfig.endpoints?.[EModelEndpoint.custom])) {
    return modelsConfig;
  }
 
-  const customEndpoints = appConfig[EModelEndpoint.custom].filter(
+  const customEndpoints = appConfig.endpoints[EModelEndpoint.custom].filter(
    (endpoint) =>
      endpoint.baseURL &&
      endpoint.apiKey &&
@@ -6,55 +6,57 @@ jest.mock('~/server/services/ModelService');
 jest.mock('./app');
 
 const exampleConfig = {
+  endpoints: {
    custom: [
      {
        name: 'Mistral',
        apiKey: '${MY_PRECIOUS_MISTRAL_KEY}',
        baseURL: 'https://api.mistral.ai/v1',
        models: {
          default: ['mistral-tiny', 'mistral-small', 'mistral-medium', 'mistral-large-latest'],
          fetch: true,
        },
        dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty'],
      },
      {
        name: 'OpenRouter',
        apiKey: '${MY_OPENROUTER_API_KEY}',
        baseURL: 'https://openrouter.ai/api/v1',
        models: {
          default: ['gpt-3.5-turbo'],
          fetch: true,
        },
        dropParams: ['stop'],
      },
      {
        name: 'groq',
        apiKey: 'user_provided',
        baseURL: 'https://api.groq.com/openai/v1/',
        models: {
          default: ['llama2-70b-4096', 'mixtral-8x7b-32768'],
          fetch: false,
        },
      },
      {
        name: 'Ollama',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:11434/v1/',
        models: {
          default: ['mistral', 'llama2:13b'],
          fetch: false,
        },
      },
      {
        name: 'MLX',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:8080/v1/',
        models: {
          default: ['Meta-Llama-3-8B-Instruct-4bit'],
          fetch: false,
        },
      },
    ],
+  },
 };
 
 describe('loadConfigModels', () => {
@@ -83,7 +85,9 @@ describe('loadConfigModels', () => {
 
  it('handles azure models and endpoint correctly', async () => {
    getAppConfig.mockResolvedValue({
+      endpoints: {
        azureOpenAI: { modelNames: ['model1', 'model2'] },
+      },
    });
 
    const result = await loadConfigModels(mockRequest);

@@ -102,7 +106,7 @@ describe('loadConfigModels', () => {
      },
    ];
 
-    getAppConfig.mockResolvedValue({ custom: customEndpoints });
+    getAppConfig.mockResolvedValue({ endpoints: { custom: customEndpoints } });
    fetchModels.mockResolvedValue(['customModel1', 'customModel2']);
 
    const result = await loadConfigModels(mockRequest);

@@ -112,20 +116,22 @@ describe('loadConfigModels', () => {
 
  it('correctly associates models to names using unique keys', async () => {
    getAppConfig.mockResolvedValue({
+      endpoints: {
        custom: [
          {
            baseURL: 'http://example.com',
            apiKey: 'API_KEY1',
            name: 'Model1',
            models: { fetch: true },
          },
          {
            baseURL: 'http://example.com',
            apiKey: 'API_KEY2',
            name: 'Model2',
            models: { fetch: true },
          },
        ],
+      },
    });
    fetchModels.mockImplementation(({ apiKey }) =>
      Promise.resolve(apiKey === 'API_KEY1' ? ['model1Data'] : ['model2Data']),

@@ -139,26 +145,28 @@ describe('loadConfigModels', () => {
  it('correctly handles multiple endpoints with the same baseURL but different apiKeys', async () => {
    // Mock the custom configuration to simulate the user's scenario
    getAppConfig.mockResolvedValue({
+      endpoints: {
        custom: [
          {
            name: 'LiteLLM',
            apiKey: '${LITELLM_ALL_MODELS}',
            baseURL: '${LITELLM_HOST}',
            models: { fetch: true },
          },
          {
            name: 'OpenAI',
            apiKey: '${LITELLM_OPENAI_MODELS}',
            baseURL: '${LITELLM_SECOND_HOST}',
            models: { fetch: true },
          },
          {
            name: 'Google',
            apiKey: '${LITELLM_GOOGLE_MODELS}',
            baseURL: '${LITELLM_SECOND_HOST}',
            models: { fetch: true },
          },
        ],
+      },
    });
 
    // Mock `fetchModels` to return different models based on the apiKey
@@ -246,8 +254,8 @@ describe('loadConfigModels', () => {
    // For groq and ollama, since the apiKey is "user_provided", models should not be fetched
    // Depending on your implementation's behavior regarding "default" models without fetching,
    // you may need to adjust the following assertions:
-    expect(result.groq).toBe(exampleConfig.custom[2].models.default);
-    expect(result.ollama).toBe(exampleConfig.custom[3].models.default);
+    expect(result.groq).toBe(exampleConfig.endpoints.custom[2].models.default);
+    expect(result.ollama).toBe(exampleConfig.endpoints.custom[3].models.default);

    // Verifying fetchModels was not called for groq and ollama
    expect(fetchModels).not.toHaveBeenCalledWith(

@@ -264,26 +272,28 @@ describe('loadConfigModels', () => {

  it('falls back to default models if fetching returns an empty array', async () => {
    getAppConfig.mockResolvedValue({
-      custom: [
-        {
-          name: 'EndpointWithSameFetchKey',
-          apiKey: 'API_KEY',
-          baseURL: 'http://example.com',
-          models: {
-            fetch: true,
-            default: ['defaultModel1'],
-          },
-        },
-        {
-          name: 'EmptyFetchModel',
-          apiKey: 'API_KEY',
-          baseURL: 'http://example.com',
-          models: {
-            fetch: true,
-            default: ['defaultModel1', 'defaultModel2'],
-          },
-        },
-      ],
+      endpoints: {
+        custom: [
+          {
+            name: 'EndpointWithSameFetchKey',
+            apiKey: 'API_KEY',
+            baseURL: 'http://example.com',
+            models: {
+              fetch: true,
+              default: ['defaultModel1'],
+            },
+          },
+          {
+            name: 'EmptyFetchModel',
+            apiKey: 'API_KEY',
+            baseURL: 'http://example.com',
+            models: {
+              fetch: true,
+              default: ['defaultModel1', 'defaultModel2'],
+            },
+          },
+        ],
+      },
    });

    fetchModels.mockResolvedValue([]);

@@ -295,17 +305,19 @@ describe('loadConfigModels', () => {

  it('falls back to default models if fetching returns a falsy value', async () => {
    getAppConfig.mockResolvedValue({
-      custom: [
-        {
-          name: 'FalsyFetchModel',
-          apiKey: 'API_KEY',
-          baseURL: 'http://example.com',
-          models: {
-            fetch: true,
-            default: ['defaultModel1', 'defaultModel2'],
-          },
-        },
-      ],
+      endpoints: {
+        custom: [
+          {
+            name: 'FalsyFetchModel',
+            apiKey: 'API_KEY',
+            baseURL: 'http://example.com',
+            models: {
+              fetch: true,
+              default: ['defaultModel1', 'defaultModel2'],
+            },
+          },
+        ],
+      },
    });

    fetchModels.mockResolvedValue(false);

@@ -354,7 +366,9 @@ describe('loadConfigModels', () => {
    ];

    getAppConfig.mockResolvedValue({
-      custom: testCases,
+      endpoints: {
+        custom: testCases,
+      },
    });

    const result = await loadConfigModels(mockRequest);

@@ -90,8 +90,7 @@ const initializeClient = async ({ req, res, endpointOption }) => {
  }

  const agentConfigs = new Map();
-  /** @type {Set<string>} */
-  const allowedProviders = new Set(appConfig?.[EModelEndpoint.agents]?.allowedProviders);
+  const allowedProviders = new Set(appConfig?.endpoints?.[EModelEndpoint.agents]?.allowedProviders);

  const loadTools = createToolLoader();
  /** @type {Array<MongoFile>} */

@@ -145,7 +144,7 @@ const initializeClient = async ({ req, res, endpointOption }) => {
    }
  }

-  let endpointConfig = appConfig[primaryConfig.endpoint];
+  let endpointConfig = appConfig.endpoints?.[primaryConfig.endpoint];
  if (!isAgentsEndpoint(primaryConfig.endpoint) && !endpointConfig) {
    try {
      endpointConfig = await getCustomEndpointConfig(primaryConfig.endpoint);

@@ -25,15 +25,14 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
  let clientOptions = {};

  /** @type {undefined | TBaseEndpoint} */
-  const anthropicConfig = appConfig[EModelEndpoint.anthropic];
+  const anthropicConfig = appConfig.endpoints?.[EModelEndpoint.anthropic];

  if (anthropicConfig) {
    clientOptions.streamRate = anthropicConfig.streamRate;
    clientOptions.titleModel = anthropicConfig.titleModel;
  }

-  /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
  if (allConfig) {
    clientOptions.streamRate = allConfig.streamRate;
  }

@@ -83,7 +83,7 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClie
  };

  /** @type {TAzureConfig | undefined} */
-  const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = appConfig.endpoints?.[EModelEndpoint.azureOpenAI];

  /** @type {AzureOptions | undefined} */
  let azureOptions;

@@ -52,14 +52,13 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
  let streamRate = Constants.DEFAULT_STREAM_RATE;

  /** @type {undefined | TBaseEndpoint} */
-  const bedrockConfig = appConfig[EModelEndpoint.bedrock];
+  const bedrockConfig = appConfig.endpoints?.[EModelEndpoint.bedrock];

  if (bedrockConfig && bedrockConfig.streamRate) {
    streamRate = bedrockConfig.streamRate;
  }

-  /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
  if (allConfig && allConfig.streamRate) {
    streamRate = allConfig.streamRate;
  }

@@ -118,8 +118,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
    endpointTokenConfig,
  };

-  /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
  if (allConfig) {
    customOptions.streamRate = allConfig.streamRate;
  }

|
|
@ -49,9 +49,9 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
|
||||||
|
|
||||||
const appConfig = await getAppConfig({ role: req.user?.role });
|
const appConfig = await getAppConfig({ role: req.user?.role });
|
||||||
/** @type {undefined | TBaseEndpoint} */
|
/** @type {undefined | TBaseEndpoint} */
|
||||||
const allConfig = appConfig.all;
|
const allConfig = appConfig.endpoints?.all;
|
||||||
/** @type {undefined | TBaseEndpoint} */
|
/** @type {undefined | TBaseEndpoint} */
|
||||||
const googleConfig = appConfig[EModelEndpoint.google];
|
const googleConfig = appConfig.endpoints?.[EModelEndpoint.google];
|
||||||
|
|
||||||
if (googleConfig) {
|
if (googleConfig) {
|
||||||
clientOptions.streamRate = googleConfig.streamRate;
|
clientOptions.streamRate = googleConfig.streamRate;
|
||||||
|
|
|
||||||
|
|
@@ -16,7 +16,7 @@ const addTitle = async (req, { text, response, client }) => {
  }
  const { GOOGLE_TITLE_MODEL } = process.env ?? {};
  const appConfig = await getAppConfig({ role: req.user?.role });
-  const providerConfig = appConfig[EModelEndpoint.google];
+  const providerConfig = appConfig.endpoints?.[EModelEndpoint.google];
  let model =
    providerConfig?.titleModel ??
    GOOGLE_TITLE_MODEL ??

@@ -66,7 +66,7 @@ const initializeClient = async ({

  const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
  /** @type {false | TAzureConfig} */
-  const azureConfig = isAzureOpenAI && appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = isAzureOpenAI && appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
  let serverless = false;
  if (isAzureOpenAI && azureConfig) {
    const { modelGroupMap, groupMap } = azureConfig;

@@ -115,15 +115,14 @@ const initializeClient = async ({
  }

  /** @type {undefined | TBaseEndpoint} */
-  const openAIConfig = appConfig[EModelEndpoint.openAI];
+  const openAIConfig = appConfig.endpoints?.[EModelEndpoint.openAI];

  if (!isAzureOpenAI && openAIConfig) {
    clientOptions.streamRate = openAIConfig.streamRate;
    clientOptions.titleModel = openAIConfig.titleModel;
  }

-  /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
  if (allConfig) {
    clientOptions.streamRate = allConfig.streamRate;
  }

@@ -22,28 +22,30 @@ jest.mock('~/server/services/UserService', () => ({

jest.mock('~/server/services/Config', () => ({
  getAppConfig: jest.fn().mockResolvedValue({
-    openAI: {
-      apiKey: 'test-key',
-    },
-    azureOpenAI: {
-      apiKey: 'test-azure-key',
-      modelNames: ['gpt-4-vision-preview', 'gpt-3.5-turbo', 'gpt-4'],
-      modelGroupMap: {
-        'gpt-4-vision-preview': {
-          group: 'librechat-westus',
-          deploymentName: 'gpt-4-vision-preview',
-          version: '2024-02-15-preview',
-        },
-      },
-      groupMap: {
-        'librechat-westus': {
-          apiKey: 'WESTUS_API_KEY',
-          instanceName: 'librechat-westus',
-          version: '2023-12-01-preview',
-          models: {
-            'gpt-4-vision-preview': {
-              deploymentName: 'gpt-4-vision-preview',
-              version: '2024-02-15-preview',
-            },
-          },
-        },
+    endpoints: {
+      openAI: {
+        apiKey: 'test-key',
+      },
+      azureOpenAI: {
+        apiKey: 'test-azure-key',
+        modelNames: ['gpt-4-vision-preview', 'gpt-3.5-turbo', 'gpt-4'],
+        modelGroupMap: {
+          'gpt-4-vision-preview': {
+            group: 'librechat-westus',
+            deploymentName: 'gpt-4-vision-preview',
+            version: '2024-02-15-preview',
+          },
+        },
+        groupMap: {
+          'librechat-westus': {
+            apiKey: 'WESTUS_API_KEY',
+            instanceName: 'librechat-westus',
+            version: '2023-12-01-preview',
+            models: {
+              'gpt-4-vision-preview': {
+                deploymentName: 'gpt-4-vision-preview',
+                version: '2024-02-15-preview',
+              },
+            },
+          },
+        },

@@ -51,9 +51,11 @@ async function processFileCitations({ user, toolArtifact, toolCallId, metadata }
  }

  const appConfig = await getAppConfig({ role: user?.role });
-  const maxCitations = appConfig?.[EModelEndpoint.agents]?.maxCitations ?? 30;
-  const maxCitationsPerFile = appConfig?.[EModelEndpoint.agents]?.maxCitationsPerFile ?? 5;
-  const minRelevanceScore = appConfig?.[EModelEndpoint.agents]?.minRelevanceScore ?? 0.45;
+  const maxCitations = appConfig.endpoints?.[EModelEndpoint.agents]?.maxCitations ?? 30;
+  const maxCitationsPerFile =
+    appConfig.endpoints?.[EModelEndpoint.agents]?.maxCitationsPerFile ?? 5;
+  const minRelevanceScore =
+    appConfig.endpoints?.[EModelEndpoint.agents]?.minRelevanceScore ?? 0.45;

  const sources = toolArtifact[Tools.file_search].sources || [];
  const filteredSources = sources.filter((source) => source.relevance >= minRelevanceScore);

@@ -165,7 +165,7 @@ const processDeleteRequest = async ({ req, files }) => {
  /** @type {Record<string, OpenAI | undefined>} */
  const client = { [FileSources.openai]: undefined, [FileSources.azure]: undefined };
  const initializeClients = async () => {
-    if (appConfig[EModelEndpoint.assistants]) {
+    if (appConfig.endpoints?.[EModelEndpoint.assistants]) {
      const openAIClient = await getOpenAIClient({
        req,
        overrideEndpoint: EModelEndpoint.assistants,

@@ -173,7 +173,7 @@ const processDeleteRequest = async ({ req, files }) => {
      client[FileSources.openai] = openAIClient.openai;
    }

-    if (!appConfig[EModelEndpoint.azureOpenAI]?.assistants) {
+    if (!appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
      return;
    }

@@ -33,7 +33,7 @@ async function retrieveRun({ thread_id, run_id, timeout, openai }) {
  }

  /** @type {TAzureConfig | undefined} */
-  const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = appConfig.endpoints?.[EModelEndpoint.azureOpenAI];

  if (azureConfig && azureConfig.assistants) {
    delete headers.Authorization;

@@ -504,7 +504,7 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
  /** Edge case: use defined/fallback capabilities when the "agents" endpoint is not enabled */
  if (enabledCapabilities.size === 0 && agent.id === Constants.EPHEMERAL_AGENT_ID) {
    enabledCapabilities = new Set(
-      appConfig?.[EModelEndpoint.agents]?.capabilities ?? defaultAgentCapabilities,
+      appConfig.endpoints?.[EModelEndpoint.agents]?.capabilities ?? defaultAgentCapabilities,
    );
  }
  const checkCapability = (capability) => {

@@ -29,9 +29,11 @@ describe('primeResources', () => {

    // Setup mock appConfig
    mockAppConfig = {
-      [EModelEndpoint.agents]: {
-        capabilities: [AgentCapabilities.ocr],
-      } as TAgentsEndpoint,
+      endpoints: {
+        [EModelEndpoint.agents]: {
+          capabilities: [AgentCapabilities.ocr],
+        } as TAgentsEndpoint,
+      },
    } as AppConfig;

    // Setup mock getFiles function

@@ -87,7 +89,7 @@ describe('primeResources', () => {

  describe('when OCR is disabled', () => {
    it('should not fetch OCR files even if tool_resources has OCR file_ids', async () => {
-      (mockAppConfig[EModelEndpoint.agents] as TAgentsEndpoint).capabilities = [];
+      (mockAppConfig.endpoints![EModelEndpoint.agents] as TAgentsEndpoint).capabilities = [];

      const tool_resources = {
        [EToolResources.ocr]: {

@@ -202,9 +202,9 @@ export const primeResources = async ({
      }
    }

-    const isOCREnabled = (appConfig?.[EModelEndpoint.agents]?.capabilities ?? []).includes(
-      AgentCapabilities.ocr,
-    );
+    const isOCREnabled = (
+      appConfig?.endpoints?.[EModelEndpoint.agents]?.capabilities ?? []
+    ).includes(AgentCapabilities.ocr);

    if (tool_resources[EToolResources.ocr]?.file_ids && isOCREnabled) {
      const context = await getFiles(

@@ -72,7 +72,7 @@ export const initializeOpenAI = async ({
  };

  const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
-  const azureConfig = isAzureOpenAI && appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = isAzureOpenAI && appConfig.endpoints?.[EModelEndpoint.azureOpenAI];

  if (isAzureOpenAI && azureConfig) {
    const { modelGroupMap, groupMap } = azureConfig;

@@ -143,8 +143,8 @@ export const initializeOpenAI = async ({

  const options = getOpenAIConfig(apiKey, finalClientOptions, endpoint);

-  const openAIConfig = appConfig[EModelEndpoint.openAI];
-  const allConfig = appConfig.all;
+  const openAIConfig = appConfig.endpoints?.[EModelEndpoint.openAI];
+  const allConfig = appConfig.endpoints?.all;
  const azureRate = modelName?.includes('gpt-4') ? 30 : 17;

  let streamRate: number | undefined;

@@ -59,26 +59,28 @@ export interface AppConfig {
  secureImageLinks?: TCustomConfig['secureImageLinks'];
  /** Processed model specifications */
  modelSpecs?: TCustomConfig['modelSpecs'];
-  /** OpenAI endpoint configuration */
-  openAI?: TEndpoint;
-  /** Google endpoint configuration */
-  google?: TEndpoint;
-  /** Bedrock endpoint configuration */
-  bedrock?: TEndpoint;
-  /** Anthropic endpoint configuration */
-  anthropic?: TEndpoint;
-  /** GPT plugins endpoint configuration */
-  gptPlugins?: TEndpoint;
-  /** Azure OpenAI endpoint configuration */
-  azureOpenAI?: TAzureConfig;
-  /** Assistants endpoint configuration */
-  assistants?: TAssistantEndpoint;
-  /** Azure assistants endpoint configuration */
-  azureAssistants?: TAssistantEndpoint;
-  /** Agents endpoint configuration */
-  [EModelEndpoint.agents]?: TAgentsEndpoint;
-  /** Global endpoint configuration */
-  all?: TEndpoint;
-  /** Any additional endpoint configurations */
-  [key: string]: unknown;
+  endpoints?: {
+    /** OpenAI endpoint configuration */
+    openAI?: TEndpoint;
+    /** Google endpoint configuration */
+    google?: TEndpoint;
+    /** Bedrock endpoint configuration */
+    bedrock?: TEndpoint;
+    /** Anthropic endpoint configuration */
+    anthropic?: TEndpoint;
+    /** GPT plugins endpoint configuration */
+    gptPlugins?: TEndpoint;
+    /** Azure OpenAI endpoint configuration */
+    azureOpenAI?: TAzureConfig;
+    /** Assistants endpoint configuration */
+    assistants?: TAssistantEndpoint;
+    /** Azure assistants endpoint configuration */
+    azureAssistants?: TAssistantEndpoint;
+    /** Agents endpoint configuration */
+    [EModelEndpoint.agents]?: TAgentsEndpoint;
+    /** Global endpoint configuration */
+    all?: TEndpoint;
+    /** Any additional endpoint configurations */
+    [key: string]: unknown;
+  };
}