mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-01-21 01:36:13 +01:00
🤖 feat: Custom Endpoint Agents (experimental) (#4627)
* wip: first pass, custom endpoint agents * chore: imports * chore: consolidate exports * fix: imports * feat: convert message.content array to strings for legacy format handling (deepseek/groq) * refactor: normalize ollama endpoint name * refactor: update mocking in isDomainAllowed.spec.js * refactor: update deepseekModels in tokens.js and tokens.spec.js
This commit is contained in:
parent
9437e95315
commit
2e519f9b57
23 changed files with 230 additions and 73 deletions
|
|
@ -1,4 +1,4 @@
|
|||
const { CacheKeys } = require('librechat-data-provider');
|
||||
const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
|
||||
const loadCustomConfig = require('./loadCustomConfig');
|
||||
const getLogStores = require('~/cache/getLogStores');
|
||||
|
||||
|
|
@ -22,4 +22,19 @@ async function getCustomConfig() {
|
|||
return customConfig;
|
||||
}
|
||||
|
||||
module.exports = getCustomConfig;
|
||||
/**
 * Looks up the configuration entry for a single custom endpoint by name.
 * @param {string | EModelEndpoint} endpoint - Name of the custom endpoint to find.
 * @returns {Promise<Object | undefined>} The matching endpoint config, or
 *   `undefined` when no entry with that name is listed.
 * @throws {Error} When no custom configuration is loaded at all.
 */
const getCustomEndpointConfig = async (endpoint) => {
  const customConfig = await getCustomConfig();
  if (!customConfig) {
    throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
  }

  const { endpoints = {} } = customConfig;
  const customEndpoints = endpoints[EModelEndpoint.custom] ?? [];
  for (const endpointConfig of customEndpoints) {
    if (endpointConfig.name === endpoint) {
      return endpointConfig;
    }
  }
  return undefined;
};

module.exports = { getCustomConfig, getCustomEndpointConfig };
|
||||
|
|
|
|||
|
|
@ -10,12 +10,12 @@ const loadDefaultEndpointsConfig = require('./loadDefaultEConfig');
|
|||
|
||||
module.exports = {
|
||||
config,
|
||||
getCustomConfig,
|
||||
loadCustomConfig,
|
||||
loadConfigModels,
|
||||
loadDefaultModels,
|
||||
loadOverrideConfig,
|
||||
loadAsyncEndpoints,
|
||||
...getCustomConfig,
|
||||
loadConfigEndpoints,
|
||||
loadDefaultEndpointsConfig,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
|
||||
const { getCustomConfig } = require('./getCustomConfig');
|
||||
const { isUserProvided } = require('~/server/utils');
|
||||
const getCustomConfig = require('./getCustomConfig');
|
||||
|
||||
/**
|
||||
* Load config endpoints from the cached configuration object
|
||||
|
|
|
|||
|
|
@ -1,7 +1,16 @@
|
|||
const { Providers } = require('@librechat/agents');
|
||||
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
|
||||
const { fetchModels } = require('~/server/services/ModelService');
|
||||
const { getCustomConfig } = require('./getCustomConfig');
|
||||
const { isUserProvided } = require('~/server/utils');
|
||||
const getCustomConfig = require('./getCustomConfig');
|
||||
|
||||
/**
 * Normalizes an endpoint name: any casing of "ollama" is mapped to the
 * canonical lowercase provider constant; every other name passes through
 * unchanged.
 * @param {string} name
 * @returns {string}
 */
function normalizeEndpointName(name = '') {
  const isOllama = name.toLowerCase() === Providers.OLLAMA;
  return isOllama ? Providers.OLLAMA : name;
}
|
||||
|
||||
/**
|
||||
* Load config endpoints from the cached configuration object
|
||||
|
|
@ -61,7 +70,8 @@ async function loadConfigModels(req) {
|
|||
|
||||
for (let i = 0; i < customEndpoints.length; i++) {
|
||||
const endpoint = customEndpoints[i];
|
||||
const { models, name, baseURL, apiKey } = endpoint;
|
||||
const { models, name: configName, baseURL, apiKey } = endpoint;
|
||||
const name = normalizeEndpointName(configName);
|
||||
endpointsMap[name] = endpoint;
|
||||
|
||||
const API_KEY = extractEnvVariable(apiKey);
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
const { fetchModels } = require('~/server/services/ModelService');
|
||||
const { getCustomConfig } = require('./getCustomConfig');
|
||||
const loadConfigModels = require('./loadConfigModels');
|
||||
const getCustomConfig = require('./getCustomConfig');
|
||||
|
||||
jest.mock('~/server/services/ModelService');
|
||||
jest.mock('./getCustomConfig');
|
||||
|
|
@ -253,13 +253,13 @@ describe('loadConfigModels', () => {
|
|||
}),
|
||||
);
|
||||
|
||||
// For groq and Ollama, since the apiKey is "user_provided", models should not be fetched
|
||||
// For groq and ollama, since the apiKey is "user_provided", models should not be fetched
|
||||
// Depending on your implementation's behavior regarding "default" models without fetching,
|
||||
// you may need to adjust the following assertions:
|
||||
expect(result.groq).toBe(exampleConfig.endpoints.custom[2].models.default);
|
||||
expect(result.Ollama).toBe(exampleConfig.endpoints.custom[3].models.default);
|
||||
expect(result.ollama).toBe(exampleConfig.endpoints.custom[3].models.default);
|
||||
|
||||
// Verifying fetchModels was not called for groq and Ollama
|
||||
// Verifying fetchModels was not called for groq and ollama
|
||||
expect(fetchModels).not.toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
name: 'groq',
|
||||
|
|
@ -267,7 +267,7 @@ describe('loadConfigModels', () => {
|
|||
);
|
||||
expect(fetchModels).not.toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
name: 'Ollama',
|
||||
name: 'ollama',
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
|
@ -335,4 +335,68 @@ describe('loadConfigModels', () => {
|
|||
|
||||
expect(result.FalsyFetchModel).toEqual(['defaultModel1', 'defaultModel2']);
|
||||
});
|
||||
|
||||
it('normalizes Ollama endpoint name to lowercase', async () => {
  // Three casing variants of the same endpoint; only the model lists differ.
  const variants = [
    { name: 'Ollama', defaultModels: ['mistral', 'llama2'] },
    { name: 'OLLAMA', defaultModels: ['mixtral', 'codellama'] },
    { name: 'OLLaMA', defaultModels: ['phi', 'neural-chat'] },
  ];
  const testCases = variants.map(({ name, defaultModels }) => ({
    name,
    apiKey: 'user_provided',
    baseURL: 'http://localhost:11434/v1/',
    models: {
      default: defaultModels,
      fetch: false,
    },
  }));

  getCustomConfig.mockResolvedValue({
    endpoints: {
      custom: testCases,
    },
  });

  const result = await loadConfigModels(mockRequest);

  // Every casing variant should be normalized to lowercase "ollama",
  // and the last config in the array should override previous ones.
  expect(result.Ollama).toBeUndefined();
  expect(result.OLLAMA).toBeUndefined();
  expect(result.OLLaMA).toBeUndefined();
  expect(result.ollama).toEqual(['phi', 'neural-chat']);

  // fetchModels must not have been called for any casing,
  // since all entries use a user_provided apiKey.
  for (const { name } of variants) {
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name,
      }),
    );
  }
});
|
||||
});
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue