🦙 fix: Ollama Custom Headers (#10314)

* 🦙 fix: Ollama Custom Headers

* chore: Correct import order for resolveHeaders in OllamaClient.js

* fix: Improve error logging for Ollama API model fetch failure

* ci: update Ollama model fetch tests

* ci: Add unit test for passing headers and user object to Ollama fetchModels
author Danny Avila, 2025-10-30 14:48:10 -04:00, committed by GitHub
parent 5e35b7d09d
commit d904b281f1
5 changed files with 107 additions and 56 deletions


@@ -57,7 +57,7 @@ async function loadConfigModels(req) {
   for (let i = 0; i < customEndpoints.length; i++) {
     const endpoint = customEndpoints[i];
-    const { models, name: configName, baseURL, apiKey } = endpoint;
+    const { models, name: configName, baseURL, apiKey, headers: endpointHeaders } = endpoint;
     const name = normalizeEndpointName(configName);
     endpointsMap[name] = endpoint;
@@ -76,6 +76,8 @@ async function loadConfigModels(req) {
           apiKey: API_KEY,
           baseURL: BASE_URL,
           user: req.user.id,
+          userObject: req.user,
+          headers: endpointHeaders,
           direct: endpoint.directEndpoint,
           userIdQuery: models.userIdQuery,
         });
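The config loader now pulls `headers` off each custom endpoint definition and forwards it, together with the full `req.user` object, into `fetchModels`. Passing the user object matters because configured header values may contain per-user placeholders that must be resolved at request time. A minimal sketch of that resolution idea, assuming the documented `{{LIBRECHAT_USER_ID}}` and `{{LIBRECHAT_USER_EMAIL}}` placeholders (`resolveHeadersSketch` is illustrative, not the actual `resolveHeaders` from `@librechat/api`):

// Sketch: substitute user placeholders in configured header values.
function resolveHeadersSketch({ headers = {}, user = {} }) {
  const substitutions = {
    '{{LIBRECHAT_USER_ID}}': user.id ?? '',
    '{{LIBRECHAT_USER_EMAIL}}': user.email ?? '',
  };
  const resolved = {};
  for (const [key, value] of Object.entries(headers)) {
    resolved[key] = Object.entries(substitutions).reduce(
      (out, [placeholder, replacement]) => out.split(placeholder).join(replacement),
      String(value),
    );
  }
  return resolved;
}

For example, `resolveHeadersSketch({ headers: { 'x-user': '{{LIBRECHAT_USER_ID}}' }, user: { id: 'abc' } })` yields `{ 'x-user': 'abc' }`.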


@@ -1,4 +1,3 @@
-const { Providers } = require('@librechat/agents');
 const {
   resolveHeaders,
   isUserProvided,
@@ -143,39 +142,27 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
   if (optionsOnly) {
     const modelOptions = endpointOption?.model_parameters ?? {};
-    if (endpoint !== Providers.OLLAMA) {
-      clientOptions = Object.assign(
-        {
-          modelOptions,
-        },
-        clientOptions,
-      );
-      clientOptions.modelOptions.user = req.user.id;
-      const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
-      if (options != null) {
-        options.useLegacyContent = true;
-        options.endpointTokenConfig = endpointTokenConfig;
-      }
-      if (!clientOptions.streamRate) {
-        return options;
-      }
-      options.llmConfig.callbacks = [
-        {
-          handleLLMNewToken: createHandleLLMNewToken(clientOptions.streamRate),
-        },
-      ];
+    clientOptions = Object.assign(
+      {
+        modelOptions,
+      },
+      clientOptions,
+    );
+    clientOptions.modelOptions.user = req.user.id;
+    const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
+    if (options != null) {
+      options.useLegacyContent = true;
+      options.endpointTokenConfig = endpointTokenConfig;
+    }
+    if (!clientOptions.streamRate) {
       return options;
     }
-
-    if (clientOptions.reverseProxyUrl) {
-      modelOptions.baseUrl = clientOptions.reverseProxyUrl.split('/v1')[0];
-      delete clientOptions.reverseProxyUrl;
-    }
-
-    return {
-      useLegacyContent: true,
-      llmConfig: modelOptions,
-    };
+    options.llmConfig.callbacks = [
+      {
+        handleLLMNewToken: createHandleLLMNewToken(clientOptions.streamRate),
+      },
+    ];
+    return options;
   }
   const client = new OpenAIClient(apiKey, clientOptions);
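With the `Providers.OLLAMA` special case removed, Ollama now takes the same `optionsOnly` path as every other custom endpoint: options are built by `getOpenAIConfig`, and when a `streamRate` is configured a token callback is attached. The old Ollama-only branch, which rewrote `reverseProxyUrl` into `modelOptions.baseUrl` and returned a bare `llmConfig`, is dropped in favor of that generic path. A sketch of the throttling idea behind `createHandleLLMNewToken` (an assumption about its behavior; the real helper in `@librechat/api` may differ in detail):

// Sketch: pause after each generated token so streamed output is paced
// at roughly `streamRate` milliseconds per token instead of arriving in a burst.
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

const createHandleLLMNewTokenSketch = (streamRate) => async () => {
  if (streamRate) {
    await sleep(streamRate);
  }
};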


@@ -39,6 +39,8 @@ const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService')
  * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
  * @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response.
  * @param {string} [params.tokenKey] - The cache key to save the token configuration. Uses `name` if omitted.
+ * @param {Record<string, string>} [params.headers] - Optional headers for the request.
+ * @param {Partial<IUser>} [params.userObject] - Optional user object for header resolution.
  * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
  * @async
  */
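For reference, a hypothetical call exercising the two newly documented parameters (every value below is made up):

// Hypothetical invocation of fetchModels with the new parameters.
const models = await fetchModels({
  user: req.user.id,
  apiKey: 'ollama', // assumption: a placeholder key for a local Ollama instance
  baseURL: 'http://localhost:11434',
  name: 'ollama',
  headers: { Authorization: 'Bearer {{LIBRECHAT_USER_ID}}' },
  userObject: req.user,
});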
@@ -52,6 +54,8 @@ const fetchModels = async ({
   userIdQuery = false,
   createTokenConfig = true,
   tokenKey,
+  headers,
+  userObject,
 }) => {
   let models = [];
   const baseURL = direct ? extractBaseURL(_baseURL) : _baseURL;
@@ -65,7 +69,13 @@
   }
   if (name && name.toLowerCase().startsWith(Providers.OLLAMA)) {
-    return await OllamaClient.fetchModels(baseURL);
+    try {
+      return await OllamaClient.fetchModels(baseURL, { headers, user: userObject });
+    } catch (ollamaError) {
+      const logMessage =
+        'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.';
+      logAxiosError({ message: logMessage, error: ollamaError });
+    }
   }

   try {

@@ -1,5 +1,5 @@
 const axios = require('axios');
-const { logger } = require('@librechat/data-schemas');
+const { logAxiosError, resolveHeaders } = require('@librechat/api');
 const { EModelEndpoint, defaultModels } = require('librechat-data-provider');
 const {
@@ -18,6 +18,8 @@ jest.mock('@librechat/api', () => {
     processModelData: jest.fn((...args) => {
       return originalUtils.processModelData(...args);
     }),
+    logAxiosError: jest.fn(),
+    resolveHeaders: jest.fn((options) => options?.headers || {}),
   };
 });
@@ -277,12 +279,51 @@ describe('fetchModels with Ollama specific logic', () => {
     expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']);
     expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
+      headers: {},
       timeout: 5000,
     });
   });

-  it('should handle errors gracefully when fetching Ollama models fails', async () => {
-    axios.get.mockRejectedValue(new Error('Network error'));
+  it('should pass headers and user object to Ollama fetchModels', async () => {
+    const customHeaders = {
+      'Content-Type': 'application/json',
+      Authorization: 'Bearer custom-token',
+    };
+    const userObject = {
+      id: 'user789',
+      email: 'test@example.com',
+    };
+
+    resolveHeaders.mockReturnValueOnce(customHeaders);
+
+    const models = await fetchModels({
+      user: 'user789',
+      apiKey: 'testApiKey',
+      baseURL: 'https://api.ollama.test.com',
+      name: 'ollama',
+      headers: customHeaders,
+      userObject,
+    });
+
+    expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']);
+
+    expect(resolveHeaders).toHaveBeenCalledWith({
+      headers: customHeaders,
+      user: userObject,
+    });
+    expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
+      headers: customHeaders,
+      timeout: 5000,
+    });
+  });
+
+  it('should handle errors gracefully when fetching Ollama models fails and fallback to OpenAI-compatible fetch', async () => {
+    axios.get.mockRejectedValueOnce(new Error('Ollama API error'));
+    axios.get.mockResolvedValueOnce({
+      data: {
+        data: [{ id: 'fallback-model-1' }, { id: 'fallback-model-2' }],
+      },
+    });

     const models = await fetchModels({
       user: 'user789',
       apiKey: 'testApiKey',
@@ -290,8 +331,13 @@ describe('fetchModels with Ollama specific logic', () => {
       name: 'OllamaAPI',
     });

-    expect(models).toEqual([]);
-    expect(logger.error).toHaveBeenCalled();
+    expect(models).toEqual(['fallback-model-1', 'fallback-model-2']);
+    expect(logAxiosError).toHaveBeenCalledWith({
+      message:
+        'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.',
+      error: expect.any(Error),
+    });
+    expect(axios.get).toHaveBeenCalledTimes(2);
   });

   it('should return an empty array if no baseURL is provided', async () => {