feat: Add 5-second timeout for Fetching Model Lists (#4423)

* refactor: add 5 second timeout for fetching AI provider model lists

* ci: fix test due to recent changes
Danny Avila 2024-10-15 19:37:41 -04:00 committed by GitHub
parent ef118009f6
commit c54a57019e
5 changed files with 16 additions and 9 deletions
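
For background, axios's timeout option (in milliseconds) aborts a request that exceeds the limit and rejects the promise with an error whose code is typically ECONNABORTED, so a model-list fetch against an unreachable provider now fails fast instead of hanging. A minimal sketch of the behavior, with a placeholder URL:

const axios = require('axios');

// Placeholder endpoint for illustration only.
const url = 'http://localhost:11434/api/tags';

axios
  .get(url, { timeout: 5000 }) // give up after 5 seconds
  .then((res) => console.log(res.data))
  .catch((err) => {
    if (err.code === 'ECONNABORTED') {
      console.error('Model list request timed out:', err.message);
    } else {
      console.error('Request failed:', err.message);
    }
  });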

api/app/clients/OllamaClient.js

@@ -60,7 +60,9 @@ class OllamaClient {
     try {
       const ollamaEndpoint = deriveBaseURL(baseURL);
       /** @type {Promise<AxiosResponse<OllamaListResponse>>} */
-      const response = await axios.get(`${ollamaEndpoint}/api/tags`);
+      const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
+        timeout: 5000,
+      });
       models = response.data.models.map((tag) => tag.name);
       return models;
     } catch (error) {

api/server/services/ModelService.js

@@ -1,7 +1,7 @@
 const axios = require('axios');
 const { HttpsProxyAgent } = require('https-proxy-agent');
 const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
-const { extractBaseURL, inputSchema, processModelData, logAxiosError } = require('~/utils');
+const { inputSchema, logAxiosError, extractBaseURL, processModelData } = require('~/utils');
 const { OllamaClient } = require('~/app/clients/OllamaClient');
 const getLogStores = require('~/cache/getLogStores');
 const { logger } = require('~/config');
@@ -66,6 +66,7 @@ const fetchModels = async ({
     headers: {
       Authorization: `Bearer ${apiKey}`,
     },
+    timeout: 5000,
   };

   if (process.env.PROXY) {
@@ -149,6 +150,7 @@ const fetchOpenAIModels = async (opts, _models = []) => {
       baseURL,
       azure: opts.azure,
       user: opts.user,
+      name: baseURL,
     });
   }

@@ -175,7 +177,8 @@ const fetchOpenAIModels = async (opts, _models = []) => {
  * @param {object} opts - The options for fetching the models.
  * @param {string} opts.user - The user ID to send to the API.
  * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
- * @param {boolean} [opts.plugins=false] - Whether to fetch models from the plugins.
+ * @param {boolean} [opts.plugins=false] - Whether to fetch models for the plugins endpoint.
+ * @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants endpoint.
  */
 const getOpenAIModels = async (opts) => {
   let models = defaultModels[EModelEndpoint.openAI];
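
In fetchModels, the new timeout field rides along in the same options object as the Authorization header, so each provider's model-list request is capped at five seconds. A rough sketch of the resulting call shape, with placeholder values (the real function also wires up proxy agents, Ollama handling, and error logging):

const axios = require('axios');

// Placeholder credentials and endpoint for illustration only.
const apiKey = 'sk-placeholder';
const baseURL = 'https://api.openai.com/v1';

async function listModels() {
  const options = {
    headers: { Authorization: `Bearer ${apiKey}` },
    timeout: 5000, // abort the model-list request after 5 seconds
  };
  const res = await axios.get(`${baseURL}/models`, options);
  // OpenAI-compatible APIs return { data: [{ id: 'model-name', ... }] }.
  return res.data.data.map((model) => model.id);
}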

api/server/services/ModelService.spec.js

@@ -291,7 +291,9 @@ describe('fetchModels with Ollama specific logic', () => {
     });

     expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']);
-    expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags'); // Adjusted to expect only one argument if no options are passed
+    expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
+      timeout: 5000,
+    });
   });

   it('should handle errors gracefully when fetching Ollama models fails', async () => {
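
Since axios.get is now called with a second argument, the test must assert on both. If looser coupling is preferred, jest's expect.objectContaining would tolerate future additions to the options object; a possible variant, not what this commit uses:

expect(axios.get).toHaveBeenCalledWith(
  'https://api.ollama.test.com/api/tags',
  expect.objectContaining({ timeout: 5000 }),
);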

api/utils/logAxiosError.js → api/utils/axios.js

@@ -42,4 +42,4 @@ const logAxiosError = ({ message, error }) => {
   }
 };

-module.exports = logAxiosError;
+module.exports = { logAxiosError };
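
The helper moves from being the module's sole export to a named export, so any consumer that requires the file directly has to destructure; imports that go through ~/utils keep working because the index re-exports the name (see the next file). Assuming the module path implied by the index change below:

// Before the change (default-style export):
//   const logAxiosError = require('./logAxiosError');

// After the change (named export, destructured):
const { logAxiosError } = require('./axios');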

api/utils/index.js

@@ -1,17 +1,17 @@
 const loadYaml = require('./loadYaml');
+const axiosHelpers = require('./axios');
 const tokenHelpers = require('./tokens');
 const azureUtils = require('./azureUtils');
 const deriveBaseURL = require('./deriveBaseURL');
-const logAxiosError = require('./logAxiosError');
 const extractBaseURL = require('./extractBaseURL');
 const findMessageContent = require('./findMessageContent');

 module.exports = {
   loadYaml,
-  ...tokenHelpers,
-  ...azureUtils,
   deriveBaseURL,
-  logAxiosError,
   extractBaseURL,
+  ...azureUtils,
+  ...axiosHelpers,
+  ...tokenHelpers,
   findMessageContent,
 };
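
Spreading the helper modules into the export object keeps the public surface of ~/utils stable: logAxiosError is still a named export of the index even though it now arrives via axiosHelpers. A minimal sketch of the pattern with stand-in modules (later spreads win on key collisions, which is why ordering matters):

// axios.js (stand-in for ./axios)
module.exports = {
  logAxiosError: ({ message, error }) => console.error(message, error?.message),
};

// index.js
const axiosHelpers = require('./axios');
module.exports = {
  ...axiosHelpers, // every named export of ./axios becomes a named export here
};

// consumer.js: unchanged call sites keep working.
const { logAxiosError } = require('./index');
logAxiosError({ message: 'fetch failed', error: new Error('timeout') });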