Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-09-22 06:00:56 +02:00

* feat: add GOOGLE_MODELS env var (see the example after this list)
* feat: add gemini vision support
* refactor(GoogleClient): adjust clientOptions handling depending on model
* fix(logger): fix redact logic and redact errors only
* fix(GoogleClient): do not allow non-multimodal messages when gemini-pro-vision is selected
* refactor(OpenAIClient): use `isVisionModel` client property to avoid calling validateVisionModel multiple times
* refactor: better debug logging by correctly traversing, redacting sensitive info, and logging condensed versions of long values
* refactor(GoogleClient): allow response errors to be thrown/caught above client handling so the user receives a meaningful error message; also debug orderedMessages, parentMessageId, and the buildMessages result
* refactor(AskController): use the model from client.modelOptions.model when saving intermediate messages, which requires the progress callback to be initialized after the client is initialized
* feat(useSSE): revert to the previous model if the model was auto-switched by the backend due to message attachments
* docs: update with Google updates, notes about Gemini Pro Vision
* fix: do not initialize Redis without USE_REDIS, and increase max listeners to 20
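A minimal sketch of the new GOOGLE_MODELS variable in the server's .env file, assuming the same comma-separated format as the other *_MODELS overrides; the model names are illustrative, not an exhaustive list:

# .env (illustrative values)
GOOGLE_MODELS=gemini-pro,gemini-pro-vision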
34 lines
1.2 KiB
JavaScript
const { EModelEndpoint } = require('librechat-data-provider');
const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;
const {
  getOpenAIModels,
  getGoogleModels,
  getAnthropicModels,
  getChatGPTBrowserModels,
} = require('~/server/services/ModelService');

// Keep only GPT-4 / GPT-3.5 models that are neither vision nor instruct variants.
const filterAssistantModels = (str) => {
  return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
};
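As an illustration (not part of the file), the assistant filter keeps only GPT-4 / GPT-3.5 names that are neither vision nor instruct variants; the model names below are examples:

const names = [
  'gpt-4-1106-preview',
  'gpt-3.5-turbo',
  'gpt-4-vision-preview',
  'gpt-3.5-turbo-instruct',
  'text-davinci-003',
];
console.log(names.filter(filterAssistantModels));
// => [ 'gpt-4-1106-preview', 'gpt-3.5-turbo' ]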
async function loadDefaultModels() {
  const google = getGoogleModels();
  const openAI = await getOpenAIModels();
  const anthropic = getAnthropicModels();
  const chatGPTBrowser = getChatGPTBrowserModels();
  const azureOpenAI = await getOpenAIModels({ azure: true });
  const gptPlugins = await getOpenAIModels({ azure: useAzurePlugins, plugins: true });

  // Map each endpoint to its available model list.
  return {
    [EModelEndpoint.openAI]: openAI,
    [EModelEndpoint.google]: google,
    [EModelEndpoint.anthropic]: anthropic,
    [EModelEndpoint.gptPlugins]: gptPlugins,
    [EModelEndpoint.azureOpenAI]: azureOpenAI,
    [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
    [EModelEndpoint.assistant]: openAI.filter(filterAssistantModels),
  };
}

module.exports = loadDefaultModels;
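A minimal usage sketch, assuming the module resolves at ~/server/services/Config/loadDefaultModels (the require path and the caller are assumptions, not shown on this page):

const loadDefaultModels = require('~/server/services/Config/loadDefaultModels'); // path assumed

(async () => {
  const modelsConfig = await loadDefaultModels();
  // Expected shape: one array of model names per endpoint key, e.g.
  // { openAI: [...], google: [...], anthropic: [...], bingAI: ['BingAI', 'Sydney'], ... }
  console.log(Object.keys(modelsConfig));
})();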