mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-20 18:30:15 +01:00
* refactor: add gemini-pro to google Models list; use defaultModels for central model listing * refactor(SetKeyDialog): create useMultipleKeys hook to use for Azure, export `isJson` from utils, use EModelEndpoint * refactor(useUserKey): change variable names to make keyName setting more clear * refactor(FileUpload): allow passing container className string * feat(GoogleClient): Gemini support * refactor(GoogleClient): alternate stream speed for Gemini models * feat(Gemini): styling/settings configuration for Gemini * refactor(GoogleClient): subtract max response tokens from max context tokens if context is above 32k (I/O max is combined between the two) * refactor(tokens): correct google max token counts and subtract max response tokens when input/output count are combined towards max context count * feat(google/initializeClient): handle both local and user_provided credentials and write tests * fix(GoogleClient): catch if credentials are undefined, handle if serviceKey is string or object correctly, handle no examples passed, throw error if not a Generative Language model and no service account JSON key is provided, throw error if it is a Generative model, but no google API key was provided * refactor(loadAsyncEndpoints/google): activate Google endpoint if either the service key JSON file is provided in /api/data, or a GOOGLE_KEY is defined. * docs: updated Google configuration * fix(ci): Mock import of Service Account Key JSON file (auth.json) * Update apis_and_tokens.md * feat: increase max output tokens slider for gemini pro * refactor(GoogleSettings): handle max and default maxOutputTokens on model change * chore: add sensitive redact regex * docs: add warning about data privacy * Update apis_and_tokens.md
32 lines
1.2 KiB
JavaScript
32 lines
1.2 KiB
JavaScript
const { EModelEndpoint, defaultModels } = require('librechat-data-provider');
|
|
const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;
|
|
const {
|
|
getOpenAIModels,
|
|
getChatGPTBrowserModels,
|
|
getAnthropicModels,
|
|
} = require('~/server/services/ModelService');
|
|
|
|
/**
 * Predicate for models usable with the Assistants endpoint: GPT-4 or GPT-3.5
 * models, excluding vision and instruct variants.
 *
 * @param {string} str - Model name to test.
 * @returns {boolean} `true` if the model should be listed for assistants.
 */
const fitlerAssistantModels = (str) => {
  // `\.` (not `\\.`): the original double backslash matched a literal
  // backslash followed by any character, so "gpt-3.5-*" models never matched.
  return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
};
|
|
|
|
/**
 * Loads the default model list for every supported endpoint.
 *
 * The three OpenAI-backed lookups (`openAI`, `azureOpenAI`, `gptPlugins`) are
 * independent async calls, so they are fetched concurrently with
 * `Promise.all` instead of being awaited sequentially.
 *
 * @returns {Promise<Object.<string, string[]>>} Map of endpoint key
 *   (`EModelEndpoint` value) to its array of model names.
 */
async function loadDefaultModels() {
  // Synchronous, static lists.
  const anthropic = getAnthropicModels();
  const chatGPTBrowser = getChatGPTBrowserModels();

  // Independent async lookups — run them in parallel rather than serially.
  const [openAI, azureOpenAI, gptPlugins] = await Promise.all([
    getOpenAIModels(),
    getOpenAIModels({ azure: true }),
    getOpenAIModels({ azure: useAzurePlugins, plugins: true }),
  ]);

  return {
    [EModelEndpoint.openAI]: openAI,
    [EModelEndpoint.azureOpenAI]: azureOpenAI,
    // Assistants reuse the OpenAI list, narrowed to assistant-capable models.
    [EModelEndpoint.assistant]: openAI.filter(fitlerAssistantModels),
    [EModelEndpoint.google]: defaultModels[EModelEndpoint.google],
    [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
    [EModelEndpoint.gptPlugins]: gptPlugins,
    [EModelEndpoint.anthropic]: anthropic,
  };
}
|
|
|
|
// Single export: the async loader consumed by endpoint/config route handlers.
module.exports = loadDefaultModels;
|