Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 08:50:15 +01:00)
feat: Google Gemini ❇️ (#1355)
* refactor: add gemini-pro to the Google models list; use defaultModels for central model listing
* refactor(SetKeyDialog): create a useMultipleKeys hook to use for Azure, export `isJson` from utils, use EModelEndpoint
* refactor(useUserKey): rename variables to make the keyName setting clearer
* refactor(FileUpload): allow passing a container className string
* feat(GoogleClient): Gemini support
* refactor(GoogleClient): alternate stream speed for Gemini models
* feat(Gemini): styling/settings configuration for Gemini
* refactor(GoogleClient): subtract max response tokens from max context tokens when the context is above 32k, since the I/O max is combined between the two (see the sketch after this list)
* refactor(tokens): correct Google max token counts and subtract max response tokens when input/output counts are combined toward the max context count
* feat(google/initializeClient): handle both local and user_provided credentials and write tests
* fix(GoogleClient): catch undefined credentials; handle serviceKey correctly whether it is a string or an object; handle the case where no examples are passed; throw an error if the model is not a Generative Language model and no service account JSON key is provided; throw an error if it is a Generative model but no Google API key was provided
* refactor(loadAsyncEndpoints/google): activate the Google endpoint if either the service key JSON file is provided in /api/data or a GOOGLE_KEY is defined
* docs: updated Google configuration
* fix(ci): mock the import of the Service Account Key JSON file (auth.json)
* Update apis_and_tokens.md
* feat: increase the max output tokens slider for gemini pro
* refactor(GoogleSettings): handle max and default maxOutputTokens on model change
* chore: add sensitive redact regex
* docs: add warning about data privacy
* Update apis_and_tokens.md
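The token-budget bullets above can be made concrete with a small sketch. This is a hedged illustration under stated assumptions, not LibreChat's code: `getPromptTokenBudget`, `COMBINED_IO_THRESHOLD`, and the example numbers are hypothetical; the only idea taken from the commit message is that when input and output share one combined limit, the usable prompt budget is the context window minus the reserved response tokens.

```js
// Illustrative sketch only: the names, threshold, and numbers below are assumptions,
// not LibreChat's actual implementation or API.
// Idea from the commit message: for large Google/Gemini contexts, input and output
// tokens draw from one combined limit, so the prompt budget must reserve room
// for the response.

const COMBINED_IO_THRESHOLD = 32000; // hypothetical cutoff for "context above 32k"

/**
 * Returns how many tokens the prompt may consume.
 * @param {number} maxContextTokens - the model's total context window
 * @param {number} maxResponseTokens - tokens reserved for the model's reply
 * @returns {number} usable prompt budget
 */
function getPromptTokenBudget(maxContextTokens, maxResponseTokens) {
  if (maxContextTokens > COMBINED_IO_THRESHOLD) {
    // Combined I/O limit: subtract the reserved response tokens.
    return maxContextTokens - maxResponseTokens;
  }
  // Smaller models track input and output limits separately.
  return maxContextTokens;
}

// Example: a 32,760-token context with 8,192 tokens reserved for the reply
// leaves 24,568 tokens for the prompt.
console.log(getPromptTokenBudget(32760, 8192)); // 24568
```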
Parent: d259431316
Commit: 561ce8e86a
37 changed files with 702 additions and 219 deletions
@@ -1,10 +1,10 @@
+const { EModelEndpoint, defaultModels } = require('librechat-data-provider');
+const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;
 const {
   getOpenAIModels,
   getChatGPTBrowserModels,
   getAnthropicModels,
 } = require('~/server/services/ModelService');
-const { EModelEndpoint } = require('librechat-data-provider');
-const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;
 
 const fitlerAssistantModels = (str) => {
   return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
@@ -21,18 +21,7 @@ async function loadDefaultModels() {
     [EModelEndpoint.openAI]: openAI,
     [EModelEndpoint.azureOpenAI]: azureOpenAI,
     [EModelEndpoint.assistant]: openAI.filter(fitlerAssistantModels),
-    [EModelEndpoint.google]: [
-      'chat-bison',
-      'chat-bison-32k',
-      'codechat-bison',
-      'codechat-bison-32k',
-      'text-bison',
-      'text-bison-32k',
-      'text-unicorn',
-      'code-gecko',
-      'code-bison',
-      'code-bison-32k',
-    ],
+    [EModelEndpoint.google]: defaultModels[EModelEndpoint.google],
     [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
     [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
     [EModelEndpoint.gptPlugins]: gptPlugins,
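The second hunk of the `loadDefaultModels()` diff above swaps the hard-coded Google model list for a lookup into `defaultModels` from `librechat-data-provider`. As a rough sketch of the idea (the real export's shape and contents are assumptions here, beyond the model names visible in the removed lines and the `gemini-pro` addition named in the commit message), a centralized map lets every consumer read one source of truth:

```js
// Hedged sketch of a centralized default-model map; the actual export in
// librechat-data-provider may differ in shape and contents.
const { EModelEndpoint } = require('librechat-data-provider');

const defaultModels = {
  [EModelEndpoint.google]: [
    'gemini-pro', // added by this commit, per the commit message
    'chat-bison',
    'chat-bison-32k',
    'codechat-bison',
    'codechat-bison-32k',
    'text-bison',
    'text-bison-32k',
    'text-unicorn',
    'code-gecko',
    'code-bison',
    'code-bison-32k',
  ],
};

// Consumers reference the shared list instead of re-declaring it:
const googleModels = defaultModels[EModelEndpoint.google];
console.log(googleModels.includes('gemini-pro')); // true
```

Keeping the list in the shared data-provider package means the API server and the client settings UI can stay in sync when models are added, which appears to be the point of the "central model listing" bullet in the commit message.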