Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-17 08:50:15 +01:00
🤖 feat: Gemini 1.5 Support (+Vertex AI) (#2383)
* WIP: gemini-1.5 support
* feat: extended vertex ai support
* fix: handle possibly undefined modelName
* fix: gpt-4-turbo-preview invalid vision model
* feat: specify `fileConfig.imageOutputType` and make PNG default image conversion type
* feat: better truncation for errors including base64 strings
* fix: gemini inlineData formatting
* feat: RAG augmented prompt for gemini-1.5
* feat: gemini-1.5 rates and token window
* chore: adjust tokens, update docs, update vision Models
* chore: add back `ChatGoogleVertexAI` for chat models via vertex ai
* refactor: ask/edit controllers to not use `unfinished` field for google endpoint
* chore: remove comment
* chore(ci): fix AppService test
* chore: remove comment
* refactor(GoogleSearch): use `GOOGLE_SEARCH_API_KEY` instead, issue warning for old variable
* chore: bump data-provider to 0.5.4
* chore: update docs
* fix: condition for gemini-1.5 using generative ai lib
* chore: update docs
* ci: add additional AppService test for `imageOutputType`
* refactor: optimize new config value `imageOutputType`
* chore: bump CONFIG_VERSION
* fix(assistants): avatar upload
Parent: fce7246ac1
Commit: 9d854dac07
37 changed files with 1030 additions and 258 deletions
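The `imageOutputType` items in the commit message above add a configurable image conversion format under `fileConfig`, with PNG as the new default. As a minimal sketch of that behavior only (the helper name and any config shape beyond `fileConfig.imageOutputType` are assumptions, not LibreChat's actual code):

// Sketch under assumptions: resolve the image output type from a parsed
// custom config, falling back to PNG, which this commit makes the default
// conversion type. `resolveImageOutputType` is a hypothetical helper name.
const DEFAULT_IMAGE_OUTPUT_TYPE = 'png';

function resolveImageOutputType(customConfig = {}) {
  const { fileConfig = {} } = customConfig;
  return fileConfig.imageOutputType ?? DEFAULT_IMAGE_OUTPUT_TYPE;
}

// resolveImageOutputType({ fileConfig: { imageOutputType: 'webp' } }) -> 'webp'
// resolveImageOutputType({})                                          -> 'png'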
@@ -131,6 +131,18 @@ describe('getModelMaxTokens', () => {
  });

  test('should return correct tokens for partial match - Google models', () => {
    expect(getModelMaxTokens('gemini-1.5-pro-latest', EModelEndpoint.google)).toBe(
      maxTokensMap[EModelEndpoint.google]['gemini-1.5'],
    );
    expect(getModelMaxTokens('gemini-1.5-pro-preview-0409', EModelEndpoint.google)).toBe(
      maxTokensMap[EModelEndpoint.google]['gemini-1.5'],
    );
    expect(getModelMaxTokens('gemini-pro-vision', EModelEndpoint.google)).toBe(
      maxTokensMap[EModelEndpoint.google]['gemini-pro-vision'],
    );
    expect(getModelMaxTokens('gemini-1.0', EModelEndpoint.google)).toBe(
      maxTokensMap[EModelEndpoint.google]['gemini'],
    );
    expect(getModelMaxTokens('gemini-pro', EModelEndpoint.google)).toBe(
      maxTokensMap[EModelEndpoint.google]['gemini'],
    );
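The Google assertions above exercise partial model-name matching: a full model string such as 'gemini-1.5-pro-latest' should resolve to the more specific 'gemini-1.5' entry of maxTokensMap, while 'gemini-pro' and 'gemini-1.0' fall through to the generic 'gemini' entry. A minimal sketch of that lookup idea, assuming a longest-key-first substring match (this is not LibreChat's actual `getModelMaxTokens` implementation, and the token limits below are placeholders):

// Sketch only: longest-key-first partial matching over a per-endpoint token map.
// The numeric limits are illustrative placeholders, not the real values.
const googleTokens = {
  'gemini-1.5': 1000000,
  'gemini-pro-vision': 32000,
  'gemini': 30720,
};

function matchMaxTokens(modelName, tokenMap) {
  // Check longer (more specific) keys first so 'gemini-1.5-pro-latest'
  // matches 'gemini-1.5' rather than the generic 'gemini' entry.
  const keys = Object.keys(tokenMap).sort((a, b) => b.length - a.length);
  const match = keys.find((key) => modelName.includes(key));
  return match ? tokenMap[match] : undefined;
}

// matchMaxTokens('gemini-1.5-pro-preview-0409', googleTokens) -> 1000000
// matchMaxTokens('gemini-pro', googleTokens)                  -> 30720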
@@ -142,6 +154,15 @@ describe('getModelMaxTokens', () => {
    );
  });

  test('should return correct tokens for partial match - Cohere models', () => {
    expect(getModelMaxTokens('command', EModelEndpoint.custom)).toBe(
      maxTokensMap[EModelEndpoint.custom]['command'],
    );
    expect(getModelMaxTokens('command-r-plus', EModelEndpoint.custom)).toBe(
      maxTokensMap[EModelEndpoint.custom]['command-r-plus'],
    );
  });

  test('should return correct tokens when using a custom endpointTokenConfig', () => {
    const customTokenConfig = {
      'custom-model': 12345,
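The last test above is cut off in this view, but its name and the visible customTokenConfig fragment indicate that a caller-supplied endpointTokenConfig can override the built-in limits. A hedged sketch of that precedence, using an illustrative function name rather than the real getModelMaxTokens signature:

// Sketch under assumptions: a per-endpoint token config supplied by the caller
// is consulted before the built-in map. `lookupMaxTokens` is hypothetical.
function lookupMaxTokens(modelName, defaultMap, endpointTokenConfig) {
  if (endpointTokenConfig && endpointTokenConfig[modelName] != null) {
    return endpointTokenConfig[modelName];
  }
  return defaultMap[modelName];
}

// lookupMaxTokens('custom-model', {}, { 'custom-model': 12345 }) -> 12345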