📸 feat: Gemini vision, Improved Logs and Multi-modal Handling (#1368)
* feat: add GOOGLE_MODELS env var
* feat: add gemini vision support
* refactor(GoogleClient): adjust clientOptions handling depending on model
* fix(logger): fix redact logic and redact errors only
* fix(GoogleClient): do not allow non-multiModal messages when gemini-pro-vision is selected
* refactor(OpenAIClient): use `isVisionModel` client property to avoid calling validateVisionModel multiple times
* refactor: better debug logging by correctly traversing, redacting sensitive info, and logging condensed versions of long values
* refactor(GoogleClient): allow response errors to be thrown/caught above client handling so the user receives a meaningful error message; debug orderedMessages, parentMessageId, and buildMessages result
* refactor(AskController): use model from client.modelOptions.model when saving intermediate messages, which requires the progress callback to be initialized after the client is initialized
* feat(useSSE): revert to previous model if the model was auto-switched by backend due to message attachments
* docs: update with google updates, notes about Gemini Pro Vision
* fix: redis should not be initialized without USE_REDIS and increase max listeners to 20
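The `isVisionModel` property mentioned above presumably caches the result of a check against the shared `visionModels` list that this commit extends. A minimal sketch of that kind of check, assuming a simple substring match; the helper and class names here are illustrative, not LibreChat's actual implementation:

// Hypothetical sketch of a vision-model check; the real validateVisionModel
// in LibreChat may differ. `visionModels` mirrors the list changed in this commit.
const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision'];

function validateVisionModel(model?: string): boolean {
  if (!model) {
    return false;
  }
  // Substring match so versioned names like 'gpt-4-vision-preview' also qualify.
  return visionModels.some((visionModel) => model.includes(visionModel));
}

// A client could compute the flag once at construction instead of re-checking
// per message, which is the motivation the commit gives for `isVisionModel`.
class ExampleClient {
  isVisionModel: boolean;

  constructor(public modelOptions: { model: string }) {
    this.isVisionModel = validateVisionModel(modelOptions.model);
  }
}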
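The logging bullets describe traversing objects, redacting sensitive fields, and condensing long values before they reach the debug log. A rough sketch of that idea, with hypothetical key names and truncation length; the actual logger configuration is not shown in this diff:

// Hypothetical redact-and-condense traversal for debug logging; the key set
// and the 100-character cutoff are illustrative, not LibreChat's values.
const SENSITIVE_KEYS = new Set(['apiKey', 'token', 'password']);
const MAX_VALUE_LENGTH = 100;

function condense(value: string): string {
  return value.length > MAX_VALUE_LENGTH
    ? `${value.slice(0, MAX_VALUE_LENGTH)}... [truncated]`
    : value;
}

function redactForLogging(input: unknown): unknown {
  if (typeof input === 'string') {
    return condense(input);
  }
  if (Array.isArray(input)) {
    return input.map(redactForLogging);
  }
  if (input !== null && typeof input === 'object') {
    return Object.fromEntries(
      Object.entries(input as Record<string, unknown>).map(([key, value]) => [
        key,
        SENSITIVE_KEYS.has(key) ? '[REDACTED]' : redactForLogging(value),
      ]),
    );
  }
  return input;
}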
parent 676f133545
commit 0c326797dd
21 changed files with 356 additions and 210 deletions
@@ -25,6 +25,7 @@ export const defaultEndpoints: EModelEndpoint[] = [
 export const defaultModels = {
   [EModelEndpoint.google]: [
     'gemini-pro',
+    'gemini-pro-vision',
     'chat-bison',
     'chat-bison-32k',
     'codechat-bison',
@@ -135,6 +136,7 @@ export const modularEndpoints = new Set<EModelEndpoint | string>([
 export const supportsFiles = {
   [EModelEndpoint.openAI]: true,
+  [EModelEndpoint.google]: true,
   [EModelEndpoint.assistant]: true,
 };
 
@@ -144,7 +146,7 @@ export const supportsBalanceCheck = {
   [EModelEndpoint.gptPlugins]: true,
 };
 
-export const visionModels = ['gpt-4-vision', 'llava-13b'];
+export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision'];
 
 export const eModelEndpointSchema = z.nativeEnum(EModelEndpoint);
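Taken together, the exports touched in these hunks let callers ask whether an endpoint accepts file uploads and whether the selected model can actually handle images. A hedged usage sketch follows; the stand-in `EModelEndpoint` enum only covers the members visible in the diff, and `canAttachImages` is a hypothetical helper, not part of LibreChat:

// Illustrative use of the maps changed above; the real EModelEndpoint enum
// has more members than are shown in this diff.
enum EModelEndpoint {
  openAI = 'openAI',
  google = 'google',
  assistant = 'assistant',
  gptPlugins = 'gptPlugins',
}

const supportsFiles: Partial<Record<EModelEndpoint, boolean>> = {
  [EModelEndpoint.openAI]: true,
  [EModelEndpoint.google]: true,
  [EModelEndpoint.assistant]: true,
};

const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision'];

// A caller might gate image attachments on both checks: the endpoint must
// accept files and the selected model must be vision-capable.
function canAttachImages(endpoint: EModelEndpoint, model: string): boolean {
  return Boolean(supportsFiles[endpoint]) && visionModels.some((m) => model.includes(m));
}

console.log(canAttachImages(EModelEndpoint.google, 'gemini-pro'));        // false
console.log(canAttachImages(EModelEndpoint.google, 'gemini-pro-vision')); // true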