🤖 fix: GoogleClient Context Handling & GenAI Parameters (#5503)

* fix: remove legacy code for GoogleClient and fix model parameters for GenAI

* refactor: streamline client init logic

* refactor: remove legacy vertex clients, WIP remote vertex token count

* refactor: enhance GoogleClient with improved type definitions and streamline token count method

* refactor: remove unused methods and consolidate methods

* refactor: remove examples

* refactor: improve input handling logic in DynamicInput component

* refactor: enhance GoogleClient with token usage tracking and context handling improvements

* refactor: update GoogleClient to support 'learnlm' model and streamline model checks

* refactor: remove unused text model handling in GoogleClient

* refactor: record token usage for GoogleClient titles and handle edge cases

* chore: remove unused undici, addresses verbose version warning
This commit is contained in:
Danny Avila 2025-01-27 12:21:33 -05:00 committed by GitHub
parent 47b72e8159
commit 528ee62eb1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 277 additions and 270 deletions

View file

@@ -11,6 +11,7 @@ const buildOptions = (endpoint, parsedBody) => {
greeting,
spec,
artifacts,
maxContextTokens,
...modelOptions
} = parsedBody;
const endpointOption = removeNullishValues({
@@ -22,6 +23,7 @@ const buildOptions = (endpoint, parsedBody) => {
iconURL,
greeting,
spec,
maxContextTokens,
modelOptions,
});

View file

@@ -1,9 +1,6 @@
const { Providers } = require('@librechat/agents');
const { AuthKeys } = require('librechat-data-provider');
// Example internal constant from your code
const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/;
/**
*
* @param {boolean} isGemini2
@@ -89,22 +86,12 @@ function getLLMConfig(credentials, options = {}) {
/** Used only for Safety Settings */
const isGemini2 = llmConfig.model.includes('gemini-2.0') && !llmConfig.model.includes('thinking');
const isGenerativeModel = llmConfig.model.includes('gemini');
const isChatModel = !isGenerativeModel && llmConfig.model.includes('chat');
const isTextModel = !isGenerativeModel && !isChatModel && /code|text/.test(llmConfig.model);
llmConfig.safetySettings = getSafetySettings(isGemini2);
let provider;
if (project_id && isTextModel) {
if (project_id) {
provider = Providers.VERTEXAI;
} else if (project_id && isChatModel) {
provider = Providers.VERTEXAI;
} else if (project_id) {
provider = Providers.VERTEXAI;
} else if (!EXCLUDED_GENAI_MODELS.test(llmConfig.model)) {
provider = Providers.GOOGLE;
} else {
provider = Providers.GOOGLE;
}