🔧 refactor: Improve Agent Context & Minor Fixes (#5349)

* refactor: Improve Context for Agents

* 🔧 fix: Safeguard against undefined properties in OpenAIClient response handling

* refactor: log error before re-throwing to preserve the original stack trace (see the sketch after this list)

* refactor: remove toolResource state from useFileHandling, allow svg files

* refactor: prevent verbose logs from axios errors when using actions

* refactor: add silent method recordTokenUsage in AgentClient

* refactor: streamline token count assignment in BaseClient

* refactor: enhance safety settings handling for Gemini 2.0 model

* fix: capabilities structure in MCPConnection

* refactor: simplify civic integrity threshold handling in GoogleClient and llm

* refactor: update token count retrieval method in BaseClient tests

* ci: fix test for svg
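
A minimal sketch of the log-before-rethrow pattern mentioned above. This is a standalone illustration, not the actual client code: `logger`, `sendRequest`, and `fetchCompletion` are hypothetical stand-ins. The point is that re-throwing the *same* error object keeps the original stack trace intact for upstream handlers, while logging first guarantees the failure is recorded even if a caller later wraps or swallows it.

```js
// Minimal sketch; `logger` stands in for the app's logger (e.g., winston).
const logger = { error: (...args) => console.error(...args) };

async function sendRequest(payload) {
  // Hypothetical request helper that may reject.
  throw new Error(`request failed for ${JSON.stringify(payload)}`);
}

async function fetchCompletion(payload) {
  try {
    return await sendRequest(payload);
  } catch (error) {
    // Log first, then re-throw the same error object so the original
    // stack trace survives for upstream handlers.
    logger.error('[fetchCompletion] request failed:', error);
    throw error;
  }
}

// Usage: the caller still sees the original error and its stack.
fetchCompletion({ model: 'gemini-2.0-flash' }).catch((err) =>
  console.error('caller saw:', err.message),
);
```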
Danny Avila, 2025-01-17 12:55:48 -05:00 (committed by GitHub)
commit b35a8b78e2 (parent e309c6abef)
19 changed files with 324 additions and 112 deletions

```diff
@@ -4,27 +4,47 @@ const { AuthKeys } = require('librechat-data-provider');
 // Example internal constant from your code
 const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/;
 
-function getSafetySettings() {
+/**
+ *
+ * @param {boolean} isGemini2
+ * @returns {Array<{category: string, threshold: string}>}
+ */
+function getSafetySettings(isGemini2) {
+  const mapThreshold = (value) => {
+    if (isGemini2 && value === 'BLOCK_NONE') {
+      return 'OFF';
+    }
+    return value;
+  };
+
   return [
     {
       category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
-      threshold: process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+      threshold: mapThreshold(
+        process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+      ),
     },
     {
       category: 'HARM_CATEGORY_HATE_SPEECH',
-      threshold: process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+      threshold: mapThreshold(
+        process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+      ),
     },
     {
       category: 'HARM_CATEGORY_HARASSMENT',
-      threshold: process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+      threshold: mapThreshold(
+        process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+      ),
     },
     {
       category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
-      threshold: process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+      threshold: mapThreshold(
+        process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+      ),
     },
     {
       category: 'HARM_CATEGORY_CIVIC_INTEGRITY',
-      threshold: process.env.GOOGLE_SAFETY_CIVIC_INTEGRITY || 'BLOCK_NONE',
+      threshold: mapThreshold(process.env.GOOGLE_SAFETY_CIVIC_INTEGRITY || 'BLOCK_NONE'),
     },
   ];
 }
```
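
To make the new mapping concrete, here is a hedged usage sketch. It is a standalone re-implementation of the `mapThreshold` logic shown in the hunk above, not the module's exported API, and it assumes no `GOOGLE_SAFETY_*` env vars are set: a Gemini 2.0 model gets `OFF` for the civic-integrity default, presumably because Gemini 2.0 expects `OFF` rather than `BLOCK_NONE`, while older models keep `BLOCK_NONE`.

```js
// Standalone sketch of the mapping from the diff above.
const mapThreshold = (isGemini2, value) =>
  isGemini2 && value === 'BLOCK_NONE' ? 'OFF' : value;

// Same default as the civic-integrity entry, assuming no env override.
const civicDefault = process.env.GOOGLE_SAFETY_CIVIC_INTEGRITY || 'BLOCK_NONE';

console.log(mapThreshold(true, civicDefault)); // 'OFF'         (gemini-2.0)
console.log(mapThreshold(false, civicDefault)); // 'BLOCK_NONE' (older Gemini models)
```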
```diff
@@ -64,14 +84,16 @@ function getLLMConfig(credentials, options = {}) {
   /** @type {GoogleClientOptions | VertexAIClientOptions} */
   let llmConfig = {
     ...(options.modelOptions || {}),
-    safetySettings: getSafetySettings(),
     maxRetries: 2,
   };
 
+  const isGemini2 = llmConfig.model.includes('gemini-2.0');
   const isGenerativeModel = llmConfig.model.includes('gemini');
   const isChatModel = !isGenerativeModel && llmConfig.model.includes('chat');
   const isTextModel = !isGenerativeModel && !isChatModel && /code|text/.test(llmConfig.model);
 
+  llmConfig.safetySettings = getSafetySettings(isGemini2);
+
   let provider;
 
   if (project_id && isTextModel) {
```
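
For reference, a small standalone sketch of how the model-name checks above classify a few example model IDs. The flags are derived from `llmConfig.model`, which is only available after `options.modelOptions` is spread in, which is why the `safetySettings` assignment now happens after the flags are computed rather than inside the initial object literal.

```js
// Standalone version of the classification flags from the diff above.
function classify(model) {
  const isGemini2 = model.includes('gemini-2.0');
  const isGenerativeModel = model.includes('gemini');
  const isChatModel = !isGenerativeModel && model.includes('chat');
  const isTextModel = !isGenerativeModel && !isChatModel && /code|text/.test(model);
  return { isGemini2, isGenerativeModel, isChatModel, isTextModel };
}

console.log(classify('gemini-2.0-flash'));
// { isGemini2: true, isGenerativeModel: true, isChatModel: false, isTextModel: false }
console.log(classify('chat-bison'));
// { isGemini2: false, isGenerativeModel: false, isChatModel: true, isTextModel: false }
console.log(classify('code-bison'));
// { isGemini2: false, isGenerativeModel: false, isChatModel: false, isTextModel: true }
```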