mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-16 16:30:15 +01:00
* 🤖 refactor: streamline model selection logic for title model in GoogleClient
* refactor: add options for empty object schemas in convertJsonSchemaToZod
* refactor: add utility function to check for empty object schemas in convertJsonSchemaToZod
* fix: Google MCP Tool errors, and remove Object Unescaping as Google fixed this
* fix: google safetySettings
* feat: add safety settings exclusion via GOOGLE_EXCLUDE_SAFETY_SETTINGS environment variable
* fix: rename environment variable for console JSON string length
* fix: disable portal for dropdown in ExportModal component
* fix: screenshot functionality to use image placeholder for remote images
* feat: add visionMode property to BaseClient and initialize in GoogleClient to fix resendFiles issue
* fix: enhance formatMessages to include image URLs in message content for Vertex AI
* fix: safety settings for titleChatCompletion
* fix: remove deprecated model assignment in GoogleClient and streamline title model retrieval
* fix: remove unused image preloading logic in ScreenshotContext
* chore: update default google models to latest models shared by vertex ai and gen ai
* refactor: enhance Google error messaging
* fix: update token values and model limits for Gemini models
* ci: fix model matching
* chore: bump version of librechat-data-provider to 0.7.699
180 lines
5.1 KiB
JavaScript
const { Providers } = require('@librechat/agents');
|
|
const { AuthKeys } = require('librechat-data-provider');
|
|
const { isEnabled } = require('~/server/utils');
|
|
|
|
/**
 * Returns a function that normalizes a safety threshold value for the given model.
 *
 * Older Gemini 1.x models do not accept 'OFF'; a further set of models
 * (1.5-flash-8b, 2.0, exp, learnlm) accept neither 'OFF' nor
 * 'HARM_BLOCK_THRESHOLD_UNSPECIFIED'. Unsupported values are coerced to
 * 'BLOCK_NONE'; all other values pass through unchanged.
 *
 * @param {string} model - Model identifier to match against the patterns
 * @returns {(threshold: string) => string} threshold-mapping function
 */
function getThresholdMapping(model) {
  const legacyGeminiPattern = /gemini-(1\.0|1\.5|pro$|1\.0-pro|1\.5-pro|1\.5-flash-001)/;
  const restrictedModelPattern = /(gemini-(1\.5-flash-8b|2\.0|exp)|learnlm)/;

  // Gemini 1.x family: only 'OFF' is unsupported.
  if (legacyGeminiPattern.test(model)) {
    return (threshold) => (threshold === 'OFF' ? 'BLOCK_NONE' : threshold);
  }

  // Restricted models: neither 'OFF' nor the unspecified sentinel is accepted.
  if (restrictedModelPattern.test(model)) {
    return (threshold) =>
      threshold === 'OFF' || threshold === 'HARM_BLOCK_THRESHOLD_UNSPECIFIED'
        ? 'BLOCK_NONE'
        : threshold;
  }

  // Any other model: identity mapping.
  return (threshold) => threshold;
}
|
|
|
|
/**
 * Builds the Google safety-settings array for the given model, honoring
 * per-category environment-variable overrides. Thresholds are normalized per
 * model via getThresholdMapping. Returns undefined when safety settings are
 * excluded via GOOGLE_EXCLUDE_SAFETY_SETTINGS.
 *
 * @param {string} model
 * @returns {Array<{category: string, threshold: string}> | undefined}
 */
function getSafetySettings(model) {
  if (isEnabled(process.env.GOOGLE_EXCLUDE_SAFETY_SETTINGS)) {
    return undefined;
  }
  const mapThreshold = getThresholdMapping(model);

  // [category, env override, default threshold] — civic integrity defaults to
  // BLOCK_NONE rather than the unspecified sentinel.
  const categoryDefaults = [
    [
      'HARM_CATEGORY_SEXUALLY_EXPLICIT',
      process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT,
      'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    ],
    [
      'HARM_CATEGORY_HATE_SPEECH',
      process.env.GOOGLE_SAFETY_HATE_SPEECH,
      'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    ],
    [
      'HARM_CATEGORY_HARASSMENT',
      process.env.GOOGLE_SAFETY_HARASSMENT,
      'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    ],
    [
      'HARM_CATEGORY_DANGEROUS_CONTENT',
      process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT,
      'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    ],
    ['HARM_CATEGORY_CIVIC_INTEGRITY', process.env.GOOGLE_SAFETY_CIVIC_INTEGRITY, 'BLOCK_NONE'],
  ];

  return categoryDefaults.map(([category, envValue, fallback]) => ({
    category,
    threshold: mapThreshold(envValue || fallback),
  }));
}
|
|
|
|
/**
 * Replicates core logic from GoogleClient's constructor and setOptions, plus client determination.
 * Returns an object with the provider label and the final options that would be passed to createLLM.
 *
 * A GCP service key (with a project_id) selects Vertex AI; otherwise an API key
 * selects the Google GenAI client.
 *
 * @param {string | object} credentials - Either a JSON string or an object containing Google keys
 * @param {object} [options={}] - The same shape as the "GoogleClient" constructor options
 * @returns {{provider: string, llmConfig: object}} provider label and createLLM options
 * @throws {Error} When a string credential payload or service key fails to parse as JSON
 */
function getLLMConfig(credentials, options = {}) {
  // 1. Parse credentials
  let creds = {};
  if (typeof credentials === 'string') {
    try {
      creds = JSON.parse(credentials);
    } catch (err) {
      throw new Error(`Error parsing string credentials: ${err.message}`);
    }
  } else if (credentials && typeof credentials === 'object') {
    creds = credentials;
  }

  // Extract from credentials; the service key may itself be a JSON string,
  // so guard its parse the same way as the outer credentials.
  const serviceKeyRaw = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
  let serviceKey = {};
  if (typeof serviceKeyRaw === 'string') {
    try {
      serviceKey = JSON.parse(serviceKeyRaw);
    } catch (err) {
      throw new Error(`Error parsing service key credentials: ${err.message}`);
    }
  } else {
    serviceKey = serviceKeyRaw ?? {};
  }

  const project_id = serviceKey?.project_id ?? null;
  const apiKey = creds[AuthKeys.GOOGLE_API_KEY] ?? null;

  const reverseProxyUrl = options.reverseProxyUrl;
  const authHeader = options.authHeader;

  /** @type {GoogleClientOptions | VertexAIClientOptions} */
  let llmConfig = {
    ...(options.modelOptions || {}),
    maxRetries: 2,
  };

  /** Used only for Safety Settings */
  llmConfig.safetySettings = getSafetySettings(llmConfig.model);

  // A GCP project id implies Vertex AI; otherwise fall back to the GenAI client.
  const provider = project_id ? Providers.VERTEXAI : Providers.GOOGLE;

  if (project_id) {
    // Vertex AI: authenticate via the service-account key.
    /** @type {VertexAIClientOptions['authOptions']} */
    llmConfig.authOptions = {
      credentials: { ...serviceKey },
      projectId: project_id,
    };
    llmConfig.location = process.env.GOOGLE_LOC || 'us-central1';
  } else if (apiKey) {
    // GenAI client: authenticate via API key.
    llmConfig.apiKey = apiKey;
  }

  if (reverseProxyUrl) {
    llmConfig.baseUrl = reverseProxyUrl;
  }

  if (authHeader) {
    /**
     * NOTE: NOT SUPPORTED BY LANGCHAIN GENAI CLIENT,
     * REQUIRES PR IN https://github.com/langchain-ai/langchainjs
     *
     * NOTE(review): this sends the API key as the bearer token while being
     * gated on `authHeader` — confirm whether `authHeader` should supply the
     * token value instead.
     */
    llmConfig.customHeaders = {
      Authorization: `Bearer ${apiKey}`,
    };
  }

  // Return the final shape
  return {
    /** @type {Providers.GOOGLE | Providers.VERTEXAI} */
    provider,
    /** @type {GoogleClientOptions | VertexAIClientOptions} */
    llmConfig,
  };
}
|
|
|
|
module.exports = {
|
|
getLLMConfig,
|
|
getSafetySettings,
|
|
};
|