mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-19 01:40:15 +01:00)
🤖 feat: Support Google Agents, fix Various Provider Configurations (#5126)
* feat: Refactor ModelEndHandler to collect usage metadata only if it exists
* feat: google tool end handling, custom anthropic class for better token ux
* refactor: differentiate between client <> request options
* feat: initial support for google agents
* feat: only cache messages with non-empty text
* feat: Cache non-empty messages in chatV2 controller
* fix: anthropic llm client options llmConfig
* refactor: streamline client options handling in LLM configuration
* fix: VertexAI Agent Auth & Tool Handling
* fix: additional fields for llmConfig, however customHeaders are not supported by langchain, requires PR
* feat: set default location for vertexai LLM configuration
* fix: outdated OpenAI Client options for getLLMConfig
* chore: agent provider options typing
* chore: add note about currently unsupported customHeaders in langchain GenAI client
* fix: skip transaction creation when rawAmount is NaN (see the sketch after the commit metadata below)
parent a423eb8c7b · commit 24cad6bbd4
18 changed files with 429 additions and 363 deletions
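
The last listed fix, the rawAmount guard, is simple enough to sketch in isolation. This is a hedged sketch, assuming a hypothetical createTransactionIfValid helper; the actual transaction-service function in LibreChat has a different name and shape:

// Hypothetical helper, not the committed implementation
function createTransactionIfValid(txData) {
  const { rawAmount } = txData;
  // Skip transaction creation when rawAmount is NaN
  if (typeof rawAmount === 'number' && Number.isNaN(rawAmount)) {
    return;
  }
  // ...create and persist the transaction...
}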
api/server/services/Endpoints/google/llm.js (new file, 146 lines)
@@ -0,0 +1,146 @@
const { Providers } = require('@librechat/agents');
const { AuthKeys } = require('librechat-data-provider');

// Gemini model patterns excluded from the GenAI client determination
const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/;

function getSafetySettings() {
  return [
    {
      category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
      threshold: process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    },
    {
      category: 'HARM_CATEGORY_HATE_SPEECH',
      threshold: process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    },
    {
      category: 'HARM_CATEGORY_HARASSMENT',
      threshold: process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    },
    {
      category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
      threshold: process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    },
    {
      category: 'HARM_CATEGORY_CIVIC_INTEGRITY',
      threshold: process.env.GOOGLE_SAFETY_CIVIC_INTEGRITY || 'BLOCK_NONE',
    },
  ];
}

/**
 * Replicates core logic from GoogleClient's constructor and setOptions, plus client determination.
 * Returns an object with the provider label and the final options that would be passed to createLLM.
 *
 * @param {string | object} credentials - Either a JSON string or an object containing Google keys
 * @param {object} [options={}] - The same shape as the GoogleClient constructor options
 */
function getLLMConfig(credentials, options = {}) {
  // 1. Parse credentials
  let creds = {};
  if (typeof credentials === 'string') {
    try {
      creds = JSON.parse(credentials);
    } catch (err) {
      throw new Error(`Error parsing string credentials: ${err.message}`);
    }
  } else if (credentials && typeof credentials === 'object') {
    creds = credentials;
  }

  // Extract from credentials
  const serviceKeyRaw = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
  const serviceKey =
    typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : serviceKeyRaw ?? {};

  const project_id = serviceKey?.project_id ?? null;
  const apiKey = creds[AuthKeys.GOOGLE_API_KEY] ?? null;

  const reverseProxyUrl = options.reverseProxyUrl;
  const authHeader = options.authHeader;

  /** @type {GoogleClientOptions | VertexAIClientOptions} */
  let llmConfig = {
    ...(options.modelOptions || {}),
    safetySettings: getSafetySettings(),
    maxRetries: 2,
  };

  const isGenerativeModel = llmConfig.model.includes('gemini');
  const isChatModel = !isGenerativeModel && llmConfig.model.includes('chat');
  const isTextModel = !isGenerativeModel && !isChatModel && /code|text/.test(llmConfig.model);

  let provider;

  // Note: every project_id branch resolves to VERTEXAI and every remaining
  // branch to GOOGLE; the model-type distinctions do not currently change
  // the outcome.
  if (project_id && isTextModel) {
    provider = Providers.VERTEXAI;
  } else if (project_id && isChatModel) {
    provider = Providers.VERTEXAI;
  } else if (project_id) {
    provider = Providers.VERTEXAI;
  } else if (!EXCLUDED_GENAI_MODELS.test(llmConfig.model)) {
    provider = Providers.GOOGLE;
  } else {
    provider = Providers.GOOGLE;
  }

  // If we have a GCP project => Vertex AI
  if (project_id && provider === Providers.VERTEXAI) {
    /** @type {VertexAIClientOptions['authOptions']} */
    llmConfig.authOptions = {
      credentials: { ...serviceKey },
      projectId: project_id,
    };
    llmConfig.location = process.env.GOOGLE_LOC || 'us-central1';
  } else if (apiKey && provider === Providers.GOOGLE) {
    llmConfig.apiKey = apiKey;
  }

  /*
  let legacyOptions = {};
  // Filter out any "examples" that are empty
  legacyOptions.examples = (legacyOptions.examples ?? [])
    .filter(Boolean)
    .filter((obj) => obj?.input?.content !== '' && obj?.output?.content !== '');

  // If user has "examples" from legacyOptions, push them onto llmConfig
  if (legacyOptions.examples?.length) {
    llmConfig.examples = legacyOptions.examples
      .map((ex) => {
        const { input, output } = ex;
        if (!input?.content || !output?.content) {
          return undefined;
        }
        return {
          input: new HumanMessage(input.content),
          output: new AIMessage(output.content),
        };
      })
      .filter(Boolean);
  }
  */

  if (reverseProxyUrl) {
    llmConfig.baseUrl = reverseProxyUrl;
  }

  if (authHeader) {
    /**
     * NOTE: NOT SUPPORTED BY LANGCHAIN GENAI CLIENT,
     * REQUIRES PR IN https://github.com/langchain-ai/langchainjs
     */
    llmConfig.customHeaders = {
      Authorization: `Bearer ${apiKey}`,
    };
  }

  // Return the final shape
  return {
    /** @type {Providers.GOOGLE | Providers.VERTEXAI} */
    provider,
    /** @type {GoogleClientOptions | VertexAIClientOptions} */
    llmConfig,
  };
}

module.exports = {
  getLLMConfig,
};
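
For reference, a minimal usage sketch of the exported helper, assuming the AuthKeys enum keys from librechat-data-provider; the require path and all credential values are placeholders, not taken from the commit:

const { AuthKeys } = require('librechat-data-provider');
const { getLLMConfig } = require('./llm'); // hypothetical relative path

// GenAI path: an API key and no service account key
process.env.GOOGLE_SAFETY_HATE_SPEECH = 'BLOCK_ONLY_HIGH';
const genai = getLLMConfig(
  JSON.stringify({ [AuthKeys.GOOGLE_API_KEY]: 'placeholder-api-key' }),
  { modelOptions: { model: 'gemini-1.5-pro' } },
);
// genai.provider === Providers.GOOGLE; genai.llmConfig.apiKey is set, and the
// HATE_SPEECH safety setting picks up the threshold from the env var above.

// Vertex AI path: a service key containing a project_id
const vertex = getLLMConfig(
  {
    [AuthKeys.GOOGLE_SERVICE_KEY]: {
      project_id: 'placeholder-project',
      client_email: 'svc@placeholder-project.iam.gserviceaccount.com',
      private_key: 'placeholder-key',
    },
  },
  { modelOptions: { model: 'gemini-1.5-pro' } },
);
// vertex.provider === Providers.VERTEXAI; llmConfig.authOptions carries the
// service key, and llmConfig.location defaults to 'us-central1' unless
// GOOGLE_LOC is set.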