🤖 feat: Support Google Agents, fix Various Provider Configurations (#5126)

* feat: Refactor ModelEndHandler to collect usage metadata only if it exists

* feat: google tool end handling, custom anthropic class for better token ux

* refactor: differentiate between client <> request options

* feat: initial support for google agents

* feat: only cache messages with non-empty text

* feat: Cache non-empty messages in chatV2 controller

* fix: anthropic llm client options llmConfig

* refactor: streamline client options handling in LLM configuration

* fix: VertexAI Agent Auth & Tool Handling

* fix: additional fields for llmConfig; however, customHeaders are not supported by the langchain GenAI client and require an upstream PR

* feat: set default location for vertexai LLM configuration

* fix: outdated OpenAI Client options for getLLMConfig

* chore: agent provider options typing

* chore: add note about currently unsupported customHeaders in langchain GenAI client

* fix: skip transaction creation when rawAmount is NaN
This commit is contained in:
Danny Avila 2024-12-28 17:15:03 -05:00 committed by GitHub
parent a423eb8c7b
commit 24cad6bbd4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
18 changed files with 429 additions and 363 deletions

View file

@ -20,7 +20,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
checkUserKeyExpiry(expiresAt, EModelEndpoint.anthropic);
}
const clientOptions = {};
let clientOptions = {};
/** @type {undefined | TBaseEndpoint} */
const anthropicConfig = req.app.locals[EModelEndpoint.anthropic];
@ -36,7 +36,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
}
if (optionsOnly) {
const requestOptions = Object.assign(
clientOptions = Object.assign(
{
reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
proxy: PROXY ?? null,
@ -45,9 +45,9 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
clientOptions,
);
if (overrideModel) {
requestOptions.modelOptions.model = overrideModel;
clientOptions.modelOptions.model = overrideModel;
}
return getLLMConfig(anthropicApiKey, requestOptions);
return getLLMConfig(anthropicApiKey, clientOptions);
}
const client = new AnthropicClient(anthropicApiKey, {