🧪 ci: Tests for Anthropic and OpenAI LLM Configuration (#9484)

* fix: frequency and presence penalty options use camelCase

* ci: OpenAI Configuration Tests

* ci: Enhance OpenAI Configuration Tests with Azure and Custom Endpoint Scenarios

* Added integration tests for OpenAI and Azure configurations simulating various initialization scenarios (an illustrative sketch follows this list).
* Updated OpenAIConfigOptions to allow null values for reverseProxyUrl and proxy.
* Improved handling of reasoning parameters in tests for both OpenAI and Azure setups.
* Ensured robust error handling for missing API keys and malformed configurations.
* Optimized performance for large parameter sets in configuration.
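
To illustrate the kind of assertion these tests make, here is a minimal Jest-style sketch. The import path, the `(apiKey, options)` signature, and the `{ llmConfig }` return shape are assumptions for illustration only, not a verbatim excerpt of the committed test file.

```ts
// Hypothetical sketch; import path, signature, and return shape are assumed.
import { getOpenAIConfig } from '~/endpoints/openai/llm';

describe('getOpenAIConfig (sketch)', () => {
  it('maps snake_case penalties to the camelCase fields the client expects', () => {
    const result = getOpenAIConfig('test-api-key', {
      // Per the summary, reverseProxyUrl/proxy may now be null; passing null
      // should not throw (assumption).
      reverseProxyUrl: null,
      modelOptions: {
        model: 'gpt-4o-mini',
        frequency_penalty: 0.5,
        presence_penalty: 0.2,
      },
    });

    // Penalties should surface as camelCase fields on llmConfig.
    expect(result.llmConfig.frequencyPenalty).toBe(0.5);
    expect(result.llmConfig.presencePenalty).toBe(0.2);
    // The raw snake_case keys should not leak through.
    expect(result.llmConfig).not.toHaveProperty('frequency_penalty');
    expect(result.llmConfig).not.toHaveProperty('presence_penalty');
  });
});
```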

* test: Add comprehensive integration tests for Anthropic LLM configuration

* Introduced real-usage integration tests for various Anthropic endpoint configurations, including proxy and reverse proxy setups (a hedged sketch follows this list).
* Implemented model-specific scenarios for Claude-3.7 and web search functionality.
* Enhanced error handling for missing user IDs and large parameter sets.
* Validated parameter logic, including default values, boundary conditions, and type handling for numeric and array parameters.
* Ensured proper exclusion of system options from model options and maintained expected behavior across different model variations.
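
A minimal sketch of the Anthropic side, in the same spirit. The helper name `getLLMConfig`, its import path, and the returned shape are assumptions drawn from the summary above, not the committed test file.

```ts
// Hypothetical sketch; helper name, import path, and returned shape are assumed.
import { getLLMConfig } from '~/endpoints/anthropic/llm';

describe('Anthropic LLM config (sketch)', () => {
  it('keeps system options out of the returned model options', () => {
    const { llmConfig } = getLLMConfig('test-anthropic-key', {
      modelOptions: {
        model: 'claude-3-7-sonnet-latest',
        temperature: 0.7,
      },
      reverseProxyUrl: 'https://anthropic-proxy.test',
    });

    // Model-facing parameters survive as-is; the real tests also cover
    // defaults, boundary values, and type handling.
    expect(llmConfig.model).toBe('claude-3-7-sonnet-latest');
    expect(llmConfig.temperature).toBe(0.7);
    // System-level options such as the proxy URL should not be forwarded
    // as a model parameter (assumption based on the bullet above).
    expect(llmConfig).not.toHaveProperty('reverseProxyUrl');
  });
});
```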
Danny Avila 2025-09-06 09:42:12 -04:00 committed by GitHub
parent 6f6a34d126
commit 035f85c3ba
4 changed files with 1721 additions and 8 deletions


@@ -104,7 +104,14 @@ export function getOpenAIConfig(
     addParams,
     dropParams,
   } = options;
-  const { reasoning_effort, reasoning_summary, verbosity, ...modelOptions } = _modelOptions;
+  const {
+    reasoning_effort,
+    reasoning_summary,
+    verbosity,
+    frequency_penalty,
+    presence_penalty,
+    ...modelOptions
+  } = _modelOptions;
   const llmConfig: Partial<t.ClientOptions> &
     Partial<t.OpenAIParameters> &
     Partial<AzureOpenAIInput> = Object.assign(
@@ -115,6 +122,13 @@ export function getOpenAIConfig(
     modelOptions,
   );
+  if (frequency_penalty != null) {
+    llmConfig.frequencyPenalty = frequency_penalty;
+  }
+  if (presence_penalty != null) {
+    llmConfig.presencePenalty = presence_penalty;
+  }
   const modelKwargs: Record<string, unknown> = {};
   let hasModelKwargs = false;
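
For quick reference, a self-contained sketch of the mapping logic added above. The names mirror the diff, but this is an illustration, not the actual module.

```ts
// Standalone illustration of the mapping shown in the diff; not the real module.
type PenaltyOptions = {
  frequency_penalty?: number | null;
  presence_penalty?: number | null;
  [key: string]: unknown;
};

function mapPenalties(_modelOptions: PenaltyOptions): Record<string, unknown> {
  const { frequency_penalty, presence_penalty, ...modelOptions } = _modelOptions;
  const llmConfig: Record<string, unknown> = { ...modelOptions };

  // `!= null` skips both null and undefined while keeping a legitimate 0.
  if (frequency_penalty != null) {
    llmConfig.frequencyPenalty = frequency_penalty;
  }
  if (presence_penalty != null) {
    llmConfig.presencePenalty = presence_penalty;
  }
  return llmConfig;
}

// Example: 0 is preserved, an unset value is simply dropped.
console.log(mapPenalties({ model: 'gpt-4o', frequency_penalty: 0 }));
// → { model: 'gpt-4o', frequencyPenalty: 0 }
```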