🎚️ feat: Anthropic Parameter Set Support via Custom Endpoints (#9415)

* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412)
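
For context, a minimal sketch of the shape such an extraction takes; the field names and signature below are assumptions for illustration, not the PR's actual code:

```ts
/**
 * Hypothetical stand-in for getOpenAILLMConfig: collects the OpenAI-specific
 * model parameters into one object so endpoint setup code can reuse it.
 */
interface OpenAILLMConfig {
  model: string;
  temperature?: number;
  max_tokens?: number;
  user?: string;
}

function getOpenAILLMConfig(modelOptions: {
  model: string;
  temperature?: number;
  maxTokens?: number;
  user?: string;
}): OpenAILLMConfig {
  const { model, temperature, maxTokens, user } = modelOptions;
  return {
    model,
    // Only include parameters that were actually set.
    ...(temperature !== undefined && { temperature }),
    ...(maxTokens !== undefined && { max_tokens: maxTokens }),
    ...(user !== undefined && { user }),
  };
}
```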

* ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413)

* refactor: move tokens.js over to packages/api and update imports

* refactor: port tokens.js to typescript
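
The module's job (visible in the diff at the bottom of this page) is pattern-based token-limit lookup. A rough sketch of the idea with invented values; the real `maxTokensMap` and matching logic live in the ported file:

```ts
type TokenMap = Record<string, number>;

// Illustrative entries only; the real maxTokensMap is far larger.
const exampleTokenMap: TokenMap = {
  'gpt-4': 8192,
  'gpt-4-32k': 32768,
  'claude-3-5-sonnet': 200000,
};

/** Longest matching key wins, so 'gpt-4-32k-0613' resolves to 'gpt-4-32k'. */
function findMatchingKey(modelName: string, map: TokenMap): string | undefined {
  return Object.keys(map)
    .filter((key) => modelName.includes(key))
    .sort((a, b) => b.length - a.length)[0];
}

function getMaxTokensFor(modelName: string, map: TokenMap = exampleTokenMap): number | undefined {
  const key = findMatchingKey(modelName, map);
  return key === undefined ? undefined : map[key];
}
```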

* refactor: move helpers.js over to packages/api and update imports

* refactor: port helpers.js to typescript

* refactor: move anthropic/llm.js over to packages/api and update imports

* refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js
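
A hedged sketch of what the typed config builder might look like; the actual `getLLMConfig` signature and the types in types/anthropic.ts may differ:

```ts
interface AnthropicModelOptions {
  model: string;
  maxOutputTokens?: number;
  temperature?: number;
  topP?: number;
  topK?: number;
  stop?: string[];
}

interface AnthropicLLMConfig {
  apiKey: string;
  model: string;
  maxTokens?: number;
  temperature?: number;
  topP?: number;
  topK?: number;
  stopSequences?: string[];
}

// Maps LibreChat-style model options onto Anthropic client parameter names.
function getLLMConfig(apiKey: string, modelOptions: AnthropicModelOptions): AnthropicLLMConfig {
  const { model, maxOutputTokens, temperature, topP, topK, stop } = modelOptions;
  return {
    apiKey,
    model,
    maxTokens: maxOutputTokens,
    temperature,
    topP,
    topK,
    stopSequences: stop,
  };
}
```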

* refactor: move llm.spec.js over to packages/api and update import

* refactor: port llm.spec.js over to typescript

* 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414)

feat: add anthropic llm config support for openai-like (custom) endpoints
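
A sketch of the routing idea under stated assumptions: a custom (OpenAI-like) endpoint can declare that it speaks Anthropic's parameter set, in which case the Anthropic config builder runs first and its output is adapted back to OpenAI-style options. The helper bodies below are stand-ins, not the PR's implementations:

```ts
type LLMParams = Record<string, unknown>;

// Stand-in for getAnthropicLLMConfig: builds Anthropic-flavored parameters.
function buildAnthropicParams(modelOptions: LLMParams): LLMParams {
  return { ...modelOptions };
}

// Stand-in for transformToOpenAIConfig: renames provider-specific fields
// into their OpenAI-style equivalents.
function adaptToOpenAIShape(params: LLMParams): LLMParams {
  const { maxOutputTokens, stopSequences, ...rest } = params;
  return {
    ...rest,
    ...(maxOutputTokens !== undefined && { max_tokens: maxOutputTokens }),
    ...(stopSequences !== undefined && { stop: stopSequences }),
  };
}

function buildCustomEndpointConfig(
  paramSet: 'openai' | 'anthropic',
  modelOptions: LLMParams,
): LLMParams {
  return paramSet === 'anthropic'
    ? adaptToOpenAIShape(buildAnthropicParams(modelOptions))
    : modelOptions;
}
```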

* fix: compiler/type issues missed in the addition of getAnthropicLLMConfig

* refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values

* WIP: first pass, decouple `llmConfig` from `configOptions`
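
The gist of the decoupling, sketched with assumed field names: model parameters (`llmConfig`) and client/transport settings (`configOptions`) are returned as separate objects instead of one mixed bag:

```ts
interface SplitResult {
  llmConfig: { model: string; temperature?: number; max_tokens?: number };
  configOptions: { baseURL?: string; defaultHeaders?: Record<string, string> };
}

function splitClientOptions(raw: {
  model: string;
  temperature?: number;
  max_tokens?: number;
  baseURL?: string;
  defaultHeaders?: Record<string, string>;
}): SplitResult {
  // Transport settings peel off; everything else is a model parameter.
  const { baseURL, defaultHeaders, ...llmConfig } = raw;
  return { llmConfig, configOptions: { baseURL, defaultHeaders } };
}
```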

* chore: update import path for OpenAI configuration from 'llm' to 'config'

* refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions
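
Anthropic's extended-thinking parameter suggests the shape involved; whether the PR's `ThinkingConfig` matches it exactly, and the `thinkingBudget` option name, are assumptions here:

```ts
type ThinkingConfig =
  | { type: 'enabled'; budget_tokens: number }
  | { type: 'disabled' };

interface AnthropicThinkingOptions {
  thinking?: boolean;
  thinkingBudget?: number; // hypothetical option name
}

function toThinkingConfig(opts: AnthropicThinkingOptions): ThinkingConfig | undefined {
  if (opts.thinking === undefined) {
    return undefined; // leave the parameter out entirely
  }
  return opts.thinking
    ? { type: 'enabled', budget_tokens: opts.thinkingBudget ?? 2048 }
    : { type: 'disabled' };
}
```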

* refactor: clean up types; introduce an OpenAI transform from an alternate provider's config

* chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports
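
`removeNullishValues` drops `null`/`undefined` entries so unset options don't override provider defaults; a minimal equivalent of the helper (the real one ships in the LibreChat packages):

```ts
function removeNullishValues<T extends Record<string, unknown>>(obj: T): Partial<T> {
  const cleaned: Partial<T> = {};
  for (const [key, value] of Object.entries(obj)) {
    if (value !== null && value !== undefined) {
      cleaned[key as keyof T] = value as T[keyof T];
    }
  }
  return cleaned;
}

// removeNullishValues({ model: 'gemini-1.5-pro', topK: undefined, topP: null })
// -> { model: 'gemini-1.5-pro' }
```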

* chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json

* refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams']
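
The point of using `TConfig['customParams']` is that the options type can never drift from the source-of-truth config type. Illustrative shapes only; the real `TConfig` lives in librechat-data-provider:

```ts
// Assumed stand-in for the real TConfig.
interface TConfig {
  name: string;
  customParams?: {
    defaultParamsEndpoint?: string;
    paramDefinitions?: Array<Record<string, unknown>>;
  };
}

interface OpenAIConfigOptions {
  // Indexed access type: stays in sync with TConfig automatically.
  customParams?: TConfig['customParams'];
}
```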

* refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction
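
A sketch of the enhancement with assumed shapes: the transform now records which endpoint the config originated from, so downstream consumers can tell an Anthropic-derived config apart from a native OpenAI one:

```ts
interface TransformedConfig {
  llmConfig: Record<string, unknown>;
  configOptions: Record<string, unknown>;
  /** e.g. 'anthropic' when the params came from the Anthropic builder */
  fromEndpoint: string;
}

function transformToOpenAIConfig(
  providerConfig: { clientOptions?: Record<string, unknown>; [key: string]: unknown },
  fromEndpoint: string,
): TransformedConfig {
  // Pull client-level options out of the provider config; the rest are
  // model parameters.
  const { clientOptions, ...llmConfig } = providerConfig;
  return { llmConfig, configOptions: clientOptions ?? {}, fromEndpoint };
}
```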

* refactor: make the userId field consistent across anthropic/openai; clean up anthropic typing

* ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations

* ci: replace userId with user in clientOptions for getLLMConfig

* test: add Azure OpenAI endpoint tests for various configurations in getOpenAIConfig

* refactor: defaultHeaders retrieval for prompt caching on anthropic-based custom endpoints (e.g., LiteLLM)
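
Prompt caching on Anthropic models is switched on via an `anthropic-beta` header; when the model is served through an OpenAI-compatible proxy such as LiteLLM, that header has to ride along as a default header on the client. A hedged sketch: the header value is Anthropic's documented beta flag, while the function name and proxy URL are invented:

```ts
function getPromptCachingHeaders(supportsCacheControl: boolean): Record<string, string> | undefined {
  if (!supportsCacheControl) {
    return undefined;
  }
  return { 'anthropic-beta': 'prompt-caching-2024-07-31' };
}

const configOptions = {
  baseURL: 'https://litellm.internal.example/v1', // hypothetical proxy URL
  defaultHeaders: getPromptCachingHeaders(true),
};
```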

* test: add unit tests for getOpenAIConfig with various Anthropic model configurations

* test: enhance Anthropic compatibility tests with addParams and dropParams handling
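
The behavior under test, roughly: `addParams` merges extra fields into the final config and `dropParams` strips named ones, letting a custom endpoint paper over provider quirks. The helper signature below is assumed:

```ts
function applyAddDropParams(
  llmConfig: Record<string, unknown>,
  addParams?: Record<string, unknown>,
  dropParams?: string[],
): Record<string, unknown> {
  // addParams win over existing values; dropParams are removed last.
  const merged: Record<string, unknown> = { ...llmConfig, ...(addParams ?? {}) };
  for (const param of dropParams ?? []) {
    delete merged[param];
  }
  return merged;
}

// applyAddDropParams({ model: 'claude-3-5-sonnet', temperature: 1 }, { top_k: 40 }, ['temperature'])
// -> { model: 'claude-3-5-sonnet', top_k: 40 }
```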

* chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json

* chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
Authored by Dustin Healy, committed via GitHub on 2025-09-08 11:35:29 -07:00
commit c6ecf0095b (parent 7de6f6e44c)
40 changed files with 1736 additions and 432 deletions

Representative hunks from the diff: a token-count spec file switches its imports from the local './tokens' module to '@librechat/api'.

```diff
@@ -1,12 +1,12 @@
 const { EModelEndpoint } = require('librechat-data-provider');
 const {
-  maxTokensMap,
-  matchModelName,
-  processModelData,
-  getModelMaxTokens,
   maxOutputTokensMap,
   findMatchingPattern,
-} = require('./tokens');
+  getModelMaxTokens,
+  processModelData,
+  matchModelName,
+  maxTokensMap,
+} = require('@librechat/api');
 
 describe('getModelMaxTokens', () => {
   test('should return correct tokens for exact match', () => {
@@ -394,7 +394,7 @@ describe('getModelMaxTokens', () => {
   });
 
   test('should return correct max output tokens for GPT-5 models', () => {
-    const { getModelMaxOutputTokens } = require('./tokens');
+    const { getModelMaxOutputTokens } = require('@librechat/api');
     ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => {
       expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
       expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
@@ -407,7 +407,7 @@ describe('getModelMaxTokens', () => {
   });
 
   test('should return correct max output tokens for GPT-OSS models', () => {
-    const { getModelMaxOutputTokens } = require('./tokens');
+    const { getModelMaxOutputTokens } = require('@librechat/api');
     ['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
       expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
       expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
```