refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js

Authored by Dustin Healy on 2025-08-30 23:01:14 -07:00; committed by Danny Avila
parent 796cb2b1ab
commit f5bb44e652
No known key found for this signature in database
GPG key ID: BF31EEB2C5CA0956
3 changed files with 75 additions and 19 deletions

llm.spec.js

@@ -211,13 +211,13 @@ describe('getLLMConfig', () => {
   it('should handle empty modelOptions', () => {
     expect(() => {
       getLLMConfig('test-api-key', {});
-    }).toThrow("Cannot read properties of undefined (reading 'thinking')");
+    }).toThrow('No modelOptions provided');
   });
 
   it('should handle no options parameter', () => {
     expect(() => {
       getLLMConfig('test-api-key');
-    }).toThrow("Cannot read properties of undefined (reading 'thinking')");
+    }).toThrow('No modelOptions provided');
   });
 
   it('should handle temperature, stop sequences, and stream settings', () => {
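These assertions pin down the behavioral change: a missing modelOptions no longer surfaces as a TypeError from property access but as an explicit error thrown by getLLMConfig. A minimal sketch of what a caller now observes (the import path is an assumption, not part of this commit):

    // Sketch only: illustrates the new fail-fast behavior.
    import { getLLMConfig } from './llm'; // import path assumed

    try {
      getLLMConfig('test-api-key'); // no options argument at all
    } catch (err) {
      console.error((err as Error).message); // 'No modelOptions provided'
    }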

anthropic/llm.js → anthropic/llm.ts

@@ -1,6 +1,12 @@
-const { ProxyAgent } = require('undici');
-const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
-const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');
+import { ProxyAgent } from 'undici';
+import { AnthropicClientOptions } from '@librechat/agents';
+import { anthropicSettings, removeNullishValues } from 'librechat-data-provider';
+import type {
+  AnthropicConfigOptions,
+  AnthropicLLMConfigResult,
+  AnthropicParameters,
+} from '~/types/anthropic';
+import { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } from './helpers';
 
 /**
  * Generates configuration options for creating an Anthropic language model (LLM) instance.
@@ -21,25 +27,42 @@ const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = requir
  *
  * @returns {Object} Configuration options for creating an Anthropic LLM instance, with null and undefined values removed.
  */
-function getLLMConfig(apiKey, options = {}) {
+function getLLMConfig(
+  apiKey: string,
+  options: AnthropicConfigOptions = {} as AnthropicConfigOptions,
+): AnthropicLLMConfigResult {
   const systemOptions = {
-    thinking: options.modelOptions.thinking ?? anthropicSettings.thinking.default,
-    promptCache: options.modelOptions.promptCache ?? anthropicSettings.promptCache.default,
-    thinkingBudget: options.modelOptions.thinkingBudget ?? anthropicSettings.thinkingBudget.default,
+    thinking: options.modelOptions?.thinking ?? anthropicSettings.thinking.default,
+    promptCache: options.modelOptions?.promptCache ?? anthropicSettings.promptCache.default,
+    thinkingBudget:
+      options.modelOptions?.thinkingBudget ?? anthropicSettings.thinkingBudget.default,
   };
 
-  for (let key in systemOptions) {
-    delete options.modelOptions[key];
+  /** Couldn't figure out a way to still loop through the object while deleting the overlapping keys when porting this
+   * over from javascript, so for now they are being deleted manually until a better way presents itself.
+   */
+  if (options.modelOptions) {
+    delete options.modelOptions.thinking;
+    delete options.modelOptions.promptCache;
+    delete options.modelOptions.thinkingBudget;
+  } else {
+    throw new Error('No modelOptions provided');
   }
 
   const defaultOptions = {
     model: anthropicSettings.model.default,
     maxOutputTokens: anthropicSettings.maxOutputTokens.default,
     stream: true,
   };
 
-  const mergedOptions = Object.assign(defaultOptions, options.modelOptions);
+  const mergedOptions = Object.assign(
+    defaultOptions,
+    options.modelOptions,
+  ) as typeof defaultOptions &
+    Partial<AnthropicParameters> & { stop?: string[]; web_search?: boolean };
 
-  /** @type {AnthropicClientOptions} */
-  let requestOptions = {
+  let requestOptions: AnthropicClientOptions & { stream?: boolean } = {
     apiKey,
     model: mergedOptions.model,
     stream: mergedOptions.stream,
@@ -66,20 +89,20 @@ function getLLMConfig(apiKey, options = {}) {
   }
 
   const supportsCacheControl =
-    systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model);
-  const headers = getClaudeHeaders(requestOptions.model, supportsCacheControl);
-  if (headers) {
+    systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model ?? '');
+  const headers = getClaudeHeaders(requestOptions.model ?? '', supportsCacheControl);
+  if (headers && requestOptions.clientOptions) {
     requestOptions.clientOptions.defaultHeaders = headers;
   }
 
-  if (options.proxy) {
+  if (options.proxy && requestOptions.clientOptions) {
     const proxyAgent = new ProxyAgent(options.proxy);
     requestOptions.clientOptions.fetchOptions = {
       dispatcher: proxyAgent,
     };
   }
 
-  if (options.reverseProxyUrl) {
+  if (options.reverseProxyUrl && requestOptions.clientOptions) {
     requestOptions.clientOptions.baseURL = options.reverseProxyUrl;
     requestOptions.anthropicApiUrl = options.reverseProxyUrl;
   }
@@ -96,7 +119,9 @@ function getLLMConfig(apiKey, options = {}) {
 
   return {
     tools,
-    /** @type {AnthropicClientOptions} */
-    llmConfig: removeNullishValues(requestOptions),
+    llmConfig: removeNullishValues(
+      requestOptions as Record<string, unknown>,
+    ) as AnthropicClientOptions,
   };
 }
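The in-diff comment above concedes that the original for...in loop was replaced with three manual delete statements because TypeScript rejects indexing options.modelOptions with a plain string key. One way to keep the loop, shown here as a sketch rather than as part of this commit, is to assert the key array to the known union of keys:

    // Sketch only: a typed alternative to the three manual `delete` calls.
    // Object.keys() is typed as string[], so the result is asserted to the
    // union of systemOptions keys, all of which are optional on modelOptions.
    if (options.modelOptions) {
      const overlappingKeys = Object.keys(systemOptions) as Array<keyof typeof systemOptions>;
      for (const key of overlappingKeys) {
        delete options.modelOptions[key];
      }
    } else {
      throw new Error('No modelOptions provided');
    }

The assertion is safe here because systemOptions is a local object literal, so its keys cannot drift from the loop at runtime.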

types/anthropic.ts

@@ -0,0 +1,31 @@
+import { z } from 'zod';
+import { AnthropicClientOptions } from '@librechat/agents';
+import { anthropicSchema } from 'librechat-data-provider';
+
+export type AnthropicParameters = z.infer<typeof anthropicSchema>;
+
+/**
+ * Configuration options for the getLLMConfig function
+ */
+export interface AnthropicConfigOptions {
+  modelOptions?: Partial<AnthropicParameters>;
+  /** The user ID for tracking and personalization */
+  userId?: string;
+  /** Proxy server URL */
+  proxy?: string;
+  /** URL for a reverse proxy, if used */
+  reverseProxyUrl?: string;
+}
+
+/**
+ * Return type for getLLMConfig function
+ */
+export interface AnthropicLLMConfigResult {
+  /** Configuration options for creating an Anthropic LLM instance */
+  llmConfig: AnthropicClientOptions;
+  /** Array of tools to be used */
+  tools: Array<{
+    type: string;
+    name?: string;
+  }>;
+}
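With these definitions in place, a call site can be type-checked end to end. A brief usage sketch, assuming the import path for getLLMConfig and using placeholder values throughout; the modelOptions field names come from the diff above:

    import type { AnthropicConfigOptions } from '~/types/anthropic';
    import { getLLMConfig } from './llm'; // import path assumed

    // Placeholder values; promptCache and thinking are the system-level
    // keys that getLLMConfig splits out before merging modelOptions.
    const options: AnthropicConfigOptions = {
      modelOptions: { thinking: false, promptCache: true },
      reverseProxyUrl: 'https://proxy.example.com/v1', // placeholder URL
    };

    const { llmConfig, tools } = getLLMConfig(process.env.ANTHROPIC_API_KEY ?? '', options);

Omitting modelOptions entirely would throw 'No modelOptions provided', matching the updated tests.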