LibreChat/api/server/services/Endpoints/bedrock/initialize.js
Dustin Healy c6ecf0095b
🎚️ feat: Anthropic Parameter Set Support via Custom Endpoints (#9415)
* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412)

* ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413)

* refactor: move tokens.js over to packages/api and update imports

* refactor: port tokens.js to typescript

* refactor: move helpers.js over to packages/api and update imports

* refactor: port helpers.js to typescript

* refactor: move anthropic/llm.js over to packages/api and update imports

* refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js

* refactor: move llm.spec.js over to packages/api and update import

* refactor: port llm.spec.js over to typescript

* 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414)

feat: add anthropic llm config support for openai-like (custom) endpoints

* fix: missed compiler / type issues from addition of getAnthropicLLMConfig

* refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values

* WIP: first pass, decouple `llmConfig` from `configOptions` (see the sketch after this log)

* chore: update import path for OpenAI configuration from 'llm' to 'config'

* refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions

* refactor: cleanup type, introduce openai transform from alt provider

* chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports

* chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json

* refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams']

* refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction

* refactor: conform userId field for anthropic/openai, cleanup anthropic typing

* ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations

* ci: replace userId with user in clientOptions for getLLMConfig

* test: add Azure OpenAI endpoint tests for various configurations in getOpenAIConfig

* refactor: defaultHeaders retrieval for prompt caching for anthropic-based custom endpoint (litellm)

* test: add unit tests for getOpenAIConfig with various Anthropic model configurations

* test: enhance Anthropic compatibility tests with addParams and dropParams handling

* chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json

* chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-09-08 14:35:29 -04:00
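
The log above names several helpers (`getOpenAILLMConfig`, `getAnthropicLLMConfig`, `transformToOpenAIConfig`, `removeNullishValues`) without showing them. Below is a minimal, hypothetical sketch of the core idea: keeping the model parameter payload (`llmConfig`) separate from client transport settings (`configOptions`), with an Anthropic-flavored parameter set adapted for an OpenAI-compatible custom endpoint such as a LiteLLM proxy. All function names and field choices here are illustrative, not the actual `@librechat/api` implementation.

// Hypothetical sketch — simplified; not the actual @librechat/api code.

/** Drop null/undefined entries (in the spirit of `removeNullishValues`). */
const removeNullish = (obj) =>
  Object.fromEntries(Object.entries(obj).filter(([, value]) => value != null));

/** Hypothetical: map user-facing model options to Anthropic parameters. */
function getAnthropicParams(modelOptions) {
  return removeNullish({
    model: modelOptions.model,
    max_tokens: modelOptions.maxOutputTokens,
    temperature: modelOptions.temperature,
    top_p: modelOptions.topP,
    stop_sequences: modelOptions.stop,
  });
}

/**
 * Hypothetical: produce an OpenAI-shaped config for a custom endpoint
 * (e.g., a LiteLLM proxy fronting Claude models), keeping the parameter
 * payload (`llmConfig`) separate from transport settings (`configOptions`).
 */
function transformForCustomEndpoint(modelOptions, { apiKey, baseURL, defaultHeaders }) {
  return {
    llmConfig: getAnthropicParams(modelOptions),
    configOptions: removeNullish({ baseURL, defaultHeaders }),
    apiKey,
  };
}

With that split, a caller can pass `llmConfig` straight into the model invocation while `configOptions` (base URL, default headers such as an Anthropic prompt-caching beta header) configures the HTTP client. The initializer below follows the same pattern: it merges `options.llmConfig` into `agent.model_parameters` and attaches `options.configOptions` under `configuration`.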


const { getModelMaxTokens } = require('@librechat/api');
const { createContentAggregator } = require('@librechat/agents');
const {
  EModelEndpoint,
  providerEndpointMap,
  getResponseSender,
} = require('librechat-data-provider');
const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks');
const getOptions = require('~/server/services/Endpoints/bedrock/options');
const AgentClient = require('~/server/controllers/agents/client');

const initializeClient = async ({ req, res, endpointOption }) => {
  if (!endpointOption) {
    throw new Error('Endpoint option not provided');
  }

  /** @type {Array<UsageMetadata>} */
  const collectedUsage = [];
  const { contentParts, aggregateContent } = createContentAggregator();
  const eventHandlers = getDefaultHandlers({ res, aggregateContent, collectedUsage });

  /** @type {Agent} */
  const agent = {
    id: EModelEndpoint.bedrock,
    name: endpointOption.name,
    provider: EModelEndpoint.bedrock,
    endpoint: EModelEndpoint.bedrock,
    instructions: endpointOption.promptPrefix,
    model: endpointOption.model_parameters.model,
    model_parameters: endpointOption.model_parameters,
  };

  // Append the artifacts prompt to the instructions when one is configured.
  if (typeof endpointOption.artifactsPrompt === 'string' && endpointOption.artifactsPrompt) {
    agent.instructions = `${agent.instructions ?? ''}\n${endpointOption.artifactsPrompt}`.trim();
  }

  // TODO: pass-in override settings that are specific to current run
  const options = await getOptions({
    req,
    res,
    endpointOption,
  });

  // Merge the resolved LLM parameters into the agent; transport-level
  // settings (if any) are attached separately under `configuration`.
  agent.model_parameters = Object.assign(agent.model_parameters, options.llmConfig);
  if (options.configOptions) {
    agent.model_parameters.configuration = options.configOptions;
  }

  const sender =
    agent.name ??
    getResponseSender({
      ...endpointOption,
      model: endpointOption.model_parameters.model,
    });

  const client = new AgentClient({
    req,
    res,
    agent,
    sender,
    // tools,
    contentParts,
    eventHandlers,
    collectedUsage,
    spec: endpointOption.spec,
    iconURL: endpointOption.iconURL,
    endpoint: EModelEndpoint.bedrock,
    resendFiles: endpointOption.resendFiles,
    // Context window: explicit option -> agent value -> per-model lookup -> 4000-token fallback
    maxContextTokens:
      endpointOption.maxContextTokens ??
      agent.max_context_tokens ??
      getModelMaxTokens(agent.model_parameters.model, providerEndpointMap[agent.provider]) ??
      4000,
    attachments: endpointOption.attachments,
  });

  return { client };
};

module.exports = { initializeClient };
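
For orientation, a minimal calling sketch, assuming Express-style `req`/`res` objects and an `endpointOption` of the shape this module reads; the model ID and option values are placeholders, not a documented contract:

// Usage sketch (assumed shapes; illustrative only).
const { initializeClient } = require('~/server/services/Endpoints/bedrock/initialize');

async function handleBedrockChat(req, res) {
  const endpointOption = {
    name: 'Bedrock Chat',
    promptPrefix: 'You are a concise assistant.',
    model_parameters: { model: 'anthropic.claude-3-5-sonnet-20240620-v1:0' },
    resendFiles: false,
  };

  const { client } = await initializeClient({ req, res, endpointOption });
  // `client` is an AgentClient whose event handlers stream aggregated
  // content to `res` and collect usage metadata for the run.
  return client;
}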