mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 17:00:15 +01:00
* feat: Add BedrockIcon component to SVG library
* feat: EModelEndpoint.bedrock
* feat: first pass, bedrock chat. note: AgentClient is returning `agents` as conversation.endpoint
* fix: declare endpoint in initialization step
* chore: Update @librechat/agents dependency to version 1.4.5
* feat: backend content aggregation for agents/bedrock
* feat: abort agent requests
* feat: AWS Bedrock icons
* WIP: agent provider schema parsing
* chore: Update EditIcon props type
* refactor(useGenerationsByLatest): make agents and bedrock editable
* refactor: non-assistant message content, parts
* fix: Bedrock response `sender`
* fix: use endpointOption.model_parameters not endpointOption.modelOptions
* fix: types for step handler
* refactor: Update Agents.ToolCallDelta type
* refactor: Remove unnecessary assignment of parentMessageId in AskController
* refactor: remove unnecessary assignment of parentMessageId (agent request handler)
* fix(bedrock/agents): message regeneration
* refactor: dynamic form elements using react-hook-form Controllers
* fix: agent icons/labels for messages
* fix: agent actions
* fix: use of new dynamic tags causing application crash
* refactor: dynamic settings touch-ups
* refactor: update Slider component to allow custom track class name
* refactor: update DynamicSlider component styles
* refactor: use Constants value for GLOBAL_PROJECT_NAME (enum)
* feat: agent share global methods/controllers
* fix: agents query
* fix: `getResponseModel`
* fix: share prompt a11y issue
* refactor: update SharePrompt dialog theme styles
* refactor: explicit typing for SharePrompt
* feat: add agent roles/permissions
* chore: update @librechat/agents dependency to version 1.4.7 for tool_call_ids edge case
* fix(Anthropic): messages.X.content.Y.tool_use.input: Input should be a valid dictionary
* fix: handle text parts with tool_call_ids and empty text
* fix: role initialization
* refactor: don't make instructions required
* refactor: improve typing of Text part
* fix: setShowStopButton for agents route
* chore: remove params for now
* fix: add streamBuffer and streamRate to help prevent 'Overloaded' errors from Anthropic API
* refactor: remove console.log statement in ContentRender component
* chore: typing, rename Context to Delete Button
* chore(DeleteButton): logging
* refactor(Action): make accessible
* style(Action): improve a11y again
* refactor: remove use/mention of mongoose sessions
* feat: first pass, sharing agents
* feat: visual indicator for global agent, remove author when serving to non-author
* wip: params
* chore: fix typing issues
* fix(schemas): typing
* refactor: improve accessibility of ListCard component and fix console React warning
* wip: reset templates for non-legacy new convos
* Revert "wip: params" (this reverts commit f8067e91d4)
* Revert "refactor: dynamic form elements using react-hook-form Controllers" (this reverts commit 2150c4815d)
* fix(Parameters): types and parameter effect update to only update local state to parameters
* refactor: optimize useDebouncedInput hook for better performance
* feat: first pass, anthropic bedrock params
* chore: paramEndpoints check for endpointType too
* fix: maxTokens to use coerceNumber.optional()
* feat: extra chat model params
* chore: reduce code repetition
* refactor: improve preset title handling in SaveAsPresetDialog component
* refactor: improve preset handling in HeaderOptions component
* chore: improve typing, replace legacy dialog for SaveAsPresetDialog
* feat: save as preset from parameters panel
* fix: multi-search in select dropdown when using Option type
* refactor: update default showDefault value to false in Dynamic components
* feat: Bedrock presets settings
* chore: config, fix agents schema, update config version
* refactor: update AWS region variable name in bedrock options endpoint to BEDROCK_AWS_DEFAULT_REGION
* refactor: update baseEndpointSchema in config.ts to include baseURL property
* refactor: update createRun function to include req parameter and set streamRate based on provider
* feat: availableRegions via config
* refactor: remove unused demo agent controller file
* WIP: title
* chore: Update @librechat/agents to version 1.5.0
* chore: addTitle.js to handle empty responseText
* feat: support images and titles
* feat: context token updates
* refactor: Update BaseClient test to use expect.objectContaining
* refactor: add model select, remove header options params, move side panel params below prompts
* chore: update models list, catch title error
* feat: model service for bedrock models (env)
* chore: Remove verbose debug log in AgentClient class following stream
* feat(bedrock): track token spend; fix: token rates, value key mapping for AWS models
* refactor: handle streamRate in `handleLLMNewToken` callback
* chore: AWS Bedrock example config in `.env.example`
* refactor: Rename bedrockMeta to bedrockGeneral in settings.ts and use for AI21 and Amazon Bedrock providers
* refactor: Update `.env.example` with AWS Bedrock model IDs URL and additional notes
* feat: titleModel support for bedrock
* refactor: Update `.env.example` with additional notes for AWS Bedrock model IDs
191 lines
7.6 KiB
JavaScript
const { matchModelName } = require('../utils');

const defaultRate = 6;
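
// Note: defaultRate is the fallback multiplier applied when a model has no explicit
// rate in the tables below; like those rates, it reads as USD per 1M tokens.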

/** AWS Bedrock pricing */
const bedrockValues = {
  'llama2-13b': { prompt: 0.75, completion: 1.0 },
  'llama2-70b': { prompt: 1.95, completion: 2.56 },
  'llama3-8b': { prompt: 0.3, completion: 0.6 },
  'llama3-70b': { prompt: 2.65, completion: 3.5 },
  'llama3-1-8b': { prompt: 0.3, completion: 0.6 },
  'llama3-1-70b': { prompt: 2.65, completion: 3.5 },
  'llama3-1-405b': { prompt: 5.32, completion: 16.0 },
  'mistral-7b': { prompt: 0.15, completion: 0.2 },
  'mistral-small': { prompt: 0.15, completion: 0.2 },
  'mixtral-8x7b': { prompt: 0.45, completion: 0.7 },
  'mistral-large-2402': { prompt: 4.0, completion: 12.0 },
  'mistral-large-2407': { prompt: 3.0, completion: 9.0 },
  'command-text': { prompt: 1.5, completion: 2.0 },
  'command-light': { prompt: 0.3, completion: 0.6 },
  'ai21.j2-mid-v1': { prompt: 12.5, completion: 12.5 },
  'ai21.j2-ultra-v1': { prompt: 18.8, completion: 18.8 },
  'ai21.jamba-instruct-v1:0': { prompt: 0.5, completion: 0.7 },
  'amazon.titan-text-lite-v1': { prompt: 0.15, completion: 0.2 },
  'amazon.titan-text-express-v1': { prompt: 0.2, completion: 0.6 },
  'amazon.titan-text-premier-v1:0': { prompt: 0.5, completion: 1.5 },
};
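
/*
 * Worked example (illustrative): with rates expressed as USD per 1M tokens,
 * a request to 'llama3-1-405b' with 100,000 prompt tokens costs
 * (100000 / 1e6) * 5.32 = 0.532 USD, and 10,000 completion tokens add
 * (10000 / 1e6) * 16.0 = 0.16 USD.
 */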

/**
 * Mapping of model keys (context window sizes or model names) to their respective
 * multipliers for prompt and completion.
 * The rates are 1 USD per 1M tokens.
 * @type {Object.<string, {prompt: number, completion: number}>}
 */
const tokenValues = Object.assign(
  {
    '8k': { prompt: 30, completion: 60 },
    '32k': { prompt: 60, completion: 120 },
    '4k': { prompt: 1.5, completion: 2 },
    '16k': { prompt: 3, completion: 4 },
    'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
    'gpt-4o-2024-08-06': { prompt: 2.5, completion: 10 },
    'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
    'gpt-4o': { prompt: 5, completion: 15 },
    'gpt-4-1106': { prompt: 10, completion: 30 },
    'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
    'claude-3-opus': { prompt: 15, completion: 75 },
    'claude-3-sonnet': { prompt: 3, completion: 15 },
    'claude-3-5-sonnet': { prompt: 3, completion: 15 },
    'claude-3.5-sonnet': { prompt: 3, completion: 15 },
    'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
    'claude-2.1': { prompt: 8, completion: 24 },
    'claude-2': { prompt: 8, completion: 24 },
    'claude-instant': { prompt: 0.8, completion: 2.4 },
    'claude-': { prompt: 0.8, completion: 2.4 },
    'command-r-plus': { prompt: 3, completion: 15 },
    'command-r': { prompt: 0.5, completion: 1.5 },
    /* Cohere doesn't have rates for the older command models,
    so this one is from https://artificialanalysis.ai/models/command-light/providers */
    command: { prompt: 0.38, completion: 0.38 },
    'gemini-1.5': { prompt: 7, completion: 21 }, // May 2nd, 2024 pricing
    gemini: { prompt: 0.5, completion: 1.5 }, // May 2nd, 2024 pricing
  },
  bedrockValues,
);
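
// Note: Object.assign merges left to right, so a key present in both maps would take
// its value from bedrockValues; currently the two key sets are disjoint.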

/**
 * Mapping of model keys to their respective multipliers for prompt cache write and read.
 * See Anthropic's documentation on this: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#pricing
 * The rates are 1 USD per 1M tokens.
 * @type {Object.<string, {write: number, read: number}>}
 */
const cacheTokenValues = {
  'claude-3.5-sonnet': { write: 3.75, read: 0.3 },
  'claude-3-5-sonnet': { write: 3.75, read: 0.3 },
  'claude-3-haiku': { write: 0.3, read: 0.03 },
};
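
/*
 * Worked example (illustrative): caching a 2,000-token prefix with 'claude-3-haiku'
 * costs (2000 / 1e6) * 0.3 = 0.0006 USD to write, and each subsequent cache read
 * costs (2000 / 1e6) * 0.03 = 0.00006 USD, a tenth of the write rate.
 */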

/**
 * Retrieves the key associated with a given model name.
 *
 * @param {string} model - The model name to match.
 * @param {string} endpoint - The endpoint name to match.
 * @returns {string|undefined} The key corresponding to the model name, or undefined if no match is found.
 */
const getValueKey = (model, endpoint) => {
  const modelName = matchModelName(model, endpoint);
  if (!modelName) {
    return undefined;
  }

  if (modelName.includes('gpt-3.5-turbo-16k')) {
    return '16k';
  } else if (modelName.includes('gpt-3.5-turbo-0125')) {
    return 'gpt-3.5-turbo-0125';
  } else if (modelName.includes('gpt-3.5-turbo-1106')) {
    return 'gpt-3.5-turbo-1106';
  } else if (modelName.includes('gpt-3.5')) {
    return '4k';
  } else if (modelName.includes('gpt-4o-2024-08-06')) {
    return 'gpt-4o-2024-08-06';
  } else if (modelName.includes('gpt-4o-mini')) {
    return 'gpt-4o-mini';
  } else if (modelName.includes('gpt-4o')) {
    return 'gpt-4o';
  } else if (modelName.includes('gpt-4-vision')) {
    return 'gpt-4-1106';
  } else if (modelName.includes('gpt-4-1106')) {
    return 'gpt-4-1106';
  } else if (modelName.includes('gpt-4-0125')) {
    return 'gpt-4-1106';
  } else if (modelName.includes('gpt-4-turbo')) {
    return 'gpt-4-1106';
  } else if (modelName.includes('gpt-4-32k')) {
    return '32k';
  } else if (modelName.includes('gpt-4')) {
    return '8k';
  } else if (tokenValues[modelName]) {
    return modelName;
  }

  return undefined;
};
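
// Examples (illustrative; assumes matchModelName returns a string containing the
// original model name). Note that order matters in the chain above:
//   getValueKey('gpt-4o-mini-2024-07-18') => 'gpt-4o-mini' (checked before 'gpt-4o')
//   getValueKey('gpt-4-0125-preview')     => 'gpt-4-1106'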

/**
 * Retrieves the multiplier for a given value key and token type. If no value key is provided,
 * it attempts to derive it from the model name.
 *
 * @param {Object} params - The parameters for the function.
 * @param {string} [params.valueKey] - The key corresponding to the model name.
 * @param {'prompt' | 'completion'} [params.tokenType] - The type of token (e.g., 'prompt' or 'completion').
 * @param {string} [params.model] - The model name to derive the value key from if not provided.
 * @param {string} [params.endpoint] - The endpoint name to derive the value key from if not provided.
 * @param {EndpointTokenConfig} [params.endpointTokenConfig] - The token configuration for the endpoint.
 * @returns {number} The multiplier for the given parameters, or a default value if not found.
 */
const getMultiplier = ({ valueKey, tokenType, model, endpoint, endpointTokenConfig }) => {
  if (endpointTokenConfig) {
    return endpointTokenConfig?.[model]?.[tokenType] ?? defaultRate;
  }

  if (valueKey && tokenType) {
    // Guard the lookup so an unrecognized valueKey falls back to the default rate
    // instead of throwing.
    return tokenValues[valueKey]?.[tokenType] ?? defaultRate;
  }

  if (!tokenType || !model) {
    return 1;
  }

  valueKey = getValueKey(model, endpoint);
  if (!valueKey) {
    return defaultRate;
  }

  // If we got this far and tokenValues[valueKey][tokenType] is somehow still undefined,
  // return the default rate (a rough average of the multipliers above).
  return tokenValues[valueKey]?.[tokenType] ?? defaultRate;
};
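
// Example usage (illustrative; assumes matchModelName resolves 'gpt-4o' to itself):
//   const rate = getMultiplier({ model: 'gpt-4o', tokenType: 'prompt' }); // 5
//   const usd = (1200 / 1e6) * rate; // a 1,200-token prompt costs 0.006 USD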

/**
 * Retrieves the cache multiplier for a given value key and cache type. If no value key is provided,
 * it attempts to derive it from the model name.
 *
 * @param {Object} params - The parameters for the function.
 * @param {string} [params.valueKey] - The key corresponding to the model name.
 * @param {'write' | 'read'} [params.cacheType] - The type of cache operation (e.g., 'write' or 'read').
 * @param {string} [params.model] - The model name to derive the value key from if not provided.
 * @param {string} [params.endpoint] - The endpoint name to derive the value key from if not provided.
 * @param {EndpointTokenConfig} [params.endpointTokenConfig] - The token configuration for the endpoint.
 * @returns {number | null} The multiplier for the given parameters, or `null` if not found.
 */
const getCacheMultiplier = ({ valueKey, cacheType, model, endpoint, endpointTokenConfig }) => {
  if (endpointTokenConfig) {
    return endpointTokenConfig?.[model]?.[cacheType] ?? null;
  }

  if (valueKey && cacheType) {
    return cacheTokenValues[valueKey]?.[cacheType] ?? null;
  }

  if (!cacheType || !model) {
    return null;
  }

  valueKey = getValueKey(model, endpoint);
  if (!valueKey) {
    return null;
  }

  // If we got this far and cacheTokenValues[valueKey][cacheType] is somehow still
  // undefined, return null so the caller can skip cache pricing for this model.
  return cacheTokenValues[valueKey]?.[cacheType] ?? null;
};
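
// Example usage (illustrative; assumes matchModelName resolves these names to the
// keys above):
//   getCacheMultiplier({ model: 'claude-3-5-sonnet', cacheType: 'read' }); // 0.3
//   getCacheMultiplier({ model: 'gpt-4o', cacheType: 'read' });            // null (no cache rates)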

module.exports = { tokenValues, getValueKey, getMultiplier, getCacheMultiplier, defaultRate };