mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 08:50:15 +01:00
* feat: Add BedrockIcon component to SVG library * feat: EModelEndpoint.bedrock * feat: first pass, bedrock chat. note: AgentClient is returning `agents` as conversation.endpoint * fix: declare endpoint in initialization step * chore: Update @librechat/agents dependency to version 1.4.5 * feat: backend content aggregation for agents/bedrock * feat: abort agent requests * feat: AWS Bedrock icons * WIP: agent provider schema parsing * chore: Update EditIcon props type * refactor(useGenerationsByLatest): make agents and bedrock editable * refactor: non-assistant message content, parts * fix: Bedrock response `sender` * fix: use endpointOption.model_parameters not endpointOption.modelOptions * fix: types for step handler * refactor: Update Agents.ToolCallDelta type * refactor: Remove unnecessary assignment of parentMessageId in AskController * refactor: remove unnecessary assignment of parentMessageId (agent request handler) * fix(bedrock/agents): message regeneration * refactor: dynamic form elements using react-hook-form Controllers * fix: agent icons/labels for messages * fix: agent actions * fix: use of new dynamic tags causing application crash * refactor: dynamic settings touch-ups * refactor: update Slider component to allow custom track class name * refactor: update DynamicSlider component styles * refactor: use Constants value for GLOBAL_PROJECT_NAME (enum) * feat: agent share global methods/controllers * fix: agents query * fix: `getResponseModel` * fix: share prompt a11y issue * refactor: update SharePrompt dialog theme styles * refactor: explicit typing for SharePrompt * feat: add agent roles/permissions * chore: update @librechat/agents dependency to version 1.4.7 for tool_call_ids edge case * fix(Anthropic): messages.X.content.Y.tool_use.input: Input should be a valid dictionary * fix: handle text parts with tool_call_ids and empty text * fix: role initialization * refactor: don't make instructions required * refactor: improve typing of Text part * fix: 
setShowStopButton for agents route * chore: remove params for now * fix: add streamBuffer and streamRate to help prevent 'Overloaded' errors from Anthropic API * refactor: remove console.log statement in ContentRender component * chore: typing, rename Context to Delete Button * chore(DeleteButton): logging * refactor(Action): make accessible * style(Action): improve a11y again * refactor: remove use/mention of mongoose sessions * feat: first pass, sharing agents * feat: visual indicator for global agent, remove author when serving to non-author * wip: params * chore: fix typing issues * fix(schemas): typing * refactor: improve accessibility of ListCard component and fix console React warning * wip: reset templates for non-legacy new convos * Revert "wip: params" This reverts commitf8067e91d4. * Revert "refactor: dynamic form elements using react-hook-form Controllers" This reverts commit2150c4815d. * fix(Parameters): types and parameter effect update to only update local state to parameters * refactor: optimize useDebouncedInput hook for better performance * feat: first pass, anthropic bedrock params * chore: paramEndpoints check for endpointType too * fix: maxTokens to use coerceNumber.optional(), * feat: extra chat model params * chore: reduce code repetition * refactor: improve preset title handling in SaveAsPresetDialog component * refactor: improve preset handling in HeaderOptions component * chore: improve typing, replace legacy dialog for SaveAsPresetDialog * feat: save as preset from parameters panel * fix: multi-search in select dropdown when using Option type * refactor: update default showDefault value to false in Dynamic components * feat: Bedrock presets settings * chore: config, fix agents schema, update config version * refactor: update AWS region variable name in bedrock options endpoint to BEDROCK_AWS_DEFAULT_REGION * refactor: update baseEndpointSchema in config.ts to include baseURL property * refactor: update createRun function to include req 
parameter and set streamRate based on provider * feat: availableRegions via config * refactor: remove unused demo agent controller file * WIP: title * Update @librechat/agents to version 1.5.0 * chore: addTitle.js to handle empty responseText * feat: support images and titles * feat: context token updates * Refactor BaseClient test to use expect.objectContaining * refactor: add model select, remove header options params, move side panel params below prompts * chore: update models list, catch title error * feat: model service for bedrock models (env) * chore: Remove verbose debug log in AgentClient class following stream * feat(bedrock): track token spend; fix: token rates, value key mapping for AWS models * refactor: handle streamRate in `handleLLMNewToken` callback * chore: AWS Bedrock example config in `.env.example` * refactor: Rename bedrockMeta to bedrockGeneral in settings.ts and use for AI21 and Amazon Bedrock providers * refactor: Update `.env.example` with AWS Bedrock model IDs URL and additional notes * feat: titleModel support for bedrock * refactor: Update `.env.example` with additional notes for AWS Bedrock model IDs
304 lines
8.4 KiB
JavaScript
304 lines
8.4 KiB
JavaScript
const z = require('zod');
|
|
const { EModelEndpoint } = require('librechat-data-provider');
|
|
|
|
/**
 * Max context-token windows for OpenAI model families, keyed by model name
 * or name prefix. Values sit slightly below the advertised maximums to leave
 * headroom (see per-entry comments).
 * NOTE: key ORDER is significant — `findMatchingPattern` scans keys in
 * reverse declaration order, so later (more specific) keys win; do not reorder.
 */
const openAIModels = {
  'gpt-4': 8187, // -5 from max
  'gpt-4-0613': 8187, // -5 from max
  'gpt-4-32k': 32758, // -10 from max
  'gpt-4-32k-0314': 32758, // -10 from max
  'gpt-4-32k-0613': 32758, // -10 from max
  'gpt-4-1106': 127500, // -500 from max
  'gpt-4-0125': 127500, // -500 from max
  'gpt-4o': 127500, // -500 from max
  'gpt-4o-mini': 127500, // -500 from max
  'gpt-4o-2024-08-06': 127500, // -500 from max
  'gpt-4-turbo': 127500, // -500 from max
  'gpt-4-vision': 127500, // -500 from max
  'gpt-3.5-turbo': 16375, // -10 from max
  'gpt-3.5-turbo-0613': 4092, // -5 from max
  'gpt-3.5-turbo-0301': 4092, // -5 from max
  'gpt-3.5-turbo-16k': 16375, // -10 from max
  'gpt-3.5-turbo-16k-0613': 16375, // -10 from max
  'gpt-3.5-turbo-1106': 16375, // -10 from max
  'gpt-3.5-turbo-0125': 16375, // -10 from max
};
|
|
|
|
/**
 * Max context-token windows for Mistral models/prefixes.
 * Key order matters: `findMatchingPattern` checks keys in reverse, so the
 * generic 'mistral-' prefix is declared first and matched last.
 */
const mistralModels = {
  'mistral-': 31990, // -10 from max
  'mistral-7b': 31990, // -10 from max
  'mistral-small': 31990, // -10 from max
  'mixtral-8x7b': 31990, // -10 from max
  'mistral-large-2402': 127500,
  'mistral-large-2407': 127500,
};
|
|
|
|
/**
 * Max context-token windows for Cohere Command models/prefixes.
 * Key order matters for reverse-order prefix matching.
 */
const cohereModels = {
  'command-light': 4086, // -10 from max
  'command-light-nightly': 8182, // -10 from max
  command: 4086, // -10 from max
  'command-nightly': 8182, // -10 from max
  'command-r': 127500, // -500 from max
  'command-r-plus': 127500, // -500 from max
};
|
|
|
|
/**
 * Max context-token windows for Google (Gemini/PaLM2/Codey) models/prefixes.
 * Key order matters for reverse-order prefix matching: the broad 'code-',
 * 'text-', 'chat-' prefixes are declared after the specific '-32k' variants
 * so the specific ones would still match first only via exact lookup —
 * NOTE(review): a '-32k' name that misses exact lookup will hit the later
 * generic prefix; confirm that is intended.
 */
const googleModels = {
  /* Max I/O is combined so we subtract the amount from max response tokens for actual total */
  gemini: 30720, // -2048 from max
  'gemini-pro-vision': 12288, // -4096 from max
  'gemini-1.5': 1048576, // -8192 from max
  'text-bison-32k': 32758, // -10 from max
  'chat-bison-32k': 32758, // -10 from max
  'code-bison-32k': 32758, // -10 from max
  'codechat-bison-32k': 32758,
  /* Codey, -5 from max: 6144 */
  'code-': 6139,
  'codechat-': 6139,
  /* PaLM2, -5 from max: 8192 */
  'text-': 8187,
  'chat-': 8187,
};
|
|
|
|
/**
 * Max context-token windows for Anthropic Claude models/prefixes.
 * The generic 'claude-' prefix is first so (with reverse-order matching)
 * every more specific key takes precedence over it.
 */
const anthropicModels = {
  'claude-': 100000,
  'claude-instant': 100000,
  'claude-2': 100000,
  'claude-2.1': 200000,
  'claude-3-haiku': 200000,
  'claude-3-sonnet': 200000,
  'claude-3-opus': 200000,
  'claude-3-5-sonnet': 200000,
  'claude-3.5-sonnet': 200000,
};
|
|
|
|
/** Max context-token windows for Meta Llama models/prefixes (Bedrock naming). */
const metaModels = {
  'llama2-13b': 4000,
  'llama2-70b': 4000,
  'llama3-8b': 8000,
  'llama3-70b': 8000,
  'llama3-1-8b': 127500,
  'llama3-1-70b': 127500,
  'llama3-1-405b': 127500,
};
|
|
|
|
/** Max context-token windows for AI21 models (AWS Bedrock model IDs). */
const ai21Models = {
  'ai21.j2-mid-v1': 8182, // -10 from max
  'ai21.j2-ultra-v1': 8182, // -10 from max
  'ai21.jamba-instruct-v1:0': 255500, // -500 from max
};
|
|
|
|
/** Max context-token windows for Amazon Titan models (AWS Bedrock model IDs). */
const amazonModels = {
  'amazon.titan-text-lite-v1': 4000,
  'amazon.titan-text-express-v1': 8000,
  'amazon.titan-text-premier-v1:0': 31500, // -500 from max
};
|
|
|
|
/**
 * All model families served through AWS Bedrock, merged into one map.
 * Spread order matters twice: later spreads override duplicate keys, and
 * the resulting key order drives reverse-order prefix matching.
 */
const bedrockModels = {
  ...anthropicModels,
  ...mistralModels,
  ...cohereModels,
  ...metaModels,
  ...ai21Models,
  ...amazonModels,
};
|
|
|
|
/** Catch-all map for endpoints that may serve any provider's model names. */
const aggregateModels = { ...openAIModels, ...googleModels, ...bedrockModels };
|
|
|
|
/**
 * Per-endpoint token-limit lookup tables. Provider-specific endpoints use
 * their own maps; openAI, agents, and custom endpoints share the aggregate
 * map since they can route to arbitrary models.
 */
const maxTokensMap = {
  [EModelEndpoint.azureOpenAI]: openAIModels,
  [EModelEndpoint.openAI]: aggregateModels,
  [EModelEndpoint.agents]: aggregateModels,
  [EModelEndpoint.custom]: aggregateModels,
  [EModelEndpoint.google]: googleModels,
  [EModelEndpoint.anthropic]: anthropicModels,
  [EModelEndpoint.bedrock]: bedrockModels,
};
|
|
|
|
/**
 * Finds the first key of `tokensMap` that occurs as a substring of
 * `modelName`, scanning keys in REVERSE declaration order so that
 * later-declared (more specific) keys take precedence.
 *
 * @param {string} modelName - Model name to match against the map's keys.
 * @param {Record<string, number>} tokensMap - Map whose keys are model names/prefixes.
 * @returns {string|null} The matching key, or null when none matches.
 */
function findMatchingPattern(modelName, tokensMap) {
  for (const patternKey of Object.keys(tokensMap).reverse()) {
    if (modelName.includes(patternKey)) {
      return patternKey;
    }
  }

  return null;
}
|
|
|
|
/**
 * Retrieves the maximum tokens for a given model name. If the exact model name isn't found,
 * it searches for partial matches within the model name, checking keys in reverse order.
 *
 * @param {string} modelName - The name of the model to look up.
 * @param {string} endpoint - The endpoint (default is 'openAI').
 * @param {EndpointTokenConfig} [endpointTokenConfig] - Token Config for current endpoint to use for max tokens lookup
 * @returns {number|undefined} The maximum tokens for the given model or undefined if no match is found.
 *
 * @example
 * getModelMaxTokens('gpt-4-32k-0613'); // Returns 32758
 * getModelMaxTokens('gpt-4-32k-unknown'); // Returns 32758
 * getModelMaxTokens('unknown-model'); // Returns undefined
 */
function getModelMaxTokens(modelName, endpoint = EModelEndpoint.openAI, endpointTokenConfig) {
  if (typeof modelName !== 'string') {
    return undefined;
  }

  /** @type {EndpointTokenConfig | Record<string, number>} */
  const tokensMap = endpointTokenConfig ?? maxTokensMap[endpoint];
  if (!tokensMap) {
    return undefined;
  }

  // Entries are either a plain number (static maps above) or an object with a
  // `context` field (EndpointTokenConfig built by `processModelData`).
  const exactMatch = tokensMap[modelName];
  if (exactMatch?.context) {
    return exactMatch.context;
  }

  if (exactMatch) {
    return exactMatch;
  }

  // No exact hit: fall back to reverse-order prefix/substring matching.
  const matchedPattern = findMatchingPattern(modelName, tokensMap);

  if (matchedPattern) {
    const result = tokensMap[matchedPattern];
    return result?.context ?? result;
  }

  return undefined;
}
|
|
|
|
/**
 * Retrieves the model name key for a given model name input. If the exact model name isn't found,
 * it searches for partial matches within the model name, checking keys in reverse order.
 *
 * @param {string} modelName - The name of the model to look up.
 * @param {string} endpoint - The endpoint (default is 'openAI').
 * @returns {string|undefined} The model name key for the given model; returns input if no match is found and is string.
 *
 * @example
 * matchModelName('gpt-4-32k-0613'); // Returns 'gpt-4-32k-0613'
 * matchModelName('gpt-4-32k-unknown'); // Returns 'gpt-4-32k'
 * matchModelName('unknown-model'); // Returns 'unknown-model' (input is echoed back when nothing matches)
 */
function matchModelName(modelName, endpoint = EModelEndpoint.openAI) {
  if (typeof modelName !== 'string') {
    return undefined;
  }

  const tokensMap = maxTokensMap[endpoint];
  // Unknown endpoint: nothing to normalize against, echo the input back.
  if (!tokensMap) {
    return modelName;
  }

  // Exact key: the input is already canonical.
  if (tokensMap[modelName]) {
    return modelName;
  }

  // Otherwise normalize to the closest (reverse-order) pattern key, or echo
  // the input unchanged when no pattern matches.
  const matchedPattern = findMatchingPattern(modelName, tokensMap);
  return matchedPattern || modelName;
}
|
|
|
|
/**
 * Schema for one model record from a models-listing API.
 * NOTE(review): shape appears to match OpenRouter's `/models` response
 * (see the 'openrouter/auto' special case in `processModelData`) — confirm.
 */
const modelSchema = z.object({
  id: z.string(),
  // Pricing arrives as decimal strings (per-token USD rates).
  pricing: z.object({
    prompt: z.string(),
    completion: z.string(),
  }),
  context_length: z.number(),
});
|
|
|
|
/** Envelope schema for the API payload consumed by `processModelData`. */
const inputSchema = z.object({
  data: z.array(modelSchema),
});
|
|
|
|
/**
 * Processes a list of model data from an API and organizes it into structured data based on URL and specifics of rates and context.
 * @param {{ data: Array<z.infer<typeof modelSchema>> }} input The input object containing base URL and data fetched from the API.
 * @returns {EndpointTokenConfig} The processed model data.
 * @throws {Error} When `input` does not satisfy `inputSchema`.
 */
function processModelData(input) {
  const parsed = inputSchema.safeParse(input);
  if (!parsed.success) {
    throw new Error('Invalid input data');
  }

  /** @type {EndpointTokenConfig} */
  const tokenConfig = {};

  for (const model of parsed.data.data) {
    const modelKey = model.id;

    // 'openrouter/auto' carries no usable rates; substitute fixed fallbacks.
    const pricing =
      modelKey === 'openrouter/auto'
        ? { prompt: '0.00001', completion: '0.00003' }
        : model.pricing;

    // Convert per-token USD strings into per-million-token rates.
    tokenConfig[modelKey] = {
      prompt: parseFloat(pricing.prompt) * 1000000,
      completion: parseFloat(pricing.completion) * 1000000,
      context: model.context_length,
    };
  }

  return tokenConfig;
}
|
|
|
|
/**
 * Model names with known tiktoken encodings (legacy GPT-3/Codex/embedding
 * families plus early gpt-4 / gpt-3.5-turbo releases).
 * Presumably used to select a tokenizer elsewhere — verify at call sites.
 */
const tiktokenModels = new Set([
  'text-davinci-003',
  'text-davinci-002',
  'text-davinci-001',
  'text-curie-001',
  'text-babbage-001',
  'text-ada-001',
  'davinci',
  'curie',
  'babbage',
  'ada',
  'code-davinci-002',
  'code-davinci-001',
  'code-cushman-002',
  'code-cushman-001',
  'davinci-codex',
  'cushman-codex',
  'text-davinci-edit-001',
  'code-davinci-edit-001',
  'text-embedding-ada-002',
  'text-similarity-davinci-001',
  'text-similarity-curie-001',
  'text-similarity-babbage-001',
  'text-similarity-ada-001',
  'text-search-davinci-doc-001',
  'text-search-curie-doc-001',
  'text-search-babbage-doc-001',
  'text-search-ada-doc-001',
  'code-search-babbage-code-001',
  'code-search-ada-code-001',
  'gpt2',
  'gpt-4',
  'gpt-4-0314',
  'gpt-4-32k',
  'gpt-4-32k-0314',
  'gpt-3.5-turbo',
  'gpt-3.5-turbo-0301',
]);
|
|
|
|
// Public API: token maps, validation schemas, and model-name/token lookup helpers.
module.exports = {
  tiktokenModels,
  maxTokensMap,
  inputSchema,
  modelSchema,
  getModelMaxTokens,
  matchModelName,
  processModelData,
};
|