mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-09-22 08:12:00 +02:00

* feat: Add BedrockIcon component to SVG library * feat: EModelEndpoint.bedrock * feat: first pass, bedrock chat. note: AgentClient is returning `agents` as conversation.endpoint * fix: declare endpoint in initialization step * chore: Update @librechat/agents dependency to version 1.4.5 * feat: backend content aggregation for agents/bedrock * feat: abort agent requests * feat: AWS Bedrock icons * WIP: agent provider schema parsing * chore: Update EditIcon props type * refactor(useGenerationsByLatest): make agents and bedrock editable * refactor: non-assistant message content, parts * fix: Bedrock response `sender` * fix: use endpointOption.model_parameters not endpointOption.modelOptions * fix: types for step handler * refactor: Update Agents.ToolCallDelta type * refactor: Remove unnecessary assignment of parentMessageId in AskController * refactor: remove unnecessary assignment of parentMessageId (agent request handler) * fix(bedrock/agents): message regeneration * refactor: dynamic form elements using react-hook-form Controllers * fix: agent icons/labels for messages * fix: agent actions * fix: use of new dynamic tags causing application crash * refactor: dynamic settings touch-ups * refactor: update Slider component to allow custom track class name * refactor: update DynamicSlider component styles * refactor: use Constants value for GLOBAL_PROJECT_NAME (enum) * feat: agent share global methods/controllers * fix: agents query * fix: `getResponseModel` * fix: share prompt a11y issue * refactor: update SharePrompt dialog theme styles * refactor: explicit typing for SharePrompt * feat: add agent roles/permissions * chore: update @librechat/agents dependency to version 1.4.7 for tool_call_ids edge case * fix(Anthropic): messages.X.content.Y.tool_use.input: Input should be a valid dictionary * fix: handle text parts with tool_call_ids and empty text * fix: role initialization * refactor: don't make instructions required * refactor: improve typing of Text part * fix: 
setShowStopButton for agents route * chore: remove params for now * fix: add streamBuffer and streamRate to help prevent 'Overloaded' errors from Anthropic API * refactor: remove console.log statement in ContentRender component * chore: typing, rename Context to Delete Button * chore(DeleteButton): logging * refactor(Action): make accessible * style(Action): improve a11y again * refactor: remove use/mention of mongoose sessions * feat: first pass, sharing agents * feat: visual indicator for global agent, remove author when serving to non-author * wip: params * chore: fix typing issues * fix(schemas): typing * refactor: improve accessibility of ListCard component and fix console React warning * wip: reset templates for non-legacy new convos * Revert "wip: params" This reverts commitf8067e91d4
. * Revert "refactor: dynamic form elements using react-hook-form Controllers" This reverts commit 2150c4815d
. * fix(Parameters): types and parameter effect update to only update local state to parameters * refactor: optimize useDebouncedInput hook for better performance * feat: first pass, anthropic bedrock params * chore: paramEndpoints check for endpointType too * fix: maxTokens to use coerceNumber.optional(), * feat: extra chat model params * chore: reduce code repetition * refactor: improve preset title handling in SaveAsPresetDialog component * refactor: improve preset handling in HeaderOptions component * chore: improve typing, replace legacy dialog for SaveAsPresetDialog * feat: save as preset from parameters panel * fix: multi-search in select dropdown when using Option type * refactor: update default showDefault value to false in Dynamic components * feat: Bedrock presets settings * chore: config, fix agents schema, update config version * refactor: update AWS region variable name in bedrock options endpoint to BEDROCK_AWS_DEFAULT_REGION * refactor: update baseEndpointSchema in config.ts to include baseURL property * refactor: update createRun function to include req parameter and set streamRate based on provider * feat: availableRegions via config * refactor: remove unused demo agent controller file * WIP: title * Update @librechat/agents to version 1.5.0 * chore: addTitle.js to handle empty responseText * feat: support images and titles * feat: context token updates * Refactor BaseClient test to use expect.objectContaining * refactor: add model select, remove header options params, move side panel params below prompts * chore: update models list, catch title error * feat: model service for bedrock models (env) * chore: Remove verbose debug log in AgentClient class following stream * feat(bedrock): track token spend; fix: token rates, value key mapping for AWS models * refactor: handle streamRate in `handleLLMNewToken` callback * chore: AWS Bedrock example config in `.env.example` * refactor: Rename bedrockMeta to bedrockGeneral in settings.ts and use for AI21 
and Amazon Bedrock providers * refactor: Update `.env.example` with AWS Bedrock model IDs URL and additional notes * feat: titleModel support for bedrock * refactor: Update `.env.example` with additional notes for AWS Bedrock model IDs
267 lines
7.8 KiB
JavaScript
267 lines
7.8 KiB
JavaScript
const axios = require('axios');
|
|
const { HttpsProxyAgent } = require('https-proxy-agent');
|
|
const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
|
|
const { extractBaseURL, inputSchema, processModelData, logAxiosError } = require('~/utils');
|
|
const { OllamaClient } = require('~/app/clients/OllamaClient');
|
|
const getLogStores = require('~/cache/getLogStores');
|
|
|
|
/**
 * Splits a comma-separated string into an array of trimmed, non-empty values.
 * Non-string or empty input yields an empty array.
 * @param {string} input - The input string to split.
 * @returns {string[]} An array of trimmed values.
 */
const splitAndTrim = (input) => {
  if (typeof input !== 'string' || input === '') {
    return [];
  }
  const values = [];
  for (const piece of input.split(',')) {
    const trimmed = piece.trim();
    // Skip empty segments produced by consecutive or trailing commas.
    if (trimmed) {
      values.push(trimmed);
    }
  }
  return values;
};
|
|
|
|
const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService').config;
|
|
|
|
/**
 * Fetches OpenAI models from the specified base API path or Azure, based on the provided configuration.
 *
 * @param {Object} params - The parameters for fetching the models.
 * @param {Object} params.user - The user ID to send to the API.
 * @param {string} params.apiKey - The API key for authentication with the API.
 * @param {string} params.baseURL - The base path URL for the API.
 * @param {string} [params.name='OpenAI'] - The name of the API; defaults to 'OpenAI'.
 * @param {boolean} [params.azure=false] - Whether to fetch models from Azure.
 * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
 * @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response.
 * @param {string} [params.tokenKey] - The cache key to save the token configuration. Uses `name` if omitted.
 * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers (empty on failure).
 * @async
 */
const fetchModels = async ({
  user,
  apiKey,
  baseURL,
  name = 'OpenAI',
  azure = false,
  userIdQuery = false,
  createTokenConfig = true,
  tokenKey,
}) => {
  let models = [];

  // Nothing to query without a target URL (unless Azure) or credentials.
  if (!baseURL && !azure) {
    return models;
  }

  if (!apiKey) {
    return models;
  }

  // Ollama has its own model-listing endpoint; delegate to its client.
  if (name && name.toLowerCase().startsWith('ollama')) {
    return await OllamaClient.fetchModels(baseURL);
  }

  try {
    const options = {
      headers: {
        Authorization: `Bearer ${apiKey}`,
      },
    };

    if (process.env.PROXY) {
      options.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
    }

    // Fix: optional chaining — `baseURL` may be undefined when `azure` is true,
    // which previously threw a TypeError here (silently swallowed by the catch).
    if (process.env.OPENAI_ORGANIZATION && baseURL?.includes('openai')) {
      options.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
    }

    const url = new URL(`${baseURL}${azure ? '' : '/models'}`);
    if (user && userIdQuery) {
      url.searchParams.append('user', user);
    }
    const res = await axios.get(url.toString(), options);

    /** @type {z.infer<typeof inputSchema>} */
    const input = res.data;

    const validationResult = inputSchema.safeParse(input);
    if (validationResult.success && createTokenConfig) {
      const endpointTokenConfig = processModelData(input);
      const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
      await cache.set(tokenKey ?? name, endpointTokenConfig);
    }

    // Fix: guard against a malformed payload — the original mapped
    // `input.data` unconditionally and threw when it was not an array,
    // masking the real problem behind the generic axios error log.
    if (Array.isArray(input?.data)) {
      models = input.data.map((item) => item.id);
    }
  } catch (error) {
    const logMessage = `Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`;
    logAxiosError({ message: logMessage, error });
  }

  return models;
};
|
|
|
|
/**
 * Fetches models from the specified API path or Azure, based on the provided options.
 * Results are cached per base URL; on fetch failure the fallback list is returned uncached.
 * @async
 * @function
 * @param {object} opts - The options for fetching the models.
 * @param {string} opts.user - The user ID to send to the API.
 * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
 * @param {boolean} [opts.assistants=false] - Whether to use the Assistants base URL (fixes prior copy-paste doc).
 * @param {boolean} [opts.plugins=false] - Whether to fetch models from the plugins.
 * @param {string[]} [_models=[]] - The models to use as a fallback.
 * @returns {Promise<string[]>} The fetched (or fallback) model identifiers.
 */
const fetchOpenAIModels = async (opts, _models = []) => {
  // Fix: `_models.slice() ?? []` was dead code (`slice` never returns nullish)
  // and threw if `null` was passed explicitly; guard with Array.isArray instead.
  let models = Array.isArray(_models) ? [..._models] : [];
  let apiKey = openAIApiKey;
  const openaiBaseURL = 'https://api.openai.com/v1';
  let baseURL = openaiBaseURL;
  let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY;

  if (opts.assistants && process.env.ASSISTANTS_BASE_URL) {
    reverseProxyUrl = process.env.ASSISTANTS_BASE_URL;
  } else if (opts.azure) {
    // Azure model listing is not supported here; return the fallback list.
    return models;
    // const azure = getAzureCredentials();
    // baseURL = (genAzureChatCompletion(azure))
    //   .split('/deployments')[0]
    //   .concat(`/models?api-version=${azure.azureOpenAIApiVersion}`);
    // apiKey = azureOpenAIApiKey;
  } else if (process.env.OPENROUTER_API_KEY) {
    reverseProxyUrl = 'https://openrouter.ai/api/v1';
    apiKey = process.env.OPENROUTER_API_KEY;
  }

  if (reverseProxyUrl) {
    baseURL = extractBaseURL(reverseProxyUrl);
  }

  const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);

  const cachedModels = await modelsCache.get(baseURL);
  if (cachedModels) {
    return cachedModels;
  }

  if (baseURL || opts.azure) {
    models = await fetchModels({
      apiKey,
      baseURL,
      azure: opts.azure,
      user: opts.user,
    });
  }

  // Fetch failed or returned nothing: hand back the caller's fallback, uncached.
  if (models.length === 0) {
    return _models;
  }

  if (baseURL === openaiBaseURL) {
    // Keep only chat/davinci models, and move `-instruct` variants to the end.
    const regex = /(text-davinci-003|gpt-)/;
    models = models.filter((model) => regex.test(model));
    const instructModels = models.filter((model) => model.includes('instruct'));
    const otherModels = models.filter((model) => !model.includes('instruct'));
    models = otherModels.concat(instructModels);
  }

  await modelsCache.set(baseURL, models);
  return models;
};
|
|
|
|
/**
 * Loads the default models for the application.
 * @async
 * @function
 * @param {object} opts - The options for fetching the models.
 * @param {string} opts.user - The user ID to send to the API.
 * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
 * @param {boolean} [opts.plugins=false] - Whether to fetch models from the plugins.
 */
const getOpenAIModels = async (opts) => {
  // Pick the default model list for the requested endpoint.
  let models;
  if (opts.assistants) {
    models = defaultModels[EModelEndpoint.assistants];
  } else if (opts.azure) {
    models = defaultModels[EModelEndpoint.azureAssistants];
  } else {
    models = defaultModels[EModelEndpoint.openAI];
  }

  if (opts.plugins) {
    // Plugins cannot use legacy/instruct/dated snapshot models.
    const excluded = ['text-davinci', 'instruct', '0613', '0314', '0301'];
    models = models.filter((model) => excluded.every((token) => !model.includes(token)));
  }

  // An endpoint-specific env var overrides everything else.
  let key = 'OPENAI_MODELS';
  if (opts.assistants) {
    key = 'ASSISTANTS_MODELS';
  } else if (opts.azure) {
    key = 'AZURE_OPENAI_MODELS';
  } else if (opts.plugins) {
    key = 'PLUGIN_MODELS';
  }

  const override = process.env[key];
  if (override) {
    return splitAndTrim(override);
  }

  // User-provided keys skip remote fetching unless OpenRouter is configured.
  if (userProvidedOpenAI && !process.env.OPENROUTER_API_KEY) {
    return models;
  }

  return await fetchOpenAIModels(opts, models);
};
|
|
|
|
/**
 * Returns the ChatGPT-browser model list, overridable via `CHATGPT_MODELS`.
 * @returns {string[]} The model identifiers.
 */
const getChatGPTBrowserModels = () => {
  const override = process.env.CHATGPT_MODELS;
  if (override) {
    return splitAndTrim(override);
  }
  return ['text-davinci-002-render-sha', 'gpt-4'];
};
|
|
|
|
/**
 * Returns the Anthropic model list, overridable via `ANTHROPIC_MODELS`.
 * @returns {string[]} The model identifiers.
 */
const getAnthropicModels = () => {
  const override = process.env.ANTHROPIC_MODELS;
  return override ? splitAndTrim(override) : defaultModels[EModelEndpoint.anthropic];
};
|
|
|
|
/**
 * Returns the Google model list, overridable via `GOOGLE_MODELS`.
 * @returns {string[]} The model identifiers.
 */
const getGoogleModels = () => {
  const override = process.env.GOOGLE_MODELS;
  return override ? splitAndTrim(override) : defaultModels[EModelEndpoint.google];
};
|
|
|
|
/**
 * Returns the AWS Bedrock model list, overridable via `BEDROCK_AWS_MODELS`.
 * @returns {string[]} The model identifiers.
 */
const getBedrockModels = () => {
  const override = process.env.BEDROCK_AWS_MODELS;
  return override ? splitAndTrim(override) : defaultModels[EModelEndpoint.bedrock];
};
|
|
|
|
// Public API: per-endpoint model-list loaders, the generic `fetchModels`
// fetcher, and the `splitAndTrim` helper (exported for reuse/testing).
module.exports = {
  fetchModels,
  splitAndTrim,
  getOpenAIModels,
  getBedrockModels,
  getChatGPTBrowserModels,
  getAnthropicModels,
  getGoogleModels,
};
|