LibreChat/api/server/services/Config/loadDefaultModels.js


const { EModelEndpoint } = require('librechat-data-provider');
const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;
const {
getOpenAIModels,
getGoogleModels,
getBedrockModels,
getAnthropicModels,
} = require('~/server/services/ModelService');
const { logger } = require('~/config');

/**
 * Loads the default models for the application.
 * @async
 * @function
 * @param {Express.Request} req - The Express request object.
 * @returns {Promise<Object.<string, string[]>>} A map of endpoint names to their available model lists.
 */
async function loadDefaultModels(req) {
  try {
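    // Fetch each provider's model list in parallel; each fetcher catches its
    // own errors and falls back to an empty list, so one failing provider
    // does not reject the whole Promise.all.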
    const [
      openAI,
      anthropic,
      azureOpenAI,
      gptPlugins,
      assistants,
      azureAssistants,
      google,
      bedrock,
    ] = await Promise.all([
      getOpenAIModels({ user: req.user.id }).catch((error) => {
        logger.error('Error fetching OpenAI models:', error);
        return [];
      }),
      getAnthropicModels({ user: req.user.id }).catch((error) => {
        logger.error('Error fetching Anthropic models:', error);
        return [];
      }),
      getOpenAIModels({ user: req.user.id, azure: true }).catch((error) => {
        logger.error('Error fetching Azure OpenAI models:', error);
        return [];
      }),
      getOpenAIModels({ user: req.user.id, azure: useAzurePlugins, plugins: true }).catch(
        (error) => {
          logger.error('Error fetching Plugin models:', error);
          return [];
        },
      ),
      getOpenAIModels({ assistants: true }).catch((error) => {
        logger.error('Error fetching OpenAI Assistants API models:', error);
        return [];
      }),
      getOpenAIModels({ azureAssistants: true }).catch((error) => {
        logger.error('Error fetching Azure OpenAI Assistants API models:', error);
        return [];
      }),
      Promise.resolve(getGoogleModels()).catch((error) => {
        logger.error('Error getting Google models:', error);
        return [];
      }),
      Promise.resolve(getBedrockModels()).catch((error) => {
        logger.error('Error getting Bedrock models:', error);
        return [];
      }),
    ]);
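
    // Map each endpoint to its model list; the agents endpoint reuses the
    // OpenAI list.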
    return {
      [EModelEndpoint.openAI]: openAI,
      [EModelEndpoint.agents]: openAI,
      [EModelEndpoint.google]: google,
      [EModelEndpoint.anthropic]: anthropic,
      [EModelEndpoint.gptPlugins]: gptPlugins,
      [EModelEndpoint.azureOpenAI]: azureOpenAI,
      [EModelEndpoint.assistants]: assistants,
      [EModelEndpoint.azureAssistants]: azureAssistants,
      [EModelEndpoint.bedrock]: bedrock,
    };
  } catch (error) {
    logger.error('Error fetching default models:', error);
    throw new Error(`Failed to load default models: ${error.message}`);
  }
}

module.exports = loadDefaultModels;
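
/*
 * Usage sketch (illustrative only, not part of this module): the loader expects
 * an authenticated Express request and resolves to the endpoint → model-list
 * map returned above. The route path and `requireJwtAuth` middleware below are
 * assumptions made for the sake of the example.
 *
 *   const loadDefaultModels = require('~/server/services/Config/loadDefaultModels');
 *
 *   router.get('/models', requireJwtAuth, async (req, res) => {
 *     const modelsConfig = await loadDefaultModels(req);
 *     res.status(200).send(modelsConfig);
 *   });
 */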