🤖 feat(Anthropic): Claude 3 & Vision Support (#1984)

* chore: bump anthropic SDK

* chore: update anthropic config settings (fileSupport, default models)

* feat: anthropic multi-modal formatting

* refactor: update vision models and use endpoint-specific max long-side resizing (see the resize sketch after this list)

* feat(anthropic): multimodal messages, retry logic, and messages payload

* chore: add more safety when trimming content, due to a whitespace error on assistant messages (see the trimming sketch after this list)

* feat(anthropic): token accounting and resending multiple images (in progress)

* chore: bump data-provider

* feat(anthropic): resendImages feature

* chore: optimize Edit/Ask controllers, switch model back to req model

* fix: false positive when flagging a model as invalid

* refactor(validateVisionModel): use an object as the argument; pass in additional/available models (see the validation sketch after this list)

* refactor(validateModel): use helper function, `getModelsConfig`

* feat: add modelsConfig to endpointOption so it gets passed to all clients; use it to properly validate vision models

* refactor: initialize default vision model and make sure it's available before assigning it

* refactor(useSSE): avoid resetting the model if the user selected a new model between request and response (see the state sketch after this list)

* feat: show rate in transaction logging

* fix: return tokenCountMap regardless of payload shape
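
The endpoint-specific resize mentioned in the vision-models commit scales an image down so its longest side fits a per-endpoint cap. A minimal sketch, assuming sharp for image handling; the `maxLongSideByEndpoint` table, its values, and the `resizeToLongSide` helper are illustrative assumptions, not the actual implementation:

```js
// Hypothetical sketch: scale an image so its longest side fits an
// endpoint-specific cap (table values are illustrative assumptions).
const sharp = require('sharp');

const maxLongSideByEndpoint = {
  anthropic: 1568,
  openAI: 2048,
};

async function resizeToLongSide(inputPath, endpoint) {
  const maxLongSide = maxLongSideByEndpoint[endpoint] ?? 1024;
  const image = sharp(inputPath);
  const { width, height } = await image.metadata();
  // Only downscale; never upscale smaller images.
  if (Math.max(width, height) <= maxLongSide) {
    return image.toBuffer();
  }
  const scale = maxLongSide / Math.max(width, height);
  return image
    .resize(Math.round(width * scale), Math.round(height * scale))
    .toBuffer();
}
```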
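
On the whitespace-safety commit: Anthropic's API rejects a request whose final assistant message ends in trailing whitespace, so content has to be trimmed defensively before sending. A minimal sketch of that kind of guard (the `sanitizeMessages` helper is hypothetical):

```js
// Hypothetical sketch: Anthropic rejects a final assistant message whose
// content ends with trailing whitespace, so trim defensively.
function sanitizeMessages(messages) {
  return messages.map((message, i) => {
    if (typeof message.content !== 'string') {
      return message;
    }
    const isLastAssistant = message.role === 'assistant' && i === messages.length - 1;
    return isLastAssistant
      ? { ...message, content: message.content.trimEnd() }
      : message;
  });
}
```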
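
The validateVisionModel refactor moves from positional arguments to a single options object, so callers can pass extra and available model lists. A sketch under that assumption, with an illustrative `visionModels` list (the exact field names are assumptions):

```js
// Hypothetical sketch of the object-argument form: a model counts as a
// vision model only if it matches a known vision model name and, when an
// availableModels list is provided, is actually available.
const visionModels = ['gpt-4-vision', 'claude-3']; // illustrative list

function validateVisionModel({ model, additionalModels = [], availableModels }) {
  if (!model) {
    return false;
  }
  if (availableModels && !availableModels.includes(model)) {
    return false;
  }
  return visionModels
    .concat(additionalModels)
    .some((visionModel) => model.includes(visionModel));
}
```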
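
The useSSE fix guards against clobbering a model the user picked while a request was still in flight: the response's model is written back into conversation state only if the selection hasn't changed since submission. A rough sketch, assuming a React-style state setter (the `onFinalEvent` signature and state shape are hypothetical):

```js
// Hypothetical sketch: when the final SSE event arrives, keep the user's
// newer selection instead of resetting it to the model from the request.
function onFinalEvent(data, { submissionModel, setConversation }) {
  setConversation((prev) => {
    // If the model changed while the request was in flight, keep it.
    const model = prev.model !== submissionModel ? prev.model : data.conversation.model;
    return { ...prev, ...data.conversation, model };
  });
}
```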

Author: Danny Avila
Date: 2024-03-06 00:04:52 -05:00 (committed by GitHub)
Parent: b023c5683d
Commit: 8263ddda3f
28 changed files with 599 additions and 115 deletions

api/server/middleware/buildEndpointOption.js

@@ -1,11 +1,12 @@
 const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
+const { getModelsConfig } = require('~/server/controllers/ModelController');
 const { processFiles } = require('~/server/services/Files/process');
 const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
 const anthropic = require('~/server/services/Endpoints/anthropic');
+const assistant = require('~/server/services/Endpoints/assistant');
 const openAI = require('~/server/services/Endpoints/openAI');
 const custom = require('~/server/services/Endpoints/custom');
 const google = require('~/server/services/Endpoints/google');
-const assistant = require('~/server/services/Endpoints/assistant');
 
 const buildFunction = {
   [EModelEndpoint.openAI]: openAI.buildOptions,
@@ -17,7 +18,7 @@ const buildFunction = {
   [EModelEndpoint.assistants]: assistant.buildOptions,
 };
 
-function buildEndpointOption(req, res, next) {
+async function buildEndpointOption(req, res, next) {
   const { endpoint, endpointType } = req.body;
   const parsedBody = parseConvo({ endpoint, endpointType, conversation: req.body });
   req.body.endpointOption = buildFunction[endpointType ?? endpoint](
@@ -25,6 +26,10 @@ function buildEndpointOption(req, res, next) {
     parsedBody,
     endpointType,
   );
+
+  const modelsConfig = await getModelsConfig(req);
+  req.body.endpointOption.modelsConfig = modelsConfig;
+
   if (req.body.files) {
     // hold the promise
     req.body.endpointOption.attachments = processFiles(req.body.files);
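
After this middleware runs, each client receives the models list alongside its other options. A hedged sketch of downstream consumption, reusing the `validateVisionModel` sketch above (the endpoint-keyed `modelsConfig` shape and the `resolveVisionModel` helper are assumptions):

```js
// Hypothetical sketch: read modelsConfig off endpointOption and use it to
// decide whether the requested model can be treated as a vision model.
// Assumes modelsConfig maps endpoint name -> array of available model names.
function resolveVisionModel(endpointOption, endpoint) {
  const availableModels = endpointOption.modelsConfig?.[endpoint] ?? [];
  const isValid = validateVisionModel({
    model: endpointOption.model,
    availableModels,
  });
  return isValid ? endpointOption.model : null;
}
```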