Mirror of https://github.com/danny-avila/LibreChat.git
* chore: bump anthropic SDK
* chore: update anthropic config settings (fileSupport, default models)
* feat: anthropic multi modal formatting
* refactor: update vision models and use endpoint specific max long side resizing
* feat(anthropic): multimodal messages, retry logic, and messages payload
* chore: add more safety to trimming content due to whitespace error for assistant messages
* feat(anthropic): token accounting and resending multiple images in progress
* chore: bump data-provider
* feat(anthropic): resendImages feature
* chore: optimize Edit/Ask controllers, switch model back to req model
* fix: false positive of invalid model
* refactor(validateVisionModel): use object as arg, pass in additional/available models
* refactor(validateModel): use helper function, `getModelsConfig`
* feat: add modelsConfig to endpointOption so it gets passed to all clients, use for properly validating vision models
* refactor: initialize default vision model and make sure it's available before assigning it
* refactor(useSSE): avoid resetting model if user selected a new model between request and response
* feat: show rate in transaction logging
* fix: return tokenCountMap regardless of payload shape
40 lines
1.6 KiB
JavaScript
const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
const { getModelsConfig } = require('~/server/controllers/ModelController');
const { processFiles } = require('~/server/services/Files/process');
const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
const anthropic = require('~/server/services/Endpoints/anthropic');
const assistant = require('~/server/services/Endpoints/assistant');
const openAI = require('~/server/services/Endpoints/openAI');
const custom = require('~/server/services/Endpoints/custom');
const google = require('~/server/services/Endpoints/google');

/** Maps each endpoint to its `buildOptions` function; Azure OpenAI reuses the OpenAI builder. */
const buildFunction = {
  [EModelEndpoint.openAI]: openAI.buildOptions,
  [EModelEndpoint.google]: google.buildOptions,
  [EModelEndpoint.custom]: custom.buildOptions,
  [EModelEndpoint.azureOpenAI]: openAI.buildOptions,
  [EModelEndpoint.anthropic]: anthropic.buildOptions,
  [EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
  [EModelEndpoint.assistants]: assistant.buildOptions,
};

/**
 * Express middleware that parses the request body into `req.body.endpointOption`,
 * attaches the models config, and kicks off file processing for attachments.
 */
async function buildEndpointOption(req, res, next) {
  const { endpoint, endpointType } = req.body;
  const parsedBody = parseConvo({ endpoint, endpointType, conversation: req.body });
  // `endpointType` (set for custom-compatible endpoints) takes precedence over the endpoint name
  req.body.endpointOption = buildFunction[endpointType ?? endpoint](
    endpoint,
    parsedBody,
    endpointType,
  );

  // Attach the models config so it reaches all clients, e.g. for validating vision models
  const modelsConfig = await getModelsConfig(req);
  req.body.endpointOption.modelsConfig = modelsConfig;

  if (req.body.files) {
    // Hold the unresolved promise; downstream consumers await it when the attachments are needed
    req.body.endpointOption.attachments = processFiles(req.body.files);
  }

  next();
}

module.exports = buildEndpointOption;
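
For context, here is a minimal sketch of how this middleware might be wired into an Express route. The route path, the `AskController`-style handler, and the assumption that the `~` module alias resolves are all illustrative, not taken from this file:

// Hypothetical wiring — the route path and handler below are placeholders for illustration.
const express = require('express');
const buildEndpointOption = require('~/server/middleware/buildEndpointOption');

const router = express.Router();

// buildEndpointOption runs before the handler, so the handler can rely on
// req.body.endpointOption (including modelsConfig and the pending attachments promise).
router.post('/:endpoint', buildEndpointOption, async (req, res) => {
  const { endpointOption } = req.body;
  // attachments, if present, is the still-pending promise returned by processFiles
  const files = endpointOption.attachments ? await endpointOption.attachments : [];
  res.json({ endpoint: req.params.endpoint, fileCount: files.length });
});

module.exports = router;

Storing the unresolved `processFiles` promise rather than awaiting it lets file processing overlap with the rest of request setup; whichever consumer needs the attachments awaits them later.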