Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-09-22 06:00:56 +02:00

* feat: update PaLM icons
* feat: add additional google models
* POC: formatting inputs for Vertex AI streaming
* refactor: move endpoints services outside of /routes dir to /services/Endpoints
* refactor: shorten schemas import
* refactor: rename PALM to GOOGLE
* feat: make Google editable endpoint
* feat: reusable Ask and Edit controllers based off Anthropic
* chore: organize imports/logic
* fix(parseConvo): include examples in googleSchema
* fix: google only allows odd number of messages to be sent
* fix: pass proxy to AnthropicClient
* refactor: change `google` altName to `Google`
* refactor: update getModelMaxTokens and related functions to handle maxTokensMap with nested endpoint model key/values
* refactor: google Icon and response sender changes (Codey and Google logo instead of PaLM in all cases)
* feat: google support for maxTokensMap
* feat: google updated endpoints with Ask/Edit controllers, buildOptions, and initializeClient
* feat(GoogleClient): now builds prompt for text models and supports real streaming from Vertex AI through langchain
* chore(GoogleClient): remove comments, left before for reference in git history
* docs: update google instructions (WIP)
* docs(apis_and_tokens.md): add images to google instructions
* docs: remove typo apis_and_tokens.md
* Update apis_and_tokens.md
* feat(Google): use default settings map, fully support context for both text and chat models, fully support examples for chat models
* chore: update more PaLM references to Google
* chore: move playwright out of workflows to avoid failing tests
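Several items above refer to getModelMaxTokens and maxTokensMap now being keyed first by endpoint and then by model. A minimal sketch of that nested shape, assuming hypothetical model names and token values (the actual map and helper in the repository may differ):

// Illustrative only: the endpoint/model nesting mirrors the refactor described
// above, but these model names and limits are assumptions, not the project's values.
const maxTokensMap = {
  google: {
    'chat-bison': 4096,
    'text-bison': 8192,
  },
  anthropic: {
    'claude-2': 100000,
  },
};

// Hypothetical lookup in the spirit of getModelMaxTokens(modelName, endpoint):
function getModelMaxTokens(modelName, endpoint) {
  const endpointMap = maxTokensMap[endpoint];
  return endpointMap ? endpointMap[modelName] : undefined;
}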
42 lines · 1.1 KiB · JavaScript
const express = require('express');
const openAI = require('./openAI');
const google = require('./google');
const anthropic = require('./anthropic');
const gptPlugins = require('./gptPlugins');
const { isEnabled } = require('~/server/utils');
const { EModelEndpoint } = require('~/server/services/Endpoints');
const {
  checkBan,
  uaParser,
  requireJwtAuth,
  messageIpLimiter,
  concurrentLimiter,
  messageUserLimiter,
} = require('~/server/middleware');

const { LIMIT_CONCURRENT_MESSAGES, LIMIT_MESSAGE_IP, LIMIT_MESSAGE_USER } = process.env ?? {};

const router = express.Router();

router.use(requireJwtAuth);
router.use(checkBan);
router.use(uaParser);

if (isEnabled(LIMIT_CONCURRENT_MESSAGES)) {
  router.use(concurrentLimiter);
}

if (isEnabled(LIMIT_MESSAGE_IP)) {
  router.use(messageIpLimiter);
}

if (isEnabled(LIMIT_MESSAGE_USER)) {
  router.use(messageUserLimiter);
}

router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], openAI);
router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins);
router.use(`/${EModelEndpoint.anthropic}`, anthropic);
router.use(`/${EModelEndpoint.google}`, google);

module.exports = router;
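For context, the three message limiters above are attached only when their corresponding environment flags are enabled. A minimal sketch of how this router might be configured and mounted, assuming a hypothetical app entry point and mount path (the real wiring in LibreChat's server setup may differ):

// .env flags assumed to be simple 'true'/'false' strings read by isEnabled:
//   LIMIT_CONCURRENT_MESSAGES=true
//   LIMIT_MESSAGE_IP=true
//   LIMIT_MESSAGE_USER=false

const express = require('express');
const ask = require('./routes/ask'); // the router defined in this file (path is an assumption)

const app = express();
app.use('/api/ask', ask); // mount path is an assumption, not confirmed by this file
app.listen(3080);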