LibreChat/api/server/middleware/denyRequest.js
Danny Avila 583e978a82
feat(Google): Support all Text/Chat Models, Response streaming, PaLM -> Google 🤖 (#1316)
* feat: update PaLM icons

* feat: add additional google models

* POC: formatting inputs for Vertex AI streaming

* refactor: move endpoints services outside of /routes dir to /services/Endpoints

* refactor: shorten schemas import

* refactor: rename PALM to GOOGLE

* feat: make Google editable endpoint

* feat: reusable Ask and Edit controllers based off Anthropic

* chore: organize imports/logic

* fix(parseConvo): include examples in googleSchema

* fix: google only allows odd number of messages to be sent

* fix: pass proxy to AnthropicClient

* refactor: change `google` altName to `Google`

* refactor: update getModelMaxTokens and related functions to handle maxTokensMap with nested endpoint model key/values

* refactor: google Icon and response sender changes (Codey and Google logo instead of PaLM in all cases)

* feat: google support for maxTokensMap

* feat: google updated endpoints with Ask/Edit controllers, buildOptions, and initializeClient

* feat(GoogleClient): now builds prompt for text models and supports real streaming from Vertex AI through langchain

* chore(GoogleClient): remove comments, left before for reference in git history

* docs: update google instructions (WIP)

* docs(apis_and_tokens.md): add images to google instructions

* docs: remove typo apis_and_tokens.md

* Update apis_and_tokens.md

* feat(Google): use default settings map, fully support context for both text and chat models, fully support examples for chat models

* chore: update more PaLM references to Google

* chore: move playwright out of workflows to avoid failing tests
2023-12-10 14:54:13 -05:00


const crypto = require('crypto');
const { saveMessage } = require('~/models');
const { sendMessage, sendError } = require('~/server/utils');
const { getResponseSender } = require('~/server/services/Endpoints');

/**
 * Denies a request by sending an error message and optionally saving the user's message.
 *
 * @async
 * @function
 * @param {Object} req - Express request object.
 * @param {Object} req.body - The body of the request.
 * @param {string} [req.body.messageId] - The ID of the message.
 * @param {string} [req.body.conversationId] - The ID of the conversation.
 * @param {string} [req.body.parentMessageId] - The ID of the parent message.
 * @param {string} req.body.text - The text of the message.
 * @param {Object} res - Express response object.
 * @param {string|Object} errorMessage - The error message to be sent; objects are JSON-stringified.
 * @returns {Promise<Object>} A promise that resolves with the error response.
 * @throws {Error} Throws an error if there's an issue saving the message or sending the error.
 */
const denyRequest = async (req, res, errorMessage) => {
  let responseText = errorMessage;
  if (typeof errorMessage === 'object') {
    responseText = JSON.stringify(errorMessage);
  }

  const { messageId, conversationId: _convoId, parentMessageId, text } = req.body;
  const conversationId = _convoId ?? crypto.randomUUID();

  // Echo the user's message back to the client so the UI can render it right away.
  const userMessage = {
    sender: 'User',
    messageId: messageId ?? crypto.randomUUID(),
    parentMessageId,
    conversationId,
    isCreatedByUser: true,
    text,
  };
  sendMessage(res, { message: userMessage, created: true });

  // Only persist the user message for an existing conversation; the all-zeros UUID
  // marks the parent of a conversation's first message, so nothing is saved in that case.
  const shouldSaveMessage =
    _convoId && parentMessageId && parentMessageId !== '00000000-0000-0000-0000-000000000000';

  if (shouldSaveMessage) {
    await saveMessage({ ...userMessage, user: req.user.id });
  }

  return await sendError(res, {
    sender: getResponseSender(req.body),
    messageId: crypto.randomUUID(),
    conversationId,
    parentMessageId: userMessage.messageId,
    text: responseText,
    shouldSaveMessage,
    user: req.user.id,
  });
};

module.exports = denyRequest;
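
/**
 * Usage sketch (illustrative, not part of the original module): a hypothetical
 * route-level check that calls denyRequest to reject a request before it reaches
 * an endpoint controller. The middleware name, validation rule, error text, and
 * route shown below are assumptions for demonstration only.
 *
 * @example
 * const denyRequest = require('~/server/middleware/denyRequest');
 *
 * const requireText = async (req, res, next) => {
 *   const { text } = req.body ?? {};
 *   if (typeof text !== 'string' || text.trim().length === 0) {
 *     // Streams the user message back, sends the error payload, and ends the response.
 *     return await denyRequest(req, res, 'Prompt text is required.');
 *   }
 *   return next();
 * };
 *
 * // e.g. router.post('/ask/:endpoint', requireText, askController);
 */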