👮 feat: moderation text (#1388)

* fixed some bugs and improved error handling

* feat: plugins support

* fix: prettier error message

* moved circular-json-es6 into /api

* docs: added openai moderation text

* fix(gptPlugins): incorrect merge

* discarded changes

* removed circular-json-es6
Marco Beretta 2024-01-01 21:08:02 +01:00 committed by GitHub
parent 1cd5fdf4f0
commit c7306395e9
9 changed files with 81 additions and 3 deletions

View file

@ -188,6 +188,10 @@ MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
#========================#
# Moderation             #
#========================#
OPENAI_MODERATION=false
OPENAI_MODERATION_API_KEY=
# OPENAI_MODERATION_REVERSE_PROXY=not working with some reverse proxies
BAN_VIOLATIONS=true
BAN_DURATION=1000 * 60 * 60 * 2
BAN_INTERVAL=20

View file

@ -12,6 +12,7 @@ const concurrentLimiter = require('./concurrentLimiter');
const validateMessageReq = require('./validateMessageReq');
const buildEndpointOption = require('./buildEndpointOption');
const validateRegistration = require('./validateRegistration');
const moderateText = require('./moderateText');
const noIndex = require('./noIndex');
module.exports = {
@ -29,5 +30,6 @@ module.exports = {
  validateMessageReq,
  buildEndpointOption,
  validateRegistration,
  moderateText,
  noIndex,
};

View file

@ -0,0 +1,39 @@
const axios = require('axios');
const denyRequest = require('./denyRequest');

/**
 * Express middleware that screens incoming message text against the OpenAI
 * moderation endpoint before the request reaches the endpoint handlers.
 * Skipped entirely unless OPENAI_MODERATION is set to 'true'.
 */
async function moderateText(req, res, next) {
  if (process.env.OPENAI_MODERATION === 'true') {
    try {
      const { text } = req.body;
      const response = await axios.post(
        process.env.OPENAI_MODERATION_REVERSE_PROXY || 'https://api.openai.com/v1/moderations',
        {
          input: text,
        },
        {
          headers: {
            'Content-Type': 'application/json',
            Authorization: `Bearer ${process.env.OPENAI_MODERATION_API_KEY}`,
          },
        },
      );

      const results = response.data.results;
      // The API returns one result per input; deny the request if any is flagged.
      const flagged = results.some((result) => result.flagged);

      if (flagged) {
        const type = 'moderation';
        const errorMessage = { type };
        return await denyRequest(req, res, errorMessage);
      }
    } catch (error) {
      // Fail closed: if the moderation check itself errors, deny the request.
      console.error('Error in moderateText:', error);
      const errorMessage = 'error in moderation check';
      return await denyRequest(req, res, errorMessage);
    }
  }
  next();
}

module.exports = moderateText;
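For reference, the `results.some((result) => result.flagged)` check works because the moderation endpoint returns one result object per input, each carrying a boolean `flagged` field. A trimmed illustration of the response shape the middleware expects — the IDs, category names, and scores below are made up for the example:

```js
// Illustrative response body from POST /v1/moderations (values are made up).
const exampleResponseData = {
  id: 'modr-abc123',
  model: 'text-moderation-latest',
  results: [
    {
      flagged: true,
      categories: { harassment: true, violence: false },
      category_scores: { harassment: 0.91, violence: 0.02 },
    },
  ],
};

// The middleware only inspects the boolean `flagged` fields:
const flagged = exampleResponseData.results.some((result) => result.flagged);
console.log(flagged); // true
```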

View file

@ -13,9 +13,11 @@ const {
  setHeaders,
  validateEndpoint,
  buildEndpointOption,
  moderateText,
} = require('~/server/middleware');
const { logger } = require('~/config');
router.use(moderateText);
router.post('/abort', handleAbort());
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res) => {

View file

@ -6,10 +6,11 @@ const {
  setHeaders,
  validateEndpoint,
  buildEndpointOption,
  moderateText,
} = require('~/server/middleware');
const router = express.Router();
router.use(moderateText);
router.post('/abort', handleAbort());
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {

View file

@ -12,9 +12,11 @@ const {
  setHeaders,
  validateEndpoint,
  buildEndpointOption,
  moderateText,
} = require('~/server/middleware');
const { logger } = require('~/config');
router.use(moderateText);
router.post('/abort', handleAbort());
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res) => {

View file

@ -6,10 +6,11 @@ const {
  setHeaders,
  validateEndpoint,
  buildEndpointOption,
  moderateText,
} = require('~/server/middleware');
const router = express.Router();
router.use(moderateText);
router.post('/abort', handleAbort());
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {

View file

@ -29,6 +29,8 @@ const errorMessages = {
    'Invalid API key. Please check your API key and try again. You can do this by clicking on the model logo in the left corner of the textbox and selecting "Set Token" for the current selected endpoint. Thank you for your understanding.',
  insufficient_quota:
    'We apologize for any inconvenience caused. The default API key has reached its limit. To continue using this service, please set up your own API key. You can do this by clicking on the model logo in the left corner of the textbox and selecting "Set Token" for the current selected endpoint. Thank you for your understanding.',
  moderation:
    'It appears that the content submitted has been flagged by our moderation system for not aligning with our community guidelines. We\'re unable to proceed with this specific topic. If you have any other questions or topics you\'d like to explore, please edit your message, or create a new conversation.',
  concurrent: (json: TConcurrent) => {
    const { limit } = json;
    const plural = limit > 1 ? 's' : '';
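As a hypothetical illustration (not the actual client code), the `type` sent by `denyRequest` — `{ type: 'moderation' }` from the middleware above — is what selects the matching entry in this `errorMessages` map. A self-contained sketch of that lookup, with trimmed stand-in entries; `getErrorText` and the concurrent message text are illustrative only:

```js
// Hypothetical sketch only — not the actual client component.
// Entries may be plain strings or functions of the error payload,
// mirroring the errorMessages map shown in the diff above.
const errorMessages = {
  moderation: 'It appears that the content submitted has been flagged by our moderation system...', // trimmed
  concurrent: ({ limit }) => `Illustrative concurrent-limit message (limit: ${limit}).`,
};

const getErrorText = (errorPayload) => {
  const entry = errorMessages[errorPayload.type];
  return typeof entry === 'function' ? entry(errorPayload) : entry;
};

// A request denied by moderateText would surface the moderation message:
console.log(getErrorText({ type: 'moderation' }));
```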

View file

@ -70,3 +70,28 @@ LIMIT_MESSAGE_USER=false # Whether to limit the amount of messages an IP can sen
MESSAGE_USER_MAX=40 # The max amount of messages an IP can send per MESSAGE_USER_WINDOW
MESSAGE_USER_WINDOW=1 # in minutes, determines the window of time for MESSAGE_USER_MAX messages
```

## OpenAI Moderation Text

### OPENAI_MODERATION

Enable or disable OpenAI text moderation.

Values:
- `true`: OpenAI moderation is enabled
- `false`: OpenAI moderation is disabled

### OPENAI_MODERATION_API_KEY

Specify the OpenAI API key to use for moderation requests.

### OPENAI_MODERATION_REVERSE_PROXY

Optionally specify an alternative endpoint (reverse proxy) for moderation requests. If left unset, the default `https://api.openai.com/v1/moderations` endpoint is used. Note that this may not work with some reverse proxies.

```bash
OPENAI_MODERATION=true
OPENAI_MODERATION_API_KEY=sk-1234
# OPENAI_MODERATION_REVERSE_PROXY=
```
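Note that, as the middleware shows, `OPENAI_MODERATION_REVERSE_PROXY` holds an alternate endpoint URL rather than a boolean flag. A minimal sketch of how the three values are resolved at runtime, mirroring the `moderateText` code earlier in this commit:

```js
// Minimal sketch mirroring how moderateText reads these variables.
const moderationEnabled = process.env.OPENAI_MODERATION === 'true';

// When set, the reverse proxy value replaces the default moderation endpoint.
const moderationEndpoint =
  process.env.OPENAI_MODERATION_REVERSE_PROXY || 'https://api.openai.com/v1/moderations';

const moderationApiKey = process.env.OPENAI_MODERATION_API_KEY;
```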