const { z } = require('zod');

const EModelEndpoint = {
  azureOpenAI: 'azureOpenAI',
  openAI: 'openAI',
  bingAI: 'bingAI',
  chatGPTBrowser: 'chatGPTBrowser',
  google: 'google',
  gptPlugins: 'gptPlugins',
  anthropic: 'anthropic',
};

const eModelEndpointSchema = z.nativeEnum(EModelEndpoint);
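
// Illustrative note (not part of the original module): z.nativeEnum restricts a
// value to the members of EModelEndpoint, so unknown endpoint names fail
// validation. For example:
//
//   eModelEndpointSchema.parse('openAI');   // => 'openAI'
//   eModelEndpointSchema.parse('unknown');  // throws a ZodError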

const tPluginAuthConfigSchema = z.object({
  authField: z.string(),
  label: z.string(),
  description: z.string(),
});

const tPluginSchema = z.object({
  name: z.string(),
  pluginKey: z.string(),
  description: z.string(),
  icon: z.string(),
  authConfig: z.array(tPluginAuthConfigSchema),
  authenticated: z.boolean().optional(),
  isButton: z.boolean().optional(),
});

const tExampleSchema = z.object({
  input: z.object({
    content: z.string(),
  }),
  output: z.object({
    content: z.string(),
  }),
});

const tAgentOptionsSchema = z.object({
  agent: z.string(),
  skipCompletion: z.boolean(),
  model: z.string(),
  temperature: z.number(),
});

const tConversationSchema = z.object({
  conversationId: z.string().nullable(),
  title: z.string(),
  user: z.string().optional(),
  endpoint: eModelEndpointSchema.nullable(),
  suggestions: z.array(z.string()).optional(),
  messages: z.array(z.string()).optional(),
  tools: z.array(tPluginSchema).optional(),
  createdAt: z.string(),
  updatedAt: z.string(),
  systemMessage: z.string().nullable().optional(),
  modelLabel: z.string().nullable().optional(),
  examples: z.array(tExampleSchema).optional(),
  chatGptLabel: z.string().nullable().optional(),
  userLabel: z.string().optional(),
  model: z.string().nullable().optional(),
  promptPrefix: z.string().nullable().optional(),
  temperature: z.number().optional(),
  topP: z.number().optional(),
  topK: z.number().optional(),
  context: z.string().nullable().optional(),
  top_p: z.number().optional(),
  frequency_penalty: z.number().optional(),
  presence_penalty: z.number().optional(),
  jailbreak: z.boolean().optional(),
  jailbreakConversationId: z.string().nullable().optional(),
  conversationSignature: z.string().nullable().optional(),
  parentMessageId: z.string().optional(),
  clientId: z.string().nullable().optional(),
  invocationId: z.number().nullable().optional(),
  toneStyle: z.string().nullable().optional(),
  maxOutputTokens: z.number().optional(),
  agentOptions: tAgentOptionsSchema.nullable().optional(),
});
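
// Illustrative example (not part of the original module): a minimal object that
// satisfies tConversationSchema. Only conversationId, title, endpoint,
// createdAt, and updatedAt are required; every other field is optional.
//
//   tConversationSchema.parse({
//     conversationId: null,
//     title: 'New Chat',
//     endpoint: 'openAI',
//     createdAt: new Date().toISOString(),
//     updatedAt: new Date().toISOString(),
//   });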

const openAISchema = tConversationSchema
  .pick({
    model: true,
    chatGptLabel: true,
    promptPrefix: true,
    temperature: true,
    top_p: true,
    presence_penalty: true,
    frequency_penalty: true,
  })
  .transform((obj) => ({
    ...obj,
    model: obj.model ?? 'gpt-3.5-turbo',
    chatGptLabel: obj.chatGptLabel ?? null,
    promptPrefix: obj.promptPrefix ?? null,
    temperature: obj.temperature ?? 1,
    top_p: obj.top_p ?? 1,
    presence_penalty: obj.presence_penalty ?? 0,
    frequency_penalty: obj.frequency_penalty ?? 0,
  }))
  .catch(() => ({
    model: 'gpt-3.5-turbo',
    chatGptLabel: null,
    promptPrefix: null,
    temperature: 1,
    top_p: 1,
    presence_penalty: 0,
    frequency_penalty: 0,
  }));
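
// Illustrative example (not part of the original module): the .transform step
// fills in defaults for any picked field that is missing, and .catch falls back
// to a full default object if validation fails entirely.
//
//   openAISchema.parse({ temperature: 0.5 });
//   // => { temperature: 0.5, model: 'gpt-3.5-turbo', chatGptLabel: null,
//   //      promptPrefix: null, top_p: 1, presence_penalty: 0, frequency_penalty: 0 }
//
//   openAISchema.parse({ temperature: 'hot' });
//   // => the full default object supplied to .catch()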

const googleSchema = tConversationSchema
  .pick({
    model: true,
    modelLabel: true,
    promptPrefix: true,
    examples: true,
    temperature: true,
    maxOutputTokens: true,
    topP: true,
    topK: true,
  })
  .transform((obj) => ({
    ...obj,
    model: obj.model ?? 'chat-bison',
    modelLabel: obj.modelLabel ?? null,
    promptPrefix: obj.promptPrefix ?? null,
    temperature: obj.temperature ?? 0.2,
    maxOutputTokens: obj.maxOutputTokens ?? 1024,
    topP: obj.topP ?? 0.95,
    topK: obj.topK ?? 40,
  }))
  .catch(() => ({
    model: 'chat-bison',
    modelLabel: null,
    promptPrefix: null,
    temperature: 0.2,
    maxOutputTokens: 1024,
    topP: 0.95,
    topK: 40,
  }));

const bingAISchema = tConversationSchema
  .pick({
    jailbreak: true,
    systemMessage: true,
    context: true,
    toneStyle: true,
    jailbreakConversationId: true,
    conversationSignature: true,
    clientId: true,
    invocationId: true,
  })
  .transform((obj) => ({
    ...obj,
    model: '',
    jailbreak: obj.jailbreak ?? false,
    systemMessage: obj.systemMessage ?? null,
    context: obj.context ?? null,
    toneStyle: obj.toneStyle ?? 'creative',
    jailbreakConversationId: obj.jailbreakConversationId ?? null,
    conversationSignature: obj.conversationSignature ?? null,
    clientId: obj.clientId ?? null,
    invocationId: obj.invocationId ?? 1,
  }))
  .catch(() => ({
    model: '',
    jailbreak: false,
    systemMessage: null,
    context: null,
    toneStyle: 'creative',
    jailbreakConversationId: null,
    conversationSignature: null,
    clientId: null,
    invocationId: 1,
  }));

const anthropicSchema = tConversationSchema
  .pick({
    model: true,
    modelLabel: true,
    promptPrefix: true,
    temperature: true,
    maxOutputTokens: true,
    topP: true,
    topK: true,
  })
  .transform((obj) => ({
    ...obj,
    model: obj.model ?? 'claude-1',
    modelLabel: obj.modelLabel ?? null,
    promptPrefix: obj.promptPrefix ?? null,
    temperature: obj.temperature ?? 1,
    maxOutputTokens: obj.maxOutputTokens ?? 1024,
    topP: obj.topP ?? 0.7,
    topK: obj.topK ?? 5,
  }))
  .catch(() => ({
    model: 'claude-1',
    modelLabel: null,
    promptPrefix: null,
    temperature: 1,
    maxOutputTokens: 1024,
    topP: 0.7,
    topK: 5,
  }));

const chatGPTBrowserSchema = tConversationSchema
  .pick({
    model: true,
  })
  .transform((obj) => ({
    ...obj,
    model: obj.model ?? 'text-davinci-002-render-sha',
  }))
  .catch(() => ({
    model: 'text-davinci-002-render-sha',
  }));

const gptPluginsSchema = tConversationSchema
  .pick({
    model: true,
    chatGptLabel: true,
    promptPrefix: true,
    temperature: true,
    top_p: true,
    presence_penalty: true,
    frequency_penalty: true,
    tools: true,
    agentOptions: true,
  })
  .transform((obj) => ({
    ...obj,
    model: obj.model ?? 'gpt-3.5-turbo',
    chatGptLabel: obj.chatGptLabel ?? null,
    promptPrefix: obj.promptPrefix ?? null,
    temperature: obj.temperature ?? 0.8,
    top_p: obj.top_p ?? 1,
    presence_penalty: obj.presence_penalty ?? 0,
    frequency_penalty: obj.frequency_penalty ?? 0,
    tools: obj.tools ?? [],
    agentOptions: obj.agentOptions ?? {
      agent: 'functions',
      skipCompletion: true,
      model: 'gpt-3.5-turbo',
      temperature: 0,
    },
  }))
  .catch(() => ({
    model: 'gpt-3.5-turbo',
    chatGptLabel: null,
    promptPrefix: null,
    temperature: 0.8,
    top_p: 1,
    presence_penalty: 0,
    frequency_penalty: 0,
    tools: [],
    agentOptions: {
      agent: 'functions',
      skipCompletion: true,
      model: 'gpt-3.5-turbo',
      temperature: 0,
    },
  }));
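
// Illustrative note (not part of the original module): gptPluginsSchema follows
// the same pattern as openAISchema, but additionally defaults tools to an empty
// array and agentOptions to a functions agent. For example:
//
//   gptPluginsSchema.parse({}).agentOptions;
//   // => { agent: 'functions', skipCompletion: true, model: 'gpt-3.5-turbo', temperature: 0 }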

const endpointSchemas = {
  openAI: openAISchema,
  azureOpenAI: openAISchema,
  google: googleSchema,
  bingAI: bingAISchema,
  anthropic: anthropicSchema,
  chatGPTBrowser: chatGPTBrowserSchema,
  gptPlugins: gptPluginsSchema,
};

function getFirstDefinedValue(possibleValues) {
  let returnValue;
  for (const value of possibleValues) {
    if (value) {
      returnValue = value;
      break;
    }
  }
  return returnValue;
}
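
// Illustrative note (not part of the original module): getFirstDefinedValue
// returns the first truthy entry, so falsy-but-defined values such as 0 or ''
// are skipped. For example:
//
//   getFirstDefinedValue(['', null, 'gpt-4']);  // => 'gpt-4'
//   getFirstDefinedValue([null, undefined]);    // => undefined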

const parseConvo = (endpoint, conversation, possibleValues) => {
  const schema = endpointSchemas[endpoint];

  if (!schema) {
    throw new Error(`Unknown endpoint: ${endpoint}`);
  }

  const convo = schema.parse(conversation);

  if (possibleValues && convo) {
    convo.model = getFirstDefinedValue(possibleValues.model) ?? convo.model;
  }

  return convo;
};
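
// Illustrative usage (not part of the original module): parseConvo validates a
// raw conversation against the schema for its endpoint, applying defaults, and
// an optional possibleValues.model array can override the parsed model.
//
//   const convo = parseConvo('openAI', { temperature: 0.7 }, { model: ['gpt-4'] });
//   // => { temperature: 0.7, model: 'gpt-4', chatGptLabel: null, ... }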

const getResponseSender = (endpointOption) => {
  const { endpoint, chatGptLabel, modelLabel, jailbreak } = endpointOption;

  if (['openAI', 'azureOpenAI', 'gptPlugins', 'chatGPTBrowser'].includes(endpoint)) {
    return chatGptLabel ?? 'ChatGPT';
  }

  if (endpoint === 'bingAI') {
    return jailbreak ? 'Sydney' : 'BingAI';
  }

  if (endpoint === 'anthropic') {
    return modelLabel ?? 'Anthropic';
  }

  if (endpoint === 'google') {
    return modelLabel ?? 'PaLM2';
  }

  return '';
};
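
// Illustrative usage (not part of the original module):
//
//   getResponseSender({ endpoint: 'openAI' });                          // => 'ChatGPT'
//   getResponseSender({ endpoint: 'bingAI', jailbreak: true });         // => 'Sydney'
//   getResponseSender({ endpoint: 'anthropic', modelLabel: 'Claude' }); // => 'Claude'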

module.exports = {
  parseConvo,
  getResponseSender,
};