🐛 fix: Prevent Empty File Uploads & Assistants Fixes (#2611)

* chore: update default models for openai/assistants

* fix: allows assistants models fetching

* change default models order, ensure assistant_id is defined if intended

* fix: prevent empty files from being uploaded
This commit is contained in:
Danny Avila 2024-05-03 12:49:26 -04:00 committed by GitHub
parent a0288f1c5c
commit c8baceac76
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 24 additions and 7 deletions

View file

@ -514,6 +514,10 @@ function filterFile({ req, file, image }) {
throw new Error('No file_id provided');
}
if (file.size === 0) {
throw new Error('Empty file uploaded');
}
/* parse to validate api call, throws error on fail */
isUUID.parse(file_id);

View file

@ -141,6 +141,7 @@ const fetchModels = async ({
* @param {object} opts - The options for fetching the models.
* @param {string} opts.user - The user ID to send to the API.
* @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
* @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants API.
* @param {boolean} [opts.plugins=false] - Whether to fetch models from the plugins.
* @param {string[]} [_models=[]] - The models to use as a fallback.
*/
@ -150,7 +151,10 @@ const fetchOpenAIModels = async (opts, _models = []) => {
const openaiBaseURL = 'https://api.openai.com/v1';
let baseURL = openaiBaseURL;
let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY;
if (opts.azure) {
if (opts.assistants && process.env.ASSISTANTS_BASE_URL) {
reverseProxyUrl = process.env.ASSISTANTS_BASE_URL;
} else if (opts.azure) {
return models;
// const azure = getAzureCredentials();
// baseURL = (genAzureChatCompletion(azure))
@ -245,10 +249,6 @@ const getOpenAIModels = async (opts) => {
return models;
}
if (opts.assistants) {
return models;
}
return await fetchOpenAIModels(opts, models);
};

View file

@ -163,6 +163,10 @@ const useFileHandling = (params?: UseFileHandling) => {
const validateFiles = (fileList: File[]) => {
const existingFiles = Array.from(files.values());
const incomingTotalSize = fileList.reduce((total, file) => total + file.size, 0);
if (incomingTotalSize === 0) {
setError('Empty files are not allowed.');
return false;
}
const currentTotalSize = existingFiles.reduce((total, file) => total + file.size, 0);
if (fileList.length + files.size > fileLimit) {

View file

@ -64,6 +64,11 @@ const buildDefaultConvo = ({
endpoint,
};
// Ensures assistant_id is always defined
if (endpoint === EModelEndpoint.assistants && !defaultConvo.assistant_id && convo.assistant_id) {
defaultConvo.assistant_id = convo.assistant_id;
}
defaultConvo.tools = lastConversationSetup?.tools ?? lastSelectedTools ?? defaultConvo.tools;
defaultConvo.jailbreak = jailbreak ?? defaultConvo.jailbreak;
defaultConvo.toneStyle = toneStyle ?? defaultConvo.toneStyle;

View file

@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.5.9",
"version": "0.6.0",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",

View file

@ -345,14 +345,16 @@ export const alternateName = {
export const defaultModels = {
[EModelEndpoint.assistants]: [
'gpt-3.5-turbo',
'gpt-3.5-turbo-0125',
'gpt-4-turbo',
'gpt-4-turbo-2024-04-09',
'gpt-4-0125-preview',
'gpt-4-turbo-preview',
'gpt-4-1106-preview',
'gpt-3.5-turbo-1106',
'gpt-3.5-turbo-16k-0613',
'gpt-3.5-turbo-16k',
'gpt-3.5-turbo',
'gpt-4',
'gpt-4-0314',
'gpt-4-32k-0314',
@ -387,6 +389,8 @@ export const defaultModels = {
],
[EModelEndpoint.openAI]: [
'gpt-3.5-turbo-0125',
'gpt-4-turbo',
'gpt-4-turbo-2024-04-09',
'gpt-3.5-turbo-16k-0613',
'gpt-3.5-turbo-16k',
'gpt-4-turbo-preview',