🗃️ feat: General File Support for OpenAI, Azure, Custom, Anthropic and Google (RAG) (#2143)

* refactor: re-purpose `resendImages` as `resendFiles`

* refactor: re-purpose `resendImages` as `resendFiles`

* feat: upload general files

* feat: embed file during upload

* feat: delete file embeddings on file deletion

* chore(fileConfig): add epub+zip type

* feat(encodeAndFormat): handle non-image files

* feat(createContextHandlers): build context prompt from file attachments and successful RAG

* fix: prevent non-temp files as well as embedded files from being deleted on new conversation

* fix: remove temp_file_id on usage; prevent non-temp files as well as embedded files from being deleted on new conversation

* fix: prevent non-temp files as well as embedded files from being deleted on new conversation

* feat(OpenAI/Anthropic/Google): basic RAG support

* fix: delete `resendFiles` only when true (Default)

* refactor(RAG): update endpoints and pass JWT

* fix(resendFiles): default values

* fix(context/processFile): query unique ids only

* feat: rag-api.yaml

* feat: improved file upload UX for longer uploads

* chore: await embed call and catch embedding errors

* refactor: store augmentedPrompt in Client

* refactor(processFileUpload): throw error if not assistant file upload

* fix(useFileHandling): handle markdown empty mimetype issue

* chore: necessary compose file changes
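
A minimal sketch (not part of this commit) of how the context-handler flow described in the commits above fits together, based on the `createContextHandlers` / `processFile` / `createContext` calls visible in the diff below; the helper's internals and exact signatures are assumed:

```js
// Sketch only: assumes createContextHandlers(req, text) returns an object exposing
// processFile(file) and an async createContext(), as used in OpenAIClient below.
const { createContextHandlers } = require('./prompts');

async function buildAugmentedPrefix(req, latestUserText, attachments, promptPrefix = '') {
  const contextHandlers = createContextHandlers(req, latestUserText);

  for (const file of attachments) {
    if (file.embedded) {
      // Embedded (RAG-indexed) files are queried via the RAG API instead of being re-sent inline.
      contextHandlers.processFile(file);
    }
  }

  // Resolves the queued files into a context block that is prepended to the system instructions.
  const augmentedPrompt = await contextHandlers.createContext();
  return augmentedPrompt + promptPrefix;
}
```
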
Danny Avila, 2024-03-19 20:54:30 -04:00, committed by GitHub
parent af347cccde
commit f7761df52c
38 changed files with 683 additions and 261 deletions


@@ -16,14 +16,13 @@ const {
getModelMaxTokens,
genAzureChatCompletion,
} = require('~/utils');
const { truncateText, formatMessage, createContextHandlers, CUT_OFF_PROMPT } = require('./prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { truncateText, formatMessage, CUT_OFF_PROMPT } = require('./prompts');
const { handleOpenAIErrors } = require('./tools/util');
const spendTokens = require('~/models/spendTokens');
const { createLLM, RunManager } = require('./llm');
const ChatGPTClient = require('./ChatGPTClient');
const { isEnabled } = require('~/server/utils');
const { getFiles } = require('~/models/File');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
const { tokenSplit } = require('./document');
@@ -92,7 +91,7 @@ class OpenAIClient extends BaseClient {
}
this.defaultVisionModel = this.options.visionModel ?? 'gpt-4-vision-preview';
this.checkVisionRequest(this.options.attachments);
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
if (OPENROUTER_API_KEY && !this.azure) {
@@ -223,14 +222,19 @@ class OpenAIClient extends BaseClient {
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
* - Sets `this.isVisionModel` to `true` if vision request.
* - Deletes `this.modelOptions.stop` if vision request.
* @param {Array<Promise<MongoFile[]> | MongoFile[]> | Record<string, MongoFile[]>} attachments
* @param {MongoFile[]} attachments
*/
checkVisionRequest(attachments) {
const availableModels = this.options.modelsConfig?.[this.options.endpoint];
this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
if (attachments && visionModelAvailable && !this.isVisionModel) {
if (
attachments &&
attachments.some((file) => file?.type && file?.type?.includes('image')) &&
visionModelAvailable &&
!this.isVisionModel
) {
this.modelOptions.model = this.defaultVisionModel;
this.isVisionModel = true;
}
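
For illustration only (not part of the diff), the condition above can be read as: switch to the default vision model only when at least one attachment is an image. A simplified, hypothetical helper:

```js
// Hypothetical helper mirroring the guard above; names are illustrative.
const hasImageAttachment = (attachments = []) =>
  attachments.some((file) => file?.type?.includes('image'));

// hasImageAttachment([{ type: 'application/pdf' }]) -> false (no switch to the vision model)
// hasImageAttachment([{ type: 'image/png' }])       -> true  (switch to the vision model)
```
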
@@ -366,7 +370,7 @@ class OpenAIClient extends BaseClient {
return {
chatGptLabel: this.options.chatGptLabel,
promptPrefix: this.options.promptPrefix,
resendImages: this.options.resendImages,
resendFiles: this.options.resendFiles,
imageDetail: this.options.imageDetail,
...this.modelOptions,
};
@@ -380,54 +384,6 @@ class OpenAIClient extends BaseClient {
};
}
/**
*
* @param {TMessage[]} _messages
* @returns {TMessage[]}
*/
async addPreviousAttachments(_messages) {
if (!this.options.resendImages) {
return _messages;
}
/**
*
* @param {TMessage} message
*/
const processMessage = async (message) => {
if (!this.message_file_map) {
/** @type {Record<string, MongoFile[]> */
this.message_file_map = {};
}
const fileIds = message.files.map((file) => file.file_id);
const files = await getFiles({
file_id: { $in: fileIds },
});
await this.addImageURLs(message, files);
this.message_file_map[message.messageId] = files;
return message;
};
const promises = [];
for (const message of _messages) {
if (!message.files) {
promises.push(message);
continue;
}
promises.push(processMessage(message));
}
const messages = await Promise.all(promises);
this.checkVisionRequest(this.message_file_map);
return messages;
}
/**
*
* Adds image URLs to the message object and returns the files
@@ -438,8 +394,7 @@ class OpenAIClient extends BaseClient {
*/
async addImageURLs(message, attachments) {
const { files, image_urls } = await encodeAndFormat(this.options.req, attachments);
message.image_urls = image_urls;
message.image_urls = image_urls.length ? image_urls : undefined;
return files;
}
@@ -467,23 +422,9 @@ class OpenAIClient extends BaseClient {
let promptTokens;
promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim();
if (promptPrefix) {
promptPrefix = `Instructions:\n${promptPrefix}`;
instructions = {
role: 'system',
name: 'instructions',
content: promptPrefix,
};
if (this.contextStrategy) {
instructions.tokenCount = this.getTokenCountForMessage(instructions);
}
}
if (this.options.attachments) {
const attachments = (await this.options.attachments).filter((file) =>
file.type.includes('image'),
);
const attachments = await this.options.attachments;
if (this.message_file_map) {
this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments;
@@ -501,6 +442,13 @@ class OpenAIClient extends BaseClient {
this.options.attachments = files;
}
if (this.message_file_map) {
this.contextHandlers = createContextHandlers(
this.options.req,
orderedMessages[orderedMessages.length - 1].text,
);
}
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = formatMessage({
message,
@@ -519,6 +467,11 @@ class OpenAIClient extends BaseClient {
if (this.message_file_map && this.message_file_map[message.messageId]) {
const attachments = this.message_file_map[message.messageId];
for (const file of attachments) {
if (file.embedded) {
this.contextHandlers?.processFile(file);
continue;
}
orderedMessages[i].tokenCount += this.calculateImageTokenCost({
width: file.width,
height: file.height,
@@ -530,6 +483,24 @@ class OpenAIClient extends BaseClient {
return formattedMessage;
});
if (this.contextHandlers) {
this.augmentedPrompt = await this.contextHandlers.createContext();
promptPrefix = this.augmentedPrompt + promptPrefix;
}
if (promptPrefix) {
promptPrefix = `Instructions:\n${promptPrefix.trim()}`;
instructions = {
role: 'system',
name: 'instructions',
content: promptPrefix,
};
if (this.contextStrategy) {
instructions.tokenCount = this.getTokenCountForMessage(instructions);
}
}
// TODO: need to handle interleaving instructions better
if (this.contextStrategy) {
({ payload, tokenCountMap, promptTokens, messages } = await this.handleContextStrategy({