🤖 feat: Gemini 1.5 Support (+Vertex AI) (#2383)
* WIP: gemini-1.5 support
* feat: extended vertex ai support
* fix: handle possibly undefined modelName
* fix: gpt-4-turbo-preview invalid vision model
* feat: specify `fileConfig.imageOutputType` and make PNG default image conversion type
* feat: better truncation for errors including base64 strings
* fix: gemini inlineData formatting
* feat: RAG augmented prompt for gemini-1.5
* feat: gemini-1.5 rates and token window
* chore: adjust tokens, update docs, update vision models
* chore: add back `ChatGoogleVertexAI` for chat models via vertex ai
* refactor: ask/edit controllers to not use `unfinished` field for google endpoint
* chore: remove comment
* chore(ci): fix AppService test
* chore: remove comment
* refactor(GoogleSearch): use `GOOGLE_SEARCH_API_KEY` instead, issue warning for old variable
* chore: bump data-provider to 0.5.4
* chore: update docs
* fix: condition for gemini-1.5 using generative ai lib
* chore: update docs
* ci: add additional AppService test for `imageOutputType`
* refactor: optimize new config value `imageOutputType`
* chore: bump CONFIG_VERSION
* fix(assistants): avatar upload
This commit is contained in:
parent fce7246ac1
commit 9d854dac07
37 changed files with 1030 additions and 258 deletions
|
@ -192,7 +192,7 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
|
|||
|
||||
# Google
|
||||
#-----------------
|
||||
GOOGLE_API_KEY=
|
||||
GOOGLE_SEARCH_API_KEY=
|
||||
GOOGLE_CSE_ID=
|
||||
|
||||
# SerpAPI
|
||||
|
|
|
@ -1,7 +1,9 @@
|
|||
const { google } = require('googleapis');
|
||||
const { Agent, ProxyAgent } = require('undici');
|
||||
const { GoogleVertexAI } = require('langchain/llms/googlevertexai');
|
||||
const { ChatVertexAI } = require('@langchain/google-vertexai');
|
||||
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
|
||||
const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai');
|
||||
const { GoogleVertexAI } = require('@langchain/community/llms/googlevertexai');
|
||||
const { ChatGoogleVertexAI } = require('langchain/chat_models/googlevertexai');
|
||||
const { AIMessage, HumanMessage, SystemMessage } = require('langchain/schema');
|
||||
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
|
||||
|
@ -10,6 +12,7 @@ const {
|
|||
getResponseSender,
|
||||
endpointSettings,
|
||||
EModelEndpoint,
|
||||
VisionModes,
|
||||
AuthKeys,
|
||||
} = require('librechat-data-provider');
|
||||
const { encodeAndFormat } = require('~/server/services/Files/images');
|
||||
|
@ -126,7 +129,7 @@ class GoogleClient extends BaseClient {
|
|||
|
||||
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
|
||||
|
||||
// TODO: as of 12/14/23, only gemini models are "Generative AI" models provided by Google
|
||||
/** @type {boolean} Whether using a "GenerativeAI" Model */
|
||||
this.isGenerativeModel = this.modelOptions.model.includes('gemini');
|
||||
const { isGenerativeModel } = this;
|
||||
this.isChatModel = !isGenerativeModel && this.modelOptions.model.includes('chat');
|
||||
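A quick illustration of the model-type flags set above (model names are examples; only the `gemini` and `chat` substrings matter here, and this sketch is editorial, not part of the diff):

```js
// Minimal sketch of the classification logic shown above.
const classify = (model) => {
  const isGenerativeModel = model.includes('gemini');
  const isChatModel = !isGenerativeModel && model.includes('chat');
  return { isGenerativeModel, isChatModel };
};

console.log(classify('gemini-1.5-pro-latest')); // { isGenerativeModel: true, isChatModel: false }
console.log(classify('chat-bison-32k'));        // { isGenerativeModel: false, isChatModel: true }
```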
|
@ -247,6 +250,40 @@ class GoogleClient extends BaseClient {
|
|||
})).bind(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Formats messages for generative AI
|
||||
* @param {TMessage[]} messages
|
||||
* @returns
|
||||
*/
|
||||
async formatGenerativeMessages(messages) {
|
||||
const formattedMessages = [];
|
||||
const attachments = await this.options.attachments;
|
||||
const latestMessage = { ...messages[messages.length - 1] };
|
||||
const files = await this.addImageURLs(latestMessage, attachments, VisionModes.generative);
|
||||
this.options.attachments = files;
|
||||
messages[messages.length - 1] = latestMessage;
|
||||
|
||||
for (const _message of messages) {
|
||||
const role = _message.isCreatedByUser ? this.userLabel : this.modelLabel;
|
||||
const parts = [];
|
||||
parts.push({ text: _message.text });
|
||||
if (!_message.image_urls?.length) {
|
||||
formattedMessages.push({ role, parts });
|
||||
continue;
|
||||
}
|
||||
|
||||
for (const images of _message.image_urls) {
|
||||
if (images.inlineData) {
|
||||
parts.push({ inlineData: images.inlineData });
|
||||
}
|
||||
}
|
||||
|
||||
formattedMessages.push({ role, parts });
|
||||
}
|
||||
|
||||
return formattedMessages;
|
||||
}
|
||||
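For reference, a hedged sketch of the payload shape `formatGenerativeMessages` produces: an array of `{ role, parts }` entries matching the `contents` format of `@google/generative-ai`, with `inlineData` parts appended for attached images. The values below are illustrative, not taken from the diff:

```js
// Example of the formatted messages array (roles come from userLabel/modelLabel = 'user'/'model').
const exampleContents = [
  {
    role: 'user',
    parts: [
      { text: 'What is in this image?' },
      { inlineData: { mimeType: 'image/png', data: '<base64-encoded image>' } },
    ],
  },
  { role: 'model', parts: [{ text: 'It looks like a diagram of the upload pipeline.' }] },
];

console.log(JSON.stringify(exampleContents, null, 2));
```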
|
||||
/**
|
||||
*
|
||||
* Adds image URLs to the message object and returns the files
|
||||
|
@ -255,17 +292,23 @@ class GoogleClient extends BaseClient {
|
|||
* @param {MongoFile[]} files
|
||||
* @returns {Promise<MongoFile[]>}
|
||||
*/
|
||||
async addImageURLs(message, attachments) {
|
||||
async addImageURLs(message, attachments, mode = '') {
|
||||
const { files, image_urls } = await encodeAndFormat(
|
||||
this.options.req,
|
||||
attachments,
|
||||
EModelEndpoint.google,
|
||||
mode,
|
||||
);
|
||||
message.image_urls = image_urls.length ? image_urls : undefined;
|
||||
return files;
|
||||
}
|
||||
|
||||
async buildVisionMessages(messages = [], parentMessageId) {
|
||||
/**
|
||||
* Builds the augmented prompt for attachments
|
||||
* TODO: Add File API Support
|
||||
* @param {TMessage[]} messages
|
||||
*/
|
||||
async buildAugmentedPrompt(messages = []) {
|
||||
const attachments = await this.options.attachments;
|
||||
const latestMessage = { ...messages[messages.length - 1] };
|
||||
this.contextHandlers = createContextHandlers(this.options.req, latestMessage.text);
|
||||
|
@ -281,6 +324,12 @@ class GoogleClient extends BaseClient {
|
|||
this.augmentedPrompt = await this.contextHandlers.createContext();
|
||||
this.options.promptPrefix = this.augmentedPrompt + this.options.promptPrefix;
|
||||
}
|
||||
}
|
||||
|
||||
async buildVisionMessages(messages = [], parentMessageId) {
|
||||
const attachments = await this.options.attachments;
|
||||
const latestMessage = { ...messages[messages.length - 1] };
|
||||
await this.buildAugmentedPrompt(messages);
|
||||
|
||||
const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
|
||||
|
||||
|
@ -301,15 +350,26 @@ class GoogleClient extends BaseClient {
|
|||
return { prompt: payload };
|
||||
}
|
||||
|
||||
/** @param {TMessage[]} [messages=[]] */
|
||||
async buildGenerativeMessages(messages = []) {
|
||||
this.userLabel = 'user';
|
||||
this.modelLabel = 'model';
|
||||
const promises = [];
|
||||
promises.push(await this.formatGenerativeMessages(messages));
|
||||
promises.push(this.buildAugmentedPrompt(messages));
|
||||
const [formattedMessages] = await Promise.all(promises);
|
||||
return { prompt: formattedMessages };
|
||||
}
|
||||
|
||||
async buildMessages(messages = [], parentMessageId) {
|
||||
if (!this.isGenerativeModel && !this.project_id) {
|
||||
throw new Error(
|
||||
'[GoogleClient] a Service Account JSON Key is required for PaLM 2 and Codey models (Vertex AI)',
|
||||
);
|
||||
} else if (this.isGenerativeModel && (!this.apiKey || this.apiKey === 'user_provided')) {
|
||||
throw new Error(
|
||||
'[GoogleClient] an API Key is required for Gemini models (Generative Language API)',
|
||||
);
|
||||
}
|
||||
|
||||
if (!this.project_id && this.modelOptions.model.includes('1.5')) {
|
||||
return await this.buildGenerativeMessages(messages);
|
||||
}
|
||||
|
||||
if (this.options.attachments && this.isGenerativeModel) {
|
||||
|
@ -526,13 +586,24 @@ class GoogleClient extends BaseClient {
|
|||
}
|
||||
|
||||
createLLM(clientOptions) {
|
||||
if (this.isGenerativeModel) {
|
||||
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
|
||||
const model = clientOptions.modelName ?? clientOptions.model;
|
||||
if (this.project_id && this.isTextModel) {
|
||||
return new GoogleVertexAI(clientOptions);
|
||||
} else if (this.project_id && this.isChatModel) {
|
||||
return new ChatGoogleVertexAI(clientOptions);
|
||||
} else if (this.project_id) {
|
||||
return new ChatVertexAI(clientOptions);
|
||||
} else if (model.includes('1.5')) {
|
||||
return new GenAI(this.apiKey).getGenerativeModel(
|
||||
{
|
||||
...clientOptions,
|
||||
model,
|
||||
},
|
||||
{ apiVersion: 'v1beta' },
|
||||
);
|
||||
}
|
||||
|
||||
return this.isTextModel
|
||||
? new GoogleVertexAI(clientOptions)
|
||||
: new ChatGoogleVertexAI(clientOptions);
|
||||
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
|
||||
}
|
||||
|
||||
async getCompletion(_payload, options = {}) {
|
||||
|
@ -544,7 +615,7 @@ class GoogleClient extends BaseClient {
|
|||
|
||||
let clientOptions = { ...parameters, maxRetries: 2 };
|
||||
|
||||
if (!this.isGenerativeModel) {
|
||||
if (this.project_id) {
|
||||
clientOptions['authOptions'] = {
|
||||
credentials: {
|
||||
...this.serviceKey,
|
||||
|
@ -557,7 +628,7 @@ class GoogleClient extends BaseClient {
|
|||
clientOptions = { ...clientOptions, ...this.modelOptions };
|
||||
}
|
||||
|
||||
if (this.isGenerativeModel) {
|
||||
if (this.isGenerativeModel && !this.project_id) {
|
||||
clientOptions.modelName = clientOptions.model;
|
||||
delete clientOptions.model;
|
||||
}
|
||||
|
@ -588,16 +659,46 @@ class GoogleClient extends BaseClient {
|
|||
messages.unshift(new SystemMessage(context));
|
||||
}
|
||||
|
||||
const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
|
||||
if (modelName?.includes('1.5') && !this.project_id) {
|
||||
/** @type {GenerativeModel} */
|
||||
const client = model;
|
||||
const requestOptions = {
|
||||
contents: _payload,
|
||||
};
|
||||
|
||||
if (this.options?.promptPrefix?.length) {
|
||||
requestOptions.systemInstruction = {
|
||||
parts: [
|
||||
{
|
||||
text: this.options.promptPrefix,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
const result = await client.generateContentStream(requestOptions);
|
||||
for await (const chunk of result.stream) {
|
||||
const chunkText = chunk.text();
|
||||
this.generateTextStream(chunkText, onProgress, {
|
||||
delay: 12,
|
||||
});
|
||||
reply += chunkText;
|
||||
}
|
||||
return reply;
|
||||
}
|
||||
|
||||
const stream = await model.stream(messages, {
|
||||
signal: abortController.signal,
|
||||
timeout: 7000,
|
||||
});
|
||||
|
||||
for await (const chunk of stream) {
|
||||
await this.generateTextStream(chunk?.content ?? chunk, onProgress, {
|
||||
const chunkText = chunk?.content ?? chunk;
|
||||
this.generateTextStream(chunkText, onProgress, {
|
||||
delay: this.isGenerativeModel ? 12 : 8,
|
||||
});
|
||||
reply += chunk?.content ?? chunk;
|
||||
reply += chunkText;
|
||||
}
|
||||
|
||||
return reply;
|
||||
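As a standalone reference, a minimal sketch of the streaming path used above for Gemini 1.5 via `@google/generative-ai`. It assumes a valid API key in `GOOGLE_KEY`; the model name and prompt are illustrative:

```js
const { GoogleGenerativeAI } = require('@google/generative-ai');

async function streamGemini(promptText) {
  const genAI = new GoogleGenerativeAI(process.env.GOOGLE_KEY);
  const client = genAI.getGenerativeModel(
    { model: 'gemini-1.5-pro-latest' },
    { apiVersion: 'v1beta' },
  );

  const result = await client.generateContentStream({
    contents: [{ role: 'user', parts: [{ text: promptText }] }],
    // Optional system prompt, mirroring the promptPrefix handling above:
    systemInstruction: { parts: [{ text: 'Answer concisely.' }] },
  });

  let reply = '';
  for await (const chunk of result.stream) {
    reply += chunk.text(); // accumulate streamed text chunks
  }
  return reply;
}

streamGemini('Summarize the Gemini 1.5 changes.').then(console.log).catch(console.error);
```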
|
|
|
@ -13,7 +13,7 @@ module.exports = {
|
|||
...handleInputs,
|
||||
...instructions,
|
||||
...titlePrompts,
|
||||
truncateText,
|
||||
...truncateText,
|
||||
createVisionPrompt,
|
||||
createContextHandlers,
|
||||
};
|
||||
|
|
|
@ -1,10 +1,40 @@
|
|||
const MAX_CHAR = 255;
|
||||
|
||||
function truncateText(text) {
|
||||
if (text.length > MAX_CHAR) {
|
||||
return `${text.slice(0, MAX_CHAR)}... [text truncated for brevity]`;
|
||||
/**
|
||||
* Truncates a given text to a specified maximum length, appending ellipsis and a notification
|
||||
* if the original text exceeds the maximum length.
|
||||
*
|
||||
* @param {string} text - The text to be truncated.
|
||||
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the text after truncation. Defaults to MAX_CHAR.
|
||||
* @returns {string} The truncated text if the original text length exceeds maxLength, otherwise returns the original text.
|
||||
*/
|
||||
function truncateText(text, maxLength = MAX_CHAR) {
|
||||
if (text.length > maxLength) {
|
||||
return `${text.slice(0, maxLength)}... [text truncated for brevity]`;
|
||||
}
|
||||
return text;
|
||||
}
|
||||
|
||||
module.exports = truncateText;
|
||||
/**
|
||||
* Truncates a given text to a specified maximum length by showing the first half and the last half of the text,
|
||||
* separated by ellipsis. This method ensures the output does not exceed the maximum length, including the addition
|
||||
* of ellipsis and notification if the original text exceeds the maximum length.
|
||||
*
|
||||
* @param {string} text - The text to be truncated.
|
||||
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the output text after truncation. Defaults to MAX_CHAR.
|
||||
* @returns {string} The truncated text showing the first half and the last half, or the original text if it does not exceed maxLength.
|
||||
*/
|
||||
function smartTruncateText(text, maxLength = MAX_CHAR) {
|
||||
const ellipsis = '...';
|
||||
const notification = ' [text truncated for brevity]';
|
||||
const halfMaxLength = Math.floor((maxLength - ellipsis.length - notification.length) / 2);
|
||||
|
||||
if (text.length > maxLength) {
|
||||
const startLastHalf = text.length - halfMaxLength;
|
||||
return `${text.slice(0, halfMaxLength)}${ellipsis}${text.slice(startLastHalf)}${notification}`;
|
||||
}
|
||||
|
||||
return text;
|
||||
}
|
||||
|
||||
module.exports = { truncateText, smartTruncateText };
|
||||
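Usage sketch for the two helpers above (the require path is illustrative; in the repo they are re-exported from the prompts index):

```js
const { truncateText, smartTruncateText } = require('./truncateText');

const longText = 'x'.repeat(600);

// Keeps only the beginning of the string:
console.log(truncateText(longText, 100));
// -> first 100 chars + '... [text truncated for brevity]'

// Keeps the beginning and the end, so both the start of a long string
// (e.g. a stack trace) and its tail survive:
console.log(smartTruncateText(longText, 100));
```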
|
|
|
@ -24,7 +24,7 @@
|
|||
"description": "This is your Google Custom Search Engine ID. For instructions on how to obtain this, see <a href='https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/google_search.md'>Our Docs</a>."
|
||||
},
|
||||
{
|
||||
"authField": "GOOGLE_API_KEY",
|
||||
"authField": "GOOGLE_SEARCH_API_KEY",
|
||||
"label": "Google API Key",
|
||||
"description": "This is your Google Custom Search API Key. For instructions on how to obtain this, see <a href='https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/google_search.md'>Our Docs</a>."
|
||||
}
|
||||
|
|
|
@ -9,7 +9,7 @@ class GoogleSearchResults extends Tool {
|
|||
|
||||
constructor(fields = {}) {
|
||||
super(fields);
|
||||
this.envVarApiKey = 'GOOGLE_API_KEY';
|
||||
this.envVarApiKey = 'GOOGLE_SEARCH_API_KEY';
|
||||
this.envVarSearchEngineId = 'GOOGLE_CSE_ID';
|
||||
this.override = fields.override ?? false;
|
||||
this.apiKey = fields.apiKey ?? getEnvironmentVariable(this.envVarApiKey);
|
||||
|
|
|
@ -25,6 +25,10 @@ const tokenValues = {
|
|||
/* cohere doesn't have rates for the older command models,
|
||||
so this was from https://artificialanalysis.ai/models/command-light/providers */
|
||||
command: { prompt: 0.38, completion: 0.38 },
|
||||
// 'gemini-1.5': { prompt: 7, completion: 21 }, // May 2nd, 2024 pricing
|
||||
// 'gemini': { prompt: 0.5, completion: 1.5 }, // May 2nd, 2024 pricing
|
||||
'gemini-1.5': { prompt: 0, completion: 0 }, // currently free
|
||||
gemini: { prompt: 0, completion: 0 }, // currently free
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -35,10 +35,12 @@
|
|||
"dependencies": {
|
||||
"@anthropic-ai/sdk": "^0.16.1",
|
||||
"@azure/search-documents": "^12.0.0",
|
||||
"@google/generative-ai": "^0.5.0",
|
||||
"@keyv/mongo": "^2.1.8",
|
||||
"@keyv/redis": "^2.8.1",
|
||||
"@langchain/community": "^0.0.17",
|
||||
"@langchain/google-genai": "^0.0.8",
|
||||
"@langchain/community": "^0.0.46",
|
||||
"@langchain/google-genai": "^0.0.11",
|
||||
"@langchain/google-vertexai": "^0.0.5",
|
||||
"axios": "^1.3.4",
|
||||
"bcryptjs": "^2.4.3",
|
||||
"cheerio": "^1.0.0-rc.12",
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
const throttle = require('lodash/throttle');
|
||||
const { getResponseSender, Constants } = require('librechat-data-provider');
|
||||
const { getResponseSender, Constants, EModelEndpoint } = require('librechat-data-provider');
|
||||
const { createAbortController, handleAbortError } = require('~/server/middleware');
|
||||
const { sendMessage, createOnProgress } = require('~/server/utils');
|
||||
const { saveMessage, getConvo } = require('~/models');
|
||||
|
@ -48,7 +48,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
|
|||
|
||||
try {
|
||||
const { client } = await initializeClient({ req, res, endpointOption });
|
||||
|
||||
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
|
||||
const { onProgress: progressCallback, getPartialText } = createOnProgress({
|
||||
onProgress: throttle(
|
||||
({ text: partialText }) => {
|
||||
|
@ -59,7 +59,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
|
|||
parentMessageId: overrideParentMessageId ?? userMessageId,
|
||||
text: partialText,
|
||||
model: client.modelOptions.model,
|
||||
unfinished: true,
|
||||
unfinished,
|
||||
error: false,
|
||||
user,
|
||||
});
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
const throttle = require('lodash/throttle');
|
||||
const { getResponseSender } = require('librechat-data-provider');
|
||||
const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
|
||||
const { createAbortController, handleAbortError } = require('~/server/middleware');
|
||||
const { sendMessage, createOnProgress } = require('~/server/utils');
|
||||
const { saveMessage, getConvo } = require('~/models');
|
||||
|
@ -48,6 +48,7 @@ const EditController = async (req, res, next, initializeClient) => {
|
|||
}
|
||||
};
|
||||
|
||||
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
|
||||
const { onProgress: progressCallback, getPartialText } = createOnProgress({
|
||||
generation,
|
||||
onProgress: throttle(
|
||||
|
@ -59,7 +60,7 @@ const EditController = async (req, res, next, initializeClient) => {
|
|||
parentMessageId: overrideParentMessageId ?? userMessageId,
|
||||
text: partialText,
|
||||
model: endpointOption.modelOptions.model,
|
||||
unfinished: true,
|
||||
unfinished,
|
||||
isEdited: true,
|
||||
error: false,
|
||||
user,
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
const { EModelEndpoint } = require('librechat-data-provider');
|
||||
const { sendMessage, sendError, countTokens, isEnabled } = require('~/server/utils');
|
||||
const { truncateText, smartTruncateText } = require('~/app/clients/prompts');
|
||||
const { saveMessage, getConvo, getConvoTitle } = require('~/models');
|
||||
const clearPendingReq = require('~/cache/clearPendingReq');
|
||||
const abortControllers = require('./abortControllers');
|
||||
const { redactMessage } = require('~/config/parsers');
|
||||
const spendTokens = require('~/models/spendTokens');
|
||||
const { abortRun } = require('./abortRun');
|
||||
const { logger } = require('~/config');
|
||||
|
@ -100,7 +100,15 @@ const createAbortController = (req, res, getAbortData) => {
|
|||
};
|
||||
|
||||
const handleAbortError = async (res, req, error, data) => {
|
||||
logger.error('[handleAbortError] AI response error; aborting request:', error);
|
||||
if (error?.message?.includes('base64')) {
|
||||
logger.error('[handleAbortError] Error in base64 encoding', {
|
||||
...error,
|
||||
stack: smartTruncateText(error?.stack, 1000),
|
||||
message: truncateText(error.message, 350),
|
||||
});
|
||||
} else {
|
||||
logger.error('[handleAbortError] AI response error; aborting request:', error);
|
||||
}
|
||||
const { sender, conversationId, messageId, parentMessageId, partialText } = data;
|
||||
|
||||
if (error.stack && error.stack.includes('google')) {
|
||||
|
@ -109,13 +117,15 @@ const handleAbortError = async (res, req, error, data) => {
|
|||
);
|
||||
}
|
||||
|
||||
const errorText = 'An error occurred while processing your request. Please contact the Admin.';
|
||||
|
||||
const respondWithError = async (partialText) => {
|
||||
let options = {
|
||||
sender,
|
||||
messageId,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
text: redactMessage(error.message),
|
||||
text: errorText,
|
||||
shouldSaveMessage: true,
|
||||
user: req.user.id,
|
||||
};
|
||||
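A self-contained sketch of the truncation applied in `handleAbortError` above, useful for seeing why base64-heavy errors are cut down before logging. The error text is fabricated, the require path is illustrative, and `console.error` stands in for the repo's logger:

```js
const { truncateText, smartTruncateText } = require('./truncateText');

const error = new Error(`Invalid base64 payload: ${'iVBORw0KGgoAAA'.repeat(10000)}`);

if (error.message.includes('base64')) {
  console.error('[handleAbortError] Error in base64 encoding', {
    message: truncateText(error.message, 350),         // readable prefix only
    stack: smartTruncateText(error.stack ?? '', 1000), // top and bottom of the stack
  });
}
```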
|
|
|
@ -213,7 +213,13 @@ router.post('/avatar/:assistant_id', upload.single('file'), async (req, res) =>
|
|||
/** @type {{ openai: OpenAI }} */
|
||||
const { openai } = await initializeClient({ req, res });
|
||||
|
||||
const image = await uploadImageBuffer({ req, context: FileContext.avatar });
|
||||
const image = await uploadImageBuffer({
|
||||
req,
|
||||
context: FileContext.avatar,
|
||||
metadata: {
|
||||
buffer: req.file.buffer,
|
||||
},
|
||||
});
|
||||
|
||||
try {
|
||||
_metadata = JSON.parse(_metadata);
|
||||
|
|
|
@ -18,13 +18,15 @@ router.post('/', upload.single('input'), async (req, res) => {
|
|||
}
|
||||
|
||||
const fileStrategy = req.app.locals.fileStrategy;
|
||||
const webPBuffer = await resizeAvatar({
|
||||
const desiredFormat = req.app.locals.imageOutputType;
|
||||
const resizedBuffer = await resizeAvatar({
|
||||
userId,
|
||||
input,
|
||||
desiredFormat,
|
||||
});
|
||||
|
||||
const { processAvatar } = getStrategyFunctions(fileStrategy);
|
||||
const url = await processAvatar({ buffer: webPBuffer, userId, manual });
|
||||
const url = await processAvatar({ buffer: resizedBuffer, userId, manual });
|
||||
|
||||
res.json({ url });
|
||||
} catch (error) {
|
||||
|
|
|
@ -3,6 +3,7 @@ const {
|
|||
FileSources,
|
||||
Capabilities,
|
||||
EModelEndpoint,
|
||||
EImageOutputType,
|
||||
defaultSocialLogins,
|
||||
validateAzureGroups,
|
||||
mapModelToAzureConfig,
|
||||
|
@ -181,6 +182,7 @@ const AppService = async (app) => {
|
|||
fileConfig: config?.fileConfig,
|
||||
interface: config?.interface,
|
||||
secureImageLinks: config?.secureImageLinks,
|
||||
imageOutputType: config?.imageOutputType?.toLowerCase() ?? EImageOutputType.PNG,
|
||||
paths,
|
||||
...endpointLocals,
|
||||
};
|
||||
|
@ -204,6 +206,12 @@ const AppService = async (app) => {
|
|||
`,
|
||||
);
|
||||
}
|
||||
|
||||
if (process.env.GOOGLE_API_KEY) {
|
||||
logger.warn(
|
||||
'The `GOOGLE_API_KEY` environment variable is deprecated.\nPlease use the `GOOGLE_SEARCH_API_KEY` environment variable instead.',
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
module.exports = AppService;
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
const {
|
||||
FileSources,
|
||||
EModelEndpoint,
|
||||
EImageOutputType,
|
||||
defaultSocialLogins,
|
||||
validateAzureGroups,
|
||||
deprecatedAzureVariables,
|
||||
|
@ -107,6 +108,10 @@ describe('AppService', () => {
|
|||
},
|
||||
},
|
||||
paths: expect.anything(),
|
||||
imageOutputType: expect.any(String),
|
||||
interface: undefined,
|
||||
fileConfig: undefined,
|
||||
secureImageLinks: undefined,
|
||||
});
|
||||
});
|
||||
|
||||
|
@ -125,6 +130,31 @@ describe('AppService', () => {
|
|||
expect(logger.info).toHaveBeenCalledWith(expect.stringContaining('Outdated Config version'));
|
||||
});
|
||||
|
||||
it('should change the `imageOutputType` based on config value', async () => {
|
||||
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
|
||||
Promise.resolve({
|
||||
version: '0.10.0',
|
||||
imageOutputType: EImageOutputType.WEBP,
|
||||
}),
|
||||
);
|
||||
|
||||
await AppService(app);
|
||||
|
||||
expect(app.locals.imageOutputType).toEqual(EImageOutputType.WEBP);
|
||||
});
|
||||
|
||||
it('should default to `PNG` `imageOutputType` with no provided type', async () => {
|
||||
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
|
||||
Promise.resolve({
|
||||
version: '0.10.0',
|
||||
}),
|
||||
);
|
||||
|
||||
await AppService(app);
|
||||
|
||||
expect(app.locals.imageOutputType).toEqual(EImageOutputType.PNG);
|
||||
});
|
||||
|
||||
it('should initialize Firebase when fileStrategy is firebase', async () => {
|
||||
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
|
||||
Promise.resolve({
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
const path = require('path');
|
||||
const { CacheKeys, configSchema } = require('librechat-data-provider');
|
||||
const { CacheKeys, configSchema, EImageOutputType } = require('librechat-data-provider');
|
||||
const getLogStores = require('~/cache/getLogStores');
|
||||
const loadYaml = require('~/utils/loadYaml');
|
||||
const { logger } = require('~/config');
|
||||
|
@ -55,6 +55,20 @@ async function loadCustomConfig() {
|
|||
}
|
||||
|
||||
const result = configSchema.strict().safeParse(customConfig);
|
||||
if (result?.error?.errors?.some((err) => err?.path && err.path?.includes('imageOutputType'))) {
|
||||
throw new Error(
|
||||
`
|
||||
Please specify a correct \`imageOutputType\` value (case-sensitive).
|
||||
|
||||
The available options are:
|
||||
- ${EImageOutputType.JPEG}
|
||||
- ${EImageOutputType.PNG}
|
||||
- ${EImageOutputType.WEBP}
|
||||
|
||||
Refer to the latest config file guide for more information:
|
||||
https://docs.librechat.ai/install/configuration/custom_config.html`,
|
||||
);
|
||||
}
|
||||
if (!result.success) {
|
||||
i === 0 && logger.error(`Invalid custom config file at ${configPath}`, result.error);
|
||||
i === 0 && i++;
|
||||
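A hedged sketch of how the new `imageOutputType` validation surfaces. It assumes a minimal config object passes the rest of `configSchema`, and that the enum values are lowercase (consistent with the `toLowerCase()` handling in AppService); neither assumption comes directly from this diff:

```js
const { configSchema, EImageOutputType } = require('librechat-data-provider');

const customConfig = { version: '0.10.0', imageOutputType: 'PNG' }; // wrong case on purpose
const result = configSchema.strict().safeParse(customConfig);

if (result?.error?.errors?.some((err) => err?.path?.includes('imageOutputType'))) {
  console.error(
    'Please specify a correct `imageOutputType` value (case-sensitive). Options:',
    [EImageOutputType.JPEG, EImageOutputType.PNG, EImageOutputType.WEBP],
  );
}
```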
|
|
|
@ -8,7 +8,7 @@ const { updateFile } = require('~/models/File');
|
|||
const { logger } = require('~/config');
|
||||
|
||||
/**
|
||||
* Converts an image file to the WebP format. The function first resizes the image based on the specified
|
||||
* Converts an image file to the target format. The function first resizes the image based on the specified
|
||||
* resolution.
|
||||
*
|
||||
* @param {Object} params - The params object.
|
||||
|
@ -21,7 +21,7 @@ const { logger } = require('~/config');
|
|||
*
|
||||
* @returns {Promise<{ filepath: string, bytes: number, width: number, height: number}>}
|
||||
* A promise that resolves to an object containing:
|
||||
* - filepath: The path where the converted WebP image is saved.
|
||||
* - filepath: The path where the converted image is saved.
|
||||
* - bytes: The size of the converted image in bytes.
|
||||
* - width: The width of the converted image.
|
||||
* - height: The height of the converted image.
|
||||
|
@ -39,15 +39,16 @@ async function uploadImageToFirebase({ req, file, file_id, endpoint, resolution
|
|||
|
||||
let webPBuffer;
|
||||
let fileName = `${file_id}__${path.basename(inputFilePath)}`;
|
||||
if (extension.toLowerCase() === '.webp') {
|
||||
const targetExtension = `.${req.app.locals.imageOutputType}`;
|
||||
if (extension.toLowerCase() === targetExtension) {
|
||||
webPBuffer = resizedBuffer;
|
||||
} else {
|
||||
webPBuffer = await sharp(resizedBuffer).toFormat('webp').toBuffer();
|
||||
webPBuffer = await sharp(resizedBuffer).toFormat(req.app.locals.imageOutputType).toBuffer();
|
||||
// Replace or append the correct extension
|
||||
const extRegExp = new RegExp(path.extname(fileName) + '$');
|
||||
fileName = fileName.replace(extRegExp, '.webp');
|
||||
fileName = fileName.replace(extRegExp, targetExtension);
|
||||
if (!path.extname(fileName)) {
|
||||
fileName += '.webp';
|
||||
fileName += targetExtension;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -79,7 +80,7 @@ async function prepareImageURL(req, file) {
|
|||
* If the 'manual' flag is set to 'true', it also updates the user's avatar URL in the database.
|
||||
*
|
||||
* @param {object} params - The parameters object.
|
||||
* @param {Buffer} params.buffer - The Buffer containing the avatar image in WebP format.
|
||||
* @param {Buffer} params.buffer - The Buffer containing the avatar image.
|
||||
* @param {string} params.userId - The user ID.
|
||||
* @param {string} params.manual - A string flag indicating whether the update is manual ('true' or 'false').
|
||||
* @returns {Promise<string>} - A promise that resolves with the URL of the uploaded avatar.
|
||||
|
|
|
@ -6,11 +6,11 @@ const { updateUser } = require('~/models/userMethods');
|
|||
const { updateFile } = require('~/models/File');
|
||||
|
||||
/**
|
||||
* Converts an image file to the WebP format. The function first resizes the image based on the specified
|
||||
* Converts an image file to the target format. The function first resizes the image based on the specified
|
||||
* resolution.
|
||||
*
|
||||
* If the original image is already in WebP format, it writes the resized image back. Otherwise,
|
||||
* it converts the image to WebP format before saving.
|
||||
* If the original image is already in target format, it writes the resized image back. Otherwise,
|
||||
* it converts the image to target format before saving.
|
||||
*
|
||||
* The original image is deleted after conversion.
|
||||
* @param {Object} params - The params object.
|
||||
|
@ -24,7 +24,7 @@ const { updateFile } = require('~/models/File');
|
|||
*
|
||||
* @returns {Promise<{ filepath: string, bytes: number, width: number, height: number}>}
|
||||
* A promise that resolves to an object containing:
|
||||
* - filepath: The path where the converted WebP image is saved.
|
||||
* - filepath: The path where the converted image is saved.
|
||||
* - bytes: The size of the converted image in bytes.
|
||||
* - width: The width of the converted image.
|
||||
* - height: The height of the converted image.
|
||||
|
@ -48,16 +48,17 @@ async function uploadLocalImage({ req, file, file_id, endpoint, resolution = 'hi
|
|||
|
||||
const fileName = `${file_id}__${path.basename(inputFilePath)}`;
|
||||
const newPath = path.join(userPath, fileName);
|
||||
const targetExtension = `.${req.app.locals.imageOutputType}`;
|
||||
|
||||
if (extension.toLowerCase() === '.webp') {
|
||||
if (extension.toLowerCase() === targetExtension) {
|
||||
const bytes = Buffer.byteLength(resizedBuffer);
|
||||
await fs.promises.writeFile(newPath, resizedBuffer);
|
||||
const filepath = path.posix.join('/', 'images', req.user.id, path.basename(newPath));
|
||||
return { filepath, bytes, width, height };
|
||||
}
|
||||
|
||||
const outputFilePath = newPath.replace(extension, '.webp');
|
||||
const data = await sharp(resizedBuffer).toFormat('webp').toBuffer();
|
||||
const outputFilePath = newPath.replace(extension, targetExtension);
|
||||
const data = await sharp(resizedBuffer).toFormat(req.app.locals.imageOutputType).toBuffer();
|
||||
await fs.promises.writeFile(outputFilePath, data);
|
||||
const bytes = Buffer.byteLength(data);
|
||||
const filepath = path.posix.join('/', 'images', req.user.id, path.basename(outputFilePath));
|
||||
|
@ -109,7 +110,7 @@ async function prepareImagesLocal(req, file) {
|
|||
* If the 'manual' flag is set to 'true', it also updates the user's avatar URL in the database.
|
||||
*
|
||||
* @param {object} params - The parameters object.
|
||||
* @param {Buffer} params.buffer - The Buffer containing the avatar image in WebP format.
|
||||
* @param {Buffer} params.buffer - The Buffer containing the avatar image.
|
||||
* @param {string} params.userId - The user ID.
|
||||
* @param {string} params.manual - A string flag indicating whether the update is manual ('true' or 'false').
|
||||
* @returns {Promise<string>} - A promise that resolves with the URL of the uploaded avatar.
|
||||
|
|
|
@ -6,10 +6,11 @@ const { logger } = require('~/config');
|
|||
|
||||
/**
|
||||
* Uploads an avatar image for a user. This function can handle various types of input (URL, Buffer, or File object),
|
||||
* processes the image to a square format, converts it to WebP format, and returns the resized buffer.
|
||||
* processes the image to a square format, converts it to target format, and returns the resized buffer.
|
||||
*
|
||||
* @param {Object} params - The parameters object.
|
||||
* @param {string} params.userId - The unique identifier of the user for whom the avatar is being uploaded.
|
||||
* @param {string} options.desiredFormat - The desired output format of the image.
|
||||
* @param {(string|Buffer|File)} params.input - The input representing the avatar image. Can be a URL (string),
|
||||
* a Buffer, or a File object.
|
||||
*
|
||||
|
@ -19,7 +20,7 @@ const { logger } = require('~/config');
|
|||
* @throws {Error} Throws an error if the user ID is undefined, the input type is invalid, the image fetching fails,
|
||||
* or any other error occurs during the processing.
|
||||
*/
|
||||
async function resizeAvatar({ userId, input }) {
|
||||
async function resizeAvatar({ userId, input, desiredFormat }) {
|
||||
try {
|
||||
if (userId === undefined) {
|
||||
throw new Error('User ID is undefined');
|
||||
|
@ -53,7 +54,10 @@ async function resizeAvatar({ userId, input }) {
|
|||
})
|
||||
.toBuffer();
|
||||
|
||||
const { buffer } = await resizeAndConvert(squaredBuffer);
|
||||
const { buffer } = await resizeAndConvert({
|
||||
inputBuffer: squaredBuffer,
|
||||
desiredFormat,
|
||||
});
|
||||
return buffer;
|
||||
} catch (error) {
|
||||
logger.error('Error uploading the avatar:', error);
|
||||
|
|
|
@ -6,7 +6,7 @@ const { getStrategyFunctions } = require('../strategies');
|
|||
const { logger } = require('~/config');
|
||||
|
||||
/**
|
||||
* Converts an image file or buffer to WebP format with specified resolution.
|
||||
* Converts an image file or buffer to target output type with specified resolution.
|
||||
*
|
||||
* @param {Express.Request} req - The request object, containing user and app configuration data.
|
||||
* @param {Buffer | Express.Multer.File} file - The file object, containing either a path or a buffer.
|
||||
|
@ -15,7 +15,7 @@ const { logger } = require('~/config');
|
|||
* @returns {Promise<{filepath: string, bytes: number, width: number, height: number}>} An object containing the path, size, and dimensions of the converted image.
|
||||
* @throws Throws an error if there is an issue during the conversion process.
|
||||
*/
|
||||
async function convertToWebP(req, file, resolution = 'high', basename = '') {
|
||||
async function convertImage(req, file, resolution = 'high', basename = '') {
|
||||
try {
|
||||
let inputBuffer;
|
||||
let outputBuffer;
|
||||
|
@ -38,13 +38,13 @@ async function convertToWebP(req, file, resolution = 'high', basename = '') {
|
|||
height,
|
||||
} = await resizeImageBuffer(inputBuffer, resolution);
|
||||
|
||||
// Check if the file is already in WebP format
|
||||
// If it isn't, convert it:
|
||||
if (extension === '.webp') {
|
||||
// Check if the file is already in target format; if it isn't, convert it:
|
||||
const targetExtension = `.${req.app.locals.imageOutputType}`;
|
||||
if (extension === targetExtension) {
|
||||
outputBuffer = resizedBuffer;
|
||||
} else {
|
||||
outputBuffer = await sharp(resizedBuffer).toFormat('webp').toBuffer();
|
||||
extension = '.webp';
|
||||
outputBuffer = await sharp(resizedBuffer).toFormat(req.app.locals.imageOutputType).toBuffer();
|
||||
extension = targetExtension;
|
||||
}
|
||||
|
||||
// Generate a new filename for the output file
|
||||
|
@ -67,4 +67,4 @@ async function convertToWebP(req, file, resolution = 'high', basename = '') {
|
|||
}
|
||||
}
|
||||
|
||||
module.exports = { convertToWebP };
|
||||
module.exports = { convertImage };
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
const axios = require('axios');
|
||||
const { EModelEndpoint, FileSources } = require('librechat-data-provider');
|
||||
const { EModelEndpoint, FileSources, VisionModes } = require('librechat-data-provider');
|
||||
const { getStrategyFunctions } = require('../strategies');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
|
@ -30,11 +30,20 @@ const base64Only = new Set([EModelEndpoint.google, EModelEndpoint.anthropic]);
|
|||
* @param {Express.Request} req - The request object.
|
||||
* @param {Array<MongoFile>} files - The array of files to encode and format.
|
||||
* @param {EModelEndpoint} [endpoint] - Optional: The endpoint for the image.
|
||||
* @param {string} [mode] - Optional: The endpoint mode for the image.
|
||||
* @returns {Promise<Object>} - A promise that resolves to the result object containing the encoded images and file details.
|
||||
*/
|
||||
async function encodeAndFormat(req, files, endpoint) {
|
||||
async function encodeAndFormat(req, files, endpoint, mode) {
|
||||
const promises = [];
|
||||
const encodingMethods = {};
|
||||
const result = {
|
||||
files: [],
|
||||
image_urls: [],
|
||||
};
|
||||
|
||||
if (!files || !files.length) {
|
||||
return result;
|
||||
}
|
||||
|
||||
for (let file of files) {
|
||||
const source = file.source ?? FileSources.local;
|
||||
|
@ -69,11 +78,6 @@ async function encodeAndFormat(req, files, endpoint) {
|
|||
/** @type {Array<[MongoFile, string]>} */
|
||||
const formattedImages = await Promise.all(promises);
|
||||
|
||||
const result = {
|
||||
files: [],
|
||||
image_urls: [],
|
||||
};
|
||||
|
||||
for (const [file, imageContent] of formattedImages) {
|
||||
const fileMetadata = {
|
||||
type: file.type,
|
||||
|
@ -98,12 +102,18 @@ async function encodeAndFormat(req, files, endpoint) {
|
|||
image_url: {
|
||||
url: imageContent.startsWith('http')
|
||||
? imageContent
|
||||
: `data:image/webp;base64,${imageContent}`,
|
||||
: `data:${file.type};base64,${imageContent}`,
|
||||
detail,
|
||||
},
|
||||
};
|
||||
|
||||
if (endpoint && endpoint === EModelEndpoint.google) {
|
||||
if (endpoint && endpoint === EModelEndpoint.google && mode === VisionModes.generative) {
|
||||
delete imagePart.image_url;
|
||||
imagePart.inlineData = {
|
||||
mimeType: file.type,
|
||||
data: imageContent,
|
||||
};
|
||||
} else if (endpoint && endpoint === EModelEndpoint.google) {
|
||||
imagePart.image_url = imagePart.image_url.url;
|
||||
} else if (endpoint && endpoint === EModelEndpoint.anthropic) {
|
||||
imagePart.type = 'image';
|
||||
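A small check of the data-URL fix above: the base64 prefix now uses the stored file's MIME type instead of a hardcoded `image/webp`. Values are placeholders:

```js
const file = { type: 'image/png' };           // example file metadata
const imageContent = 'iVBORw0KGgoAAAANSUhEU'; // truncated base64, for illustration

const url = imageContent.startsWith('http')
  ? imageContent
  : `data:${file.type};base64,${imageContent}`;

console.log(url); // data:image/png;base64,iVBORw0KGgoAAAANSUhEU
```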
|
|
|
@ -62,14 +62,20 @@ async function resizeImageBuffer(inputBuffer, resolution, endpoint) {
|
|||
}
|
||||
|
||||
/**
|
||||
* Resizes an image buffer to webp format as well as reduces by specified or default 150 px width.
|
||||
* Resizes an image buffer to a specified format and width.
|
||||
*
|
||||
* @param {Buffer} inputBuffer - The buffer of the image to be resized.
|
||||
* @returns {Promise<{ buffer: Buffer, width: number, height: number, bytes: number }>} An object containing the resized image buffer, its size and dimensions.
|
||||
* @throws Will throw an error if the resolution parameter is invalid.
|
||||
* @param {Object} options - The options for resizing and converting the image.
|
||||
* @param {Buffer} options.inputBuffer - The buffer of the image to be resized.
|
||||
* @param {string} options.desiredFormat - The desired output format of the image.
|
||||
* @param {number} [options.width=150] - The desired width of the image. Defaults to 150 pixels.
|
||||
* @returns {Promise<{ buffer: Buffer, width: number, height: number, bytes: number }>} An object containing the resized image buffer, its size, and dimensions.
|
||||
* @throws Will throw an error if the resolution or format parameters are invalid.
|
||||
*/
|
||||
async function resizeAndConvert(inputBuffer, width = 150) {
|
||||
const resizedBuffer = await sharp(inputBuffer).resize({ width }).toFormat('webp').toBuffer();
|
||||
async function resizeAndConvert({ inputBuffer, desiredFormat, width = 150 }) {
|
||||
const resizedBuffer = await sharp(inputBuffer)
|
||||
.resize({ width })
|
||||
.toFormat(desiredFormat)
|
||||
.toBuffer();
|
||||
const resizedMetadata = await sharp(resizedBuffer).metadata();
|
||||
return {
|
||||
buffer: resizedBuffer,
|
||||
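Usage sketch for the new object-style `resizeAndConvert` signature (the `~` require path is the repo's own alias, as used in process.js; the input filename is illustrative):

```js
const fs = require('fs');
const { resizeAndConvert } = require('~/server/services/Files/images');

(async () => {
  const { buffer, width, height, bytes } = await resizeAndConvert({
    inputBuffer: fs.readFileSync('avatar-source.png'),
    desiredFormat: 'webp', // in app code this comes from req.app.locals.imageOutputType
    width: 150,
  });
  console.log({ width, height, bytes, outputBytes: buffer.length });
})();
```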
|
|
|
@ -12,7 +12,7 @@ const {
|
|||
hostImageIdSuffix,
|
||||
hostImageNamePrefix,
|
||||
} = require('librechat-data-provider');
|
||||
const { convertToWebP, resizeAndConvert } = require('~/server/services/Files/images');
|
||||
const { convertImage, resizeAndConvert } = require('~/server/services/Files/images');
|
||||
const { initializeClient } = require('~/server/services/Endpoints/assistants');
|
||||
const { createFile, updateFileUsage, deleteFiles } = require('~/models/File');
|
||||
const { LB_QueueAsyncCall } = require('~/server/utils/queue');
|
||||
|
@ -207,7 +207,7 @@ const processImageFile = async ({ req, res, file, metadata }) => {
|
|||
filename: file.originalname,
|
||||
context: FileContext.message_attachment,
|
||||
source,
|
||||
type: 'image/webp',
|
||||
type: `image/${req.app.locals.imageOutputType}`,
|
||||
width,
|
||||
height,
|
||||
},
|
||||
|
@ -223,9 +223,9 @@ const processImageFile = async ({ req, res, file, metadata }) => {
|
|||
* @param {Object} params - The parameters object.
|
||||
* @param {Express.Request} params.req - The Express request object.
|
||||
* @param {FileContext} params.context - The context of the file (e.g., 'avatar', 'image_generation', etc.)
|
||||
* @param {boolean} [params.resize=true] - Whether to resize and convert the image to WebP. Default is `true`.
|
||||
* @param {boolean} [params.resize=true] - Whether to resize and convert the image to target format. Default is `true`.
|
||||
* @param {{ buffer: Buffer, width: number, height: number, bytes: number, filename: string, type: string, file_id: string }} [params.metadata] - Required metadata for the file if resize is false.
|
||||
* @returns {Promise<{ filepath: string, filename: string, source: string, type: 'image/webp'}>}
|
||||
* @returns {Promise<{ filepath: string, filename: string, source: string, type: string}>}
|
||||
*/
|
||||
const uploadImageBuffer = async ({ req, context, metadata = {}, resize = true }) => {
|
||||
const source = req.app.locals.fileStrategy;
|
||||
|
@ -233,9 +233,14 @@ const uploadImageBuffer = async ({ req, context, metadata = {}, resize = true })
|
|||
let { buffer, width, height, bytes, filename, file_id, type } = metadata;
|
||||
if (resize) {
|
||||
file_id = v4();
|
||||
type = 'image/webp';
|
||||
({ buffer, width, height, bytes } = await resizeAndConvert(req.file.buffer));
|
||||
filename = path.basename(req.file.originalname, path.extname(req.file.originalname)) + '.webp';
|
||||
type = `image/${req.app.locals.imageOutputType}`;
|
||||
({ buffer, width, height, bytes } = await resizeAndConvert({
|
||||
inputBuffer: buffer,
|
||||
desiredFormat: req.app.locals.imageOutputType,
|
||||
}));
|
||||
filename = `${path.basename(req.file.originalname, path.extname(req.file.originalname))}.${
|
||||
req.app.locals.imageOutputType
|
||||
}`;
|
||||
}
|
||||
|
||||
const filepath = await saveBuffer({ userId: req.user.id, fileName: filename, buffer });
|
||||
|
@ -363,7 +368,7 @@ const processOpenAIFile = async ({
|
|||
};
|
||||
|
||||
/**
|
||||
* Process OpenAI image files, convert to webp, save and return file metadata.
|
||||
* Process OpenAI image files, convert to target format, save and return file metadata.
|
||||
* @param {object} params - The params object.
|
||||
* @param {Express.Request} params.req - The Express request object.
|
||||
* @param {Buffer} params.buffer - The image buffer.
|
||||
|
@ -375,12 +380,12 @@ const processOpenAIFile = async ({
|
|||
const processOpenAIImageOutput = async ({ req, buffer, file_id, filename, fileExt }) => {
|
||||
const currentDate = new Date();
|
||||
const formattedDate = currentDate.toISOString();
|
||||
const _file = await convertToWebP(req, buffer, 'high', `${file_id}${fileExt}`);
|
||||
const _file = await convertImage(req, buffer, 'high', `${file_id}${fileExt}`);
|
||||
const file = {
|
||||
..._file,
|
||||
usage: 1,
|
||||
user: req.user.id,
|
||||
type: 'image/webp',
|
||||
type: `image/${req.app.locals.imageOutputType}`,
|
||||
createdAt: formattedDate,
|
||||
updatedAt: formattedDate,
|
||||
source: req.app.locals.fileStrategy,
|
||||
|
|
|
@ -25,12 +25,12 @@ const handleExistingUser = async (oldUser, avatarUrl) => {
|
|||
await oldUser.save();
|
||||
} else if (!isLocal && (oldUser.avatar === null || !oldUser.avatar.includes('?manual=true'))) {
|
||||
const userId = oldUser._id;
|
||||
const webPBuffer = await resizeAvatar({
|
||||
const resizedBuffer = await resizeAvatar({
|
||||
userId,
|
||||
input: avatarUrl,
|
||||
});
|
||||
const { processAvatar } = getStrategyFunctions(fileStrategy);
|
||||
oldUser.avatar = await processAvatar({ buffer: webPBuffer, userId });
|
||||
oldUser.avatar = await processAvatar({ buffer: resizedBuffer, userId });
|
||||
await oldUser.save();
|
||||
}
|
||||
};
|
||||
|
@ -83,12 +83,12 @@ const createNewUser = async ({
|
|||
|
||||
if (!isLocal) {
|
||||
const userId = newUser._id;
|
||||
const webPBuffer = await resizeAvatar({
|
||||
const resizedBuffer = await resizeAvatar({
|
||||
userId,
|
||||
input: avatarUrl,
|
||||
});
|
||||
const { processAvatar } = getStrategyFunctions(fileStrategy);
|
||||
newUser.avatar = await processAvatar({ buffer: webPBuffer, userId });
|
||||
newUser.avatar = await processAvatar({ buffer: resizedBuffer, userId });
|
||||
await newUser.save();
|
||||
}
|
||||
|
||||
|
|
|
@ -14,6 +14,12 @@
|
|||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports GenerativeModel
|
||||
* @typedef {import('@google/generative-ai').GenerativeModel} GenerativeModel
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports AssistantStreamEvent
|
||||
* @typedef {import('openai').default.Beta.AssistantStreamEvent} AssistantStreamEvent
|
||||
|
@ -295,6 +301,12 @@
|
|||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports EImageOutputType
|
||||
* @typedef {import('librechat-data-provider').EImageOutputType} EImageOutputType
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports TCustomConfig
|
||||
* @typedef {import('librechat-data-provider').TCustomConfig} TCustomConfig
|
||||
|
|
|
@ -65,12 +65,14 @@ const cohereModels = {
|
|||
command: 4086, // -10 from max
|
||||
'command-nightly': 8182, // -10 from max
|
||||
'command-r': 127500, // -500 from max
|
||||
'command-r-plus:': 127500, // -500 from max
|
||||
'command-r-plus': 127500, // -500 from max
|
||||
};
|
||||
|
||||
const googleModels = {
|
||||
/* Max I/O is combined so we subtract the amount from max response tokens for actual total */
|
||||
gemini: 32750, // -10 from max
|
||||
gemini: 30720, // -2048 from max
|
||||
'gemini-pro-vision': 12288, // -4096 from max
|
||||
'gemini-1.5': 1048576, // -8192 from max
|
||||
'text-bison-32k': 32758, // -10 from max
|
||||
'chat-bison-32k': 32758, // -10 from max
|
||||
'code-bison-32k': 32758, // -10 from max
|
||||
|
|
|
@ -131,6 +131,18 @@ describe('getModelMaxTokens', () => {
|
|||
});
|
||||
|
||||
test('should return correct tokens for partial match - Google models', () => {
|
||||
expect(getModelMaxTokens('gemini-1.5-pro-latest', EModelEndpoint.google)).toBe(
|
||||
maxTokensMap[EModelEndpoint.google]['gemini-1.5'],
|
||||
);
|
||||
expect(getModelMaxTokens('gemini-1.5-pro-preview-0409', EModelEndpoint.google)).toBe(
|
||||
maxTokensMap[EModelEndpoint.google]['gemini-1.5'],
|
||||
);
|
||||
expect(getModelMaxTokens('gemini-pro-vision', EModelEndpoint.google)).toBe(
|
||||
maxTokensMap[EModelEndpoint.google]['gemini-pro-vision'],
|
||||
);
|
||||
expect(getModelMaxTokens('gemini-1.0', EModelEndpoint.google)).toBe(
|
||||
maxTokensMap[EModelEndpoint.google]['gemini'],
|
||||
);
|
||||
expect(getModelMaxTokens('gemini-pro', EModelEndpoint.google)).toBe(
|
||||
maxTokensMap[EModelEndpoint.google]['gemini'],
|
||||
);
|
||||
|
@ -142,6 +154,15 @@ describe('getModelMaxTokens', () => {
|
|||
);
|
||||
});
|
||||
|
||||
test('should return correct tokens for partial match - Cohere models', () => {
|
||||
expect(getModelMaxTokens('command', EModelEndpoint.custom)).toBe(
|
||||
maxTokensMap[EModelEndpoint.custom]['command'],
|
||||
);
|
||||
expect(getModelMaxTokens('command-r-plus', EModelEndpoint.custom)).toBe(
|
||||
maxTokensMap[EModelEndpoint.custom]['command-r-plus'],
|
||||
);
|
||||
});
|
||||
|
||||
test('should return correct tokens when using a custom endpointTokenConfig', () => {
|
||||
const customTokenConfig = {
|
||||
'custom-model': 12345,
|
||||
|
|
|
@ -2,19 +2,19 @@ import { useEffect } from 'react';
|
|||
import TextareaAutosize from 'react-textarea-autosize';
|
||||
import { EModelEndpoint, endpointSettings } from 'librechat-data-provider';
|
||||
import type { TModelSelectProps } from '~/common';
|
||||
import { ESide } from '~/common';
|
||||
import {
|
||||
SelectDropDown,
|
||||
Input,
|
||||
Label,
|
||||
Slider,
|
||||
InputNumber,
|
||||
HoverCard,
|
||||
InputNumber,
|
||||
SelectDropDown,
|
||||
HoverCardTrigger,
|
||||
} from '~/components/ui';
|
||||
import OptionHover from './OptionHover';
|
||||
import { cn, defaultTextProps, optionText, removeFocusOutlines } from '~/utils/';
|
||||
import { cn, defaultTextProps, optionText, removeFocusOutlines } from '~/utils';
|
||||
import { useLocalize } from '~/hooks';
|
||||
import { ESide } from '~/common';
|
||||
|
||||
export default function Settings({ conversation, setOption, models, readonly }: TModelSelectProps) {
|
||||
const localize = useLocalize();
|
||||
|
@ -53,10 +53,6 @@ export default function Settings({ conversation, setOption, models, readonly }:
|
|||
const setTopK = setOption('topK');
|
||||
const setMaxOutputTokens = setOption('maxOutputTokens');
|
||||
|
||||
const isGenerativeModel = model?.toLowerCase()?.includes('gemini');
|
||||
const isChatModel = !isGenerativeModel && model?.toLowerCase()?.includes('chat');
|
||||
const isTextModel = !isGenerativeModel && !isChatModel && /code|text/.test(model ?? '');
|
||||
|
||||
return (
|
||||
<div className="grid grid-cols-5 gap-6">
|
||||
<div className="col-span-5 flex flex-col items-center justify-start gap-6 sm:col-span-3">
|
||||
|
@ -147,91 +143,87 @@ export default function Settings({ conversation, setOption, models, readonly }:
|
|||
</HoverCardTrigger>
|
||||
<OptionHover endpoint={conversation?.endpoint ?? ''} type="temp" side={ESide.Left} />
|
||||
</HoverCard>
|
||||
{!isTextModel && (
|
||||
<>
|
||||
<HoverCard openDelay={300}>
|
||||
<HoverCardTrigger className="grid w-full items-center gap-2">
|
||||
<div className="flex justify-between">
|
||||
<Label htmlFor="top-p-int" className="text-left text-sm font-medium">
|
||||
{localize('com_endpoint_top_p')}{' '}
|
||||
<small className="opacity-40">
|
||||
({localize('com_endpoint_default_with_num', google.topP.default + '')})
|
||||
</small>
|
||||
</Label>
|
||||
<InputNumber
|
||||
id="top-p-int"
|
||||
disabled={readonly}
|
||||
value={topP}
|
||||
onChange={(value) => setTopP(value ?? google.topP.default)}
|
||||
max={google.topP.max}
|
||||
min={google.topP.min}
|
||||
step={google.topP.step}
|
||||
controls={false}
|
||||
className={cn(
|
||||
defaultTextProps,
|
||||
cn(
|
||||
optionText,
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
|
||||
),
|
||||
)}
|
||||
/>
|
||||
</div>
|
||||
<Slider
|
||||
disabled={readonly}
|
||||
value={[topP ?? google.topP.default]}
|
||||
onValueChange={(value) => setTopP(value[0])}
|
||||
doubleClickHandler={() => setTopP(google.topP.default)}
|
||||
max={google.topP.max}
|
||||
min={google.topP.min}
|
||||
step={google.topP.step}
|
||||
className="flex h-4 w-full"
|
||||
/>
|
||||
</HoverCardTrigger>
|
||||
<OptionHover endpoint={conversation?.endpoint ?? ''} type="topp" side={ESide.Left} />
|
||||
</HoverCard>
|
||||
<HoverCard openDelay={300}>
|
||||
<HoverCardTrigger className="grid w-full items-center gap-2">
|
||||
<div className="flex justify-between">
|
||||
<Label htmlFor="top-p-int" className="text-left text-sm font-medium">
|
||||
{localize('com_endpoint_top_p')}{' '}
|
||||
<small className="opacity-40">
|
||||
({localize('com_endpoint_default_with_num', google.topP.default + '')})
|
||||
</small>
|
||||
</Label>
|
||||
<InputNumber
|
||||
id="top-p-int"
|
||||
disabled={readonly}
|
||||
value={topP}
|
||||
onChange={(value) => setTopP(value ?? google.topP.default)}
|
||||
max={google.topP.max}
|
||||
min={google.topP.min}
|
||||
step={google.topP.step}
|
||||
controls={false}
|
||||
className={cn(
|
||||
defaultTextProps,
|
||||
cn(
|
||||
optionText,
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
|
||||
),
|
||||
)}
|
||||
/>
|
||||
</div>
|
||||
<Slider
|
||||
disabled={readonly}
|
||||
value={[topP ?? google.topP.default]}
|
||||
onValueChange={(value) => setTopP(value[0])}
|
||||
doubleClickHandler={() => setTopP(google.topP.default)}
|
||||
max={google.topP.max}
|
||||
min={google.topP.min}
|
||||
step={google.topP.step}
|
||||
className="flex h-4 w-full"
|
||||
/>
|
||||
</HoverCardTrigger>
|
||||
<OptionHover endpoint={conversation?.endpoint ?? ''} type="topp" side={ESide.Left} />
|
||||
</HoverCard>
|
||||
|
||||
<HoverCard openDelay={300}>
|
||||
<HoverCardTrigger className="grid w-full items-center gap-2">
|
||||
<div className="flex justify-between">
|
||||
<Label htmlFor="top-k-int" className="text-left text-sm font-medium">
|
||||
{localize('com_endpoint_top_k')}{' '}
|
||||
<small className="opacity-40">
|
||||
({localize('com_endpoint_default_with_num', google.topK.default + '')})
|
||||
</small>
|
||||
</Label>
|
||||
<InputNumber
|
||||
id="top-k-int"
|
||||
disabled={readonly}
|
||||
value={topK}
|
||||
onChange={(value) => setTopK(value ?? google.topK.default)}
|
||||
max={google.topK.max}
|
||||
min={google.topK.min}
|
||||
step={google.topK.step}
|
||||
controls={false}
|
||||
className={cn(
|
||||
defaultTextProps,
|
||||
cn(
|
||||
optionText,
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
|
||||
),
|
||||
)}
|
||||
/>
|
||||
</div>
|
||||
<Slider
|
||||
disabled={readonly}
|
||||
value={[topK ?? google.topK.default]}
|
||||
onValueChange={(value) => setTopK(value[0])}
|
||||
doubleClickHandler={() => setTopK(google.topK.default)}
|
||||
max={google.topK.max}
|
||||
min={google.topK.min}
|
||||
step={google.topK.step}
|
||||
className="flex h-4 w-full"
|
||||
/>
|
||||
</HoverCardTrigger>
|
||||
<OptionHover endpoint={conversation?.endpoint ?? ''} type="topk" side={ESide.Left} />
|
||||
</HoverCard>
|
||||
</>
|
||||
)}
|
||||
<HoverCard openDelay={300}>
|
||||
<HoverCardTrigger className="grid w-full items-center gap-2">
|
||||
<div className="flex justify-between">
|
||||
<Label htmlFor="top-k-int" className="text-left text-sm font-medium">
|
||||
{localize('com_endpoint_top_k')}{' '}
|
||||
<small className="opacity-40">
|
||||
({localize('com_endpoint_default_with_num', google.topK.default + '')})
|
||||
</small>
|
||||
</Label>
|
||||
<InputNumber
|
||||
id="top-k-int"
|
||||
disabled={readonly}
|
||||
value={topK}
|
||||
onChange={(value) => setTopK(value ?? google.topK.default)}
|
||||
max={google.topK.max}
|
||||
min={google.topK.min}
|
||||
step={google.topK.step}
|
||||
controls={false}
|
||||
className={cn(
|
||||
defaultTextProps,
|
||||
cn(
|
||||
optionText,
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
|
||||
),
|
||||
)}
|
||||
/>
|
||||
</div>
|
||||
<Slider
|
||||
disabled={readonly}
|
||||
value={[topK ?? google.topK.default]}
|
||||
onValueChange={(value) => setTopK(value[0])}
|
||||
doubleClickHandler={() => setTopK(google.topK.default)}
|
||||
max={google.topK.max}
|
||||
min={google.topK.min}
|
||||
step={google.topK.step}
|
||||
className="flex h-4 w-full"
|
||||
/>
|
||||
</HoverCardTrigger>
|
||||
<OptionHover endpoint={conversation?.endpoint ?? ''} type="topk" side={ESide.Left} />
|
||||
</HoverCard>
|
||||
<HoverCard openDelay={300}>
|
||||
<HoverCardTrigger className="grid w-full items-center gap-2">
|
||||
<div className="flex justify-between">
|
||||
|
|
|
@ -7,7 +7,7 @@ weight: -7
|
|||
# Google Search Plugin
|
||||
Through the plugins endpoint, you can use Google Search for answers to your questions with assistance from GPT. To get started, you need a Google Custom Search API key and a Google Custom Search Engine ID. You can then define these as follows in your `.env` file:
|
||||
```env
|
||||
GOOGLE_API_KEY="...."
|
||||
GOOGLE_SEARCH_API_KEY="...."
|
||||
GOOGLE_CSE_ID="...."
|
||||
```
|
||||
|
||||
|
|
|
@ -269,7 +269,7 @@ Here is an example of a plugin with more than one credential variable
|
|||
"description": "This is your Google Custom Search Engine ID. For instructions on how to obtain this, see <a href='https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/google_search.md'>Our Docs</a>."
|
||||
},
|
||||
{
|
||||
"authField": "GOOGLE_API_KEY",
|
||||
"authField": "GOOGLE_SEARCH_API_KEY",
|
||||
"label": "Google API Key",
|
||||
"description": "This is your Google Custom Search API Key. For instructions on how to obtain this, see <a href='https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/google_search.md'>Our Docs</a>."
|
||||
}
|
||||
|
|
|
@ -11,6 +11,11 @@ weight: -10
|
|||
Certain changes in the updates may impact cookies, leading to unexpected behaviors if not cleared properly.
|
||||
|
||||
---
|
||||
## v0.7.1+
|
||||
|
||||
!!! info "🔍 Google Search Plugin"
|
||||
|
||||
- **[Google Search Plugin](../features/plugins/google_search.md)**: Changed the environment variable for this plugin from `GOOGLE_API_KEY` to `GOOGLE_SEARCH_API_KEY` due to a conflict with the Google Generative AI library pulling this variable automatically. If you are using this plugin, please update your `.env` file accordingly.
|
||||
|
||||
## v0.7.0+
|
||||
|
||||
|
|
|
@@ -121,7 +121,7 @@ ASSISTANTS_BASE_URL=http://your-alt-baseURL:3080/

## Google

-For the Google Endpoint, you can either use the **Generative Language API** (for Gemini models), or the **Vertex AI API** (for PaLM2 & Codey models, Gemini support coming soon).
+For the Google Endpoint, you can either use the **Generative Language API** (for Gemini models), or the **Vertex AI API** (for Gemini, PaLM2 & Codey models).

The Generative Language API uses an API key, which you can get from **Google AI Studio**.
@@ -131,12 +131,12 @@ Instructions for both are given below.

### Generative Language API (Gemini)

-**60 Gemini requests/minute are currently free until early next year when it enters general availability.**
+**[See here for Gemini API pricing and rate limits](https://ai.google.dev/pricing)**

-⚠️ Google will be using that free input/output to help improve the model, with data de-identified from your Google Account and API key.
+⚠️ While Google models are free, they are using your input/output to help improve the model, with data de-identified from your Google Account and API key.
⚠️ During this period, your messages “may be accessible to trained reviewers.”

-To use Gemini models, you'll need an API key. If you don't already have one, create a key in Google AI Studio.
+To use Gemini models through Google AI Studio, you'll need an API key. If you don't already have one, create a key in Google AI Studio.

Get an API key here: **[makersuite.google.com](https://makersuite.google.com/app/apikey)**
@@ -151,16 +151,30 @@ Or, you can make users provide it from the frontend by setting the following:
GOOGLE_KEY=user_provided
```

+Since fetching the models list isn't yet supported, you should set the models you want to use in the .env file.
+
+For your convenience, these are the latest models as of 4/15/24 that can be used with the Generative Language API:
+
+```bash
+GOOGLE_MODELS=gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
+```
+
Notes:
-- PaLM2 and Codey models cannot be accessed through the Generative Language API, only through Vertex AI.
-- A gemini-pro model or `gemini-pro-vision` are required in your list for attaching images.
+- Using LibreChat, PaLM2 and Codey models can only be accessed through Vertex AI, not the Generative Language API.
+- Only models that support the `generateContent` method can be used natively with LibreChat + the Gen AI API.
+- Selecting `gemini-pro-vision` for messages with attachments is not necessary, as it will be switched behind the scenes for you (see the sketch below).
+- Since `gemini-pro-vision` does not accept non-attachment messages, messages without attachments are automatically switched to use `gemini-pro` (otherwise, Google responds with an error).
- With the Google endpoint, you cannot use both Vertex AI and Generative Language API at the same time. You must choose one or the other.
- Some PaLM/Codey models and `gemini-pro-vision` may fail when `maxOutputTokens` is set to a high value. If you encounter this issue, try reducing the value through the conversation parameters.
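
For illustration, the automatic switching described in the notes above might look roughly like the following sketch. This is illustrative only, not LibreChat's actual client code; the function name and signature are made up:

```ts
// Rough sketch of the attachment-based model switching described in the notes above.
function resolveGeminiModel(requestedModel: string, hasAttachments: boolean): string {
  if (hasAttachments && requestedModel === 'gemini-pro') {
    return 'gemini-pro-vision'; // attachments need the vision-capable variant
  }
  if (!hasAttachments && requestedModel === 'gemini-pro-vision') {
    return 'gemini-pro'; // gemini-pro-vision errors on text-only requests
  }
  return requestedModel;
}

// Example: a text-only message keeps gemini-pro; a message with an image upgrades it.
console.log(resolveGeminiModel('gemini-pro', false)); // "gemini-pro"
console.log(resolveGeminiModel('gemini-pro', true)); // "gemini-pro-vision"
```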

-Setting `GOOGLE_KEY=user_provided` in your .env file will configure both the Vertex AI Service Account JSON key file and the Generative Language API key to be provided from the frontend like so:
+Setting `GOOGLE_KEY=user_provided` in your .env file sets both the Vertex AI Service Account JSON key file and the Generative Language API key to be provided from the frontend like so:

![image](https://github.com/danny-avila/LibreChat/assets/110412045/728cbc04-4180-45a8-848c-ae5de2b02996)

-### Vertex AI (PaLM 2 & Codey)
+### Vertex AI

+**[See here for Vertex API pricing and rate limits](https://cloud.google.com/vertex-ai/generative-ai/pricing)**

To setup Google LLMs (via Google Cloud Vertex AI), first, signup for Google Cloud: **[cloud.google.com](https://cloud.google.com/)**
@@ -199,7 +213,13 @@ Alternatively, you can make users provide it from the frontend by setting the fo
GOOGLE_KEY=user_provided
```

-Note: Using Gemini models through Vertex AI is possible but not yet supported.
+Since fetching the models list isn't yet supported, you should set the models you want to use in the .env file.
+
+For your convenience, these are the latest models as of 4/15/24 that can be used with the Vertex AI API:
+
+```bash
+GOOGLE_MODELS=gemini-1.5-pro-preview-0409,gemini-1.0-pro-vision-001,gemini-pro,gemini-pro-vision,chat-bison,chat-bison-32k,codechat-bison,codechat-bison-32k,text-bison,text-bison-32k,text-unicorn,code-gecko,code-bison,code-bison-32k
+```

---
@@ -209,6 +209,13 @@ This example configuration file sets up LibreChat with detailed options across s
- **Description**: Whether or not to secure access to image links that are hosted locally by the app. Default: false.
- **Example**: `secureImageLinks: true`

### Image Output Type
- **Key**: `imageOutputType`
- **Type**: String, "png" | "webp" | "jpeg"
- **Description**: The image output type for image responses. Defaults to "png" if omitted.
- **Note**: Case-sensitive. Google endpoint only supports "jpeg" and "png" output types.
- **Example**: `imageOutputType: "webp"`
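
As an illustration of what this setting controls, the sketch below converts an uploaded image buffer to the configured output type. Using the `sharp` library here is an assumption made for the example, not a statement about how LibreChat implements the conversion:

```ts
import sharp from 'sharp';

type ImageOutputType = 'png' | 'webp' | 'jpeg';

// Convert an uploaded image buffer to the configured output type (defaults to "png").
async function convertImage(input: Buffer, imageOutputType: ImageOutputType = 'png'): Promise<Buffer> {
  return sharp(input).toFormat(imageOutputType).toBuffer();
}
```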

### File Configuration
- **Key**: `fileConfig`
- **Type**: Object
@@ -523,7 +523,7 @@ Remember to replace placeholder text such as "Your DALL-E-3 System Prompt here"
See detailed instructions here: [Google Search](../../features/plugins/google_search.md)

```bash
-GOOGLE_API_KEY=
+GOOGLE_SEARCH_API_KEY=
GOOGLE_CSE_ID=
```
package-lock.json (generated, 567 changed lines)
@@ -43,10 +43,12 @@
    "dependencies": {
      "@anthropic-ai/sdk": "^0.16.1",
      "@azure/search-documents": "^12.0.0",
+     "@google/generative-ai": "^0.5.0",
      "@keyv/mongo": "^2.1.8",
      "@keyv/redis": "^2.8.1",
-     "@langchain/community": "^0.0.17",
-     "@langchain/google-genai": "^0.0.8",
+     "@langchain/community": "^0.0.46",
+     "@langchain/google-genai": "^0.0.11",
+     "@langchain/google-vertexai": "^0.0.5",
      "axios": "^1.3.4",
      "bcryptjs": "^2.4.3",
      "cheerio": "^1.0.0-rc.12",
@ -106,6 +108,33 @@
|
|||
"supertest": "^6.3.3"
|
||||
}
|
||||
},
|
||||
"api/node_modules/@aws-crypto/sha256-js": {
|
||||
"version": "5.2.0",
|
||||
"resolved": "https://registry.npmjs.org/@aws-crypto/sha256-js/-/sha256-js-5.2.0.tgz",
|
||||
"integrity": "sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@aws-crypto/util": "^5.2.0",
|
||||
"@aws-sdk/types": "^3.222.0",
|
||||
"tslib": "^2.6.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=16.0.0"
|
||||
}
|
||||
},
|
||||
"api/node_modules/@aws-crypto/util": {
|
||||
"version": "5.2.0",
|
||||
"resolved": "https://registry.npmjs.org/@aws-crypto/util/-/util-5.2.0.tgz",
|
||||
"integrity": "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@aws-sdk/types": "^3.222.0",
|
||||
"@smithy/util-utf8": "^2.0.0",
|
||||
"tslib": "^2.6.2"
|
||||
}
|
||||
},
|
||||
"api/node_modules/@firebase/analytics": {
|
||||
"version": "0.10.0",
|
||||
"resolved": "https://registry.npmjs.org/@firebase/analytics/-/analytics-0.10.0.tgz",
|
||||
|
@ -600,6 +629,400 @@
|
|||
"resolved": "https://registry.npmjs.org/@firebase/webchannel-wrapper/-/webchannel-wrapper-0.10.3.tgz",
|
||||
"integrity": "sha512-+ZplYUN3HOpgCfgInqgdDAbkGGVzES1cs32JJpeqoh87SkRobGXElJx+1GZSaDqzFL+bYiX18qEcBK76mYs8uA=="
|
||||
},
|
||||
"api/node_modules/@google/generative-ai": {
|
||||
"version": "0.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.5.0.tgz",
|
||||
"integrity": "sha512-uxfN3mROgVxb+9KrlVHIiglcWgNE86pVTS9TRkGS6hMvQoZ5TrB1TIMcW5ZYWxlRhMwtmEdJnNMUxSqM3Qp/Ag==",
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
}
|
||||
},
|
||||
"api/node_modules/@langchain/community": {
|
||||
"version": "0.0.46",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/community/-/community-0.0.46.tgz",
|
||||
"integrity": "sha512-KGuxf4Y4s60/tIAerlk5kY6tJjAQ7wqZj4lxBzQftjqWe53qcI0y9kVUanogRtXlUAQHAR/BCztkQ0eAXaJEGw==",
|
||||
"dependencies": {
|
||||
"@langchain/core": "~0.1.44",
|
||||
"@langchain/openai": "~0.0.19",
|
||||
"expr-eval": "^2.0.2",
|
||||
"flat": "^5.0.2",
|
||||
"langsmith": "~0.1.1",
|
||||
"uuid": "^9.0.0",
|
||||
"zod": "^3.22.3",
|
||||
"zod-to-json-schema": "^3.22.5"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@aws-crypto/sha256-js": "^5.0.0",
|
||||
"@aws-sdk/client-bedrock-agent-runtime": "^3.485.0",
|
||||
"@aws-sdk/client-bedrock-runtime": "^3.422.0",
|
||||
"@aws-sdk/client-dynamodb": "^3.310.0",
|
||||
"@aws-sdk/client-kendra": "^3.352.0",
|
||||
"@aws-sdk/client-lambda": "^3.310.0",
|
||||
"@aws-sdk/client-sagemaker-runtime": "^3.310.0",
|
||||
"@aws-sdk/client-sfn": "^3.310.0",
|
||||
"@aws-sdk/credential-provider-node": "^3.388.0",
|
||||
"@azure/search-documents": "^12.0.0",
|
||||
"@clickhouse/client": "^0.2.5",
|
||||
"@cloudflare/ai": "*",
|
||||
"@datastax/astra-db-ts": "^0.1.4",
|
||||
"@elastic/elasticsearch": "^8.4.0",
|
||||
"@getmetal/metal-sdk": "*",
|
||||
"@getzep/zep-js": "^0.9.0",
|
||||
"@gomomento/sdk": "^1.51.1",
|
||||
"@gomomento/sdk-core": "^1.51.1",
|
||||
"@google-ai/generativelanguage": "^0.2.1",
|
||||
"@gradientai/nodejs-sdk": "^1.2.0",
|
||||
"@huggingface/inference": "^2.6.4",
|
||||
"@mozilla/readability": "*",
|
||||
"@opensearch-project/opensearch": "*",
|
||||
"@pinecone-database/pinecone": "*",
|
||||
"@planetscale/database": "^1.8.0",
|
||||
"@premai/prem-sdk": "^0.3.25",
|
||||
"@qdrant/js-client-rest": "^1.2.0",
|
||||
"@raycast/api": "^1.55.2",
|
||||
"@rockset/client": "^0.9.1",
|
||||
"@smithy/eventstream-codec": "^2.0.5",
|
||||
"@smithy/protocol-http": "^3.0.6",
|
||||
"@smithy/signature-v4": "^2.0.10",
|
||||
"@smithy/util-utf8": "^2.0.0",
|
||||
"@supabase/postgrest-js": "^1.1.1",
|
||||
"@supabase/supabase-js": "^2.10.0",
|
||||
"@tensorflow-models/universal-sentence-encoder": "*",
|
||||
"@tensorflow/tfjs-converter": "*",
|
||||
"@tensorflow/tfjs-core": "*",
|
||||
"@upstash/redis": "^1.20.6",
|
||||
"@upstash/vector": "^1.0.2",
|
||||
"@vercel/kv": "^0.2.3",
|
||||
"@vercel/postgres": "^0.5.0",
|
||||
"@writerai/writer-sdk": "^0.40.2",
|
||||
"@xata.io/client": "^0.28.0",
|
||||
"@xenova/transformers": "^2.5.4",
|
||||
"@zilliz/milvus2-sdk-node": ">=2.2.7",
|
||||
"better-sqlite3": "^9.4.0",
|
||||
"cassandra-driver": "^4.7.2",
|
||||
"cborg": "^4.1.1",
|
||||
"chromadb": "*",
|
||||
"closevector-common": "0.1.3",
|
||||
"closevector-node": "0.1.6",
|
||||
"closevector-web": "0.1.6",
|
||||
"cohere-ai": "*",
|
||||
"convex": "^1.3.1",
|
||||
"couchbase": "^4.3.0",
|
||||
"discord.js": "^14.14.1",
|
||||
"dria": "^0.0.3",
|
||||
"duck-duck-scrape": "^2.2.5",
|
||||
"faiss-node": "^0.5.1",
|
||||
"firebase-admin": "^11.9.0 || ^12.0.0",
|
||||
"google-auth-library": "^8.9.0",
|
||||
"googleapis": "^126.0.1",
|
||||
"hnswlib-node": "^1.4.2",
|
||||
"html-to-text": "^9.0.5",
|
||||
"interface-datastore": "^8.2.11",
|
||||
"ioredis": "^5.3.2",
|
||||
"it-all": "^3.0.4",
|
||||
"jsdom": "*",
|
||||
"jsonwebtoken": "^9.0.2",
|
||||
"llmonitor": "^0.5.9",
|
||||
"lodash": "^4.17.21",
|
||||
"lunary": "^0.6.11",
|
||||
"mongodb": ">=5.2.0",
|
||||
"mysql2": "^3.3.3",
|
||||
"neo4j-driver": "*",
|
||||
"node-llama-cpp": "*",
|
||||
"pg": "^8.11.0",
|
||||
"pg-copy-streams": "^6.0.5",
|
||||
"pickleparser": "^0.2.1",
|
||||
"portkey-ai": "^0.1.11",
|
||||
"redis": "*",
|
||||
"replicate": "^0.18.0",
|
||||
"typeorm": "^0.3.12",
|
||||
"typesense": "^1.5.3",
|
||||
"usearch": "^1.1.1",
|
||||
"vectordb": "^0.1.4",
|
||||
"voy-search": "0.6.2",
|
||||
"weaviate-ts-client": "*",
|
||||
"web-auth-library": "^1.0.3",
|
||||
"ws": "^8.14.2"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@aws-crypto/sha256-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-bedrock-agent-runtime": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-bedrock-runtime": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-dynamodb": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-kendra": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-lambda": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-sagemaker-runtime": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-sfn": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/credential-provider-node": {
|
||||
"optional": true
|
||||
},
|
||||
"@azure/search-documents": {
|
||||
"optional": true
|
||||
},
|
||||
"@clickhouse/client": {
|
||||
"optional": true
|
||||
},
|
||||
"@cloudflare/ai": {
|
||||
"optional": true
|
||||
},
|
||||
"@datastax/astra-db-ts": {
|
||||
"optional": true
|
||||
},
|
||||
"@elastic/elasticsearch": {
|
||||
"optional": true
|
||||
},
|
||||
"@getmetal/metal-sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@getzep/zep-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@gomomento/sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@gomomento/sdk-core": {
|
||||
"optional": true
|
||||
},
|
||||
"@google-ai/generativelanguage": {
|
||||
"optional": true
|
||||
},
|
||||
"@gradientai/nodejs-sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@huggingface/inference": {
|
||||
"optional": true
|
||||
},
|
||||
"@mozilla/readability": {
|
||||
"optional": true
|
||||
},
|
||||
"@opensearch-project/opensearch": {
|
||||
"optional": true
|
||||
},
|
||||
"@pinecone-database/pinecone": {
|
||||
"optional": true
|
||||
},
|
||||
"@planetscale/database": {
|
||||
"optional": true
|
||||
},
|
||||
"@premai/prem-sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@qdrant/js-client-rest": {
|
||||
"optional": true
|
||||
},
|
||||
"@raycast/api": {
|
||||
"optional": true
|
||||
},
|
||||
"@rockset/client": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/eventstream-codec": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/protocol-http": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/signature-v4": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/util-utf8": {
|
||||
"optional": true
|
||||
},
|
||||
"@supabase/postgrest-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@supabase/supabase-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@tensorflow-models/universal-sentence-encoder": {
|
||||
"optional": true
|
||||
},
|
||||
"@tensorflow/tfjs-converter": {
|
||||
"optional": true
|
||||
},
|
||||
"@tensorflow/tfjs-core": {
|
||||
"optional": true
|
||||
},
|
||||
"@upstash/redis": {
|
||||
"optional": true
|
||||
},
|
||||
"@upstash/vector": {
|
||||
"optional": true
|
||||
},
|
||||
"@vercel/kv": {
|
||||
"optional": true
|
||||
},
|
||||
"@vercel/postgres": {
|
||||
"optional": true
|
||||
},
|
||||
"@writerai/writer-sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@xata.io/client": {
|
||||
"optional": true
|
||||
},
|
||||
"@xenova/transformers": {
|
||||
"optional": true
|
||||
},
|
||||
"@zilliz/milvus2-sdk-node": {
|
||||
"optional": true
|
||||
},
|
||||
"better-sqlite3": {
|
||||
"optional": true
|
||||
},
|
||||
"cassandra-driver": {
|
||||
"optional": true
|
||||
},
|
||||
"cborg": {
|
||||
"optional": true
|
||||
},
|
||||
"chromadb": {
|
||||
"optional": true
|
||||
},
|
||||
"closevector-common": {
|
||||
"optional": true
|
||||
},
|
||||
"closevector-node": {
|
||||
"optional": true
|
||||
},
|
||||
"closevector-web": {
|
||||
"optional": true
|
||||
},
|
||||
"cohere-ai": {
|
||||
"optional": true
|
||||
},
|
||||
"convex": {
|
||||
"optional": true
|
||||
},
|
||||
"couchbase": {
|
||||
"optional": true
|
||||
},
|
||||
"discord.js": {
|
||||
"optional": true
|
||||
},
|
||||
"dria": {
|
||||
"optional": true
|
||||
},
|
||||
"duck-duck-scrape": {
|
||||
"optional": true
|
||||
},
|
||||
"faiss-node": {
|
||||
"optional": true
|
||||
},
|
||||
"firebase-admin": {
|
||||
"optional": true
|
||||
},
|
||||
"google-auth-library": {
|
||||
"optional": true
|
||||
},
|
||||
"googleapis": {
|
||||
"optional": true
|
||||
},
|
||||
"hnswlib-node": {
|
||||
"optional": true
|
||||
},
|
||||
"html-to-text": {
|
||||
"optional": true
|
||||
},
|
||||
"interface-datastore": {
|
||||
"optional": true
|
||||
},
|
||||
"ioredis": {
|
||||
"optional": true
|
||||
},
|
||||
"it-all": {
|
||||
"optional": true
|
||||
},
|
||||
"jsdom": {
|
||||
"optional": true
|
||||
},
|
||||
"jsonwebtoken": {
|
||||
"optional": true
|
||||
},
|
||||
"llmonitor": {
|
||||
"optional": true
|
||||
},
|
||||
"lodash": {
|
||||
"optional": true
|
||||
},
|
||||
"lunary": {
|
||||
"optional": true
|
||||
},
|
||||
"mongodb": {
|
||||
"optional": true
|
||||
},
|
||||
"mysql2": {
|
||||
"optional": true
|
||||
},
|
||||
"neo4j-driver": {
|
||||
"optional": true
|
||||
},
|
||||
"node-llama-cpp": {
|
||||
"optional": true
|
||||
},
|
||||
"pg": {
|
||||
"optional": true
|
||||
},
|
||||
"pg-copy-streams": {
|
||||
"optional": true
|
||||
},
|
||||
"pickleparser": {
|
||||
"optional": true
|
||||
},
|
||||
"portkey-ai": {
|
||||
"optional": true
|
||||
},
|
||||
"redis": {
|
||||
"optional": true
|
||||
},
|
||||
"replicate": {
|
||||
"optional": true
|
||||
},
|
||||
"typeorm": {
|
||||
"optional": true
|
||||
},
|
||||
"typesense": {
|
||||
"optional": true
|
||||
},
|
||||
"usearch": {
|
||||
"optional": true
|
||||
},
|
||||
"vectordb": {
|
||||
"optional": true
|
||||
},
|
||||
"voy-search": {
|
||||
"optional": true
|
||||
},
|
||||
"weaviate-ts-client": {
|
||||
"optional": true
|
||||
},
|
||||
"web-auth-library": {
|
||||
"optional": true
|
||||
},
|
||||
"ws": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"api/node_modules/@types/node": {
|
||||
"version": "18.19.14",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.14.tgz",
|
||||
|
@ -641,6 +1064,18 @@
|
|||
"@firebase/util": "1.9.3"
|
||||
}
|
||||
},
|
||||
"api/node_modules/langsmith": {
|
||||
"version": "0.1.14",
|
||||
"resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.1.14.tgz",
|
||||
"integrity": "sha512-iEzQLLB7/0nRpAwNBAR7B7N64fyByg5UsNjSvLaCCkQ9AS68PSafjB8xQkyI8QXXrGjU1dEqDRoa8m4SUuRdUw==",
|
||||
"dependencies": {
|
||||
"@types/uuid": "^9.0.1",
|
||||
"commander": "^10.0.1",
|
||||
"p-queue": "^6.6.2",
|
||||
"p-retry": "4",
|
||||
"uuid": "^9.0.0"
|
||||
}
|
||||
},
|
||||
"api/node_modules/node-fetch": {
|
||||
"version": "2.6.7",
|
||||
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz",
|
||||
|
@ -6548,19 +6983,18 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@langchain/core": {
|
||||
"version": "0.1.23",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.1.23.tgz",
|
||||
"integrity": "sha512-Kn2AiwEMHW9+o6bkKiEUbJ8abQMlEVoePTKw6axdnEOE9zX5Epl1iRCJo+Id5ajNYSYXjWky4puqz75OcFGD6w==",
|
||||
"version": "0.1.57",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.1.57.tgz",
|
||||
"integrity": "sha512-6wOwidPkkRcANrOKl88+YYpm3jHfpg6W8EqZHQCImSAlxdEhyDSq2eeQKHOPCFCrfNWkClaNn+Wlzzz4Qwf9Tg==",
|
||||
"dependencies": {
|
||||
"ansi-styles": "^5.0.0",
|
||||
"camelcase": "6",
|
||||
"decamelize": "1.2.0",
|
||||
"js-tiktoken": "^1.0.8",
|
||||
"langsmith": "~0.0.48",
|
||||
"langsmith": "~0.1.7",
|
||||
"ml-distance": "^4.0.0",
|
||||
"p-queue": "^6.6.2",
|
||||
"p-retry": "4",
|
||||
"sax": "^1.3.0",
|
||||
"uuid": "^9.0.0",
|
||||
"zod": "^3.22.4",
|
||||
"zod-to-json-schema": "^3.22.3"
|
||||
|
@ -6580,10 +7014,48 @@
|
|||
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/core/node_modules/langsmith": {
|
||||
"version": "0.1.14",
|
||||
"resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.1.14.tgz",
|
||||
"integrity": "sha512-iEzQLLB7/0nRpAwNBAR7B7N64fyByg5UsNjSvLaCCkQ9AS68PSafjB8xQkyI8QXXrGjU1dEqDRoa8m4SUuRdUw==",
|
||||
"dependencies": {
|
||||
"@types/uuid": "^9.0.1",
|
||||
"commander": "^10.0.1",
|
||||
"p-queue": "^6.6.2",
|
||||
"p-retry": "4",
|
||||
"uuid": "^9.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/google-common": {
|
||||
"version": "0.0.9",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/google-common/-/google-common-0.0.9.tgz",
|
||||
"integrity": "sha512-jP7vIgsigUSYYVyT5hej4rg8fV8sutxI6Vm2B2xQGNwUXiCQIDPfA9bmlGyXPWomILKB21dRUqNkun/AMYAWvA==",
|
||||
"dependencies": {
|
||||
"@langchain/core": "~0.1.56",
|
||||
"uuid": "^9.0.0",
|
||||
"zod-to-json-schema": "^3.22.4"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/google-gauth": {
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/google-gauth/-/google-gauth-0.0.5.tgz",
|
||||
"integrity": "sha512-kSMqFnNcoRA5yq4WMbqh4OwRNnV/yJ9+U+tppBeL4rwFyrWSTPDxqnIQ0yJz72khu7hzfoxKag+GUiGGSTNiVQ==",
|
||||
"dependencies": {
|
||||
"@langchain/core": "~0.1.56",
|
||||
"@langchain/google-common": "~0.0.5",
|
||||
"google-auth-library": "^8.9.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/google-genai": {
|
||||
"version": "0.0.8",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/google-genai/-/google-genai-0.0.8.tgz",
|
||||
"integrity": "sha512-lsFGtjzTxtC2pMUJuqiFXC72ObHEhc8r3IYUnd0J0NKkrae3n/Zm7+hw1rZQUOdKG2eAVIPIWkPGq2AzECSznA==",
|
||||
"version": "0.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/google-genai/-/google-genai-0.0.11.tgz",
|
||||
"integrity": "sha512-o4+r+ETmcPqcrRTJeJQQ0c796IAx1dvVkZvFsUqLhTIteIQuAc2KenY/UDGQxZVghw6fZf4irN/PvkNHJjfgWw==",
|
||||
"dependencies": {
|
||||
"@google/generative-ai": "^0.1.3",
|
||||
"@langchain/core": "~0.1.5"
|
||||
|
@ -6592,14 +7064,26 @@
|
|||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/openai": {
|
||||
"version": "0.0.14",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.0.14.tgz",
|
||||
"integrity": "sha512-co6nRylPrLGY/C3JYxhHt6cxLq07P086O7K3QaZH7SFFErIN9wSzJonpvhZR07DEUq6eK6wKgh2ORxA/NcjSRQ==",
|
||||
"node_modules/@langchain/google-vertexai": {
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/google-vertexai/-/google-vertexai-0.0.5.tgz",
|
||||
"integrity": "sha512-prtF/lo0I0KQligTdSH1UJA1jSlqHco6UbQ19RrwkNq9N6cnB5SE25a0qTrvZygf6+VEI9qMBw1y0UMQUGpHMQ==",
|
||||
"dependencies": {
|
||||
"@langchain/core": "~0.1.13",
|
||||
"@langchain/core": "~0.1.56",
|
||||
"@langchain/google-gauth": "~0.0.5"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/openai": {
|
||||
"version": "0.0.27",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.0.27.tgz",
|
||||
"integrity": "sha512-yPwJ/8YEK2dvsPi+4LiohYF1/MLvry9IE+w2B4CHD+bVOupGG62300QZkxXV241YytyYGC1PvI/EzBPoJCvEKQ==",
|
||||
"dependencies": {
|
||||
"@langchain/core": "~0.1.45",
|
||||
"js-tiktoken": "^1.0.7",
|
||||
"openai": "^4.26.0",
|
||||
"openai": "^4.32.1",
|
||||
"zod": "^3.22.4",
|
||||
"zod-to-json-schema": "^3.22.3"
|
||||
},
|
||||
|
@ -6608,23 +7092,22 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@langchain/openai/node_modules/@types/node": {
|
||||
"version": "18.19.14",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.14.tgz",
|
||||
"integrity": "sha512-EnQ4Us2rmOS64nHDWr0XqAD8DsO6f3XR6lf9UIIrZQpUzPVdN/oPuEzfDWNHSyXLvoGgjuEm/sPwFGSSs35Wtg==",
|
||||
"version": "18.19.31",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.31.tgz",
|
||||
"integrity": "sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA==",
|
||||
"dependencies": {
|
||||
"undici-types": "~5.26.4"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/openai/node_modules/openai": {
|
||||
"version": "4.26.1",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-4.26.1.tgz",
|
||||
"integrity": "sha512-DvWbjhWbappsFRatOWmu4Dp1/Q4RG9oOz6CfOSjy0/Drb8G+5iAiqWAO4PfpGIkhOOKtvvNfQri2SItl+U7LhQ==",
|
||||
"version": "4.33.0",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-4.33.0.tgz",
|
||||
"integrity": "sha512-Sh4KvplkvkAREuhb8yZpohqsOo08cBBu6LNWLD8YyMxe8yCxbE+ouJYUs1X2oDPrzQGANj0rFNQYiwW9gWLBOg==",
|
||||
"dependencies": {
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/node-fetch": "^2.6.4",
|
||||
"abort-controller": "^3.0.0",
|
||||
"agentkeepalive": "^4.2.1",
|
||||
"digest-fetch": "^1.3.0",
|
||||
"form-data-encoder": "1.7.2",
|
||||
"formdata-node": "^4.3.2",
|
||||
"node-fetch": "^2.6.7",
|
||||
|
@ -10491,7 +10974,6 @@
|
|||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/arrify/-/arrify-2.0.1.tgz",
|
||||
"integrity": "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==",
|
||||
"devOptional": true,
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
|
@ -14196,9 +14678,7 @@
|
|||
"node_modules/fast-text-encoding": {
|
||||
"version": "1.0.6",
|
||||
"resolved": "https://registry.npmjs.org/fast-text-encoding/-/fast-text-encoding-1.0.6.tgz",
|
||||
"integrity": "sha512-VhXlQgj9ioXCqGstD37E/HBeqEGV/qOD/kmbVG8h5xKBYvM1L3lR1Zn4555cQ8GkYbJa8aJSipLPndE1k6zK2w==",
|
||||
"optional": true,
|
||||
"peer": true
|
||||
"integrity": "sha512-VhXlQgj9ioXCqGstD37E/HBeqEGV/qOD/kmbVG8h5xKBYvM1L3lR1Zn4555cQ8GkYbJa8aJSipLPndE1k6zK2w=="
|
||||
},
|
||||
"node_modules/fast-uri": {
|
||||
"version": "2.3.0",
|
||||
|
@ -14847,8 +15327,6 @@
|
|||
"version": "5.3.0",
|
||||
"resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-5.3.0.tgz",
|
||||
"integrity": "sha512-FNTkdNEnBdlqF2oatizolQqNANMrcqJt6AAYt99B3y1aLLC8Hc5IOBb+ZnnzllodEEf6xMBp6wRcBbc16fa65w==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"gaxios": "^5.0.0",
|
||||
"json-bigint": "^1.0.0"
|
||||
|
@ -14861,8 +15339,6 @@
|
|||
"version": "5.1.3",
|
||||
"resolved": "https://registry.npmjs.org/gaxios/-/gaxios-5.1.3.tgz",
|
||||
"integrity": "sha512-95hVgBRgEIRQQQHIbnxBXeHbW4TqFk4ZDJW7wmVtvYar72FdhRIo1UGOLS2eRAKCPEdPBWu+M7+A33D9CdX9rA==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"extend": "^3.0.2",
|
||||
"https-proxy-agent": "^5.0.0",
|
||||
|
@ -15112,8 +15588,6 @@
|
|||
"version": "8.9.0",
|
||||
"resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-8.9.0.tgz",
|
||||
"integrity": "sha512-f7aQCJODJFmYWN6PeNKzgvy9LI2tYmXnzpNDHEjG5sDNPgGb2FXQyTBnXeSH+PAtpKESFD+LmHw3Ox3mN7e1Fg==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"arrify": "^2.0.0",
|
||||
"base64-js": "^1.3.0",
|
||||
|
@ -15133,8 +15607,6 @@
|
|||
"version": "5.1.3",
|
||||
"resolved": "https://registry.npmjs.org/gaxios/-/gaxios-5.1.3.tgz",
|
||||
"integrity": "sha512-95hVgBRgEIRQQQHIbnxBXeHbW4TqFk4ZDJW7wmVtvYar72FdhRIo1UGOLS2eRAKCPEdPBWu+M7+A33D9CdX9rA==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"extend": "^3.0.2",
|
||||
"https-proxy-agent": "^5.0.0",
|
||||
|
@ -15149,8 +15621,6 @@
|
|||
"version": "6.0.0",
|
||||
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
|
||||
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"yallist": "^4.0.0"
|
||||
},
|
||||
|
@ -15161,16 +15631,12 @@
|
|||
"node_modules/google-auth-library/node_modules/yallist": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
|
||||
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
|
||||
"optional": true,
|
||||
"peer": true
|
||||
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
|
||||
},
|
||||
"node_modules/google-p12-pem": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/google-p12-pem/-/google-p12-pem-4.0.1.tgz",
|
||||
"integrity": "sha512-WPkN4yGtz05WZ5EhtlxNDWPhC4JIic6G8ePitwUWy4l+XPVYec+a0j0Ts47PDtW59y3RwAhUd9/h9ZZ63px6RQ==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"node-forge": "^1.3.1"
|
||||
},
|
||||
|
@ -15315,8 +15781,6 @@
|
|||
"version": "6.1.2",
|
||||
"resolved": "https://registry.npmjs.org/gtoken/-/gtoken-6.1.2.tgz",
|
||||
"integrity": "sha512-4ccGpzz7YAr7lxrT2neugmXQ3hP9ho2gcaityLVkiUecAiwiy60Ii8gRbZeOsXV19fYaRjgBSshs8kXw+NKCPQ==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"gaxios": "^5.0.1",
|
||||
"google-p12-pem": "^4.0.0",
|
||||
|
@ -15330,8 +15794,6 @@
|
|||
"version": "5.1.3",
|
||||
"resolved": "https://registry.npmjs.org/gaxios/-/gaxios-5.1.3.tgz",
|
||||
"integrity": "sha512-95hVgBRgEIRQQQHIbnxBXeHbW4TqFk4ZDJW7wmVtvYar72FdhRIo1UGOLS2eRAKCPEdPBWu+M7+A33D9CdX9rA==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"extend": "^3.0.2",
|
||||
"https-proxy-agent": "^5.0.0",
|
||||
|
@ -20527,8 +20989,6 @@
|
|||
"version": "1.3.1",
|
||||
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz",
|
||||
"integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">= 6.13.0"
|
||||
}
|
||||
|
@ -24760,11 +25220,6 @@
|
|||
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
|
||||
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
|
||||
},
|
||||
"node_modules/sax": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/sax/-/sax-1.3.0.tgz",
|
||||
"integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA=="
|
||||
},
|
||||
"node_modules/saxes": {
|
||||
"version": "6.0.0",
|
||||
"resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz",
|
||||
|
@ -28141,9 +28596,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/zod-to-json-schema": {
|
||||
"version": "3.22.4",
|
||||
"resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.22.4.tgz",
|
||||
"integrity": "sha512-2Ed5dJ+n/O3cU383xSY28cuVi0BCQhF8nYqWU5paEpl7fVdqdAmiLdqLyfblbNdfOFwFfi/mqU4O1pwc60iBhQ==",
|
||||
"version": "3.22.5",
|
||||
"resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.22.5.tgz",
|
||||
"integrity": "sha512-+akaPo6a0zpVCCseDed504KBJUQpEW5QZw7RMneNmKw+fGaML1Z9tUNLnHHAC8x6dzVRO1eB2oEMyZRnuBZg7Q==",
|
||||
"peerDependencies": {
|
||||
"zod": "^3.22.4"
|
||||
}
|
||||
|
|
|
@@ -1,6 +1,6 @@
{
  "name": "librechat-data-provider",
- "version": "0.5.3",
+ "version": "0.5.4",
  "description": "data services for librechat apps",
  "main": "dist/index.js",
  "module": "dist/index.es.js",
@@ -186,10 +186,17 @@ export const rateLimitSchema = z.object({
    .optional(),
});

export enum EImageOutputType {
  PNG = 'png',
  WEBP = 'webp',
  JPEG = 'jpeg',
}

export const configSchema = z.object({
  version: z.string(),
  cache: z.boolean().optional().default(true),
  secureImageLinks: z.boolean().optional(),
  imageOutputType: z.nativeEnum(EImageOutputType).optional().default(EImageOutputType.PNG),
  interface: z
    .object({
      privacyPolicy: z
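
To make the new default concrete, here is a minimal, self-contained sketch of how the `imageOutputType` field behaves when omitted. It assumes only `zod` is installed and trims the real `configSchema` down to the fields visible in this diff:

```ts
import { z } from 'zod';

// Trimmed-down sketch of the schema above; the real configSchema has many more fields.
enum EImageOutputType {
  PNG = 'png',
  WEBP = 'webp',
  JPEG = 'jpeg',
}

const configSketch = z.object({
  version: z.string(),
  cache: z.boolean().optional().default(true),
  imageOutputType: z.nativeEnum(EImageOutputType).optional().default(EImageOutputType.PNG),
});

// Omitting imageOutputType in the config falls back to "png":
const parsed = configSketch.parse({ version: '1.0.6' });
console.log(parsed.imageOutputType); // "png"
console.log(parsed.cache); // true
```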
@@ -382,7 +389,17 @@ export const supportsBalanceCheck = {
  [EModelEndpoint.azureOpenAI]: true,
};

-export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision', 'claude-3'];
+export const visionModels = [
+  'gpt-4-vision',
+  'llava-13b',
+  'gemini-pro-vision',
+  'claude-3',
+  'gemini-1.5',
+  'gpt-4-turbo',
+];
+export enum VisionModes {
+  generative = 'generative',
+}

export function validateVisionModel({
  model,
@@ -397,6 +414,10 @@ export function validateVisionModel({
    return false;
  }

  if (model === 'gpt-4-turbo-preview') {
    return false;
  }

  if (availableModels && !availableModels.includes(model)) {
    return false;
  }
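
A simplified re-implementation of the checks visible in this diff, to show the intent of the `gpt-4-turbo-preview` exclusion. This is a sketch only; the partial-match logic at the end is an assumption, not the library's exact code:

```ts
const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision', 'claude-3', 'gemini-1.5', 'gpt-4-turbo'];

function validateVisionModelSketch({
  model,
  availableModels,
}: {
  model?: string;
  availableModels?: string[];
}): boolean {
  if (!model) {
    return false;
  }
  // gpt-4-turbo-preview matches the 'gpt-4-turbo' prefix but is not a vision model, so it is excluded.
  if (model === 'gpt-4-turbo-preview') {
    return false;
  }
  if (availableModels && !availableModels.includes(model)) {
    return false;
  }
  // Assumed partial matching, so dated releases like 'gemini-1.5-pro-latest' still qualify.
  return visionModels.some((visionModel) => model.includes(visionModel));
}

console.log(validateVisionModelSketch({ model: 'gemini-1.5-pro-latest' })); // true
console.log(validateVisionModelSketch({ model: 'gpt-4-turbo-preview' })); // false
```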
@@ -485,6 +506,8 @@ export enum AuthKeys {
  GOOGLE_SERVICE_KEY = 'GOOGLE_SERVICE_KEY',
  /**
   * API key to use Google Generative AI.
   *
   * Note: this is not for Environment Variables, but to access encrypted object values.
   */
  GOOGLE_API_KEY = 'GOOGLE_API_KEY',
}
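
For context, these enum members name fields inside a user-provided credentials object rather than environment variables. A hypothetical shape might look like the following; the placeholder values and the storage/encryption details are assumptions not shown in this diff:

```ts
// Hypothetical credentials object keyed by the AuthKeys members above.
const googleCredentials = {
  GOOGLE_SERVICE_KEY: { /* parsed Vertex AI service account JSON */ },
  GOOGLE_API_KEY: 'AIza...your-generative-language-api-key',
};

console.log(Object.keys(googleCredentials)); // ["GOOGLE_SERVICE_KEY", "GOOGLE_API_KEY"]
```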
@@ -546,7 +569,7 @@ export enum Constants {
  /**
   * Key for the Custom Config's version (librechat.yaml).
   */
- CONFIG_VERSION = '1.0.5',
+ CONFIG_VERSION = '1.0.6',
  /**
   * Standard value for the first message's `parentMessageId` value, to indicate no parent exists.
   */