🤖 feat(Anthropic): Claude 3 & Vision Support (#1984)
* chore: bump anthropic SDK
* chore: update anthropic config settings (fileSupport, default models)
* feat: anthropic multi-modal formatting
* refactor: update vision models and use endpoint-specific max long side resizing
* feat(anthropic): multimodal messages, retry logic, and messages payload
* chore: add more safety to trimming content, due to whitespace error for assistant messages
* feat(anthropic): token accounting and resending multiple images (in progress)
* chore: bump data-provider
* feat(anthropic): resendImages feature
* chore: optimize Edit/Ask controllers, switch model back to req model
* fix: false positive of invalid model
* refactor(validateVisionModel): use object as arg, pass in additional/available models
* refactor(validateModel): use helper function, `getModelsConfig`
* feat: add modelsConfig to endpointOption so it gets passed to all clients; use it to properly validate vision models
* refactor: initialize default vision model and make sure it's available before assigning it
* refactor(useSSE): avoid resetting the model if the user selected a new model between request and response
* feat: show rate in transaction logging
* fix: return tokenCountMap regardless of payload shape
parent b023c5683d
commit 8263ddda3f
28 changed files with 599 additions and 115 deletions
@@ -1,7 +1,15 @@
 const Anthropic = require('@anthropic-ai/sdk');
 const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
-const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
+const {
+  getResponseSender,
+  EModelEndpoint,
+  validateVisionModel,
+} = require('librechat-data-provider');
+const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const spendTokens = require('~/models/spendTokens');
 const { getModelMaxTokens } = require('~/utils');
 const { formatMessage } = require('./prompts');
+const { getFiles } = require('~/models/File');
 const BaseClient = require('./BaseClient');
 const { logger } = require('~/config');
@@ -10,12 +18,20 @@ const AI_PROMPT = '\n\nAssistant:';
 const tokenizersCache = {};

+/** Helper function to introduce a delay before retrying */
+function delayBeforeRetry(attempts, baseDelay = 1000) {
+  return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
+}
+
 class AnthropicClient extends BaseClient {
   constructor(apiKey, options = {}) {
     super(apiKey, options);
     this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
     this.userLabel = HUMAN_PROMPT;
     this.assistantLabel = AI_PROMPT;
+    this.contextStrategy = options.contextStrategy
+      ? options.contextStrategy.toLowerCase()
+      : 'discard';
     this.setOptions(options);
   }
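The helper above gives a simple linear backoff: the wait grows in proportion to the attempt count. A minimal sketch of the resulting timing, using the 350 ms base that `sendCompletion` passes later in this diff (`demoBackoff` is illustrative, not part of the commit):

    // attempt 1 waits ~350 ms, attempt 2 ~700 ms, attempt 3 ~1050 ms
    async function demoBackoff() {
      for (let attempts = 1; attempts <= 3; attempts++) {
        await delayBeforeRetry(attempts, 350);
        console.log(`retry ${attempts} waited ~${350 * attempts} ms`);
      }
    }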
@@ -47,6 +63,12 @@ class AnthropicClient extends BaseClient {
       stop: modelOptions.stop, // no stop method for now
     };

+    this.isClaude3 = this.modelOptions.model.includes('claude-3');
+    this.useMessages = this.isClaude3 || !!this.options.attachments;
+
+    this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
+    this.checkVisionRequest(this.options.attachments);
+
     this.maxContextTokens =
       getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ?? 100000;
     this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1500;
@@ -99,6 +121,119 @@ class AnthropicClient extends BaseClient {
     return new Anthropic(options);
   }

+  getTokenCountForResponse(response) {
+    return this.getTokenCountForMessage({
+      role: 'assistant',
+      content: response.text,
+    });
+  }
+
+  /**
+   *
+   * Checks if the model is a vision model based on request attachments and sets the appropriate options:
+   * - Sets `this.modelOptions.model` to `this.defaultVisionModel` if the request is a vision request.
+   * - Sets `this.isVisionModel` to `true` if vision request.
+   * @param {Array<Promise<MongoFile[]> | MongoFile[]> | Record<string, MongoFile[]>} attachments
+   */
+  checkVisionRequest(attachments) {
+    const availableModels = this.options.modelsConfig?.[EModelEndpoint.anthropic];
+    this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
+
+    const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
+    if (attachments && visionModelAvailable && !this.isVisionModel) {
+      this.modelOptions.model = this.defaultVisionModel;
+      this.isVisionModel = true;
+    }
+  }
+
+  /**
+   * Calculates the token cost for an image based on its dimensions.
+   *
+   * For reference, see: https://docs.anthropic.com/claude/docs/vision#image-costs
+   *
+   * @param {Object} image - The image object.
+   * @param {number} image.width - The width of the image.
+   * @param {number} image.height - The height of the image.
+   * @returns {number} The calculated token cost in tokens.
+   */
+  calculateImageTokenCost({ width, height }) {
+    return Math.ceil((width * height) / 750);
+  }
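A quick sanity check of the formula, which follows Anthropic's guidance of roughly (width × height) / 750 tokens per image (the dimensions below are made up for illustration):

    calculateImageTokenCost({ width: 1092, height: 1092 }); // Math.ceil(1192464 / 750) === 1590
    calculateImageTokenCost({ width: 512, height: 512 });   // Math.ceil(262144 / 750)  === 350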
+  async addImageURLs(message, attachments) {
+    const { files, image_urls } = await encodeAndFormat(
+      this.options.req,
+      attachments,
+      EModelEndpoint.anthropic,
+    );
+    message.image_urls = image_urls;
+    return files;
+  }
+
+  async recordTokenUsage({ promptTokens, completionTokens }) {
+    logger.debug('[AnthropicClient] recordTokenUsage:', { promptTokens, completionTokens });
+    await spendTokens(
+      {
+        user: this.user,
+        model: this.modelOptions.model,
+        context: 'message',
+        conversationId: this.conversationId,
+        endpointTokenConfig: this.options.endpointTokenConfig,
+      },
+      { promptTokens, completionTokens },
+    );
+  }
+
+  /**
+   *
+   * @param {TMessage[]} _messages
+   * @returns {Promise<TMessage[]>}
+   */
+  async addPreviousAttachments(_messages) {
+    if (!this.options.resendImages) {
+      return _messages;
+    }
+
+    /**
+     *
+     * @param {TMessage} message
+     */
+    const processMessage = async (message) => {
+      if (!this.message_file_map) {
+        /** @type {Record<string, MongoFile[]>} */
+        this.message_file_map = {};
+      }
+
+      const fileIds = message.files.map((file) => file.file_id);
+      const files = await getFiles({
+        file_id: { $in: fileIds },
+      });
+
+      await this.addImageURLs(message, files);
+
+      this.message_file_map[message.messageId] = files;
+      return message;
+    };
+
+    const promises = [];
+
+    for (const message of _messages) {
+      if (!message.files) {
+        promises.push(message);
+        continue;
+      }
+
+      promises.push(processMessage(message));
+    }
+
+    const messages = await Promise.all(promises);
+
+    this.checkVisionRequest(this.message_file_map);
+    return messages;
+  }
   async buildMessages(messages, parentMessageId) {
     const orderedMessages = this.constructor.getMessagesForConversation({
       messages,
@@ -107,28 +242,127 @@ class AnthropicClient extends BaseClient {

     logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId });

-    const formattedMessages = orderedMessages.map((message) => ({
+    if (!this.isVisionModel && this.options.attachments) {
+      throw new Error('Attachments are only supported with the Claude 3 family of models');
+    } else if (this.options.attachments) {
+      const attachments = (await this.options.attachments).filter((file) =>
+        file.type.includes('image'),
+      );
+
+      const latestMessage = orderedMessages[orderedMessages.length - 1];
+
+      if (this.message_file_map) {
+        this.message_file_map[latestMessage.messageId] = attachments;
+      } else {
+        this.message_file_map = {
+          [latestMessage.messageId]: attachments,
+        };
+      }
+
+      const files = await this.addImageURLs(latestMessage, attachments);
+
+      this.options.attachments = files;
+    }
+
+    const formattedMessages = orderedMessages.map((message, i) => {
+      const formattedMessage = this.useMessages
+        ? formatMessage({
+            message,
+            endpoint: EModelEndpoint.anthropic,
+          })
+        : {
            author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
            content: message?.content ?? message.text,
-    }));
+          };
+
+      const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
+      /* If tokens were never counted, or, is a Vision request and the message has files, count again */
+      if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
+        orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage);
+      }
+
+      /* If message has files, calculate image token cost */
+      if (this.message_file_map && this.message_file_map[message.messageId]) {
+        const attachments = this.message_file_map[message.messageId];
+        for (const file of attachments) {
+          orderedMessages[i].tokenCount += this.calculateImageTokenCost({
+            width: file.width,
+            height: file.height,
+          });
+        }
+      }
+
+      formattedMessage.tokenCount = orderedMessages[i].tokenCount;
+      return formattedMessage;
+    });
+
+    let { context: messagesInWindow, remainingContextTokens } =
+      await this.getMessagesWithinTokenLimit(formattedMessages);
+
+    const tokenCountMap = orderedMessages
+      .slice(orderedMessages.length - messagesInWindow.length)
+      .reduce((map, message, index) => {
+        const { messageId } = message;
+        if (!messageId) {
+          return map;
+        }
+
+        map[messageId] = orderedMessages[index].tokenCount;
+        return map;
+      }, {});
+
+    logger.debug('[AnthropicClient]', {
+      messagesInWindow: messagesInWindow.length,
+      remainingContextTokens,
+    });
+
     let lastAuthor = '';
     let groupedMessages = [];

-    for (let message of formattedMessages) {
+    for (let i = 0; i < messagesInWindow.length; i++) {
+      const message = messagesInWindow[i];
+      const author = message.role ?? message.author;
       // If last author is not same as current author, add to new group
-      if (lastAuthor !== message.author) {
-        groupedMessages.push({
-          author: message.author,
-          content: [message.content],
-        });
-        lastAuthor = message.author;
+      if (lastAuthor !== author) {
+        const newMessage = {
+          content: [message.content],
+        };
+
+        if (message.role) {
+          newMessage.role = message.role;
+        } else {
+          newMessage.author = message.author;
+        }
+
+        groupedMessages.push(newMessage);
+        lastAuthor = author;
       // If same author, append content to the last group
       } else {
         groupedMessages[groupedMessages.length - 1].content.push(message.content);
       }
     }

+    groupedMessages = groupedMessages.map((msg, i) => {
+      const isLast = i === groupedMessages.length - 1;
+      if (msg.content.length === 1) {
+        const content = msg.content[0];
+        return {
+          ...msg,
+          // reason: final assistant content cannot end with trailing whitespace
+          content:
+            isLast && this.useMessages && msg.role === 'assistant' && typeof content === 'string'
+              ? content?.trim()
+              : content,
+        };
+      }
+
+      if (!this.useMessages && msg.tokenCount) {
+        delete msg.tokenCount;
+      }
+
+      return msg;
+    });
+
     let identityPrefix = '';
     if (this.options.userLabel) {
       identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
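The grouping pass matters because the Anthropic Messages API expects strictly alternating `user`/`assistant` turns; consecutive same-author messages are therefore collapsed into a single message whose `content` is an array. A hypothetical before/after (the string contents are made up):

    // input turns
    // [{ role: 'user', content: 'First part,' },
    //  { role: 'user', content: 'second part.' },
    //  { role: 'assistant', content: 'Got it.' }]
    //
    // after grouping
    // [{ role: 'user', content: ['First part,', 'second part.'] },
    //  { role: 'assistant', content: ['Got it.'] }]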
@@ -154,7 +388,8 @@ class AnthropicClient extends BaseClient {
     // Prompt AI to respond, empty if last message was from AI
     let isEdited = lastAuthor === this.assistantLabel;
     const promptSuffix = isEdited ? '' : `${promptPrefix}${this.assistantLabel}\n`;
-    let currentTokenCount = isEdited
+    let currentTokenCount =
+      isEdited || this.useMessages
        ? this.getTokenCount(promptPrefix)
        : this.getTokenCount(promptSuffix);
@@ -224,7 +459,69 @@
       return true;
     };

+    const messagesPayload = [];
+    const buildMessagesPayload = async () => {
+      let canContinue = true;
+
+      if (promptPrefix) {
+        this.systemMessage = promptPrefix;
+      }
+
+      while (currentTokenCount < maxTokenCount && groupedMessages.length > 0 && canContinue) {
+        const message = groupedMessages.pop();
+
+        let tokenCountForMessage = message.tokenCount ?? this.getTokenCountForMessage(message);
+
+        const newTokenCount = currentTokenCount + tokenCountForMessage;
+        const exceededMaxCount = newTokenCount > maxTokenCount;
+
+        if (exceededMaxCount && messagesPayload.length === 0) {
+          throw new Error(
+            `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
+          );
+        } else if (exceededMaxCount) {
+          canContinue = false;
+          break;
+        }
+
+        delete message.tokenCount;
+        messagesPayload.unshift(message);
+        currentTokenCount = newTokenCount;
+
+        // Switch off isEdited after using it once
+        if (isEdited && message.role === 'assistant') {
+          isEdited = false;
+        }
+
+        // Wait for next tick to avoid blocking the event loop
+        await new Promise((resolve) => setImmediate(resolve));
+      }
+    };
+
+    const processTokens = () => {
+      // Add 2 tokens for metadata after all messages have been counted.
+      currentTokenCount += 2;
+
+      // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxResponseTokens` tokens for the response.
+      this.modelOptions.maxOutputTokens = Math.min(
+        this.maxContextTokens - currentTokenCount,
+        this.maxResponseTokens,
+      );
+    };
+
+    if (this.modelOptions.model.startsWith('claude-3')) {
+      await buildMessagesPayload();
+      processTokens();
+      return {
+        prompt: messagesPayload,
+        context: messagesInWindow,
+        promptTokens: currentTokenCount,
+        tokenCountMap,
+      };
+    } else {
+      await buildPromptBody();
+      processTokens();
+    }

     if (nextMessage.remove) {
       promptBody = promptBody.replace(nextMessage.messageString, '');
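To make the `processTokens` budget concrete, here is the arithmetic with made-up numbers (a 200k-token window and the 1,500-token default response ceiling used above):

    const maxContextTokens = 200000;
    const maxResponseTokens = 1500;
    let currentTokenCount = 1200 + 2; // counted prompt tokens + 2 metadata tokens
    const maxOutputTokens = Math.min(maxContextTokens - currentTokenCount, maxResponseTokens);
    // 1500 -- the response budget only shrinks once the prompt nearly fills the window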
@@ -234,22 +531,19 @@

     let prompt = `${promptBody}${promptSuffix}`;

-    // Add 2 tokens for metadata after all messages have been counted.
-    currentTokenCount += 2;
-
-    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
-    this.modelOptions.maxOutputTokens = Math.min(
-      this.maxContextTokens - currentTokenCount,
-      this.maxResponseTokens,
-    );
-
-    return { prompt, context };
+    return { prompt, context, promptTokens: currentTokenCount, tokenCountMap };
   }

   getCompletion() {
     logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
   }

+  async createResponse(client, options) {
+    return this.useMessages
+      ? await client.messages.create(options)
+      : await client.completions.create(options);
+  }
+
   async sendCompletion(payload, { onProgress, abortController }) {
     if (!abortController) {
       abortController = new AbortController();
@@ -279,36 +573,88 @@
       topP: top_p,
       topK: top_k,
     } = this.modelOptions;

     const requestOptions = {
-      prompt: payload,
       model,
       stream: stream || true,
-      max_tokens_to_sample: maxOutputTokens || 1500,
       stop_sequences,
       temperature,
       metadata,
       top_p,
       top_k,
     };

+    if (this.useMessages) {
+      requestOptions.messages = payload;
+      requestOptions.max_tokens = maxOutputTokens || 1500;
+    } else {
+      requestOptions.prompt = payload;
+      requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
+    }
+
+    if (this.systemMessage) {
+      requestOptions.system = this.systemMessage;
+    }
+
     logger.debug('[AnthropicClient]', { ...requestOptions });
-    const response = await client.completions.create(requestOptions);

+    const handleChunk = (currentChunk) => {
+      if (currentChunk) {
+        text += currentChunk;
+        onProgress(currentChunk);
+      }
+    };
+
+    const maxRetries = 3;
+    async function processResponse() {
+      let attempts = 0;
+
+      while (attempts < maxRetries) {
+        let response;
+        try {
+          response = await this.createResponse(client, requestOptions);

           signal.addEventListener('abort', () => {
             logger.debug('[AnthropicClient] message aborted!');
             if (response.controller?.abort) {
               response.controller.abort();
             }
           });

           for await (const completion of response) {
             // Uncomment to debug message stream
             // logger.debug(completion);
-            text += completion.completion;
-            onProgress(completion.completion);
+            // Handle each completion as before
+            if (completion?.delta?.text) {
+              handleChunk(completion.delta.text);
+            } else if (completion.completion) {
+              handleChunk(completion.completion);
+            }
           }

+          // Successful processing, exit loop
+          break;
+        } catch (error) {
+          attempts += 1;
+          logger.warn(
+            `User: ${this.user} | Anthropic Request ${attempts} failed: ${error.message}`,
+          );
+
+          if (attempts < maxRetries) {
+            await delayBeforeRetry(attempts, 350);
+          } else {
+            throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
+          }
+        } finally {
+          signal.removeEventListener('abort', () => {
+            logger.debug('[AnthropicClient] message aborted!');
+            if (response.controller?.abort) {
+              response.controller.abort();
+            }
+          });
+        }
+      }
+    }
+
+    await processResponse.bind(this)();

     return text.trim();
   }
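The `useMessages` branch above is the heart of the migration: Claude 3 models use the Messages API (`messages` plus `max_tokens`, with the system prompt in a top-level `system` field), while older Claude models stay on the Text Completions API (`prompt` plus `max_tokens_to_sample`). A hedged sketch of the two request shapes, with placeholder values:

    const messagesRequest = {
      model: 'claude-3-sonnet-20240229',
      system: 'You are a helpful assistant.',
      messages: [{ role: 'user', content: 'Hello!' }],
      max_tokens: 1500,
      stream: true,
    };

    const completionsRequest = {
      model: 'claude-2.1',
      prompt: '\n\nHuman: Hello!\n\nAssistant:',
      max_tokens_to_sample: 1500,
      stream: true,
    };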

@@ -4,7 +4,6 @@ const { GoogleVertexAI } = require('langchain/llms/googlevertexai');
 const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
 const { ChatGoogleVertexAI } = require('langchain/chat_models/googlevertexai');
 const { AIMessage, HumanMessage, SystemMessage } = require('langchain/schema');
-const { encodeAndFormat } = require('~/server/services/Files/images');
 const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
 const {
   validateVisionModel,
@@ -13,6 +12,7 @@ const {
   EModelEndpoint,
   AuthKeys,
 } = require('librechat-data-provider');
+const { encodeAndFormat } = require('~/server/services/Files/images');
 const { getModelMaxTokens } = require('~/utils');
 const { formatMessage } = require('./prompts');
 const BaseClient = require('./BaseClient');
@@ -124,18 +124,28 @@ class GoogleClient extends BaseClient {
       // stop: modelOptions.stop // no stop method for now
     };

-    if (this.options.attachments) {
-      this.modelOptions.model = 'gemini-pro-vision';
+    /* Validate vision request */
+    this.defaultVisionModel = this.options.visionModel ?? 'gemini-pro-vision';
+    const availableModels = this.options.modelsConfig?.[EModelEndpoint.google];
+    this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
+
+    if (
+      this.options.attachments &&
+      availableModels?.includes(this.defaultVisionModel) &&
+      !this.isVisionModel
+    ) {
+      this.modelOptions.model = this.defaultVisionModel;
+      this.isVisionModel = true;
     }

-    // TODO: as of 12/14/23, only gemini models are "Generative AI" models provided by Google
-    this.isGenerativeModel = this.modelOptions.model.includes('gemini');
-    this.isVisionModel = validateVisionModel(this.modelOptions.model);
-    const { isGenerativeModel } = this;
     if (this.isVisionModel && !this.options.attachments) {
       this.modelOptions.model = 'gemini-pro';
       this.isVisionModel = false;
     }

+    // TODO: as of 12/14/23, only gemini models are "Generative AI" models provided by Google
+    this.isGenerativeModel = this.modelOptions.model.includes('gemini');
+    const { isGenerativeModel } = this;
     this.isChatModel = !isGenerativeModel && this.modelOptions.model.includes('chat');
     const { isChatModel } = this;
     this.isTextModel =
@@ -91,6 +91,7 @@ class OpenAIClient extends BaseClient {
       };
     }

+    this.defaultVisionModel = this.options.visionModel ?? 'gpt-4-vision-preview';
     this.checkVisionRequest(this.options.attachments);

     const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
@@ -225,10 +226,12 @@
    * @param {Array<Promise<MongoFile[]> | MongoFile[]> | Record<string, MongoFile[]>} attachments
    */
   checkVisionRequest(attachments) {
-    this.isVisionModel = validateVisionModel(this.modelOptions.model);
+    const availableModels = this.options.modelsConfig?.[this.options.endpoint];
+    this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });

-    if (attachments && !this.isVisionModel) {
-      this.modelOptions.model = 'gpt-4-vision-preview';
+    const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
+    if (attachments && visionModelAvailable && !this.isVisionModel) {
+      this.modelOptions.model = this.defaultVisionModel;
       this.isVisionModel = true;
     }

@@ -1,3 +1,4 @@
+const { EModelEndpoint } = require('librechat-data-provider');
 const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');

 /**
@@ -7,10 +8,16 @@ const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
  * @param {Object} params.message - The message object to format.
  * @param {string} [params.message.role] - The role of the message sender (must be 'user').
  * @param {string} [params.message.content] - The text content of the message.
+ * @param {EModelEndpoint} [params.endpoint] - Identifier for endpoint-specific handling.
  * @param {Array<string>} [params.image_urls] - The image_urls to attach to the message.
  * @returns {(Object)} - The formatted message.
  */
-const formatVisionMessage = ({ message, image_urls }) => {
+const formatVisionMessage = ({ message, image_urls, endpoint }) => {
+  if (endpoint === EModelEndpoint.anthropic) {
+    message.content = [...image_urls, { type: 'text', text: message.content }];
+    return message;
+  }
+
   message.content = [{ type: 'text', text: message.content }, ...image_urls];

   return message;
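Note the ordering difference the new branch introduces: for Anthropic the image parts are placed before the text part, while the default (OpenAI-style) path keeps text first. A hedged example call (the image part is abbreviated):

    const formatted = formatVisionMessage({
      message: { role: 'user', content: 'What is in this image?' },
      image_urls: [{ type: 'image', source: { type: 'base64', media_type: 'image/webp', data: '...' } }],
      endpoint: 'anthropic',
    });
    // formatted.content === [imagePart, { type: 'text', text: 'What is in this image?' }]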
@@ -29,10 +36,11 @@ const formatVisionMessage = ({ message, image_urls }) => {
  * @param {Array<string>} [params.message.image_urls] - The image_urls attached to the message for Vision API.
  * @param {string} [params.userName] - The name of the user.
  * @param {string} [params.assistantName] - The name of the assistant.
+ * @param {string} [params.endpoint] - Identifier for endpoint-specific handling.
  * @param {boolean} [params.langChain=false] - Whether to return a LangChain message object.
  * @returns {(Object|HumanMessage|AIMessage|SystemMessage)} - The formatted message.
  */
-const formatMessage = ({ message, userName, assistantName, langChain = false }) => {
+const formatMessage = ({ message, userName, assistantName, endpoint, langChain = false }) => {
   let { role: _role, _name, sender, text, content: _content, lc_id } = message;
   if (lc_id && lc_id[2] && !langChain) {
     const roleMapping = {
@@ -51,7 +59,11 @@ const formatMessage = ({ message, userName, assistantName, langChain = false })

   const { image_urls } = message;
   if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
-    return formatVisionMessage({ message: formattedMessage, image_urls: message.image_urls });
+    return formatVisionMessage({
+      message: formattedMessage,
+      image_urls: message.image_urls,
+      endpoint,
+    });
   }

   if (_name) {

@@ -43,9 +43,10 @@ transactionSchema.statics.create = async function (transactionData) {
   ).lean();

   return {
+    rate: transaction.rate,
     user: transaction.user.toString(),
-    [transaction.tokenType]: transaction.tokenValue,
     balance: updatedBalance.tokenCredits,
+    [transaction.tokenType]: transaction.tokenValue,
   };
 };

@@ -51,7 +51,9 @@ const spendTokens = async (txData, tokenUsage) => {
     logger.debug('[spendTokens] Transaction data record against balance:', {
       user: prompt.user,
       prompt: prompt.prompt,
+      promptRate: prompt.rate,
       completion: completion.completion,
+      completionRate: completion.rate,
       balance: completion.balance,
     });
   } catch (err) {

@@ -13,6 +13,12 @@ const tokenValues = {
   'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
   'gpt-4-1106': { prompt: 10, completion: 30 },
   'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
+  'claude-3-opus': { prompt: 15, completion: 75 },
+  'claude-3-sonnet': { prompt: 3, completion: 15 },
+  'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
+  'claude-2.1': { prompt: 8, completion: 24 },
+  'claude-2': { prompt: 8, completion: 24 },
+  'claude-': { prompt: 0.8, completion: 2.4 },
 };

 /**
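These multipliers appear to mirror the providers' list prices in USD per million tokens (`claude-3-opus` at 15/75 matches Anthropic's $15 prompt / $75 completion per 1M tokens at the time of this commit). A back-of-the-envelope check, assuming that unit:

    const rates = { prompt: 3, completion: 15 }; // claude-3-sonnet
    const cost = (1200 / 1e6) * rates.prompt + (300 / 1e6) * rates.completion;
    // 0.0036 + 0.0045 = 0.0081 -> about $0.0081 for a 1,200/300-token exchange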
@@ -46,6 +52,8 @@ const getValueKey = (model, endpoint) => {
     return '32k';
   } else if (modelName.includes('gpt-4')) {
     return '8k';
+  } else if (tokenValues[modelName]) {
+    return modelName;
   }

   return undefined;

@@ -27,7 +27,7 @@
   },
   "homepage": "https://librechat.ai",
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.5.4",
+    "@anthropic-ai/sdk": "^0.16.1",
     "@azure/search-documents": "^12.0.0",
     "@keyv/mongo": "^2.1.8",
     "@keyv/redis": "^2.8.1",

@@ -1,7 +1,7 @@
 const { getResponseSender, Constants } = require('librechat-data-provider');
-const { sendMessage, createOnProgress } = require('~/server/utils');
-const { saveMessage, getConvoTitle, getConvo } = require('~/models');
 const { createAbortController, handleAbortError } = require('~/server/middleware');
+const { sendMessage, createOnProgress } = require('~/server/utils');
+const { saveMessage, getConvo } = require('~/models');
 const { logger } = require('~/config');

 const AskController = async (req, res, next, initializeClient, addTitle) => {
@@ -134,16 +134,21 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {

   response.endpoint = endpointOption.endpoint;

+  const conversation = await getConvo(user, conversationId);
+  conversation.title =
+    conversation && !conversation.title ? null : conversation?.title || 'New Chat';
+
   if (client.options.attachments) {
     userMessage.files = client.options.attachments;
+    conversation.model = endpointOption.modelOptions.model;
     delete userMessage.image_urls;
   }

   if (!abortController.signal.aborted) {
     sendMessage(res, {
-      title: await getConvoTitle(user, conversationId),
       final: true,
-      conversation: await getConvo(user, conversationId),
+      conversation,
+      title: conversation.title,
       requestMessage: userMessage,
       responseMessage: response,
     });

@@ -1,7 +1,7 @@
 const { getResponseSender } = require('librechat-data-provider');
-const { sendMessage, createOnProgress } = require('~/server/utils');
-const { saveMessage, getConvoTitle, getConvo } = require('~/models');
 const { createAbortController, handleAbortError } = require('~/server/middleware');
+const { sendMessage, createOnProgress } = require('~/server/utils');
+const { saveMessage, getConvo } = require('~/models');
 const { logger } = require('~/config');

 const EditController = async (req, res, next, initializeClient) => {
@@ -131,11 +131,19 @@ const EditController = async (req, res, next, initializeClient) => {
     response = { ...response, ...metadata };
   }

+  const conversation = await getConvo(user, conversationId);
+  conversation.title =
+    conversation && !conversation.title ? null : conversation?.title || 'New Chat';
+
+  if (client.options.attachments) {
+    conversation.model = endpointOption.modelOptions.model;
+  }
+
   if (!abortController.signal.aborted) {
     sendMessage(res, {
-      title: await getConvoTitle(user, conversationId),
       final: true,
-      conversation: await getConvo(user, conversationId),
+      conversation,
+      title: conversation.title,
       requestMessage: userMessage,
       responseMessage: response,
     });

@@ -2,6 +2,16 @@ const { CacheKeys } = require('librechat-data-provider');
 const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config');
 const { getLogStores } = require('~/cache');

+const getModelsConfig = async (req) => {
+  const cache = getLogStores(CacheKeys.CONFIG_STORE);
+  let modelsConfig = await cache.get(CacheKeys.MODELS_CONFIG);
+  if (!modelsConfig) {
+    modelsConfig = await loadModels(req);
+  }
+
+  return modelsConfig;
+};
+
 /**
  * Loads the models from the config.
  * @param {Express.Request} req - The Express request object.
@@ -27,4 +37,4 @@ async function modelController(req, res) {
   res.send(modelConfig);
 }

-module.exports = { modelController, loadModels };
+module.exports = { modelController, loadModels, getModelsConfig };

@@ -1,11 +1,12 @@
 const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
+const { getModelsConfig } = require('~/server/controllers/ModelController');
 const { processFiles } = require('~/server/services/Files/process');
 const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
 const anthropic = require('~/server/services/Endpoints/anthropic');
-const assistant = require('~/server/services/Endpoints/assistant');
 const openAI = require('~/server/services/Endpoints/openAI');
 const custom = require('~/server/services/Endpoints/custom');
 const google = require('~/server/services/Endpoints/google');
+const assistant = require('~/server/services/Endpoints/assistant');

 const buildFunction = {
   [EModelEndpoint.openAI]: openAI.buildOptions,
@@ -17,7 +18,7 @@ const buildFunction = {
   [EModelEndpoint.assistants]: assistant.buildOptions,
 };

-function buildEndpointOption(req, res, next) {
+async function buildEndpointOption(req, res, next) {
   const { endpoint, endpointType } = req.body;
   const parsedBody = parseConvo({ endpoint, endpointType, conversation: req.body });
   req.body.endpointOption = buildFunction[endpointType ?? endpoint](
@@ -25,6 +26,10 @@ async function buildEndpointOption(req, res, next) {
     parsedBody,
     endpointType,
   );

+  const modelsConfig = await getModelsConfig(req);
+  req.body.endpointOption.modelsConfig = modelsConfig;
+
   if (req.body.files) {
     // hold the promise
     req.body.endpointOption.attachments = processFiles(req.body.files);

@@ -1,8 +1,7 @@
-const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
-const { loadModels } = require('~/server/controllers/ModelController');
-const { logViolation, getLogStores } = require('~/cache');
+const { ViolationTypes } = require('librechat-data-provider');
+const { getModelsConfig } = require('~/server/controllers/ModelController');
 const { handleError } = require('~/server/utils');
+const { logViolation } = require('~/cache');

 /**
  * Validates the model of the request.
  *
@@ -17,11 +16,7 @@ const validateModel = async (req, res, next) => {
     return handleError(res, { text: 'Model not provided' });
   }

-  const cache = getLogStores(CacheKeys.CONFIG_STORE);
-  let modelsConfig = await cache.get(CacheKeys.MODELS_CONFIG);
-  if (!modelsConfig) {
-    modelsConfig = await loadModels(req);
-  }
+  const modelsConfig = await getModelsConfig(req);

   if (!modelsConfig) {
     return handleError(res, { text: 'Models not loaded' });

@@ -1,9 +1,10 @@
 const buildOptions = (endpoint, parsedBody) => {
-  const { modelLabel, promptPrefix, ...rest } = parsedBody;
+  const { modelLabel, promptPrefix, resendImages, ...rest } = parsedBody;
   const endpointOption = {
     endpoint,
     modelLabel,
     promptPrefix,
+    resendImages,
     modelOptions: {
       ...rest,
     },

@@ -11,12 +11,13 @@ const { logger } = require('~/config');
  * Converts an image file to the WebP format. The function first resizes the image based on the specified
  * resolution.
  *
- * @param {Express.Request} req - The request object from Express. It should have a `user` property with an `id`
+ * @param {Object} params - The params object.
+ * @param {Express.Request} params.req - The request object from Express. It should have a `user` property with an `id`
  * representing the user, and an `app.locals.paths` object with an `imageOutput` path.
- * @param {Express.Multer.File} file - The file object, which is part of the request. The file object should
+ * @param {Express.Multer.File} params.file - The file object, which is part of the request. The file object should
  * have a `path` property that points to the location of the uploaded file.
- * @param {string} [resolution='high'] - Optional. The desired resolution for the image resizing. Default is 'high'.
+ * @param {EModelEndpoint} params.endpoint - Identifier for endpoint-specific handling.
+ * @param {string} [params.resolution='high'] - Optional. The desired resolution for the image resizing. Default is 'high'.
  *
  * @returns {Promise<{ filepath: string, bytes: number, width: number, height: number}>}
  * A promise that resolves to an object containing:
@@ -25,10 +26,14 @@ const { logger } = require('~/config');
  * - width: The width of the converted image.
  * - height: The height of the converted image.
  */
-async function uploadImageToFirebase(req, file, resolution = 'high') {
+async function uploadImageToFirebase({ req, file, endpoint, resolution = 'high' }) {
   const inputFilePath = file.path;
   const inputBuffer = await fs.promises.readFile(inputFilePath);
-  const { buffer: resizedBuffer, width, height } = await resizeImageBuffer(inputBuffer, resolution);
+  const {
+    buffer: resizedBuffer,
+    width,
+    height,
+  } = await resizeImageBuffer(inputBuffer, resolution, endpoint);
   const extension = path.extname(inputFilePath);
   const userId = req.user.id;

@@ -13,12 +13,13 @@ const { updateFile } = require('~/models/File');
  * it converts the image to WebP format before saving.
  *
  * The original image is deleted after conversion.
  *
- * @param {Object} req - The request object from Express. It should have a `user` property with an `id`
+ * @param {Object} params - The params object.
+ * @param {Object} params.req - The request object from Express. It should have a `user` property with an `id`
  * representing the user, and an `app.locals.paths` object with an `imageOutput` path.
- * @param {Express.Multer.File} file - The file object, which is part of the request. The file object should
+ * @param {Express.Multer.File} params.file - The file object, which is part of the request. The file object should
  * have a `path` property that points to the location of the uploaded file.
- * @param {string} [resolution='high'] - Optional. The desired resolution for the image resizing. Default is 'high'.
+ * @param {EModelEndpoint} params.endpoint - Identifier for endpoint-specific handling.
+ * @param {string} [params.resolution='high'] - Optional. The desired resolution for the image resizing. Default is 'high'.
  *
  * @returns {Promise<{ filepath: string, bytes: number, width: number, height: number}>}
  * A promise that resolves to an object containing:
@@ -27,10 +28,14 @@ const { updateFile } = require('~/models/File');
  * - width: The width of the converted image.
  * - height: The height of the converted image.
  */
-async function uploadLocalImage(req, file, resolution = 'high') {
+async function uploadLocalImage({ req, file, endpoint, resolution = 'high' }) {
   const inputFilePath = file.path;
   const inputBuffer = await fs.promises.readFile(inputFilePath);
-  const { buffer: resizedBuffer, width, height } = await resizeImageBuffer(inputBuffer, resolution);
+  const {
+    buffer: resizedBuffer,
+    width,
+    height,
+  } = await resizeImageBuffer(inputBuffer, resolution, endpoint);
   const extension = path.extname(inputFilePath);

   const { imageOutput } = req.app.locals.paths;

@@ -23,6 +23,8 @@ async function fetchImageToBase64(url) {
   }
 }

+const base64Only = new Set([EModelEndpoint.google, EModelEndpoint.anthropic]);
+
 /**
  * Encodes and formats the given files.
  * @param {Express.Request} req - The request object.
@@ -50,7 +52,7 @@ async function encodeAndFormat(req, files, endpoint) {
     encodingMethods[source] = prepareImagePayload;

-    /* Google doesn't support passing URLs to payload */
-    if (source !== FileSources.local && endpoint === EModelEndpoint.google) {
+    /* Google and Anthropic don't support passing URLs in the payload */
+    if (source !== FileSources.local && base64Only.has(endpoint)) {
       const [_file, imageURL] = await prepareImagePayload(req, file);
       promises.push([_file, await fetchImageToBase64(imageURL)]);
       continue;
@@ -81,6 +83,14 @@ async function encodeAndFormat(req, files, endpoint) {

     if (endpoint && endpoint === EModelEndpoint.google) {
       imagePart.image_url = imagePart.image_url.url;
+    } else if (endpoint && endpoint === EModelEndpoint.anthropic) {
+      imagePart.type = 'image';
+      imagePart.source = {
+        type: 'base64',
+        media_type: file.type,
+        data: imageContent,
+      };
+      delete imagePart.image_url;
     }

     result.image_urls.push(imagePart);
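For Anthropic, the image part is rewritten into the Messages API content-block shape: a base64 `source` object instead of OpenAI's `image_url`. A representative value produced by the branch above (base64 data elided):

    const imagePart = {
      type: 'image',
      source: {
        type: 'base64',
        media_type: 'image/webp',
        data: '<base64-encoded image>',
      },
    };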

@@ -1,4 +1,5 @@
 const sharp = require('sharp');
+const { EModelEndpoint } = require('librechat-data-provider');

 /**
  * Resizes an image from a given buffer based on the specified resolution.
@@ -7,13 +8,14 @@ const sharp = require('sharp');
  * @param {'low' | 'high'} resolution - The resolution to resize the image to.
  *   'low' for a maximum of 512x512 resolution,
  *   'high' for a maximum of 768x2000 resolution.
+ * @param {EModelEndpoint} endpoint - Identifier for endpoint-specific handling.
  * @returns {Promise<{buffer: Buffer, width: number, height: number}>} An object containing the resized image buffer and its dimensions.
  * @throws Will throw an error if the resolution parameter is invalid.
  */
-async function resizeImageBuffer(inputBuffer, resolution) {
+async function resizeImageBuffer(inputBuffer, resolution, endpoint) {
   const maxLowRes = 512;
   const maxShortSideHighRes = 768;
-  const maxLongSideHighRes = 2000;
+  const maxLongSideHighRes = endpoint === EModelEndpoint.anthropic ? 1568 : 2000;

   let newWidth, newHeight;
   let resizeOptions = { fit: 'inside', withoutEnlargement: true };
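The 1568px figure matches Anthropic's documented maximum image dimension, so oversized uploads are scaled down before they ever reach the API. A hedged usage sketch (the input buffer is assumed to come from an earlier `fs` read):

    // For Anthropic the long side is capped at 1568px ('high' also caps the
    // short side at 768px); aspect ratio is preserved by fit: 'inside'.
    const { buffer, width, height } = await resizeImageBuffer(inputBuffer, 'high', 'anthropic');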

@@ -184,8 +184,8 @@ const processFileURL = async ({ fileStrategy, userId, URL, fileName, basePath, c
 const processImageFile = async ({ req, res, file, metadata }) => {
   const source = req.app.locals.fileStrategy;
   const { handleImageUpload } = getStrategyFunctions(source);
-  const { file_id, temp_file_id } = metadata;
-  const { filepath, bytes, width, height } = await handleImageUpload(req, file);
+  const { file_id, temp_file_id, endpoint } = metadata;
+  const { filepath, bytes, width, height } = await handleImageUpload({ req, file, endpoint });
   const result = await createFile(
     {
       user: req.user.id,

@@ -75,8 +75,12 @@ const googleModels = {
 };

 const anthropicModels = {
-  'claude-2.1': 200000,
   'claude-': 100000,
   'claude-2': 100000,
+  'claude-2.1': 200000,
+  'claude-3-haiku': 200000,
+  'claude-3-sonnet': 200000,
+  'claude-3-opus': 200000,
 };

 // Order is important here: by model series and context size (gpt-4 then gpt-3, ascending)

@@ -6,10 +6,11 @@ import {
   Input,
   Label,
   Slider,
-  InputNumber,
+  Switch,
   HoverCard,
-  HoverCardTrigger,
+  InputNumber,
+  SelectDropDown,
+  HoverCardTrigger,
 } from '~/components/ui';
 import OptionHover from './OptionHover';
 import { cn, defaultTextProps, optionText, removeFocusOutlines } from '~/utils/';
@@ -20,8 +21,16 @@ export default function Settings({ conversation, setOption, models, readonly }:
   if (!conversation) {
     return null;
   }
-  const { model, modelLabel, promptPrefix, temperature, topP, topK, maxOutputTokens } =
-    conversation;
+  const {
+    model,
+    modelLabel,
+    promptPrefix,
+    temperature,
+    topP,
+    topK,
+    maxOutputTokens,
+    resendImages,
+  } = conversation;

   const setModel = setOption('model');
   const setModelLabel = setOption('modelLabel');
@@ -30,6 +39,7 @@ export default function Settings({ conversation, setOption, models, readonly }:
   const setTopP = setOption('topP');
   const setTopK = setOption('topK');
   const setMaxOutputTokens = setOption('maxOutputTokens');
+  const setResendImages = setOption('resendImages');

   return (
     <div className="grid grid-cols-5 gap-6">
@@ -244,6 +254,27 @@ export default function Settings({ conversation, setOption, models, readonly }:
           side={ESide.Left}
         />
       </HoverCard>
+      <HoverCard openDelay={500}>
+        <HoverCardTrigger className="grid w-full">
+          <div className="flex justify-between">
+            <Label htmlFor="resend-images" className="text-left text-sm font-medium">
+              {localize('com_endpoint_plug_resend_images')}{' '}
+            </Label>
+            <Switch
+              id="resend-images"
+              checked={resendImages ?? false}
+              onCheckedChange={(checked: boolean) => setResendImages(checked)}
+              disabled={readonly}
+              className="flex"
+            />
+            <OptionHover
+              endpoint={conversation?.endpoint ?? ''}
+              type="resend"
+              side={ESide.Bottom}
+            />
+          </div>
+        </HoverCardTrigger>
+      </HoverCard>
     </div>
   </div>
 );

@@ -25,6 +25,7 @@ const types = {
     topp: 'com_endpoint_anthropic_topp',
     topk: 'com_endpoint_anthropic_topk',
     maxoutputtokens: 'com_endpoint_anthropic_maxoutputtokens',
+    resend: openAI.resend,
   },
   google: {
     temp: 'com_endpoint_google_temp',

@@ -309,9 +309,8 @@ export default function useSSE(submission: TSubmission | null, index = 0) {
       ...conversation,
     };

     // Revert to previous model if the model was auto-switched by backend due to message attachments
-    if (conversation.model?.includes('vision') && !submissionConvo.model?.includes('vision')) {
-      update.model = submissionConvo?.model;
+    if (prevState?.model && prevState.model !== submissionConvo.model) {
+      update.model = prevState.model;
     }

     setStorage(update);

package-lock.json (generated)
@@ -41,7 +41,7 @@
       "version": "0.6.10",
       "license": "ISC",
       "dependencies": {
-        "@anthropic-ai/sdk": "^0.5.4",
+        "@anthropic-ai/sdk": "^0.16.1",
         "@azure/search-documents": "^12.0.0",
         "@keyv/mongo": "^2.1.8",
         "@keyv/redis": "^2.8.1",
@@ -280,9 +280,9 @@
       }
     },
     "node_modules/@anthropic-ai/sdk": {
-      "version": "0.5.10",
-      "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.5.10.tgz",
-      "integrity": "sha512-P8xrIuTUO/6wDzcjQRUROXp4WSqtngbXaE4GpEu0PhEmnq/1Q8vbF1s0o7W07EV3j8zzRoyJxAKovUJtNXH7ew==",
+      "version": "0.16.1",
+      "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.16.1.tgz",
+      "integrity": "sha512-vHgvfWEyFy5ktqam56Nrhv8MVa7EJthsRYNi+1OrFFfyrj9tR2/aji1QbVbQjYU/pPhPFaYrdCEC/MLPFrmKwA==",
       "dependencies": {
         "@types/node": "^18.11.18",
         "@types/node-fetch": "^2.6.4",
@@ -291,7 +291,8 @@
         "digest-fetch": "^1.3.0",
         "form-data-encoder": "1.7.2",
         "formdata-node": "^4.3.2",
-        "node-fetch": "^2.6.7"
+        "node-fetch": "^2.6.7",
+        "web-streams-polyfill": "^3.2.1"
       }
     },
     "node_modules/@anthropic-ai/sdk/node_modules/@types/node": {

@@ -1,6 +1,6 @@
 {
   "name": "librechat-data-provider",
-  "version": "0.4.6",
+  "version": "0.4.7",
   "description": "data services for librechat apps",
   "main": "dist/index.js",
   "module": "dist/index.es.js",

@@ -241,6 +241,8 @@ export const defaultModels = {
     'code-bison-32k',
   ],
   [EModelEndpoint.anthropic]: [
+    'claude-3-opus-20240229',
+    'claude-3-sonnet-20240229',
     'claude-2.1',
     'claude-2',
     'claude-1.2',
@@ -301,21 +303,31 @@ export const modularEndpoints = new Set<EModelEndpoint | string>([

 export const supportsBalanceCheck = {
   [EModelEndpoint.openAI]: true,
+  [EModelEndpoint.anthropic]: true,
   [EModelEndpoint.azureOpenAI]: true,
   [EModelEndpoint.gptPlugins]: true,
   [EModelEndpoint.custom]: true,
 };

-export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision'];
+export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision', 'claude-3'];

-export function validateVisionModel(
-  model: string | undefined,
-  additionalModels: string[] | undefined = [],
-) {
+export function validateVisionModel({
+  model,
+  additionalModels = [],
+  availableModels,
+}: {
+  model: string;
+  additionalModels?: string[];
+  availableModels?: string[];
+}) {
   if (!model) {
     return false;
   }

+  if (availableModels && !availableModels.includes(model)) {
+    return false;
+  }
+
   return visionModels.concat(additionalModels).some((visionModel) => model.includes(visionModel));
 }
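Matching remains substring-based, so the single `'claude-3'` entry covers every dated Claude 3 release, while `availableModels` (when supplied) acts as an allow-list checked first. A few illustrative calls:

    validateVisionModel({ model: 'claude-3-sonnet-20240229' }); // true: contains 'claude-3'
    validateVisionModel({ model: 'claude-3-opus-20240229', availableModels: ['claude-2'] }); // false: not in allow-list
    validateVisionModel({ model: 'gpt-3.5-turbo' }); // false: no vision match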
@@ -8,6 +8,7 @@ export const supportsFiles = {
   [EModelEndpoint.google]: true,
   [EModelEndpoint.assistants]: true,
   [EModelEndpoint.azureOpenAI]: true,
+  [EModelEndpoint.anthropic]: true,
   [EModelEndpoint.custom]: true,
 };

@@ -391,6 +391,7 @@ export const anthropicSchema = tConversationSchema
     maxOutputTokens: true,
     topP: true,
     topK: true,
+    resendImages: true,
   })
   .transform((obj) => ({
     ...obj,
@@ -401,6 +402,7 @@ export const anthropicSchema = tConversationSchema
     maxOutputTokens: obj.maxOutputTokens ?? 4000,
     topP: obj.topP ?? 0.7,
     topK: obj.topK ?? 5,
+    resendImages: obj.resendImages ?? false,
   }))
   .catch(() => ({
     model: 'claude-1',
@@ -410,6 +412,7 @@ export const anthropicSchema = tConversationSchema
     maxOutputTokens: 4000,
     topP: 0.7,
     topK: 5,
+    resendImages: false,
   }));

 export const chatGPTBrowserSchema = tConversationSchema
@@ -568,6 +571,7 @@ export const compactAnthropicSchema = tConversationSchema
     maxOutputTokens: true,
     topP: true,
     topK: true,
+    resendImages: true,
   })
   .transform((obj) => {
     const newObj: Partial<TConversation> = { ...obj };
@@ -583,6 +587,9 @@ export const compactAnthropicSchema = tConversationSchema
   if (newObj.topK === 5) {
     delete newObj.topK;
   }
+  if (newObj.resendImages !== true) {
+    delete newObj.resendImages;
+  }

   return removeNullishValues(newObj);
 })