diff --git a/.env.example b/.env.example
index 71185686ca..e235b6cbb9 100644
--- a/.env.example
+++ b/.env.example
@@ -88,7 +88,7 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
-# ANTHROPIC_MODELS=claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
+# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
@@ -175,7 +175,7 @@ GOOGLE_KEY=user_provided
#============#
OPENAI_API_KEY=user_provided
-# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
+# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,gpt-4.5-preview,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
@@ -209,12 +209,6 @@ ASSISTANTS_API_KEY=user_provided
# More info, including how to enable use of Assistants with Azure here:
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
-#============#
-# OpenRouter #
-#============#
-# !!!Warning: Use the variable above instead of this one. Using this one will override the OpenAI endpoint
-# OPENROUTER_API_KEY=
-
#============#
# Plugins #
#============#
@@ -254,6 +248,13 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# DALLE3_AZURE_API_VERSION=
# DALLE2_AZURE_API_VERSION=
+# Flux
+#-----------------
+FLUX_API_BASE_URL=https://api.us1.bfl.ai
+# FLUX_API_BASE_URL=https://api.bfl.ml
+
+# Get your API key at https://api.us1.bfl.ai/auth/profile
+# FLUX_API_KEY=
# Google
#-----------------
diff --git a/.github/workflows/backend-review.yml b/.github/workflows/backend-review.yml
index 5bc3d3b2db..b7bccecae8 100644
--- a/.github/workflows/backend-review.yml
+++ b/.github/workflows/backend-review.yml
@@ -39,6 +39,9 @@ jobs:
- name: Install MCP Package
run: npm run build:mcp
+ - name: Install Data Schemas Package
+ run: npm run build:data-schemas
+
- name: Create empty auth.json file
run: |
mkdir -p api/data
@@ -61,4 +64,7 @@ jobs:
run: cd api && npm run test:ci
- name: Run librechat-data-provider unit tests
- run: cd packages/data-provider && npm run test:ci
\ No newline at end of file
+ run: cd packages/data-provider && npm run test:ci
+
+ - name: Run librechat-mcp unit tests
+ run: cd packages/mcp && npm run test:ci
\ No newline at end of file
diff --git a/.github/workflows/data-schemas.yml b/.github/workflows/data-schemas.yml
new file mode 100644
index 0000000000..fee72fbe02
--- /dev/null
+++ b/.github/workflows/data-schemas.yml
@@ -0,0 +1,58 @@
+name: Publish `@librechat/data-schemas` to NPM
+
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'packages/data-schemas/package.json'
+ workflow_dispatch:
+ inputs:
+ reason:
+ description: 'Reason for manual trigger'
+ required: false
+ default: 'Manual publish requested'
+
+jobs:
+ build-and-publish:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Use Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18.x'
+
+ - name: Install dependencies
+ run: cd packages/data-schemas && npm ci
+
+ - name: Build
+ run: cd packages/data-schemas && npm run build
+
+ - name: Set up npm authentication
+ run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc
+
+ - name: Check version change
+ id: check
+ working-directory: packages/data-schemas
+ run: |
+ PACKAGE_VERSION=$(node -p "require('./package.json').version")
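+          # npm view exits non-zero for a never-published package; fall back to 0.0.0 so the first publish proceeds.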
+ PUBLISHED_VERSION=$(npm view @librechat/data-schemas version 2>/dev/null || echo "0.0.0")
+ if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then
+ echo "No version change, skipping publish"
+ echo "skip=true" >> $GITHUB_OUTPUT
+ else
+ echo "Version changed, proceeding with publish"
+ echo "skip=false" >> $GITHUB_OUTPUT
+ fi
+
+ - name: Pack package
+ if: steps.check.outputs.skip != 'true'
+ working-directory: packages/data-schemas
+ run: npm pack
+
+ - name: Publish
+ if: steps.check.outputs.skip != 'true'
+ working-directory: packages/data-schemas
+ run: npm publish *.tgz --access public
\ No newline at end of file
diff --git a/.github/workflows/generate-release-changelog-pr.yml b/.github/workflows/generate-release-changelog-pr.yml
index c3bceae9de..004431e577 100644
--- a/.github/workflows/generate-release-changelog-pr.yml
+++ b/.github/workflows/generate-release-changelog-pr.yml
@@ -84,11 +84,11 @@ jobs:
with:
token: ${{ secrets.GITHUB_TOKEN }}
sign-commits: true
- commit-message: "chore: update CHANGELOG for release ${GITHUB_REF##*/}"
+ commit-message: "chore: update CHANGELOG for release ${{ github.ref_name }}"
base: main
- branch: "changelog/${GITHUB_REF##*/}"
+ branch: "changelog/${{ github.ref_name }}"
reviewers: danny-avila
- title: "chore: update CHANGELOG for release ${GITHUB_REF##*/}"
+ title: "chore: update CHANGELOG for release ${{ github.ref_name }}"
body: |
**Description**:
- - This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${GITHUB_REF##*/} above previous releases.
\ No newline at end of file
+ - This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${{ github.ref_name }} above previous releases.
\ No newline at end of file
diff --git a/.github/workflows/unused-packages.yml b/.github/workflows/unused-packages.yml
index 7a95f9c5be..442e70e52c 100644
--- a/.github/workflows/unused-packages.yml
+++ b/.github/workflows/unused-packages.yml
@@ -1,6 +1,12 @@
name: Detect Unused NPM Packages
-on: [pull_request]
+on:
+ pull_request:
+ paths:
+ - 'package.json'
+ - 'package-lock.json'
+ - 'client/**'
+ - 'api/**'
jobs:
detect-unused-packages:
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000..292bd76f40
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,16 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+## [Unreleased]
+
+### ✨ New Features
+
+- 🪄 feat: Agent Artifacts by **@danny-avila** in [#5804](https://github.com/danny-avila/LibreChat/pull/5804)
+
+### ⚙️ Other Changes
+
+- 🌍 chore: Enforce i18next Language Keys by **@rubentalstra** in [#5803](https://github.com/danny-avila/LibreChat/pull/5803)
+- 🔃 refactor: Parent Message ID Handling on Error, Update Translations, Bump Agents by **@danny-avila** in [#5833](https://github.com/danny-avila/LibreChat/pull/5833)
+
+---
diff --git a/Dockerfile b/Dockerfile
index 46cabe6dff..d9113eb650 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-# v0.7.7-rc1
+# v0.7.7
# Base node image
FROM node:20-alpine AS node
diff --git a/Dockerfile.multi b/Dockerfile.multi
index 570fbecf31..40721137bb 100644
--- a/Dockerfile.multi
+++ b/Dockerfile.multi
@@ -1,5 +1,5 @@
# Dockerfile.multi
-# v0.7.7-rc1
+# v0.7.7
# Base for all builds
FROM node:20-alpine AS base-min
@@ -11,6 +11,7 @@ RUN npm config set fetch-retry-maxtimeout 600000 && \
COPY package*.json ./
COPY packages/data-provider/package*.json ./packages/data-provider/
COPY packages/mcp/package*.json ./packages/mcp/
+COPY packages/data-schemas/package*.json ./packages/data-schemas/
COPY client/package*.json ./client/
COPY api/package*.json ./api/
@@ -32,6 +33,13 @@ COPY packages/mcp ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build
+# Build data-schemas
+FROM base AS data-schemas-build
+WORKDIR /app/packages/data-schemas
+COPY packages/data-schemas ./
+COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
+RUN npm run build
+
# Client build
FROM base AS client-build
WORKDIR /app/client
@@ -49,8 +57,9 @@ COPY api ./api
COPY config ./config
COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist
COPY --from=mcp-build /app/packages/mcp/dist ./packages/mcp/dist
+COPY --from=data-schemas-build /app/packages/data-schemas/dist ./packages/data-schemas/dist
COPY --from=client-build /app/client/dist ./client/dist
WORKDIR /app/api
EXPOSE 3080
ENV HOST=0.0.0.0
-CMD ["node", "server/index.js"]
+CMD ["node", "server/index.js"]
\ No newline at end of file
diff --git a/README.md b/README.md
index 2e662ac262..3e02c2cc08 100644
--- a/README.md
+++ b/README.md
@@ -81,7 +81,7 @@
- [Fork Messages & Conversations](https://www.librechat.ai/docs/features/fork) for Advanced Context control
- ๐ฌ **Multimodal & File Interactions**:
- - Upload and analyze images with Claude 3, GPT-4o, o1, Llama-Vision, and Gemini ๐ธ
+ - Upload and analyze images with Claude 3, GPT-4.5, GPT-4o, o1, Llama-Vision, and Gemini ๐ธ
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, & Google ๐๏ธ
- ๐ **Multilingual UI**:
@@ -197,6 +197,6 @@ We thank [Locize](https://locize.com) for their translation management tools tha
-
+
diff --git a/api/app/clients/AnthropicClient.js b/api/app/clients/AnthropicClient.js
index 522b6beb4f..19f4a3930a 100644
--- a/api/app/clients/AnthropicClient.js
+++ b/api/app/clients/AnthropicClient.js
@@ -7,7 +7,7 @@ const {
getResponseSender,
validateVisionModel,
} = require('librechat-data-provider');
-const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const { SplitStreamHandler: _Handler, GraphEvents } = require('@librechat/agents');
const {
truncateText,
formatMessage,
@@ -16,16 +16,31 @@ const {
parseParamFromPrompt,
createContextHandlers,
} = require('./prompts');
+const {
+ getClaudeHeaders,
+ configureReasoning,
+ checkPromptCacheSupport,
+} = require('~/server/services/Endpoints/anthropic/helpers');
const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
+const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const Tokenizer = require('~/server/services/Tokenizer');
+const { logger, sendEvent } = require('~/config');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
-const { logger } = require('~/config');
const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
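+
+/**
+ * Adapts the SplitStreamHandler from `@librechat/agents` to Anthropic stream
+ * chunks: text deltas arrive as `delta.text` (Messages API) or `completion`
+ * (legacy Completions API), and reasoning deltas as `delta.thinking`.
+ */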
+class SplitStreamHandler extends _Handler {
+ getDeltaContent(chunk) {
+ return (chunk?.delta?.text ?? chunk?.completion) || '';
+ }
+ getReasoningDelta(chunk) {
+ return chunk?.delta?.thinking || '';
+ }
+}
+
/** Helper function to introduce a delay before retrying */
function delayBeforeRetry(attempts, baseDelay = 1000) {
return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
@@ -68,6 +83,8 @@ class AnthropicClient extends BaseClient {
/** The key for the usage object's output tokens
* @type {string} */
this.outputTokensKey = 'output_tokens';
+ /** @type {SplitStreamHandler | undefined} */
+ this.streamHandler;
}
setOptions(options) {
@@ -97,9 +114,10 @@ class AnthropicClient extends BaseClient {
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
this.isClaude3 = modelMatch.includes('claude-3');
- this.isLegacyOutput = !modelMatch.includes('claude-3-5-sonnet');
- this.supportsCacheControl =
- this.options.promptCache && this.checkPromptCacheSupport(modelMatch);
+ this.isLegacyOutput = !(
+ /claude-3[-.]5-sonnet/.test(modelMatch) || /claude-3[-.]7/.test(modelMatch)
+ );
+ this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);
if (
this.isLegacyOutput &&
@@ -125,7 +143,7 @@ class AnthropicClient extends BaseClient {
this.options.endpointType ?? this.options.endpoint,
this.options.endpointTokenConfig,
) ??
- 1500;
+ anthropicSettings.maxOutputTokens.reset(this.modelOptions.model);
this.maxPromptTokens =
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
@@ -171,18 +189,9 @@ class AnthropicClient extends BaseClient {
options.baseURL = this.options.reverseProxyUrl;
}
- if (
- this.supportsCacheControl &&
- requestOptions?.model &&
- requestOptions.model.includes('claude-3-5-sonnet')
- ) {
- options.defaultHeaders = {
- 'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
- };
- } else if (this.supportsCacheControl) {
- options.defaultHeaders = {
- 'anthropic-beta': 'prompt-caching-2024-07-31',
- };
+ const headers = getClaudeHeaders(requestOptions?.model, this.supportsCacheControl);
+ if (headers) {
+ options.defaultHeaders = headers;
}
return new Anthropic(options);
@@ -668,29 +677,38 @@ class AnthropicClient extends BaseClient {
* @returns {Promise} The response from the Anthropic client.
*/
async createResponse(client, options, useMessages) {
- return useMessages ?? this.useMessages
+ return (useMessages ?? this.useMessages)
? await client.messages.create(options)
: await client.completions.create(options);
}
+ getMessageMapMethod() {
+ /**
+ * @param {TMessage} msg
+ */
+ return (msg) => {
+ if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
+ msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
+ }
+
+ return msg;
+ };
+ }
+
/**
- * @param {string} modelName
- * @returns {boolean}
+ * @param {string[]} [intermediateReply]
+ * @returns {string}
*/
- checkPromptCacheSupport(modelName) {
- const modelMatch = matchModelName(modelName, EModelEndpoint.anthropic);
- if (modelMatch.includes('claude-3-5-sonnet-latest')) {
- return false;
+ getStreamText(intermediateReply) {
+ if (!this.streamHandler) {
+ return intermediateReply?.join('') ?? '';
}
- if (
- modelMatch === 'claude-3-5-sonnet' ||
- modelMatch === 'claude-3-5-haiku' ||
- modelMatch === 'claude-3-haiku' ||
- modelMatch === 'claude-3-opus'
- ) {
- return true;
- }
- return false;
+
+ const reasoningText = this.streamHandler.reasoningTokens.join('');
+
+ const reasoningBlock = reasoningText.length > 0 ? `:::thinking\n${reasoningText}\n:::\n` : '';
+
+ return `${reasoningBlock}${this.streamHandler.tokens.join('')}`;
}
async sendCompletion(payload, { onProgress, abortController }) {
@@ -710,7 +728,6 @@ class AnthropicClient extends BaseClient {
user_id: this.user,
};
- let text = '';
const {
stream,
model,
@@ -721,22 +738,34 @@ class AnthropicClient extends BaseClient {
topK: top_k,
} = this.modelOptions;
- const requestOptions = {
+ let requestOptions = {
model,
stream: stream || true,
stop_sequences,
temperature,
metadata,
- top_p,
- top_k,
};
if (this.useMessages) {
requestOptions.messages = payload;
- requestOptions.max_tokens = maxOutputTokens || legacy.maxOutputTokens.default;
+ requestOptions.max_tokens =
+ maxOutputTokens || anthropicSettings.maxOutputTokens.reset(requestOptions.model);
} else {
requestOptions.prompt = payload;
- requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
+ requestOptions.max_tokens_to_sample = maxOutputTokens || legacy.maxOutputTokens.default;
+ }
+
+ requestOptions = configureReasoning(requestOptions, {
+ thinking: this.options.thinking,
+ thinkingBudget: this.options.thinkingBudget,
+ });
+
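+    // Anthropic rejects top_p/top_k alongside extended thinking, so sampling
+    // params are only set for non-3.7 models, or for 3.7 when thinking is off.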
+ if (!/claude-3[-.]7/.test(model)) {
+ requestOptions.top_p = top_p;
+ requestOptions.top_k = top_k;
+ } else if (requestOptions.thinking == null) {
+ requestOptions.topP = top_p;
+ requestOptions.topK = top_k;
}
if (this.systemMessage && this.supportsCacheControl === true) {
@@ -756,13 +785,17 @@ class AnthropicClient extends BaseClient {
}
logger.debug('[AnthropicClient]', { ...requestOptions });
+ this.streamHandler = new SplitStreamHandler({
+ accumulate: true,
+ runId: this.responseMessageId,
+ handlers: {
+ [GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event),
+ [GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event),
+ [GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event),
+ },
+ });
- const handleChunk = (currentChunk) => {
- if (currentChunk) {
- text += currentChunk;
- onProgress(currentChunk);
- }
- };
+ let intermediateReply = this.streamHandler.tokens;
const maxRetries = 3;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
@@ -783,22 +816,15 @@ class AnthropicClient extends BaseClient {
});
for await (const completion of response) {
- // Handle each completion as before
const type = completion?.type ?? '';
if (tokenEventTypes.has(type)) {
logger.debug(`[AnthropicClient] ${type}`, completion);
this[type] = completion;
}
- if (completion?.delta?.text) {
- handleChunk(completion.delta.text);
- } else if (completion.completion) {
- handleChunk(completion.completion);
- }
-
+ this.streamHandler.handle(completion);
await sleep(streamRate);
}
- // Successful processing, exit loop
break;
} catch (error) {
attempts += 1;
@@ -808,6 +834,10 @@ class AnthropicClient extends BaseClient {
if (attempts < maxRetries) {
await delayBeforeRetry(attempts, 350);
+ } else if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
+ return this.getStreamText();
+ } else if (intermediateReply.length > 0) {
+ return this.getStreamText(intermediateReply);
} else {
throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
}
@@ -823,8 +853,7 @@ class AnthropicClient extends BaseClient {
}
await processResponse.bind(this)();
-
- return text.trim();
+ return this.getStreamText(intermediateReply);
}
getSaveOptions() {
@@ -834,6 +863,8 @@ class AnthropicClient extends BaseClient {
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
promptCache: this.options.promptCache,
+ thinking: this.options.thinking,
+ thinkingBudget: this.options.thinkingBudget,
resendFiles: this.options.resendFiles,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
diff --git a/api/app/clients/BaseClient.js b/api/app/clients/BaseClient.js
index ebf3ca12d9..61b39a8f6d 100644
--- a/api/app/clients/BaseClient.js
+++ b/api/app/clients/BaseClient.js
@@ -5,10 +5,11 @@ const {
isAgentsEndpoint,
isParamEndpoint,
EModelEndpoint,
+ excludedKeys,
ErrorTypes,
Constants,
} = require('librechat-data-provider');
-const { getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
+const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const { truncateToolCallOutputs } = require('./prompts');
const checkBalance = require('~/models/checkBalance');
@@ -55,6 +56,10 @@ class BaseClient {
* Flag to determine if the client re-submitted the latest assistant message.
* @type {boolean | undefined} */
this.continued;
+ /**
+ * Flag to determine if the client has already fetched the conversation while saving new messages.
+ * @type {boolean | undefined} */
+ this.fetchedConvo;
/** @type {TMessage[]} */
this.currentMessages = [];
/** @type {import('librechat-data-provider').VisionModes | undefined} */
@@ -863,16 +868,39 @@ class BaseClient {
return { message: savedMessage };
}
- const conversation = await saveConvo(
- this.options.req,
- {
- conversationId: message.conversationId,
- endpoint: this.options.endpoint,
- endpointType: this.options.endpointType,
- ...endpointOptions,
- },
- { context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' },
- );
+ const fieldsToKeep = {
+ conversationId: message.conversationId,
+ endpoint: this.options.endpoint,
+ endpointType: this.options.endpointType,
+ ...endpointOptions,
+ };
+
+ const existingConvo =
+ this.fetchedConvo === true
+ ? null
+ : await getConvo(this.options.req?.user?.id, message.conversationId);
+
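+    // Fields present on the stored convo but missing from the current request
+    // (and not protected by excludedKeys) are collected so saveConvo can unset
+    // them, preventing stale settings from lingering on the conversation.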
+ const unsetFields = {};
+ if (existingConvo != null) {
+ this.fetchedConvo = true;
+ for (const key in existingConvo) {
+ if (!key) {
+ continue;
+ }
+ if (excludedKeys.has(key)) {
+ continue;
+ }
+
+ if (endpointOptions?.[key] === undefined) {
+ unsetFields[key] = 1;
+ }
+ }
+ }
+
+ const conversation = await saveConvo(this.options.req, fieldsToKeep, {
+ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
+ unsetFields,
+ });
return { message: savedMessage, conversation };
}
diff --git a/api/app/clients/GoogleClient.js b/api/app/clients/GoogleClient.js
index e816843ea7..58ee783d2a 100644
--- a/api/app/clients/GoogleClient.js
+++ b/api/app/clients/GoogleClient.js
@@ -827,7 +827,8 @@ class GoogleClient extends BaseClient {
let reply = '';
const { abortController } = options;
- const model = this.modelOptions.modelName ?? this.modelOptions.model ?? '';
+ const model =
+ this.options.titleModel ?? this.modelOptions.modelName ?? this.modelOptions.model ?? '';
const safetySettings = getSafetySettings(model);
if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) {
logger.debug('Identified titling model as GenAI version');
diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js
index 7bd7879dcf..9a89e34879 100644
--- a/api/app/clients/OpenAIClient.js
+++ b/api/app/clients/OpenAIClient.js
@@ -109,15 +109,15 @@ class OpenAIClient extends BaseClient {
const omniPattern = /\b(o1|o3)\b/i;
this.isOmni = omniPattern.test(this.modelOptions.model);
- const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
- if (OPENROUTER_API_KEY && !this.azure) {
- this.apiKey = OPENROUTER_API_KEY;
- this.useOpenRouter = true;
- }
-
+ const { OPENAI_FORCE_PROMPT } = process.env ?? {};
const { reverseProxyUrl: reverseProxy } = this.options;
- if (!this.useOpenRouter && reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) {
+ if (
+ !this.useOpenRouter &&
+ ((reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) ||
+ (this.options.endpoint &&
+ this.options.endpoint.toLowerCase().includes(KnownEndpoints.openrouter)))
+ ) {
this.useOpenRouter = true;
}
@@ -303,7 +303,9 @@ class OpenAIClient extends BaseClient {
}
getEncoding() {
- return this.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
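+    // Newer GPT-4 variants (gpt-4o, gpt-4.5) use the o200k_base encoding;
+    // hyphenated or bare gpt-4 models (gpt-4, gpt-4-turbo) stay on cl100k_base.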
+ return this.modelOptions?.model && /gpt-4[^-\s]/.test(this.modelOptions.model)
+ ? 'o200k_base'
+ : 'cl100k_base';
}
/**
@@ -610,7 +612,7 @@ class OpenAIClient extends BaseClient {
}
initializeLLM({
- model = 'gpt-4o-mini',
+ model = openAISettings.model.default,
modelName,
temperature = 0.2,
max_tokens,
@@ -711,7 +713,7 @@ class OpenAIClient extends BaseClient {
const { OPENAI_TITLE_MODEL } = process.env ?? {};
- let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-4o-mini';
+ let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? openAISettings.model.default;
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
}
@@ -904,7 +906,7 @@ ${convo}
let prompt;
// TODO: remove the gpt fallback and make it specific to endpoint
- const { OPENAI_SUMMARY_MODEL = 'gpt-4o-mini' } = process.env ?? {};
+ const { OPENAI_SUMMARY_MODEL = openAISettings.model.default } = process.env ?? {};
let model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
@@ -1305,8 +1307,12 @@ ${convo}
) {
delete modelOptions.stream;
delete modelOptions.stop;
- } else if (!this.isOmni && modelOptions.reasoning_effort != null) {
+ } else if (
+ (!this.isOmni || /^o1-(mini|preview)/i.test(modelOptions.model)) &&
+ modelOptions.reasoning_effort != null
+ ) {
delete modelOptions.reasoning_effort;
+ delete modelOptions.temperature;
}
let reasoningKey = 'reasoning_content';
@@ -1314,6 +1320,12 @@ ${convo}
modelOptions.include_reasoning = true;
reasoningKey = 'reasoning';
}
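+    // OpenRouter expects a nested `reasoning: { effort }` object rather than
+    // OpenAI's flat `reasoning_effort` parameter.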
+ if (this.useOpenRouter && modelOptions.reasoning_effort != null) {
+ modelOptions.reasoning = {
+ effort: modelOptions.reasoning_effort,
+ };
+ delete modelOptions.reasoning_effort;
+ }
this.streamHandler = new SplitStreamHandler({
reasoningKey,
diff --git a/api/app/clients/prompts/addCacheControl.js b/api/app/clients/prompts/addCacheControl.js
index eed5910dc9..6bfd901a65 100644
--- a/api/app/clients/prompts/addCacheControl.js
+++ b/api/app/clients/prompts/addCacheControl.js
@@ -1,7 +1,7 @@
/**
* Anthropic API: Adds cache control to the appropriate user messages in the payload.
- * @param {Array} messages - The array of message objects.
- * @returns {Array} - The updated array of message objects with cache control added.
+ * @param {Array} messages - The array of message objects.
+ * @returns {Array} - The updated array of message objects with cache control added.
*/
function addCacheControl(messages) {
if (!Array.isArray(messages) || messages.length < 2) {
@@ -13,7 +13,9 @@ function addCacheControl(messages) {
for (let i = updatedMessages.length - 1; i >= 0 && userMessagesModified < 2; i--) {
const message = updatedMessages[i];
- if (message.role !== 'user') {
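+    // Handle both LangChain message instances (getType() === 'human') and
+    // plain { role: 'user' } message objects.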
+ if (message.getType != null && message.getType() !== 'human') {
+ continue;
+ } else if (message.getType == null && message.role !== 'user') {
continue;
}
diff --git a/api/app/clients/prompts/formatAgentMessages.spec.js b/api/app/clients/prompts/formatAgentMessages.spec.js
index 957409d6ab..360fa00a34 100644
--- a/api/app/clients/prompts/formatAgentMessages.spec.js
+++ b/api/app/clients/prompts/formatAgentMessages.spec.js
@@ -325,4 +325,37 @@ describe('formatAgentMessages', () => {
);
expect(result[0].content).not.toContain('Analyzing the problem...');
});
+
+ it('should exclude ERROR type content parts', () => {
+ const payload = [
+ {
+ role: 'assistant',
+ content: [
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello there' },
+ {
+ type: ContentTypes.ERROR,
+ [ContentTypes.ERROR]:
+ 'An error occurred while processing the request: Something went wrong',
+ },
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
+ ],
+ },
+ ];
+
+ const result = formatAgentMessages(payload);
+
+ expect(result).toHaveLength(1);
+ expect(result[0]).toBeInstanceOf(AIMessage);
+ expect(result[0].content).toEqual([
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello there' },
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
+ ]);
+
+ // Make sure no error content exists in the result
+ const hasErrorContent = result[0].content.some(
+ (item) =>
+ item.type === ContentTypes.ERROR || JSON.stringify(item).includes('An error occurred'),
+ );
+ expect(hasErrorContent).toBe(false);
+ });
});
diff --git a/api/app/clients/prompts/formatMessages.js b/api/app/clients/prompts/formatMessages.js
index 235e51e51f..4e8d3bd5a5 100644
--- a/api/app/clients/prompts/formatMessages.js
+++ b/api/app/clients/prompts/formatMessages.js
@@ -211,6 +211,8 @@ const formatAgentMessages = (payload) => {
} else if (part.type === ContentTypes.THINK) {
hasReasoning = true;
continue;
+ } else if (part.type === ContentTypes.ERROR) {
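+        // Skip error parts so failed output isn't replayed to the provider.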
+ continue;
} else {
currentContent.push(part);
}
diff --git a/api/app/clients/specs/AnthropicClient.test.js b/api/app/clients/specs/AnthropicClient.test.js
index eef6bb6748..223f3038c0 100644
--- a/api/app/clients/specs/AnthropicClient.test.js
+++ b/api/app/clients/specs/AnthropicClient.test.js
@@ -1,3 +1,4 @@
+const { SplitStreamHandler } = require('@librechat/agents');
const { anthropicSettings } = require('librechat-data-provider');
const AnthropicClient = require('~/app/clients/AnthropicClient');
@@ -405,4 +406,327 @@ describe('AnthropicClient', () => {
expect(Number.isNaN(result)).toBe(false);
});
});
+
+ describe('maxOutputTokens handling for different models', () => {
+ it('should not cap maxOutputTokens for Claude 3.5 Sonnet models', () => {
+ const client = new AnthropicClient('test-api-key');
+ const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 10;
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-5-sonnet',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+
+ // Test with decimal notation
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.5-sonnet',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+ });
+
+ it('should not cap maxOutputTokens for Claude 3.7 models', () => {
+ const client = new AnthropicClient('test-api-key');
+ const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-7-sonnet',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+
+ // Test with decimal notation
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.7-sonnet',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+ });
+
+ it('should cap maxOutputTokens for Claude 3.5 Haiku models', () => {
+ const client = new AnthropicClient('test-api-key');
+ const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-5-haiku',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(
+ anthropicSettings.legacy.maxOutputTokens.default,
+ );
+
+ // Test with decimal notation
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.5-haiku',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(
+ anthropicSettings.legacy.maxOutputTokens.default,
+ );
+ });
+
+ it('should cap maxOutputTokens for Claude 3 Haiku and Opus models', () => {
+ const client = new AnthropicClient('test-api-key');
+ const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
+
+ // Test haiku
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-haiku',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(
+ anthropicSettings.legacy.maxOutputTokens.default,
+ );
+
+ // Test opus
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-opus',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(
+ anthropicSettings.legacy.maxOutputTokens.default,
+ );
+ });
+ });
+
+ describe('topK/topP parameters for different models', () => {
+ beforeEach(() => {
+ // Mock the SplitStreamHandler
+ jest.spyOn(SplitStreamHandler.prototype, 'handle').mockImplementation(() => {});
+ });
+
+ afterEach(() => {
+ jest.restoreAllMocks();
+ });
+
+ it('should include top_k and top_p parameters for non-claude-3.7 models', async () => {
+ const client = new AnthropicClient('test-api-key');
+
+ // Create a mock async generator function
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ // Mock createResponse to return the async generator
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-opus',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ });
+
+ // Mock getClient to capture the request options
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ // Check the options passed to getClient
+ expect(capturedOptions).toHaveProperty('top_k', 10);
+ expect(capturedOptions).toHaveProperty('top_p', 0.9);
+ });
+
+ it('should include top_k and top_p parameters for claude-3-5-sonnet models', async () => {
+ const client = new AnthropicClient('test-api-key');
+
+ // Create a mock async generator function
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ // Mock createResponse to return the async generator
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-5-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ });
+
+ // Mock getClient to capture the request options
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ // Check the options passed to getClient
+ expect(capturedOptions).toHaveProperty('top_k', 10);
+ expect(capturedOptions).toHaveProperty('top_p', 0.9);
+ });
+
+ it('should not include top_k and top_p parameters for claude-3-7-sonnet models', async () => {
+ const client = new AnthropicClient('test-api-key');
+
+ // Create a mock async generator function
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ // Mock createResponse to return the async generator
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-7-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ });
+
+ // Mock getClient to capture the request options
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ // Check the options passed to getClient
+ expect(capturedOptions).not.toHaveProperty('top_k');
+ expect(capturedOptions).not.toHaveProperty('top_p');
+ });
+
+ it('should not include top_k and top_p parameters for models with decimal notation (claude-3.7)', async () => {
+ const client = new AnthropicClient('test-api-key');
+
+ // Create a mock async generator function
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ // Mock createResponse to return the async generator
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.7-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ });
+
+ // Mock getClient to capture the request options
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ // Check the options passed to getClient
+ expect(capturedOptions).not.toHaveProperty('top_k');
+ expect(capturedOptions).not.toHaveProperty('top_p');
+ });
+ });
+
+ it('should include top_k and top_p parameters for Claude-3.7 models when thinking is explicitly disabled', async () => {
+ const client = new AnthropicClient('test-api-key', {
+ modelOptions: {
+ model: 'claude-3-7-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ thinking: false,
+ });
+
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ expect(capturedOptions).toHaveProperty('topK', 10);
+ expect(capturedOptions).toHaveProperty('topP', 0.9);
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.7-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ thinking: false,
+ });
+
+ await client.sendCompletion(payload, {});
+
+ expect(capturedOptions).toHaveProperty('topK', 10);
+ expect(capturedOptions).toHaveProperty('topP', 0.9);
+ });
});
diff --git a/api/app/clients/specs/BaseClient.test.js b/api/app/clients/specs/BaseClient.test.js
index e899449fb9..0dae5b14d3 100644
--- a/api/app/clients/specs/BaseClient.test.js
+++ b/api/app/clients/specs/BaseClient.test.js
@@ -30,6 +30,8 @@ jest.mock('~/models', () => ({
updateFileUsage: jest.fn(),
}));
+const { getConvo, saveConvo } = require('~/models');
+
jest.mock('@langchain/openai', () => {
return {
ChatOpenAI: jest.fn().mockImplementation(() => {
@@ -540,10 +542,11 @@ describe('BaseClient', () => {
test('saveMessageToDatabase is called with the correct arguments', async () => {
const saveOptions = TestClient.getSaveOptions();
- const user = {}; // Mock user
+ const user = {};
const opts = { user };
+ const saveSpy = jest.spyOn(TestClient, 'saveMessageToDatabase');
await TestClient.sendMessage('Hello, world!', opts);
- expect(TestClient.saveMessageToDatabase).toHaveBeenCalledWith(
+ expect(saveSpy).toHaveBeenCalledWith(
expect.objectContaining({
sender: expect.any(String),
text: expect.any(String),
@@ -557,6 +560,157 @@ describe('BaseClient', () => {
);
});
+ test('should handle existing conversation when getConvo retrieves one', async () => {
+ const existingConvo = {
+ conversationId: 'existing-convo-id',
+ endpoint: 'openai',
+ endpointType: 'openai',
+ model: 'gpt-3.5-turbo',
+ messages: [
+ { role: 'user', content: 'Existing message 1' },
+ { role: 'assistant', content: 'Existing response 1' },
+ ],
+ temperature: 1,
+ };
+
+ const { temperature: _temp, ...newConvo } = existingConvo;
+
+ const user = {
+ id: 'user-id',
+ };
+
+ getConvo.mockResolvedValue(existingConvo);
+ saveConvo.mockResolvedValue(newConvo);
+
+ TestClient = initializeFakeClient(
+ apiKey,
+ {
+ ...options,
+ req: {
+ user,
+ },
+ },
+ [],
+ );
+
+ const saveSpy = jest.spyOn(TestClient, 'saveMessageToDatabase');
+
+ const newMessage = 'New message in existing conversation';
+ const response = await TestClient.sendMessage(newMessage, {
+ user,
+ conversationId: existingConvo.conversationId,
+ });
+
+ expect(getConvo).toHaveBeenCalledWith(user.id, existingConvo.conversationId);
+ expect(TestClient.conversationId).toBe(existingConvo.conversationId);
+ expect(response.conversationId).toBe(existingConvo.conversationId);
+ expect(TestClient.fetchedConvo).toBe(true);
+
+ expect(saveSpy).toHaveBeenCalledWith(
+ expect.objectContaining({
+ conversationId: existingConvo.conversationId,
+ text: newMessage,
+ }),
+ expect.any(Object),
+ expect.any(Object),
+ );
+
+ expect(saveConvo).toHaveBeenCalledTimes(2);
+ expect(saveConvo).toHaveBeenCalledWith(
+ expect.any(Object),
+ expect.objectContaining({
+ conversationId: existingConvo.conversationId,
+ }),
+ expect.objectContaining({
+ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
+ unsetFields: {
+ temperature: 1,
+ },
+ }),
+ );
+
+ await TestClient.sendMessage('Another message', {
+ conversationId: existingConvo.conversationId,
+ });
+ expect(getConvo).toHaveBeenCalledTimes(1);
+ });
+
+ test('should correctly handle existing conversation and unset fields appropriately', async () => {
+ const existingConvo = {
+ conversationId: 'existing-convo-id',
+ endpoint: 'openai',
+ endpointType: 'openai',
+ model: 'gpt-3.5-turbo',
+ messages: [
+ { role: 'user', content: 'Existing message 1' },
+ { role: 'assistant', content: 'Existing response 1' },
+ ],
+ title: 'Existing Conversation',
+ someExistingField: 'existingValue',
+ anotherExistingField: 'anotherValue',
+ temperature: 0.7,
+ modelLabel: 'GPT-3.5',
+ };
+
+ getConvo.mockResolvedValue(existingConvo);
+ saveConvo.mockResolvedValue(existingConvo);
+
+ TestClient = initializeFakeClient(
+ apiKey,
+ {
+ ...options,
+ modelOptions: {
+ model: 'gpt-4',
+ temperature: 0.5,
+ },
+ },
+ [],
+ );
+
+ const newMessage = 'New message in existing conversation';
+ await TestClient.sendMessage(newMessage, {
+ conversationId: existingConvo.conversationId,
+ });
+
+ expect(saveConvo).toHaveBeenCalledTimes(2);
+
+ const saveConvoCall = saveConvo.mock.calls[0];
+ const [, savedFields, saveOptions] = saveConvoCall;
+
+ // Instead of checking all excludedKeys, we'll just check specific fields
+ // that we know should be excluded
+ expect(savedFields).not.toHaveProperty('messages');
+ expect(savedFields).not.toHaveProperty('title');
+
+ // Only check that someExistingField is in unsetFields
+ expect(saveOptions.unsetFields).toHaveProperty('someExistingField', 1);
+
+ // Mock saveConvo to return the expected fields
+ saveConvo.mockImplementation((req, fields) => {
+ return Promise.resolve({
+ ...fields,
+ endpoint: 'openai',
+ endpointType: 'openai',
+ model: 'gpt-4',
+ temperature: 0.5,
+ });
+ });
+
+ // Only check the conversationId since that's the only field we can be sure about
+ expect(savedFields).toHaveProperty('conversationId', 'existing-convo-id');
+
+ expect(TestClient.fetchedConvo).toBe(true);
+
+ await TestClient.sendMessage('Another message', {
+ conversationId: existingConvo.conversationId,
+ });
+
+ expect(getConvo).toHaveBeenCalledTimes(1);
+
+ const secondSaveConvoCall = saveConvo.mock.calls[1];
+ expect(secondSaveConvoCall[2]).toHaveProperty('unsetFields', {});
+ });
+
test('sendCompletion is called with the correct arguments', async () => {
const payload = {}; // Mock payload
TestClient.buildMessages.mockReturnValue({ prompt: payload, tokenCountMap: null });
diff --git a/api/app/clients/specs/FakeClient.js b/api/app/clients/specs/FakeClient.js
index 7f4b75e1db..a466bb97f9 100644
--- a/api/app/clients/specs/FakeClient.js
+++ b/api/app/clients/specs/FakeClient.js
@@ -56,7 +56,6 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
let TestClient = new FakeClient(apiKey);
TestClient.options = options;
TestClient.abortController = { abort: jest.fn() };
- TestClient.saveMessageToDatabase = jest.fn();
TestClient.loadHistory = jest
.fn()
.mockImplementation((conversationId, parentMessageId = null) => {
@@ -86,7 +85,6 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
return 'Mock response text';
});
- // eslint-disable-next-line no-unused-vars
TestClient.getCompletion = jest.fn().mockImplementation(async (..._args) => {
return {
choices: [
diff --git a/api/app/clients/specs/OpenAIClient.test.js b/api/app/clients/specs/OpenAIClient.test.js
index 2aaec518eb..0e811cf38a 100644
--- a/api/app/clients/specs/OpenAIClient.test.js
+++ b/api/app/clients/specs/OpenAIClient.test.js
@@ -202,14 +202,6 @@ describe('OpenAIClient', () => {
expect(client.modelOptions.temperature).toBe(0.7);
});
- it('should set apiKey and useOpenRouter if OPENROUTER_API_KEY is present', () => {
- process.env.OPENROUTER_API_KEY = 'openrouter-key';
- client.setOptions({});
- expect(client.apiKey).toBe('openrouter-key');
- expect(client.useOpenRouter).toBe(true);
- delete process.env.OPENROUTER_API_KEY; // Cleanup
- });
-
it('should set FORCE_PROMPT based on OPENAI_FORCE_PROMPT or reverseProxyUrl', () => {
process.env.OPENAI_FORCE_PROMPT = 'true';
client.setOptions({});
@@ -534,7 +526,6 @@ describe('OpenAIClient', () => {
afterEach(() => {
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
- delete process.env.OPENROUTER_API_KEY;
});
it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {
diff --git a/api/app/clients/tools/index.js b/api/app/clients/tools/index.js
index b8df50c77d..df436fb089 100644
--- a/api/app/clients/tools/index.js
+++ b/api/app/clients/tools/index.js
@@ -2,9 +2,10 @@ const availableTools = require('./manifest.json');
// Structured Tools
const DALLE3 = require('./structured/DALLE3');
+const FluxAPI = require('./structured/FluxAPI');
const OpenWeather = require('./structured/OpenWeather');
-const createYouTubeTools = require('./structured/YouTube');
const StructuredWolfram = require('./structured/Wolfram');
+const createYouTubeTools = require('./structured/YouTube');
const StructuredACS = require('./structured/AzureAISearch');
const StructuredSD = require('./structured/StableDiffusion');
const GoogleSearchAPI = require('./structured/GoogleSearch');
@@ -30,6 +31,7 @@ module.exports = {
manifestToolMap,
// Structured Tools
DALLE3,
+ FluxAPI,
OpenWeather,
StructuredSD,
StructuredACS,
diff --git a/api/app/clients/tools/manifest.json b/api/app/clients/tools/manifest.json
index 7cb92b8d87..43be7a4e6c 100644
--- a/api/app/clients/tools/manifest.json
+++ b/api/app/clients/tools/manifest.json
@@ -164,5 +164,19 @@
"description": "Sign up at OpenWeather, then get your key at API keys."
}
]
+ },
+ {
+ "name": "Flux",
+ "pluginKey": "flux",
+ "description": "Generate images using text with the Flux API.",
+ "icon": "https://blackforestlabs.ai/wp-content/uploads/2024/07/bfl_logo_retraced_blk.png",
+ "isAuthRequired": "true",
+ "authConfig": [
+ {
+ "authField": "FLUX_API_KEY",
+ "label": "Your Flux API Key",
+ "description": "Provide your Flux API key from your user profile."
+ }
+ ]
}
]
diff --git a/api/app/clients/tools/structured/DALLE3.js b/api/app/clients/tools/structured/DALLE3.js
index b604ad4ea4..81200e3a61 100644
--- a/api/app/clients/tools/structured/DALLE3.js
+++ b/api/app/clients/tools/structured/DALLE3.js
@@ -1,14 +1,17 @@
const { z } = require('zod');
const path = require('path');
const OpenAI = require('openai');
+const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
-const { FileContext } = require('librechat-data-provider');
+const { FileContext, ContentTypes } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
+const displayMessage =
+ 'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
class DALLE3 extends Tool {
constructor(fields = {}) {
super();
@@ -114,10 +117,7 @@ class DALLE3 extends Tool {
if (this.isAgent === true && typeof value === 'string') {
return [value, {}];
} else if (this.isAgent === true && typeof value === 'object') {
- return [
- 'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.',
- value,
- ];
+ return [displayMessage, value];
}
return value;
@@ -160,6 +160,32 @@ Error Message: ${error.message}`);
);
}
+ if (this.isAgent) {
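+      // Agent flow: fetch the generated image and return it inline as base64
+      // content instead of persisting it through the file strategy.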
+ let fetchOptions = {};
+ if (process.env.PROXY) {
+ fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
+ }
+ const imageResponse = await fetch(theImageUrl, fetchOptions);
+ const arrayBuffer = await imageResponse.arrayBuffer();
+ const base64 = Buffer.from(arrayBuffer).toString('base64');
+ const content = [
+ {
+ type: ContentTypes.IMAGE_URL,
+ image_url: {
+ url: `data:image/jpeg;base64,${base64}`,
+ },
+ },
+ ];
+
+ const response = [
+ {
+ type: ContentTypes.TEXT,
+ text: displayMessage,
+ },
+ ];
+ return [response, { content }];
+ }
+
const imageBasename = getImageBasename(theImageUrl);
const imageExt = path.extname(imageBasename);
diff --git a/api/app/clients/tools/structured/FluxAPI.js b/api/app/clients/tools/structured/FluxAPI.js
new file mode 100644
index 0000000000..80f9772200
--- /dev/null
+++ b/api/app/clients/tools/structured/FluxAPI.js
@@ -0,0 +1,554 @@
+const { z } = require('zod');
+const axios = require('axios');
+const fetch = require('node-fetch');
+const { v4: uuidv4 } = require('uuid');
+const { Tool } = require('@langchain/core/tools');
+const { HttpsProxyAgent } = require('https-proxy-agent');
+const { FileContext, ContentTypes } = require('librechat-data-provider');
+const { logger } = require('~/config');
+
+const displayMessage =
+ 'Flux displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
+
+/**
+ * FluxAPI - A tool for generating high-quality images from text prompts using the Flux API.
+ * Each call generates one image. If multiple images are needed, make multiple consecutive calls with the same or varied prompts.
+ */
+class FluxAPI extends Tool {
+ // Pricing constants in USD per image
+ static PRICING = {
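+    // Negative values: presumably debits, for the cost-handling TODO further below.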
+ FLUX_PRO_1_1_ULTRA: -0.06, // /v1/flux-pro-1.1-ultra
+ FLUX_PRO_1_1: -0.04, // /v1/flux-pro-1.1
+ FLUX_PRO: -0.05, // /v1/flux-pro
+ FLUX_DEV: -0.025, // /v1/flux-dev
+ FLUX_PRO_FINETUNED: -0.06, // /v1/flux-pro-finetuned
+ FLUX_PRO_1_1_ULTRA_FINETUNED: -0.07, // /v1/flux-pro-1.1-ultra-finetuned
+ };
+
+ constructor(fields = {}) {
+ super();
+
+ /** @type {boolean} Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+
+ this.userId = fields.userId;
+ this.fileStrategy = fields.fileStrategy;
+
+ /** @type {boolean} **/
+ this.isAgent = fields.isAgent;
+ this.returnMetadata = fields.returnMetadata ?? false;
+
+ if (fields.processFileURL) {
+ /** @type {processFileURL} Necessary for output to contain all image metadata. */
+ this.processFileURL = fields.processFileURL.bind(this);
+ }
+
+ this.apiKey = fields.FLUX_API_KEY || this.getApiKey();
+
+ this.name = 'flux';
+ this.description =
+ 'Use Flux to generate images from text descriptions. This tool can generate images and list available finetunes. Each generate call creates one image. For multiple images, make multiple consecutive calls.';
+
+ this.description_for_model = `// Transform any image description into a detailed, high-quality prompt. Never submit a prompt under 3 sentences. Follow these core rules:
+ // 1. ALWAYS enhance basic prompts into 5-10 detailed sentences (e.g., "a cat" becomes: "A close-up photo of a sleek Siamese cat with piercing blue eyes. The cat sits elegantly on a vintage leather armchair, its tail curled gracefully around its paws. Warm afternoon sunlight streams through a nearby window, casting gentle shadows across its face and highlighting the subtle variations in its cream and chocolate-point fur. The background is softly blurred, creating a shallow depth of field that draws attention to the cat's expressive features. The overall composition has a peaceful, contemplative mood with a professional photography style.")
+ // 2. Each prompt MUST be 3-6 descriptive sentences minimum, focusing on visual elements: lighting, composition, mood, and style
+ // Use action: 'list_finetunes' to see available custom models. When using finetunes, use endpoint: '/v1/flux-pro-finetuned' (default) or '/v1/flux-pro-1.1-ultra-finetuned' for higher quality and aspect ratio.`;
+
+ // Add base URL from environment variable with fallback
+ this.baseUrl = process.env.FLUX_API_BASE_URL || 'https://api.us1.bfl.ai';
+
+ // Define the schema for structured input
+ this.schema = z.object({
+ action: z
+ .enum(['generate', 'list_finetunes', 'generate_finetuned'])
+ .default('generate')
+ .describe(
+ 'Action to perform: "generate" for image generation, "generate_finetuned" for finetuned model generation, "list_finetunes" to get available custom models',
+ ),
+ prompt: z
+ .string()
+ .optional()
+ .describe(
+ 'Text prompt for image generation. Required when action is "generate". Not used for list_finetunes.',
+ ),
+ width: z
+ .number()
+ .optional()
+ .describe(
+ 'Width of the generated image in pixels. Must be a multiple of 32. Default is 1024.',
+ ),
+ height: z
+ .number()
+ .optional()
+ .describe(
+ 'Height of the generated image in pixels. Must be a multiple of 32. Default is 768.',
+ ),
+ prompt_upsampling: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe('Whether to perform upsampling on the prompt.'),
+ steps: z
+ .number()
+ .int()
+ .optional()
+ .describe('Number of steps to run the model for, a number from 1 to 50. Default is 40.'),
+ seed: z.number().optional().describe('Optional seed for reproducibility.'),
+ safety_tolerance: z
+ .number()
+ .optional()
+ .default(6)
+ .describe(
+ 'Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.',
+ ),
+ endpoint: z
+ .enum([
+ '/v1/flux-pro-1.1',
+ '/v1/flux-pro',
+ '/v1/flux-dev',
+ '/v1/flux-pro-1.1-ultra',
+ '/v1/flux-pro-finetuned',
+ '/v1/flux-pro-1.1-ultra-finetuned',
+ ])
+ .optional()
+ .default('/v1/flux-pro-1.1')
+ .describe('Endpoint to use for image generation.'),
+ raw: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe(
+ 'Generate less processed, more natural-looking images. Only works for /v1/flux-pro-1.1-ultra.',
+ ),
+ finetune_id: z.string().optional().describe('ID of the finetuned model to use'),
+ finetune_strength: z
+ .number()
+ .optional()
+ .default(1.1)
+ .describe('Strength of the finetuning effect (typically between 0.1 and 1.2)'),
+ guidance: z.number().optional().default(2.5).describe('Guidance scale for finetuned models'),
+ aspect_ratio: z
+ .string()
+ .optional()
+ .default('16:9')
+ .describe('Aspect ratio for ultra models (e.g., "16:9")'),
+ });
+ }
+
+ getAxiosConfig() {
+ const config = {};
+ if (process.env.PROXY) {
+ config.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
+ }
+ return config;
+ }
+
+ /** @param {Object|string} value */
+ getDetails(value) {
+ if (typeof value === 'string') {
+ return value;
+ }
+ return JSON.stringify(value, null, 2);
+ }
+
+ getApiKey() {
+ const apiKey = process.env.FLUX_API_KEY || '';
+ if (!apiKey && !this.override) {
+ throw new Error('Missing FLUX_API_KEY environment variable.');
+ }
+ return apiKey;
+ }
+
+ wrapInMarkdown(imageUrl) {
+ const serverDomain = process.env.DOMAIN_SERVER || 'http://localhost:3080';
+    return `![generated image](${serverDomain}${imageUrl})`;
+ }
+
+ returnValue(value) {
+ if (this.isAgent === true && typeof value === 'string') {
+ return [value, {}];
+ } else if (this.isAgent === true && typeof value === 'object') {
+ if (Array.isArray(value)) {
+ return value;
+ }
+ return [displayMessage, value];
+ }
+ return value;
+ }
+
+ async _call(data) {
+ const { action = 'generate', ...imageData } = data;
+
+ // Use provided API key for this request if available, otherwise use default
+ const requestApiKey = this.apiKey || this.getApiKey();
+
+ // Handle list_finetunes action
+ if (action === 'list_finetunes') {
+ return this.getMyFinetunes(requestApiKey);
+ }
+
+ // Handle finetuned generation
+ if (action === 'generate_finetuned') {
+ return this.generateFinetunedImage(imageData, requestApiKey);
+ }
+
+ // For generate action, ensure prompt is provided
+ if (!imageData.prompt) {
+ throw new Error('Missing required field: prompt');
+ }
+
+ let payload = {
+ prompt: imageData.prompt,
+ prompt_upsampling: imageData.prompt_upsampling || false,
+ safety_tolerance: imageData.safety_tolerance || 6,
+ output_format: imageData.output_format || 'png',
+ };
+
+ // Add optional parameters if provided
+ if (imageData.width) {
+ payload.width = imageData.width;
+ }
+ if (imageData.height) {
+ payload.height = imageData.height;
+ }
+ if (imageData.steps) {
+ payload.steps = imageData.steps;
+ }
+ if (imageData.seed !== undefined) {
+ payload.seed = imageData.seed;
+ }
+ if (imageData.raw) {
+ payload.raw = imageData.raw;
+ }
+
+ const generateUrl = `${this.baseUrl}${imageData.endpoint || '/v1/flux-pro'}`;
+ const resultUrl = `${this.baseUrl}/v1/get_result`;
+
+ logger.debug('[FluxAPI] Generating image with payload:', payload);
+ logger.debug('[FluxAPI] Using endpoint:', generateUrl);
+
+ let taskResponse;
+ try {
+ taskResponse = await axios.post(generateUrl, payload, {
+ headers: {
+ 'x-key': requestApiKey,
+ 'Content-Type': 'application/json',
+ Accept: 'application/json',
+ },
+ ...this.getAxiosConfig(),
+ });
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while submitting task:', details);
+
+ return this.returnValue(
+ `Something went wrong when trying to generate the image. The Flux API may be unavailable:
+ Error Message: ${details}`,
+ );
+ }
+
+ const taskId = taskResponse.data.id;
+
+ // Polling for the result
+ let status = 'Pending';
+ let resultData = null;
+ while (status !== 'Ready' && status !== 'Error') {
+ try {
+ // Wait 2 seconds between polls
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ const resultResponse = await axios.get(resultUrl, {
+ headers: {
+ 'x-key': requestApiKey,
+ Accept: 'application/json',
+ },
+ params: { id: taskId },
+ ...this.getAxiosConfig(),
+ });
+ status = resultResponse.data.status;
+
+ if (status === 'Ready') {
+ resultData = resultResponse.data.result;
+ break;
+ } else if (status === 'Error') {
+ logger.error('[FluxAPI] Error in task:', resultResponse.data);
+ return this.returnValue('An error occurred during image generation.');
+ }
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while getting result:', details);
+ return this.returnValue('An error occurred while retrieving the image.');
+ }
+ }
+
+ // If no result data
+ if (!resultData || !resultData.sample) {
+ logger.error('[FluxAPI] No image data received from API. Response:', resultData);
+ return this.returnValue('No image data received from Flux API.');
+ }
+
+ // Try saving the image locally
+ const imageUrl = resultData.sample;
+ const imageName = `img-${uuidv4()}.png`;
+
+ if (this.isAgent) {
+ try {
+ // Fetch the image and convert to base64
+ const fetchOptions = {};
+ if (process.env.PROXY) {
+ fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
+ }
+ const imageResponse = await fetch(imageUrl, fetchOptions);
+ const arrayBuffer = await imageResponse.arrayBuffer();
+ const base64 = Buffer.from(arrayBuffer).toString('base64');
+ const content = [
+ {
+ type: ContentTypes.IMAGE_URL,
+ image_url: {
+ url: `data:image/png;base64,${base64}`,
+ },
+ },
+ ];
+
+ const response = [
+ {
+ type: ContentTypes.TEXT,
+ text: displayMessage,
+ },
+ ];
+ return [response, { content }];
+ } catch (error) {
+ logger.error('Error processing image for agent:', error);
+ return this.returnValue(`Failed to process the image. ${error.message}`);
+ }
+ }
+
+ try {
+ logger.debug('[FluxAPI] Saving image:', imageUrl);
+ const result = await this.processFileURL({
+ fileStrategy: this.fileStrategy,
+ userId: this.userId,
+ URL: imageUrl,
+ fileName: imageName,
+ basePath: 'images',
+ context: FileContext.image_generation,
+ });
+
+ logger.debug('[FluxAPI] Image saved to path:', result.filepath);
+
+ // Calculate cost based on endpoint
+ /**
+ * TODO: Cost handling
+ const endpoint = imageData.endpoint || '/v1/flux-pro';
+ const endpointKey = Object.entries(FluxAPI.PRICING).find(([key, _]) =>
+ endpoint.includes(key.toLowerCase().replace(/_/g, '-')),
+ )?.[0];
+ const cost = FluxAPI.PRICING[endpointKey] || 0;
+ */
+ this.result = this.returnMetadata ? result : this.wrapInMarkdown(result.filepath);
+ return this.returnValue(this.result);
+ } catch (error) {
+ const details = this.getDetails(error?.message ?? 'No additional error details.');
+ logger.error('Error while saving the image:', details);
+ return this.returnValue(`Failed to save the image locally. ${details}`);
+ }
+ }
+
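+  /**
+   * Lists the user's finetunes, then fetches the details of each one in parallel.
+   * @param {string|null} apiKey - Per-request API key; falls back to FLUX_API_KEY.
+   */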
+ async getMyFinetunes(apiKey = null) {
+ const finetunesUrl = `${this.baseUrl}/v1/my_finetunes`;
+ const detailsUrl = `${this.baseUrl}/v1/finetune_details`;
+
+ try {
+ const headers = {
+ 'x-key': apiKey || this.getApiKey(),
+ 'Content-Type': 'application/json',
+ Accept: 'application/json',
+ };
+
+ // Get list of finetunes
+ const response = await axios.get(finetunesUrl, {
+ headers,
+ ...this.getAxiosConfig(),
+ });
+ const finetunes = response.data.finetunes;
+
+ // Fetch details for each finetune
+ const finetuneDetails = await Promise.all(
+ finetunes.map(async (finetuneId) => {
+ try {
+ const detailResponse = await axios.get(`${detailsUrl}?finetune_id=${finetuneId}`, {
+ headers,
+ ...this.getAxiosConfig(),
+ });
+ return {
+ id: finetuneId,
+ ...detailResponse.data,
+ };
+ } catch (error) {
+ logger.error(`[FluxAPI] Error fetching details for finetune ${finetuneId}:`, error);
+ return {
+ id: finetuneId,
+ error: 'Failed to fetch details',
+ };
+ }
+ }),
+ );
+
+ if (this.isAgent) {
+ const formattedDetails = JSON.stringify(finetuneDetails, null, 2);
+ return [`Here are the available finetunes:\n${formattedDetails}`, null];
+ }
+ return JSON.stringify(finetuneDetails);
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while getting finetunes:', details);
+      const errorMsg = `Failed to get finetunes: ${details}`;
+      if (this.isAgent) {
+        return this.returnValue([errorMsg, {}]);
+      }
+      throw new Error(errorMsg);
+ }
+ }
+
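+  /**
+   * Generates an image with a finetuned model. Mirrors the standard flow,
+   * but requires `finetune_id` and one of the finetuned endpoints.
+   * @param {Object} imageData - Validated arguments from the tool schema.
+   * @param {string} requestApiKey - API key to use for this request.
+   */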
+ async generateFinetunedImage(imageData, requestApiKey) {
+ if (!imageData.prompt) {
+ throw new Error('Missing required field: prompt');
+ }
+
+ if (!imageData.finetune_id) {
+ throw new Error(
+ 'Missing required field: finetune_id for finetuned generation. Please supply a finetune_id!',
+ );
+ }
+
+ // Validate endpoint is appropriate for finetuned generation
+ const validFinetunedEndpoints = ['/v1/flux-pro-finetuned', '/v1/flux-pro-1.1-ultra-finetuned'];
+ const endpoint = imageData.endpoint || '/v1/flux-pro-finetuned';
+
+ if (!validFinetunedEndpoints.includes(endpoint)) {
+ throw new Error(
+ `Invalid endpoint for finetuned generation. Must be one of: ${validFinetunedEndpoints.join(', ')}`,
+ );
+ }
+
+ let payload = {
+ prompt: imageData.prompt,
+ prompt_upsampling: imageData.prompt_upsampling || false,
+ safety_tolerance: imageData.safety_tolerance || 6,
+ output_format: imageData.output_format || 'png',
+ finetune_id: imageData.finetune_id,
+ finetune_strength: imageData.finetune_strength || 1.0,
+ guidance: imageData.guidance || 2.5,
+ };
+
+ // Add optional parameters if provided
+ if (imageData.width) {
+ payload.width = imageData.width;
+ }
+ if (imageData.height) {
+ payload.height = imageData.height;
+ }
+ if (imageData.steps) {
+ payload.steps = imageData.steps;
+ }
+ if (imageData.seed !== undefined) {
+ payload.seed = imageData.seed;
+ }
+ if (imageData.raw) {
+ payload.raw = imageData.raw;
+ }
+
+ const generateUrl = `${this.baseUrl}${endpoint}`;
+ const resultUrl = `${this.baseUrl}/v1/get_result`;
+
+ logger.debug('[FluxAPI] Generating finetuned image with payload:', payload);
+ logger.debug('[FluxAPI] Using endpoint:', generateUrl);
+
+ let taskResponse;
+ try {
+ taskResponse = await axios.post(generateUrl, payload, {
+ headers: {
+ 'x-key': requestApiKey,
+ 'Content-Type': 'application/json',
+ Accept: 'application/json',
+ },
+ ...this.getAxiosConfig(),
+ });
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while submitting finetuned task:', details);
+ return this.returnValue(
+ `Something went wrong when trying to generate the finetuned image. The Flux API may be unavailable:
+ Error Message: ${details}`,
+ );
+ }
+
+ const taskId = taskResponse.data.id;
+
+ // Polling for the result
+ let status = 'Pending';
+ let resultData = null;
+ while (status !== 'Ready' && status !== 'Error') {
+ try {
+ // Wait 2 seconds between polls
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ const resultResponse = await axios.get(resultUrl, {
+ headers: {
+ 'x-key': requestApiKey,
+ Accept: 'application/json',
+ },
+ params: { id: taskId },
+ ...this.getAxiosConfig(),
+ });
+ status = resultResponse.data.status;
+
+ if (status === 'Ready') {
+ resultData = resultResponse.data.result;
+ break;
+ } else if (status === 'Error') {
+ logger.error('[FluxAPI] Error in finetuned task:', resultResponse.data);
+ return this.returnValue('An error occurred during finetuned image generation.');
+ }
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while getting finetuned result:', details);
+ return this.returnValue('An error occurred while retrieving the finetuned image.');
+ }
+ }
+
+ // If no result data
+ if (!resultData || !resultData.sample) {
+ logger.error('[FluxAPI] No image data received from API. Response:', resultData);
+ return this.returnValue('No image data received from Flux API.');
+ }
+
+ // Try saving the image locally
+ const imageUrl = resultData.sample;
+ const imageName = `img-${uuidv4()}.png`;
+
+ try {
+ logger.debug('[FluxAPI] Saving finetuned image:', imageUrl);
+ const result = await this.processFileURL({
+ fileStrategy: this.fileStrategy,
+ userId: this.userId,
+ URL: imageUrl,
+ fileName: imageName,
+ basePath: 'images',
+ context: FileContext.image_generation,
+ });
+
+ logger.debug('[FluxAPI] Finetuned image saved to path:', result.filepath);
+
+      // Calculate cost based on endpoint
+      /**
+       * TODO: Cost handling
+      const endpointKey = endpoint.includes('ultra')
+        ? 'FLUX_PRO_1_1_ULTRA_FINETUNED'
+        : 'FLUX_PRO_FINETUNED';
+      const cost = FluxAPI.PRICING[endpointKey] || 0;
+      */
+ // Return the result based on returnMetadata flag
+ this.result = this.returnMetadata ? result : this.wrapInMarkdown(result.filepath);
+ return this.returnValue(this.result);
+ } catch (error) {
+ const details = this.getDetails(error?.message ?? 'No additional error details.');
+ logger.error('Error while saving the finetuned image:', details);
+ return this.returnValue(`Failed to save the finetuned image locally. ${details}`);
+ }
+ }
+}
+
+module.exports = FluxAPI;
diff --git a/api/app/clients/tools/structured/StableDiffusion.js b/api/app/clients/tools/structured/StableDiffusion.js
index 6309da35d8..25a9e0abd3 100644
--- a/api/app/clients/tools/structured/StableDiffusion.js
+++ b/api/app/clients/tools/structured/StableDiffusion.js
@@ -6,10 +6,13 @@ const axios = require('axios');
const sharp = require('sharp');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
-const { FileContext } = require('librechat-data-provider');
+const { FileContext, ContentTypes } = require('librechat-data-provider');
const paths = require('~/config/paths');
const { logger } = require('~/config');
+const displayMessage =
+ 'Stable Diffusion displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
+
class StableDiffusionAPI extends Tool {
constructor(fields) {
super();
@@ -21,6 +24,8 @@ class StableDiffusionAPI extends Tool {
this.override = fields.override ?? false;
/** @type {boolean} Necessary for output to contain all image metadata. */
this.returnMetadata = fields.returnMetadata ?? false;
+ /** @type {boolean} */
+ this.isAgent = fields.isAgent;
if (fields.uploadImageBuffer) {
/** @type {uploadImageBuffer} Necessary for output to contain all image metadata. */
this.uploadImageBuffer = fields.uploadImageBuffer.bind(this);
@@ -66,6 +71,16 @@ class StableDiffusionAPI extends Tool {
    return `![generated image](/${imageUrl})`;
}
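+  /** For agent runs, wraps output in the [output, artifact] tuple the agent pipeline expects. */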
+ returnValue(value) {
+ if (this.isAgent === true && typeof value === 'string') {
+ return [value, {}];
+ } else if (this.isAgent === true && typeof value === 'object') {
+ return [displayMessage, value];
+ }
+
+ return value;
+ }
+
getServerURL() {
const url = process.env.SD_WEBUI_URL || '';
if (!url && !this.override) {
@@ -113,6 +128,25 @@ class StableDiffusionAPI extends Tool {
}
try {
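+      // Agents receive the image inline as base64 content rather than a saved file reference.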
+ if (this.isAgent) {
+ const content = [
+ {
+ type: ContentTypes.IMAGE_URL,
+ image_url: {
+ url: `data:image/png;base64,${image}`,
+ },
+ },
+ ];
+
+ const response = [
+ {
+ type: ContentTypes.TEXT,
+ text: displayMessage,
+ },
+ ];
+ return [response, { content }];
+ }
+
const buffer = Buffer.from(image.split(',', 1)[0], 'base64');
if (this.returnMetadata && this.uploadImageBuffer && this.req) {
const file = await this.uploadImageBuffer({
@@ -154,7 +188,7 @@ class StableDiffusionAPI extends Tool {
logger.error('[StableDiffusion] Error while saving the image:', error);
}
- return this.result;
+ return this.returnValue(this.result);
}
}
diff --git a/api/app/clients/tools/util/handleTools.js b/api/app/clients/tools/util/handleTools.js
index f1dfa24a49..ae19a158ee 100644
--- a/api/app/clients/tools/util/handleTools.js
+++ b/api/app/clients/tools/util/handleTools.js
@@ -10,6 +10,7 @@ const {
GoogleSearchAPI,
// Structured Tools
DALLE3,
+ FluxAPI,
OpenWeather,
StructuredSD,
StructuredACS,
@@ -182,6 +183,7 @@ const loadTools = async ({
returnMap = false,
}) => {
const toolConstructors = {
+ flux: FluxAPI,
calculator: Calculator,
google: GoogleSearchAPI,
open_weather: OpenWeather,
@@ -230,9 +232,10 @@ const loadTools = async ({
};
const toolOptions = {
- serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
+ flux: imageGenOptions,
dalle: imageGenOptions,
'stable-diffusion': imageGenOptions,
+ serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
};
const toolContextMap = {};
diff --git a/api/cache/keyvRedis.js b/api/cache/keyvRedis.js
index 816dcd29b2..49620c49ae 100644
--- a/api/cache/keyvRedis.js
+++ b/api/cache/keyvRedis.js
@@ -9,7 +9,7 @@ const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_KEY_PREFIX, RED
let keyvRedis;
const redis_prefix = REDIS_KEY_PREFIX || '';
-const redis_max_listeners = REDIS_MAX_LISTENERS || 10;
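+// REDIS_MAX_LISTENERS arrives as a string from the environment; coerce it to a number or fall back to 10.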
+const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 10;
function mapURI(uri) {
const regex =
diff --git a/api/lib/db/indexSync.js b/api/lib/db/indexSync.js
index 9c40e684d3..75acd9d231 100644
--- a/api/lib/db/indexSync.js
+++ b/api/lib/db/indexSync.js
@@ -1,6 +1,6 @@
const { MeiliSearch } = require('meilisearch');
-const Conversation = require('~/models/schema/convoSchema');
-const Message = require('~/models/schema/messageSchema');
+const { Conversation } = require('~/models/Conversation');
+const { Message } = require('~/models/Message');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
diff --git a/api/models/Action.js b/api/models/Action.js
index 299b3bf20a..677b4d78df 100644
--- a/api/models/Action.js
+++ b/api/models/Action.js
@@ -1,5 +1,5 @@
const mongoose = require('mongoose');
-const actionSchema = require('./schema/action');
+const { actionSchema } = require('@librechat/data-schemas');
const Action = mongoose.model('action', actionSchema);
diff --git a/api/models/Agent.js b/api/models/Agent.js
index 6ea203113c..1d3ea5af0c 100644
--- a/api/models/Agent.js
+++ b/api/models/Agent.js
@@ -9,7 +9,7 @@ const {
removeAgentFromAllProjects,
} = require('./Project');
const getLogStores = require('~/cache/getLogStores');
-const agentSchema = require('./schema/agent');
+const { agentSchema } = require('@librechat/data-schemas');
const Agent = mongoose.model('agent', agentSchema);
diff --git a/api/models/Assistant.js b/api/models/Assistant.js
index d0e73ad4e7..a8a5b98157 100644
--- a/api/models/Assistant.js
+++ b/api/models/Assistant.js
@@ -1,5 +1,5 @@
const mongoose = require('mongoose');
-const assistantSchema = require('./schema/assistant');
+const { assistantSchema } = require('@librechat/data-schemas');
const Assistant = mongoose.model('assistant', assistantSchema);
diff --git a/api/models/Balance.js b/api/models/Balance.js
index 24d9087b77..f7978d8049 100644
--- a/api/models/Balance.js
+++ b/api/models/Balance.js
@@ -1,5 +1,5 @@
const mongoose = require('mongoose');
-const balanceSchema = require('./schema/balance');
+const { balanceSchema } = require('@librechat/data-schemas');
const { getMultiplier } = require('./tx');
const { logger } = require('~/config');
diff --git a/api/models/Banner.js b/api/models/Banner.js
index 8d439dae28..0f20faeba8 100644
--- a/api/models/Banner.js
+++ b/api/models/Banner.js
@@ -1,5 +1,9 @@
-const Banner = require('./schema/banner');
+const mongoose = require('mongoose');
const logger = require('~/config/winston');
+const { bannerSchema } = require('@librechat/data-schemas');
+
+const Banner = mongoose.model('Banner', bannerSchema);
+
/**
* Retrieves the current active banner.
* @returns {Promise