feat: Azure Vision Support & Docs Update (#1389)

* feat(AzureOpenAI): Vision Support

* chore(ci/OpenAIClient.test): update test to reflect Azure now uses chatCompletion method as opposed to getCompletion, while still testing the latter method

* docs: update documentation mainly revolving around Azure setup, but also reformatting the 'Tokens and API' section completely

* docs: add images and links to ai_setup.md

* docs: ai setup reference
This commit is contained in:
Danny Avila 2023-12-18 18:43:50 -05:00 committed by GitHub
parent c9d3e0ab6a
commit 8d563d61f1
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
14 changed files with 378 additions and 85 deletions

View file

@ -81,7 +81,7 @@ Keep up with the latest updates by visiting the releases page - [Releases](https
* [Windows Install💙](docs/install/windows_install.md)
* Configuration
* [.env Configuration](./docs/install/dotenv.md)
* [APIs and Tokens](docs/install/apis_and_tokens.md)
* [AI Setup](docs/install/ai_setup.md)
* [User Auth System](docs/install/user_auth_system.md)
* [Online MongoDB Database](docs/install/mongodb.md)
* [Default Language](docs/install/default_language.md)

View file

@ -397,7 +397,7 @@ class OpenAIClient extends BaseClient {
let streamResult = null;
this.modelOptions.user = this.user;
const invalidBaseUrl = this.completionsUrl && extractBaseURL(this.completionsUrl) === null;
const useOldMethod = !!(this.azure || invalidBaseUrl || !this.isChatCompletion);
const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion);
if (typeof opts.onProgress === 'function' && useOldMethod) {
await this.getCompletion(
payload,
@ -764,6 +764,15 @@ ${convo}
modelOptions.max_tokens = 4000;
}
if (this.azure || this.options.azure) {
// Azure does not accept `model` in the body, so we need to remove it.
delete modelOptions.model;
opts.baseURL = this.azureEndpoint.split('/chat')[0];
opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
}
let chatCompletion;
const openai = new OpenAI({
apiKey: this.apiKey,

View file

@ -1,4 +1,5 @@
require('dotenv').config();
const OpenAI = require('openai');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
const { genAzureChatCompletion } = require('~/utils/azureUtils');
const OpenAIClient = require('../OpenAIClient');
@ -41,6 +42,97 @@ jest.mock('langchain/chat_models/openai', () => {
};
});
jest.mock('openai');
// NOTE(review): `jest.spyOn(OpenAI, 'constructor')` resolves to
// `Function.prototype.constructor`, not the mocked class's instantiation path,
// so this spy is effectively a no-op; construction behavior is actually
// replaced below via `OpenAI.mockImplementation`. Also, if this implementation
// were ever invoked, `new OpenAI(...options)` would re-enter the spied
// constructor — TODO confirm intent or remove this spy.
jest.spyOn(OpenAI, 'constructor').mockImplementation(function (...options) {
// We can add additional logic here if needed
return new OpenAI(...options);
});
// Canned non-streaming result returned when the mocked stream's
// `finalChatCompletion()` is awaited: a single assistant choice.
const mockFinalCompletionResult = {
  choices: [
    {
      message: { role: 'assistant', content: 'Mock message content' },
      finish_reason: 'Mock finish reason',
    },
  ],
};
const finalChatCompletion = jest.fn().mockResolvedValue(mockFinalCompletionResult);
// Mock of the SDK's `beta.chat.completions.stream(...)`: returns a fake stream
// object that supports `.on(event, cb)`, `finalChatCompletion`, an abortable
// `controller`, a test-only `triggerError()` hook, and async iteration.
// State is held in the closure: `isDone` / `isError` gate the async iterator.
const stream = jest.fn().mockImplementation(() => {
let isDone = false;
let isError = false;
let errorCallback = null;
// Handlers run immediately when `.on(event, cb)` registers the matching event.
// Note: registering 'finalMessage' synchronously invokes the callback AND
// marks the stream done — there is no real event loop behind this mock.
const onEventHandlers = {
abort: () => {
// Mock abort behavior
},
error: (callback) => {
errorCallback = callback; // Save the error callback for later use
},
finalMessage: (callback) => {
callback({ role: 'assistant', content: 'Mock Response' });
isDone = true; // Set stream to done
},
};
const mockStream = {
// Chainable registration: dispatches to onEventHandlers, returns the stream.
on: jest.fn((event, callback) => {
if (onEventHandlers[event]) {
onEventHandlers[event](callback);
}
return mockStream;
}),
finalChatCompletion,
controller: { abort: jest.fn() },
// Test helper: flips the error flag and fires any registered error callback,
// so subsequent iteration rejects with 'Mock error'.
triggerError: () => {
isError = true;
if (errorCallback) {
errorCallback(new Error('Mock error'));
}
},
// Async iteration: rejects after triggerError(), completes once isDone,
// otherwise yields an endless series of identical delta chunks.
[Symbol.asyncIterator]: () => {
return {
next: () => {
if (isError) {
return Promise.reject(new Error('Mock error'));
}
if (isDone) {
return Promise.resolve({ done: true });
}
const chunk = { choices: [{ delta: { content: 'Mock chunk' } }] };
return Promise.resolve({ value: chunk, done: false });
},
};
},
};
return mockStream;
});
// Mocked `chat.completions.create`: resolves with one canned choice.
// Unlike the streaming mock's final message, this choice carries no `role`.
const mockCreateResponse = {
  choices: [
    {
      message: { content: 'Mock message content' },
      finish_reason: 'Mock finish reason',
    },
  ],
};
const create = jest.fn().mockResolvedValue(mockCreateResponse);
// Wire the mocked SDK client: streaming helpers live under `beta.chat`,
// non-streaming completions under `chat` — mirroring the real client shape.
OpenAI.mockImplementation(() => {
  const mockClient = {
    beta: { chat: { completions: { stream } } },
    chat: { completions: { create } },
  };
  return mockClient;
});
describe('OpenAIClient', () => {
let client, client2;
const model = 'gpt-4';
@ -456,45 +548,78 @@ describe('OpenAIClient', () => {
});
});
describe('sendMessage/getCompletion', () => {
describe('sendMessage/getCompletion/chatCompletion', () => {
afterEach(() => {
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
delete process.env.OPENROUTER_API_KEY;
});
it('[Azure OpenAI] should call getCompletion and fetchEventSource with correct args', async () => {
// Set a default model
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt4-turbo';
it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {
const model = 'text-davinci-003';
const onProgress = jest.fn().mockImplementation(() => ({}));
client.azure = defaultAzureOptions;
const getCompletion = jest.spyOn(client, 'getCompletion');
await client.sendMessage('Hi mom!', {
replaceOptions: true,
const testClient = new OpenAIClient('test-api-key', {
...defaultOptions,
onProgress,
azure: defaultAzureOptions,
modelOptions: { model },
});
const getCompletion = jest.spyOn(testClient, 'getCompletion');
await testClient.sendMessage('Hi mom!', { onProgress });
expect(getCompletion).toHaveBeenCalled();
expect(getCompletion.mock.calls.length).toBe(1);
expect(getCompletion.mock.calls[0][0][0].role).toBe('user');
expect(getCompletion.mock.calls[0][0][0].content).toBe('Hi mom!');
expect(getCompletion.mock.calls[0][0]).toBe(
'||>Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date: December 18, 2023\n\n||>User:\nHi mom!\n||>Assistant:\n',
);
expect(fetchEventSource).toHaveBeenCalled();
expect(fetchEventSource.mock.calls.length).toBe(1);
// Check if the first argument (url) is correct
const expectedURL = genAzureChatCompletion(defaultAzureOptions);
const firstCallArgs = fetchEventSource.mock.calls[0];
const expectedURL = 'https://api.openai.com/v1/completions';
expect(firstCallArgs[0]).toBe(expectedURL);
// Should not have model in the deployment name
expect(firstCallArgs[0]).not.toContain('gpt4-turbo');
// Should not include the model in request body
const requestBody = JSON.parse(firstCallArgs[1].body);
expect(requestBody).not.toHaveProperty('model');
expect(requestBody).toHaveProperty('model');
expect(requestBody.model).toBe(model);
});
it('[Azure OpenAI] should call chatCompletion and OpenAI.stream with correct args', async () => {
// Set a default model
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt4-turbo';
const onProgress = jest.fn().mockImplementation(() => ({}));
client.azure = defaultAzureOptions;
const chatCompletion = jest.spyOn(client, 'chatCompletion');
await client.sendMessage('Hi mom!', {
replaceOptions: true,
...defaultOptions,
modelOptions: { model: 'gpt4-turbo', stream: true },
onProgress,
azure: defaultAzureOptions,
});
expect(chatCompletion).toHaveBeenCalled();
expect(chatCompletion.mock.calls.length).toBe(1);
const chatCompletionArgs = chatCompletion.mock.calls[0][0];
const { payload } = chatCompletionArgs;
expect(payload[0].role).toBe('user');
expect(payload[0].content).toBe('Hi mom!');
// Azure OpenAI does not use the model property, and will error if it's passed
// This check ensures the model property is not present
const streamArgs = stream.mock.calls[0][0];
expect(streamArgs).not.toHaveProperty('model');
// Check if the baseURL is correct
const constructorArgs = OpenAI.mock.calls[0][0];
const expectedURL = genAzureChatCompletion(defaultAzureOptions).split('/chat')[0];
expect(constructorArgs.baseURL).toBe(expectedURL);
});
});
});

View file

@ -334,7 +334,7 @@ ALLOW_REGISTRATION=false
```
**Resources:**
- [Tokens/Apis/etc](../install/apis_and_tokens.md)
- [Tokens/Apis/etc](../install/ai_setup.md)
- [User/Auth System](../install/user_auth_system.md)
### **3. Start docker, and then run the installation/update script**

View file

@ -68,8 +68,8 @@ sudo reboot
## Tokens/Apis/etc:
- Make sure you have all the needed variables for the following before moving forward
### [Get Your API keys and Tokens](../install/apis_and_tokens.md) (Required)
- You must set up at least one of these tokens or APIs to run the app.
### [Setup your AI Endpoints](../install/ai_setup.md) (Required)
- At least one AI endpoint should be set up for use.
### [User/Auth System](../install/user_auth_system.md) (Optional)
- How to set up the user/auth system and Google login.
### [Plugins](../features/plugins/introduction.md)

View file

@ -15,12 +15,19 @@ Errors logs are also saved in the same location: `./api/logs/error-%DATE%.log`.
### Setup
Keep debug logs enabled with the following environment variable. Even if you never set this variable, debug logs will be generated, but you have the option to disable them by setting it to `FALSE`.
Toggle debug logs with the following environment variable. By default, even if you never set this variable, debug logs will be generated, but you have the option to disable them by setting it to `FALSE`.
Note: it's recommended to disable debug logs in a production environment.
```bash
DEBUG_LOGGING=TRUE
```
```bash
# in a production environment
DEBUG_LOGGING=FALSE
```
For verbose server output in the console/terminal, you can also set the following:
```bash

View file

@ -1,22 +1,78 @@
# How to setup various tokens and APIs for the project
# Table of Contents
This doc explains how to setup various tokens and APIs for the project. You will need some of these tokens and APIs to run the app and use its features. You must set up at least one of these tokens or APIs to run the app.
1. [AI Setup](#ai-setup)
- [General Information](#general)
- [Free AI APIs](#free-ai-apis)
- [Setting a Default Endpoint](#setting-a-default-endpoint)
- [Setting a Default Preset](#setting-a-default-preset)
- [OpenAI](#openai)
- [Anthropic](#anthropic)
- [Google](#google)
- [Generative Language API (Gemini)](#generative-language-api-gemini)
- [Vertex AI (PaLM 2 & Codey)](#vertex-ai-palm-2--codey)
- [Azure OpenAI](#azure-openai)
- [Required Variables](#required-variables)
- [Model Deployments](#model-deployments)
- [Setting a Default Model for Azure](#setting-a-default-model-for-azure)
- [Enabling Auto-Generated Titles with Azure](#enabling-auto-generated-titles-with-azure)
- [Using GPT-4 Vision with Azure](#using-gpt-4-vision-with-azure)
- [Setting Azure OpenAI as the Default Endpoint](#setting-azure-openai-as-the-default-endpoint)
- [Optional Variables](#optional-variables)
- [Using Plugins with Azure](#using-plugins-with-azure)
- [Unofficial APIs](#unofficial-apis)
- [ChatGPTBrowser](#chatgptbrowser)
- [BingAI](#bingai)
2. [Conclusion](#conclusion)
### Docker notes
---
**If you use docker, you should rebuild the docker image each time you update your credentials**
# AI Setup
This doc explains how to set up your AI providers, their APIs, and credentials.
**"Endpoints"** refer to the AI provider, configuration or API to use, which determines what models and settings are available for the current chat request.
For example, OpenAI, Google, Plugins, Azure OpenAI, Anthropic, are all different "endpoints". Since OpenAI was the first supported endpoint, it's listed first by default.
Using the default environment values from `.env.example` will enable several endpoints, with credentials to be provided on a per-user basis from the web app. Alternatively, you can provide credentials for all users of your instance.
This guide will walk you through setting up each Endpoint as needed.
**Reminder: If you use docker, you should [rebuild the docker image (here's how)](dotenv.md) each time you update your credentials**
*Note: Configuring pre-made Endpoint/model/conversation settings as singular options for your users is a planned feature. See the related discussion here: [System-wide custom model settings (lightweight GPTs) #1291](https://github.com/danny-avila/LibreChat/discussions/1291)*
## General
### [Free AI APIs](free_ai_apis.md)
### Setting a Default Endpoint
In the case where you have multiple endpoints setup, but want a specific one to be first in the order, you need to set the following environment variable.
Rebuild command:
```bash
npm run update:docker
# OR, if you don't have npm
docker-compose build --no-cache
# .env file
# No spaces between values
ENDPOINTS=azureOpenAI,openAI,google
```
Alternatively, you can create a new file named `docker-compose.override.yml` in the same directory as your main `docker-compose.yml` file for LibreChat, where you can set your .env variables as needed under `environment`. See the [docker docs](https://docs.docker.com/compose/multiple-compose-files/extends/#understanding-multiple-compose-files) for more info, and you can also view an example of an override file for LibreChat in the ["Manage Your Database" section](https://docs.librechat.ai/features/manage_your_database.html)
Note that LibreChat will use your last selected endpoint when creating a new conversation. So if Azure OpenAI is first in the order, but you used or view an OpenAI conversation last, when you hit "New Chat," OpenAI will be selected with its default conversation settings.
## OpenAI API key
To override this behavior, you need a preset and you need to set that specific preset as the default one to use on every new chat.
### Setting a Default Preset
A preset refers to a specific Endpoint/Model/Conversation Settings that you can save.
The default preset will always be used when creating a new conversation.
Here's a video to demonstrate:
https://github.com/danny-avila/LibreChat/assets/110412045/bbde830f-18d9-4884-88e5-1bd8f7ac585d
---
## OpenAI
To get your OpenAI API key, you need to:
@ -25,41 +81,20 @@ To get your OpenAI API key, you need to:
- Add a payment method to your account (this is not free, sorry 😬)
- Copy your secret key (sk-...) and save it in ./.env as OPENAI_API_KEY
## ChatGPT Free Access token
Notes:
- Selecting a vision model for messages with attachments is not necessary as it will be switched behind the scenes for you. If you didn't outright select a vision model, it will only be used for the vision request and you should still see the non-vision model you had selected after the request is successful
- OpenAI Vision models allow for messages without attachments
> Note that this is disabled by default and requires additional configuration to work.
> See: [ChatGPT Reverse Proxy](../features/pandoranext.md)
---
To get your Access token for ChatGPT 'Web Version', you need to:
- Go to [https://chat.openai.com](https://chat.openai.com)
- Create an account or log in with your existing one
- Visit [https://chat.openai.com/api/auth/session](https://chat.openai.com/api/auth/session)
- Copy the value of the "accessToken" field and save it in ./.env as CHATGPT_ACCESS_TOKEN
Warning: There may be a chance of your account being banned if you deploy the app to multiple users with this method. Use at your own risk. 😱
## Bing Access Token
To get your Bing Access Token, you have a few options:
- You can try leaving it blank and see if it works (fingers crossed 🤞)
- You can follow these [new instructions](https://github.com/danny-avila/LibreChat/issues/370#issuecomment-1560382302) (thanks @danny-avila for sharing 🙌)
- You can use MS Edge, navigate to bing.com, and do the following:
- Make sure you are logged in
- Open the DevTools by pressing F12 on your keyboard
- Click on the tab "Application" (On the left of the DevTools)
- Expand the "Cookies" (Under "Storage")
- Copy the value of the "\_U" cookie and save it in ./.env as BING_ACCESS_TOKEN
## Anthropic Endpoint (Claude)
## Anthropic
- Create an account at [https://console.anthropic.com/](https://console.anthropic.com/)
- Go to [https://console.anthropic.com/account/keys](https://console.anthropic.com/account/keys) and get your api key
- add it to `ANTHROPIC_API_KEY=` in the `.env` file
---
## Google
For the Google Endpoint, you can either use the **Generative Language API** (for Gemini models), or the **Vertex AI API** (for PaLM2 & Codey models, Gemini support coming soon).
@ -92,7 +127,10 @@ Or, you can make users provide it from the frontend by setting the following:
GOOGLE_KEY=user_provided
```
Note: PaLM2 and Codey models cannot be accessed through the Generative Language API, only through Vertex AI.
Notes:
- PaLM2 and Codey models cannot be accessed through the Generative Language API, only through Vertex AI.
- Selecting `gemini-pro-vision` for messages with attachments is not necessary as it will be switched behind the scenes for you
- Since `gemini-pro-vision` does not accept non-attachment messages, messages without attachments are automatically switched to use `gemini-pro` (otherwise, Google responds with an error)
Setting `GOOGLE_KEY=user_provided` in your .env file will configure both the Vertex AI Service Account JSON key file and the Generative Language API key to be provided from the frontend like so:
@ -139,6 +177,8 @@ GOOGLE_KEY=user_provided
Note: Using Gemini models through Vertex AI is possible but not yet supported.
---
## Azure OpenAI
In order to use Azure OpenAI with this project, specific environment variables must be set in your `.env` file. These variables will be used for constructing the API URLs.
@ -147,6 +187,8 @@ The variables needed are outlined below:
### Required Variables
These variables construct the API URL for Azure OpenAI.
* `AZURE_API_KEY`: Your Azure OpenAI API key.
* `AZURE_OPENAI_API_INSTANCE_NAME`: The instance name of your Azure OpenAI API.
* `AZURE_OPENAI_API_DEPLOYMENT_NAME`: The deployment name of your Azure OpenAI API.
@ -158,13 +200,33 @@ https://{AZURE_OPENAI_API_INSTANCE_NAME}.openai.azure.com/openai/deployments/{AZ
```
You should also consider changing the `AZURE_OPENAI_MODELS` variable to the models available in your deployment.
#### Additional Configuration Notes
```bash
# .env file
AZURE_OPENAI_MODELS=gpt-4-1106-preview,gpt-4,gpt-3.5-turbo,gpt-3.5-turbo-1106,gpt-4-vision-preview
```
- **Endpoint Construction**: The provided variables help customize the construction of the API URL for Azure.
Overriding the construction of the API URL will be possible but is not yet implemented. Follow progress on this feature here: [Issue #1266](https://github.com/danny-avila/LibreChat/issues/1266)
- **Model Deployment Naming**: As of 2023-11-10, the Azure API allows only one model per deployment. It's advisable to name your deployments after the model name (e.g., "gpt-3.5-turbo") for easy deployment switching. This is facilitated by setting `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME` to `TRUE`.
### Model Deployments
Alternatively, use custom deployment names and set `AZURE_OPENAI_DEFAULT_MODEL` for expected functionality.
*Note: a change will be developed to improve current configuration settings, to allow multiple deployments/model configurations setup with ease: [#1390](https://github.com/danny-avila/LibreChat/issues/1390)*
As of 2023-12-18, the Azure API allows only one model per deployment.
**It's highly recommended** to name your deployments *after* the model name (e.g., "gpt-3.5-turbo") for easy deployment switching.
When you do so, LibreChat will correctly switch the deployment, while associating the correct max context per model, if you have the following environment variable set:
```bash
AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
```
For example, when you have set `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE`, the following deployment configuration provides the most seamless, error-free experience for LibreChat, including Vision support and tracking the correct max context tokens:
![Screenshot 2023-12-18 111742](https://github.com/danny-avila/LibreChat/assets/110412045/4aa8a61c-0317-4681-8262-a6382dcaa7b0)
Alternatively, you can use custom deployment names and set `AZURE_OPENAI_DEFAULT_MODEL` for expected functionality.
- **`AZURE_OPENAI_MODELS`**: List the available models, separated by commas without spaces. The first listed model will be the default. If left blank, internal settings will be used. Note that deployment names can't have periods, which are removed when generating the endpoint.
@ -186,7 +248,13 @@ AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
```
Note: Azure API does not use the `model` in the payload and is more of an identifying field for the LibreChat App. If using non-model deployment names, but you're having issues with the model not being recognized, you should set this field. It will also not be used as the deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is enabled, which will prioritize what the user selects as the model.
### Setting a Default Model for Azure
This section is relevant when you are **not** naming deployments after model names as shown above.
**Important:** The Azure OpenAI API does not use the `model` field in the payload but is a necessary identifier for LibreChat. If your deployment names do not correspond to the model names, and you're having issues with the model not being recognized, you should set this field to explicitly tell LibreChat to treat your Azure OpenAI API requests as if the specified model was selected.
If AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is enabled, the model you set with `AZURE_OPENAI_DEFAULT_MODEL` will **not** be recognized and will **not** be used as the deployment name; instead, it will use the model selected by the user as the "deployment" name.
- **`AZURE_OPENAI_DEFAULT_MODEL`**: Override the model setting for Azure, useful if using custom deployment names.
@ -194,12 +262,38 @@ Example use:
```bash
# .env file
# MUST be a real OpenAI model, named exactly how it is recognized by OpenAI API (not Azure)
AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # do include periods in the model name here
```
### Enabling Auto-Generated Titles with Azure
The default titling model is set to `gpt-3.5-turbo`.
If you're using `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME` and have "gpt-35-turbo" set up as a deployment name, this should work out-of-the-box.
In any case, you can adjust the title model as such: `OPENAI_TITLE_MODEL=your-title-model`
### Using GPT-4 Vision with Azure
Currently, the best way to set up Vision is to use your deployment names as the model names, as [shown here](#model-deployments)
This will work seamlessly as it does with the [OpenAI endpoint](#openai) (no need to select the vision model, it will be switched behind the scenes)
Alternatively, you can set the [required variables](#required-variables) to explicitly use your vision deployment, but this may limit you to exclusively using your vision deployment for all Azure chat settings.
As of December 18th, 2023, Vision models seem to have degraded performance with Azure OpenAI when compared to [OpenAI](#openai)
![image](https://github.com/danny-avila/LibreChat/assets/110412045/7306185f-c32c-4483-9167-af514cc1c2dd)
*Note: a change will be developed to improve current configuration settings, to allow multiple deployments/model configurations setup with ease: [#1390](https://github.com/danny-avila/LibreChat/issues/1390)*
### Optional Variables
*These variables are currently not used by LibreChat*
* `AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME`: The deployment name for completion. This is currently not in use but may be used in future.
* `AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME`: The deployment name for embedding. This is currently not in use but may be used in future.
@ -214,10 +308,53 @@ To use Azure with the Plugins endpoint, make sure the following environment vari
* `PLUGINS_USE_AZURE`: If set to "true" or any truthy value, this will enable the program to use Azure with the Plugins endpoint.
* `AZURE_API_KEY`: Your Azure API key must be set with an environment variable.
## That's it! You're all set. 🎉
---
## Unofficial APIs
**Important:** Stability for Unofficial APIs is not guaranteed. Access methods to these APIs are hacky, prone to errors and patching, and are marked lowest in priority in LibreChat's development.
### ChatGPTBrowser
**Backend Access to https://chat.openai.com/api**
This is not to be confused with [OpenAI's Official API](#openai)!
> Note that this is disabled by default and requires additional configuration to work.
> Also, using this may have your data exposed to 3rd parties if using a proxy, and OpenAI may flag your account.
> See: [ChatGPT Reverse Proxy](../features/pandoranext.md)
To get your Access token for ChatGPT Browser Access, you need to:
- Go to [https://chat.openai.com](https://chat.openai.com)
- Create an account or log in with your existing one
- Visit [https://chat.openai.com/api/auth/session](https://chat.openai.com/api/auth/session)
- Copy the value of the "accessToken" field and save it in ./.env as CHATGPT_ACCESS_TOKEN
Warning: There may be a chance of your account being banned if you deploy the app to multiple users with this method. Use at your own risk. 😱
---
## [Free AI APIs](free_ai_apis.md)
### BingAI
To get your Bing Access Token, you have a few options:
- You can try leaving it blank and see if it works (fingers crossed 🤞)
- You can follow these [new instructions](https://github.com/danny-avila/LibreChat/issues/370#issuecomment-1560382302) (thanks @danny-avila for sharing 🙌)
- You can use MS Edge, navigate to bing.com, and do the following:
- Make sure you are logged in
- Open the DevTools by pressing F12 on your keyboard
- Click on the tab "Application" (On the left of the DevTools)
- Expand the "Cookies" (Under "Storage")
- Copy the value of the "\_U" cookie and save it in ./.env as BING_ACCESS_TOKEN
---
## Conclusion
<h3>That's it! You're all set. 🎉</h3>
---

View file

@ -23,8 +23,8 @@ Before running LibreChat with Docker, you need to configure some settings:
- Provide all necessary credentials in the `.env` file before the next step.
- Docker will read this env file. See the `.env.example` file for reference.
#### [API Keys and Tokens Setup](apis_and_tokens.md) (Required)
You must set up at least one of these tokens or APIs to run the app.
#### [AI Setup](ai_setup.md) (Required)
At least one AI endpoint should be set up for use.
#### [Manage Your MongoDB Database](../features/manage_your_database.md) (Optional)
Safely access and manage your MongoDB database using Mongo Express

View file

@ -3,6 +3,20 @@ Welcome to the comprehensive guide for configuring your application's environmen
While the default settings provide a solid foundation for a standard `docker` installation, delving into this guide will unveil the full potential of LibreChat. This guide empowers you to tailor LibreChat to your precise needs. Discover how to adjust language model availability, integrate social logins, manage the automatic moderation system, and much more. It's all about giving you the control to fine-tune LibreChat for an optimal user experience.
**If you use docker, you should rebuild the docker image each time you update your environment variables**
Rebuild command:
```bash
npm run update:docker
# OR, if you don't have npm
docker-compose build --no-cache
```
Alternatively, you can create a new file named `docker-compose.override.yml` in the same directory as your main `docker-compose.yml` file for LibreChat, where you can set your .env variables as needed under `environment`. See the [docker docs](https://docs.docker.com/compose/multiple-compose-files/extends/#understanding-multiple-compose-files) for more info, and you can also view an example of an override file for LibreChat in the ["Manage Your Database" section](https://docs.librechat.ai/features/manage_your_database.html)
---
## Server Configuration
### Customization
@ -96,7 +110,7 @@ PROXY=
```
### Anthropic
see: [Anthropic Endpoint](./apis_and_tokens.md#anthropic-endpoint-claude)
see: [Anthropic Endpoint](./ai_setup.md#anthropic)
- You can request an access key from https://console.anthropic.com/
- Leave `ANTHROPIC_API_KEY=` blank to disable this endpoint
- Set `ANTHROPIC_API_KEY=` to "user_provided" to allow users to provide their own API key from the WebUI
@ -110,7 +124,7 @@ ANTHROPIC_REVERSE_PROXY=
```
### Azure
see: [Azure OpenAI](./apis_and_tokens.md#azure-openai)
**Important:** See [the complete Azure OpenAI setup guide](./ai_setup.md#azure-openai) for thorough instructions on enabling Azure OpenAI
- To use Azure with this project, set the following variables. These will be used to build the API URL.
@ -155,7 +169,7 @@ PLUGINS_USE_AZURE="true"
```
### BingAI
Bing, also used for Sydney, jailbreak, and Bing Image Creator, see: [Bing Access token](./apis_and_tokens.md#bing-access-token) and [Bing Jailbreak](../features/bing_jailbreak.md)
Bing, also used for Sydney, jailbreak, and Bing Image Creator, see: [Bing Access token](./ai_setup.md#bingai) and [Bing Jailbreak](../features/bing_jailbreak.md)
- Follow these instructions to get your bing access token (it's best to use the full cookie string for that purpose): [Bing Access Token](https://github.com/danny-avila/LibreChat/issues/370#issuecomment-1560382302)
- Leave `BINGAI_TOKEN=` blank to disable this endpoint
@ -171,7 +185,7 @@ BINGAI_HOST=
```
### ChatGPT
see: [ChatGPT Free Access token](./apis_and_tokens.md#chatgpt-free-access-token)
see: [ChatGPT Free Access token](./ai_setup.md#chatgptbrowser)
> **Warning**: To use this endpoint you'll have to set up your own reverse proxy. Here is the installation guide to deploy your own (based on [PandoraNext](https://github.com/pandora-next/deploy)): **[PandoraNext Deployment Guide](../features/pandoranext.md)**
@ -192,7 +206,7 @@ CHATGPT_MODELS=text-davinci-002-render-sha
```
### Google
Follow these instruction to setup: [Google LLMs](./apis_and_tokens.md#google-llms)
Follow these instructions to setup the [Google Endpoint](./ai_setup.md#google)
```bash
GOOGLE_KEY=user_provided

View file

@ -91,8 +91,8 @@ sudo apt-get install -y nodejs
## [Create a MongoDB database](mongodb.md) (Required)
## [Get Your API keys and Tokens](apis_and_tokens.md) (Required)
- You must set up at least one of these tokens or APIs to run the app.
## [Setup your AI Endpoints](ai_setup.md) (Required)
- At least one AI endpoint should be set up for use.
## [User/Auth System](../install/user_auth_system.md) (Optional)
- How to set up the user/auth system and Google login.

View file

@ -24,8 +24,8 @@
> Choose only one option, online or brew. Both have pros and cons
### [Get Your API keys and Tokens](apis_and_tokens.md) (Required)
- You must set up at least one of these tokens or APIs to run the app.
### [Setup your AI Endpoints](ai_setup.md) (Required)
- At least one AI endpoint should be set up for use.
### [User/Auth System](../install/user_auth_system.md) (Optional)
- Set up the user/auth system and various social logins.

View file

@ -50,8 +50,8 @@ Have fun!
### [Create a MongoDB database](mongodb.md) (Required)
### [Get Your API keys and Tokens](apis_and_tokens.md) (Required)
- You must set up at least one of these tokens or APIs to run the app.
### [Setup your AI Endpoints](ai_setup.md) (Required)
- At least one AI endpoint should be set up for use.
### Download LibreChat (Required)
- (With Git) Open Terminal (command prompt) and clone the repository by running `git clone https://github.com/danny-avila/LibreChat.git`

View file

@ -90,7 +90,7 @@ nav:
- Configuration:
- .env Configuration: 'install/dotenv.md'
- Free AI APIs: 'install/free_ai_apis.md'
- APIs and Tokens: 'install/apis_and_tokens.md'
- AI Setup: 'install/ai_setup.md'
- User Auth System: 'install/user_auth_system.md'
- Online MongoDB Database: 'install/mongodb.md'
- Languages: 'install/default_language.md'

View file

@ -138,6 +138,7 @@ export const supportsFiles = {
[EModelEndpoint.openAI]: true,
[EModelEndpoint.google]: true,
[EModelEndpoint.assistant]: true,
[EModelEndpoint.azureOpenAI]: true,
};
export const supportsBalanceCheck = {