Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-09-21 21:50:49 +02:00

* chore: fix `endpoint` typescript issues and typo in console info message * feat(api): files GET endpoint and save only file_id references to messages * refactor(client): `useGetFiles` query hook, update file types, optimistic update of filesQuery on file upload * refactor(buildTree): update to use params object and accept fileMap * feat: map files to messages; refactor(ChatView): messages only available after files are fetched * fix: fetch files only when authenticated * feat(api): AppService - rename app.locals.configs to app.locals.paths - load custom config use fileStrategy from yaml config in app.locals * refactor: separate Firebase and Local strategies, call based on config * refactor: modularize file strategies and employ with use of DALL-E * refactor(librechat.yaml): add fileStrategy field * feat: add source to MongoFile schema, as well as BatchFile, and ExtendedFile types * feat: employ file strategies for upload/delete files * refactor(deleteFirebaseFile): add user id validation for firebase file deletion * chore(deleteFirebaseFile): update jsdocs * feat: employ strategies for vision requests * fix(client): handle messages with deleted files * fix(client): ensure `filesToDelete` always saves/sends `file.source` * feat(openAI): configurable `resendImages` and `imageDetail` * refactor(getTokenCountForMessage): recursive process only when array of Objects and only their values (not keys) aside from `image_url` types * feat(OpenAIClient): calculateImageTokenCost * chore: remove comment * refactor(uploadAvatar): employ fileStrategy for avatars, from social logins or user upload * docs: update docs on how to configure fileStrategy * fix(ci): mock winston and winston related modules, update DALLE3.spec.js with changes made * refactor(redis): change terminal message to reflect current development state * fix(DALL-E-2): pass fileStrategy to dall-e
76 lines · 2.9 KiB · YAML
# Configuration version (required)
version: 1.0.1

# Cache settings: Set to true to enable caching
cache: true

# Definition of custom endpoints
endpoints:
  custom:
    # Mistral AI API
    - name: "Mistral" # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      apiKey: "${MISTRAL_API_KEY}"
      baseURL: "https://api.mistral.ai/v1"

      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default: ["mistral-tiny", "mistral-small", "mistral-medium"]
        # Fetch option: Set to true to fetch models from API.
        fetch: true # Defaults to false.

      # Optional configurations

      # Title Conversation setting
      titleConvo: true # Set to true to enable title conversation

      # Title Method: Choose between "completion" or "functions".
      titleMethod: "completion" # Defaults to "completion" if omitted.

      # Title Model: Specify the model to use for titles.
      titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.

      # Summarize setting: Set to true to enable summarization.
      summarize: false

      # Summary Model: Specify the model to use if summarization is enabled.
      summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.

      # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
      forcePrompt: false

      # The label displayed for the AI model in messages.
      modelDisplayLabel: "Mistral" # Default is "AI" when not set.

      # Add additional parameters to the request. Default params will be overwritten.
      addParams:
        safe_mode: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/

      # Drop Default params parameters from the request. See default params in guide linked below.
      dropParams: ["stop", "temperature", "top_p"]
      # - stop # dropped since it's not recognized by Mistral AI API
      # `temperature` and `top_p` are removed to allow Mistral AI API defaults to be used:
      # - temperature
      # - top_p

    # OpenRouter.ai Example
    - name: "OpenRouter"
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
      apiKey: "${OPENROUTER_KEY}"
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default: ["gpt-3.5-turbo"]
        fetch: true
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
      modelDisplayLabel: "OpenRouter"

# See the Custom Configuration Guide for more information:
# https://docs.librechat.ai/install/configuration/custom_config.html