🦙 docs: Update Ollama + LiteLLM Instructions (#2302)

* Update litellm.md
* Set OPENAI_API_KEY for the litellm service (it must be set if Ollama's OpenAI API compatibility is used)
parent 94950b6e8b
commit 09cd1a7e74

2 changed files with 6 additions and 5 deletions
@@ -122,6 +122,7 @@ version: '3.4'
 #      - ./litellm/litellm-config.yaml:/app/config.yaml
 #    command: [ "--config", "/app/config.yaml", "--port", "8000", "--num_workers", "8" ]
 #    environment:
+#      OPENAI_API_KEY: none ## needs to be set if ollama's openai api compatibility is used
 #      REDIS_HOST: redis
 #      REDIS_PORT: 6379
 #      REDIS_PASSWORD: RedisChangeMe
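Once the litellm service is running with that variable set, a quick sanity check can go through the proxy's OpenAI-compatible API. This is only a sketch, not part of the commit: it assumes the proxy listens on localhost:8000 (the port in the commented command above) and that a model named `mixtral` is defined in the config below.

```python
# Sketch of a smoke test against the LiteLLM proxy's OpenAI-compatible API.
# Assumptions: proxy on localhost:8000, a "mixtral" entry in model_list below.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="none",  # placeholder; the upstream key lives in the service's environment
)

reply = client.chat.completions.create(
    model="mixtral",
    messages=[{"role": "user", "content": "Say hello."}],
)
print(reply.choices[0].message.content)
```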
@@ -48,13 +48,13 @@ model_list:
       rpm: 1440
   - model_name: mixtral
     litellm_params:
-      model: ollama/mixtral:8x7b-instruct-v0.1-q5_K_M
-      api_base: http://ollama:11434
+      model: openai/mixtral:8x7b-instruct-v0.1-q5_K_M # use openai/* for ollama's openai api compatibility
+      api_base: http://ollama:11434/v1
       stream: True
   - model_name: mistral
     litellm_params:
-      model: ollama/mistral
-      api_base: http://ollama:11434
+      model: openai/mistral # use openai/* for ollama's openai api compatibility
+      api_base: http://ollama:11434/v1
       stream: True
 litellm_settings:
   success_callback: ["langfuse"]
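The `openai/` prefix tells LiteLLM to speak the OpenAI chat-completions protocol to the given `api_base` rather than Ollama's native API, which is why the base URL gains the `/v1` suffix. Roughly, the request LiteLLM ends up sending looks like the sketch below (hostname and model name taken from the config above; adjust for your deployment):

```python
# Rough sketch of the OpenAI-format request LiteLLM forwards to Ollama
# for model "openai/mistral" (the openai/ prefix is stripped before sending).
import requests

resp = requests.post(
    "http://ollama:11434/v1/chat/completions",
    json={
        "model": "mistral",
        "messages": [{"role": "user", "content": "Say hello."}],
        "stream": False,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```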
@@ -95,4 +95,4 @@ Key components and features include:
 - **Deployment and Performance**: Information on deploying LiteLLM Proxy and its performance metrics.
 - **Proxy CLI Arguments**: A wide range of command-line arguments for customization.
 
-Overall, LiteLLM Server offers a comprehensive suite of tools for managing, deploying, and interacting with a variety of LLMs, making it a versatile choice for large-scale AI applications.
+Overall, LiteLLM Server offers a comprehensive suite of tools for managing, deploying, and interacting with a variety of LLMs, making it a versatile choice for large-scale AI applications.