🌉 feat: Integrate Helicone AI Gateway Provider (#10287)

* feat: integrate Helicone AI gateway provider

- Add Helicone provider support with automatic model fetching
- Implement custom API logic for Helicone model registry endpoint
- Enable access to 75+ models from multiple AI providers through Helicone gateway
- Add Helicone to supported providers list in README
- Include Helicone configuration in example YAML

* docs: add Helicone to supported providers list

* fix comments

* fixed backgroundless helicone icon asset

* removed unnecessary changes

* replace png helicone image with svg
This commit is contained in:
_juliettech 2025-11-13 08:45:32 -05:00 committed by GitHub
parent 6e19026c48
commit bc561840bb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 37 additions and 4 deletions

View file

@ -56,7 +56,7 @@
- [Custom Endpoints](https://www.librechat.ai/docs/quick_start/custom_endpoints): Use any OpenAI-compatible API with LibreChat, no proxy required - [Custom Endpoints](https://www.librechat.ai/docs/quick_start/custom_endpoints): Use any OpenAI-compatible API with LibreChat, no proxy required
- Compatible with [Local & Remote AI Providers](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints): - Compatible with [Local & Remote AI Providers](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):
- Ollama, groq, Cohere, Mistral AI, Apple MLX, koboldcpp, together.ai, - Ollama, groq, Cohere, Mistral AI, Apple MLX, koboldcpp, together.ai,
- OpenRouter, Perplexity, ShuttleAI, Deepseek, Qwen, and more - OpenRouter, Helicone, Perplexity, ShuttleAI, Deepseek, Qwen, and more
- 🔧 **[Code Interpreter API](https://www.librechat.ai/docs/features/code_interpreter)**: - 🔧 **[Code Interpreter API](https://www.librechat.ai/docs/features/code_interpreter)**:
- Secure, Sandboxed Execution in Python, Node.js (JS/TS), Go, C/C++, Java, PHP, Rust, and Fortran - Secure, Sandboxed Execution in Python, Node.js (JS/TS), Go, C/C++, Java, PHP, Rust, and Fortran

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 50 KiB

View file

@ -12,6 +12,7 @@ const knownEndpointAssets = {
[KnownEndpoints.fireworks]: 'assets/fireworks.png', [KnownEndpoints.fireworks]: 'assets/fireworks.png',
[KnownEndpoints.google]: 'assets/google.svg', [KnownEndpoints.google]: 'assets/google.svg',
[KnownEndpoints.groq]: 'assets/groq.png', [KnownEndpoints.groq]: 'assets/groq.png',
[KnownEndpoints.helicone]: 'assets/helicone.png',
[KnownEndpoints.huggingface]: 'assets/huggingface.svg', [KnownEndpoints.huggingface]: 'assets/huggingface.svg',
[KnownEndpoints.mistral]: 'assets/mistral.png', [KnownEndpoints.mistral]: 'assets/mistral.png',
[KnownEndpoints.mlx]: 'assets/mlx.png', [KnownEndpoints.mlx]: 'assets/mlx.png',

View file

@ -301,6 +301,22 @@ endpoints:
dropParams: ['stop'] dropParams: ['stop']
modelDisplayLabel: 'OpenRouter' modelDisplayLabel: 'OpenRouter'
# Helicone Example
- name: 'Helicone'
# For `apiKey` and `baseURL`, you can use environment variables that you define.
# recommended environment variables:
apiKey: '${HELICONE_KEY}'
baseURL: 'https://ai-gateway.helicone.ai'
headers:
x-librechat-body-parentmessageid: '{{LIBRECHAT_BODY_PARENTMESSAGEID}}'
models:
default: ['gpt-4o-mini', 'claude-4.5-sonnet', 'llama-3.1-8b-instruct', 'gemini-2.5-flash-lite']
fetch: true
titleConvo: true
titleModel: 'gpt-4o-mini'
modelDisplayLabel: 'Helicone'
iconURL: https://marketing-assets-helicone.s3.us-west-2.amazonaws.com/helicone.png
# Portkey AI Example # Portkey AI Example
- name: 'Portkey' - name: 'Portkey'
apiKey: 'dummy' apiKey: 'dummy'
@ -407,15 +423,12 @@ endpoints:
# # Jina Reranking Configuration # # Jina Reranking Configuration
# jinaApiKey: '${JINA_API_KEY}' # Your Jina API key # jinaApiKey: '${JINA_API_KEY}' # Your Jina API key
# jinaApiUrl: '${JINA_API_URL}' # Custom Jina API URL (optional, defaults to https://api.jina.ai/v1/rerank) # jinaApiUrl: '${JINA_API_URL}' # Custom Jina API URL (optional, defaults to https://api.jina.ai/v1/rerank)
#
# # Other rerankers # # Other rerankers
# cohereApiKey: '${COHERE_API_KEY}' # cohereApiKey: '${COHERE_API_KEY}'
#
# # Search providers # # Search providers
# serperApiKey: '${SERPER_API_KEY}' # serperApiKey: '${SERPER_API_KEY}'
# searxngInstanceUrl: '${SEARXNG_INSTANCE_URL}' # searxngInstanceUrl: '${SEARXNG_INSTANCE_URL}'
# searxngApiKey: '${SEARXNG_API_KEY}' # searxngApiKey: '${SEARXNG_API_KEY}'
#
# # Content scrapers # # Content scrapers
# firecrawlApiKey: '${FIRECRAWL_API_KEY}' # firecrawlApiKey: '${FIRECRAWL_API_KEY}'
# firecrawlApiUrl: '${FIRECRAWL_API_URL}' # firecrawlApiUrl: '${FIRECRAWL_API_URL}'

View file

@ -911,6 +911,7 @@ export enum KnownEndpoints {
fireworks = 'fireworks', fireworks = 'fireworks',
deepseek = 'deepseek', deepseek = 'deepseek',
groq = 'groq', groq = 'groq',
helicone = 'helicone',
huggingface = 'huggingface', huggingface = 'huggingface',
mistral = 'mistral', mistral = 'mistral',
mlx = 'mlx', mlx = 'mlx',
@ -926,6 +927,7 @@ export enum KnownEndpoints {
export enum FetchTokenConfig { export enum FetchTokenConfig {
openrouter = KnownEndpoints.openrouter, openrouter = KnownEndpoints.openrouter,
helicone = KnownEndpoints.helicone
} }
export const defaultEndpoints: EModelEndpoint[] = [ export const defaultEndpoints: EModelEndpoint[] = [
@ -958,6 +960,7 @@ export const alternateName = {
[KnownEndpoints.deepseek]: 'DeepSeek', [KnownEndpoints.deepseek]: 'DeepSeek',
[KnownEndpoints.xai]: 'xAI', [KnownEndpoints.xai]: 'xAI',
[KnownEndpoints.vercel]: 'Vercel', [KnownEndpoints.vercel]: 'Vercel',
[KnownEndpoints.helicone]: 'Helicone',
}; };
const sharedOpenAIModels = [ const sharedOpenAIModels = [