Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 17:00:15 +01:00)
🛣️ feat: directEndpoint Fetch Override for Custom Endpoints (#9179)
* feat: Add directEndpoint option to OpenAIConfigOptions and update fetch logic to override /chat/completions URL
* feat: Add directEndpoint support to fetchModels and update loadConfigModels logic
Parent: e0ebb7097e
Commit: a49b2b2833
4 changed files with 20 additions and 4 deletions
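With `directEndpoint` enabled, the configured `baseURL` is treated as the complete chat-completions URL rather than a prefix that `/chat/completions` gets appended to. A hedged sketch of how the option might be set on a custom endpoint in `librechat.yaml` (endpoint name, URL, key variable, and model are placeholders):

```yaml
endpoints:
  custom:
    - name: 'MyProvider'
      apiKey: '${MY_PROVIDER_API_KEY}'
      # Full completions URL, used verbatim thanks to the fetch override:
      baseURL: 'https://example.com/api/custom/chat'
      directEndpoint: true
      models:
        default: ['my-model']
        fetch: true
```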
```diff
@@ -76,10 +76,11 @@ async function loadConfigModels(req) {
       fetchPromisesMap[uniqueKey] =
         fetchPromisesMap[uniqueKey] ||
         fetchModels({
-          user: req.user.id,
-          baseURL: BASE_URL,
-          apiKey: API_KEY,
           name,
+          apiKey: API_KEY,
+          baseURL: BASE_URL,
+          user: req.user.id,
+          direct: endpoint.directEndpoint,
           userIdQuery: models.userIdQuery,
         });
       uniqueKeyToEndpointsMap[uniqueKey] = uniqueKeyToEndpointsMap[uniqueKey] || [];
```
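The hunk above threads the endpoint's `directEndpoint` setting into `fetchModels` as `direct`, while keeping the existing per-key promise cache. A minimal sketch of that dedup pattern, with names mirroring the diff (the real map and fetcher live in the repo):

```ts
// Sketch of the dedup pattern: concurrent callers asking for the same
// endpoint share a single in-flight model fetch.
const fetchPromisesMap: Record<string, Promise<string[]>> = {};

function getModelsOnce(uniqueKey: string, fetcher: () => Promise<string[]>): Promise<string[]> {
  fetchPromisesMap[uniqueKey] = fetchPromisesMap[uniqueKey] || fetcher();
  return fetchPromisesMap[uniqueKey];
}
```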
```diff
@@ -34,6 +34,7 @@ const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService')
  * @param {string} params.apiKey - The API key for authentication with the API.
  * @param {string} params.baseURL - The base path URL for the API.
  * @param {string} [params.name='OpenAI'] - The name of the API; defaults to 'OpenAI'.
+ * @param {boolean} [params.direct=false] - Whether `directEndpoint` was configured
  * @param {boolean} [params.azure=false] - Whether to fetch models from Azure.
  * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
  * @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response.
@@ -44,14 +45,16 @@ const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService')
 const fetchModels = async ({
   user,
   apiKey,
-  baseURL,
+  baseURL: _baseURL,
   name = EModelEndpoint.openAI,
+  direct,
   azure = false,
   userIdQuery = false,
   createTokenConfig = true,
   tokenKey,
 }) => {
   let models = [];
+  const baseURL = direct ? extractBaseURL(_baseURL) : _baseURL;
 
   if (!baseURL && !azure) {
     return models;
```
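Here `fetchModels` renames the incoming `baseURL` to `_baseURL` and, when `direct` is set, reduces it to a plain base via `extractBaseURL` before requesting `/models`, since a direct endpoint's configured URL points at the completions route itself. An illustrative stand-in for that reduction (the repo's `extractBaseURL` may apply different rules):

```ts
// Illustrative only: trim a trailing "/chat/completions" so a models
// request can target `${base}/models`.
function toBaseURL(url: string): string {
  return url.replace(/\/chat\/completions\/?$/, '');
}

// toBaseURL('https://example.com/v1/chat/completions') -> 'https://example.com/v1'
```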
```diff
@@ -6,8 +6,11 @@ import type { AzureOpenAIInput } from '@langchain/openai';
 import type { OpenAI } from 'openai';
 import type * as t from '~/types';
 import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
+import { createFetch } from '~/utils/generators';
 import { isEnabled } from '~/utils/common';
 
+type Fetch = (input: string | URL | Request, init?: RequestInit) => Promise<Response>;
+
 export const knownOpenAIParams = new Set([
   // Constructor/Instance Parameters
   'model',
@@ -92,6 +95,7 @@ export function getOpenAIConfig(
   const {
     modelOptions: _modelOptions = {},
     reverseProxyUrl,
+    directEndpoint,
     defaultQuery,
     headers,
     proxy,
@@ -311,6 +315,13 @@ export function getOpenAIConfig(
     llmConfig.modelKwargs = modelKwargs;
   }
 
+  if (directEndpoint === true && configOptions?.baseURL != null) {
+    configOptions.fetch = createFetch({
+      directEndpoint: directEndpoint,
+      reverseProxyUrl: configOptions?.baseURL,
+    }) as unknown as Fetch;
+  }
+
   const result: t.LLMConfigResult = {
     llmConfig,
     configOptions,
```
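This is the override itself: when `directEndpoint` is true and a `baseURL` exists, `configOptions.fetch` is swapped for a `createFetch` wrapper so completion requests go to the configured URL verbatim. A hypothetical sketch of such a wrapper, assuming the same option names as the diff (the actual `createFetch` in `~/utils/generators` may differ):

```ts
type Fetch = (input: string | URL | Request, init?: RequestInit) => Promise<Response>;

// Hypothetical wrapper, not the repo's createFetch: requests aimed at
// ".../chat/completions" are redirected to the configured URL as-is.
function makeDirectFetch(options: { directEndpoint?: boolean; reverseProxyUrl: string }): Fetch {
  const { directEndpoint, reverseProxyUrl } = options;
  return (input, init) => {
    const url = typeof input === 'string' ? input : input instanceof URL ? input.href : input.url;
    if (directEndpoint && url.endsWith('/chat/completions')) {
      return fetch(reverseProxyUrl, init);
    }
    return fetch(input, init);
  };
}
```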
```diff
@@ -12,6 +12,7 @@ export type OpenAIParameters = z.infer<typeof openAISchema>;
  */
 export interface OpenAIConfigOptions {
   modelOptions?: Partial<OpenAIParameters>;
+  directEndpoint?: boolean;
   reverseProxyUrl?: string;
   defaultQuery?: Record<string, string | undefined>;
   headers?: Record<string, string>;
```
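With `directEndpoint?: boolean` on `OpenAIConfigOptions`, callers can pass the flag straight through `getOpenAIConfig`. A hypothetical usage sketch (the import path, key handling, URL, and model name are assumptions; the exact call signature lives in the repo):

```ts
import { getOpenAIConfig } from '~/endpoints/openai/llm';

// Placeholder key and URL; directEndpoint is the option added by this commit.
const apiKey = process.env.CUSTOM_API_KEY ?? '';
const { llmConfig, configOptions } = getOpenAIConfig(apiKey, {
  reverseProxyUrl: 'https://example.com/api/custom/chat',
  directEndpoint: true,
  modelOptions: { model: 'my-model' },
});
// configOptions.fetch now holds the createFetch() override, so chat
// completion requests hit the configured URL verbatim.
```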