Mirror of https://github.com/danny-avila/LibreChat.git
🛣️ feat: directEndpoint Fetch Override for Custom Endpoints (#9179)

* feat: Add directEndpoint option to OpenAIConfigOptions and update fetch logic to override /chat/completions URL
* feat: Add directEndpoint support to fetchModels and update loadConfigModels logic
parent e0ebb7097e
commit a49b2b2833

4 changed files with 20 additions and 4 deletions
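For context: the OpenAI client normally builds its request URL by appending /chat/completions to the configured baseURL. With directEndpoint enabled, an overridden fetch sends requests to the configured URL verbatim. A minimal TypeScript illustration of the difference (the URL is hypothetical):

// Illustration only; the URL is made up.
const baseURL = 'https://example.com/api/generate';

// Default SDK behavior: the completions path is appended to baseURL.
const standardURL = `${baseURL}/chat/completions`;
// -> 'https://example.com/api/generate/chat/completions'

// With directEndpoint: true, the fetch override targets baseURL as-is.
const directURL = baseURL;
// -> 'https://example.com/api/generate'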
@@ -76,10 +76,11 @@ async function loadConfigModels(req) {
       fetchPromisesMap[uniqueKey] =
         fetchPromisesMap[uniqueKey] ||
         fetchModels({
-          user: req.user.id,
-          baseURL: BASE_URL,
-          apiKey: API_KEY,
           name,
+          apiKey: API_KEY,
+          baseURL: BASE_URL,
+          user: req.user.id,
+          direct: endpoint.directEndpoint,
           userIdQuery: models.userIdQuery,
         });
       uniqueKeyToEndpointsMap[uniqueKey] = uniqueKeyToEndpointsMap[uniqueKey] || [];
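The direct flag above comes from each custom endpoint's configuration. A sketch of such a parsed endpoint object, with field values invented for illustration (directEndpoint is the option this commit wires through):

// Hypothetical parsed custom-endpoint config; values are invented.
const endpoint = {
  name: 'MyBackend',
  apiKey: '${MY_BACKEND_KEY}',
  baseURL: 'https://example.com/api/generate',
  directEndpoint: true, // opts this endpoint into the fetch override
  models: { fetch: true, userIdQuery: false },
};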
@@ -34,6 +34,7 @@ const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService')
  * @param {string} params.apiKey - The API key for authentication with the API.
  * @param {string} params.baseURL - The base path URL for the API.
  * @param {string} [params.name='OpenAI'] - The name of the API; defaults to 'OpenAI'.
+ * @param {boolean} [params.direct=false] - Whether `directEndpoint` was configured
  * @param {boolean} [params.azure=false] - Whether to fetch models from Azure.
  * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
  * @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response.
@@ -44,14 +45,16 @@ const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService')
 const fetchModels = async ({
   user,
   apiKey,
-  baseURL,
+  baseURL: _baseURL,
   name = EModelEndpoint.openAI,
+  direct,
   azure = false,
   userIdQuery = false,
   createTokenConfig = true,
   tokenKey,
 }) => {
   let models = [];
+  const baseURL = direct ? extractBaseURL(_baseURL) : _baseURL;

   if (!baseURL && !azure) {
     return models;
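In direct mode the configured URL points at the completion route itself, so fetchModels first derives a base it can append /models to. A simplified stand-in for extractBaseURL, covering only the common case (the project's real helper handles more URL shapes):

// Simplified stand-in; not the project's actual implementation.
function extractBaseURLSketch(url: string): string {
  // Drop a trailing completions-style path so '/models' can be appended.
  return url.replace(/\/(chat\/)?completions\/?$/, '');
}

extractBaseURLSketch('https://example.com/v1/chat/completions');
// -> 'https://example.com/v1'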
@@ -6,8 +6,11 @@ import type { AzureOpenAIInput } from '@langchain/openai';
 import type { OpenAI } from 'openai';
 import type * as t from '~/types';
 import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
+import { createFetch } from '~/utils/generators';
 import { isEnabled } from '~/utils/common';

+type Fetch = (input: string | URL | Request, init?: RequestInit) => Promise<Response>;
+
 export const knownOpenAIParams = new Set([
   // Constructor/Instance Parameters
   'model',
@@ -92,6 +95,7 @@ export function getOpenAIConfig(
   const {
     modelOptions: _modelOptions = {},
     reverseProxyUrl,
+    directEndpoint,
     defaultQuery,
     headers,
     proxy,
@@ -311,6 +315,13 @@ export function getOpenAIConfig(
     llmConfig.modelKwargs = modelKwargs;
   }

+  if (directEndpoint === true && configOptions?.baseURL != null) {
+    configOptions.fetch = createFetch({
+      directEndpoint: directEndpoint,
+      reverseProxyUrl: configOptions?.baseURL,
+    }) as unknown as Fetch;
+  }
+
   const result: t.LLMConfigResult = {
     llmConfig,
     configOptions,
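createFetch (imported from ~/utils/generators) supplies the custom fetch that configOptions.fetch hands to the OpenAI client. A minimal sketch of the override's effect, assuming the option names visible in the diff; the actual implementation may do more:

type Fetch = (input: string | URL | Request, init?: RequestInit) => Promise<Response>;

// Sketch: with directEndpoint set, ignore the SDK-built URL
// (typically `${baseURL}/chat/completions`) and call the configured URL directly.
function createDirectFetch(options: {
  directEndpoint?: boolean;
  reverseProxyUrl?: string;
}): Fetch {
  return (input, init) => {
    const url =
      options.directEndpoint && options.reverseProxyUrl
        ? options.reverseProxyUrl
        : input;
    return fetch(url, init);
  };
}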
@@ -12,6 +12,7 @@ export type OpenAIParameters = z.infer<typeof openAISchema>;
  */
 export interface OpenAIConfigOptions {
   modelOptions?: Partial<OpenAIParameters>;
+  directEndpoint?: boolean;
   reverseProxyUrl?: string;
   defaultQuery?: Record<string, string | undefined>;
   headers?: Record<string, string>;
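Putting it together, a caller opts in through OpenAIConfigOptions. The call below is hypothetical; the exact getOpenAIConfig signature is assumed for illustration:

// Hypothetical usage; argument shape assumed from the interface above.
const { llmConfig, configOptions } = getOpenAIConfig(apiKey, {
  reverseProxyUrl: 'https://example.com/api/generate',
  directEndpoint: true, // requests hit this URL verbatim
});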