Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 08:50:15 +01:00)
🔗 feat: User Provided Base URL for OpenAI endpoints (#1919)
* chore: bump browserslist-db@latest
* refactor(EndpointService): simplify with `generateConfig`, utilize optional baseURL for OpenAI-based endpoints, use `isUserProvided` helper fn wherever needed
* refactor(custom/initializeClient): use standardized naming for common variables
* feat: user-provided baseURL for OpenAI-based endpoints
* refactor(custom/initializeClient): re-order operations
* fix: knownEndpoints enum definition and add FetchTokenConfig, bump data-provider
* refactor(custom): use tokenKey dependent on userProvided conditions for caching and fetching endpointTokenConfig, anticipate token rates from custom config (sketched below, after the commit metadata)
* refactor(custom): ensure endpointTokenConfig is only accessed from cache if it qualifies for fetching
* fix(ci): update tests for initializeClient based on userProvidedURL changes
* fix(EndpointService): correct baseURL env var for assistants: `ASSISTANTS_BASE_URL`
* fix: unnecessary run cancellation on res.close() when response.run is completed (see the second sketch below)
* feat(assistants): user-provided URL option
* ci: update tests and add test for `assistants` endpoint
* chore: leaner condition for request closing
* chore: more descriptive error message prompting users to provide keys again
parent 53ae2d7bfb
commit 2f92b54787
17 changed files with 762 additions and 226 deletions
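For the tokenKey item above, a minimal sketch of the caching idea. The `user_provided` sentinel check matches LibreChat convention, but `getTokenKey` and the `${endpoint}:${userId}` key shape are illustrative assumptions, not the exact code from this commit:

```js
// Sketch only: `isUserProvided` flags config values the user must supply
// at runtime (the 'user_provided' sentinel is a LibreChat convention).
const isUserProvided = (value) => value === 'user_provided';

// Assumed helper (not from this commit): when the baseURL or API key is
// user-provided, the fetched endpointTokenConfig can differ per user, so
// the cache key is scoped to the user instead of shared for the endpoint.
const getTokenKey = ({ endpoint, userId, baseURL, apiKey }) =>
  isUserProvided(baseURL) || isUserProvided(apiKey)
    ? `${endpoint}:${userId}`
    : endpoint;
```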
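For the run-cancellation fix, a hedged sketch of the leaner close condition. The `response.run` shape and `completedRunStatuses` set are assumptions; the cancel call assumes the v4 OpenAI Node SDK signature:

```js
// Assumed set of terminal statuses after which cancelling is unnecessary.
const completedRunStatuses = new Set(['completed', 'failed', 'cancelled', 'expired']);

res.on('close', async () => {
  // If there is no run, or it already finished, skip the cancellation.
  if (!response?.run || completedRunStatuses.has(response.run.status)) {
    return;
  }
  // v4 OpenAI Node SDK: cancel(threadId, runId)
  await openai.beta.threads.runs.cancel(response.run.thread_id, response.run.id);
});
```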
```diff
@@ -20,6 +20,7 @@ const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService')
  * @param {boolean} [params.azure=false] - Whether to fetch models from Azure.
  * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
  * @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response.
+ * @param {string} [params.tokenKey] - The cache key to save the token configuration. Uses `name` if omitted.
  * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
  * @async
  */
@@ -31,6 +32,7 @@ const fetchModels = async ({
   azure = false,
   userIdQuery = false,
   createTokenConfig = true,
+  tokenKey,
 }) => {
   let models = [];
 
@@ -70,7 +72,7 @@ const fetchModels = async ({
     if (validationResult.success && createTokenConfig) {
       const endpointTokenConfig = processModelData(input);
       const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
-      await cache.set(name, endpointTokenConfig);
+      await cache.set(tokenKey ?? name, endpointTokenConfig);
     }
     models = input.data.map((item) => item.id);
   } catch (error) {
```
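A hypothetical call site for the updated `fetchModels`, showing how `tokenKey` overrides `name` as the cache key; parameter names beyond what the hunks show (`user`, `apiKey`, `baseURL`) are assumptions:

```js
// Hypothetical usage (parameter names beyond the diff are assumed):
const models = await fetchModels({
  user: userId,                 // assumed: requesting user's id
  apiKey: userSuppliedKey,      // assumed: key the user provided
  baseURL: userSuppliedBaseURL, // assumed: user-provided OpenAI-compatible URL
  name: 'custom-endpoint',
  createTokenConfig: true,
  // User-scoped cache key; falls back to `name` when omitted
  // (see `cache.set(tokenKey ?? name, ...)` in the last hunk).
  tokenKey: `custom-endpoint:${userId}`,
});
```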