import { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } from 'librechat-data-provider';
import type {
  UserKeyValues,
  OpenAIOptionsResult,
  OpenAIConfigOptions,
  InitializeOpenAIOptionsParams,
} from '~/types';
import { createHandleLLMNewToken } from '~/utils/generators';
import { getAzureCredentials } from '~/utils/azure';
import { isUserProvided } from '~/utils/common';
import { resolveHeaders } from '~/utils/env';
import { getOpenAIConfig } from './llm';

/**
 * Initializes OpenAI options for agent usage. This function always returns configuration
 * options and never creates a client instance (equivalent to optionsOnly=true behavior).
 *
 * @param params - Configuration parameters
 * @returns Promise resolving to OpenAI configuration options
 * @throws Error if the API key is missing or the user-provided key has expired
 */
export const initializeOpenAI = async ({
  req,
  overrideModel,
  endpointOption,
  overrideEndpoint,
  getUserKeyValues,
  checkUserKeyExpiry,
}: InitializeOpenAIOptionsParams): Promise<OpenAIOptionsResult> => {
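
  // Server-level credentials, proxy, and reverse-proxy URLs come from the environment.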
  const { PROXY, OPENAI_API_KEY, AZURE_API_KEY, OPENAI_REVERSE_PROXY, AZURE_OPENAI_BASEURL } =
    process.env;

  const { key: expiresAt } = req.body;
  const modelName = overrideModel ?? req.body.model;
  const endpoint = overrideEndpoint ?? req.body.endpoint;

  if (!endpoint) {
    throw new Error('Endpoint is required');
  }
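
  // Map each supported endpoint to its environment-configured credential and base URL.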
  const credentials = {
    [EModelEndpoint.openAI]: OPENAI_API_KEY,
    [EModelEndpoint.azureOpenAI]: AZURE_API_KEY,
  };

  const baseURLOptions = {
    [EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
    [EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL,
  };

  const userProvidesKey = isUserProvided(credentials[endpoint as keyof typeof credentials]);
  const userProvidesURL = isUserProvided(baseURLOptions[endpoint as keyof typeof baseURLOptions]);
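
  // If the key or base URL is user-provided, check expiry and load the values
  // the user has stored for this endpoint.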
  let userValues: UserKeyValues | null = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }

  let apiKey = userProvidesKey
    ? userValues?.apiKey
    : credentials[endpoint as keyof typeof credentials];
  const baseURL = userProvidesURL
    ? userValues?.baseURL
    : baseURLOptions[endpoint as keyof typeof baseURLOptions];
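
  // Base client options; Azure-specific fields are layered on below.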
  const clientOptions: OpenAIConfigOptions = {
    proxy: PROXY ?? undefined,
    reverseProxyUrl: baseURL || undefined,
    streaming: true,
  };

  const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
  const azureConfig = isAzureOpenAI && req.app.locals[EModelEndpoint.azureOpenAI];
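
  // With an admin-defined Azure config, map the requested model to its group to
  // resolve the deployment's base URL, headers, and parameter overrides.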
  if (isAzureOpenAI && azureConfig) {
    const { modelGroupMap, groupMap } = azureConfig;
    const {
      azureOptions,
      baseURL: configBaseURL,
      headers = {},
      serverless,
    } = mapModelToAzureConfig({
      modelName: modelName || '',
      modelGroupMap,
      groupMap,
    });

    clientOptions.reverseProxyUrl = configBaseURL ?? clientOptions.reverseProxyUrl;
    clientOptions.headers = resolveHeaders(
      { ...headers, ...(clientOptions.headers ?? {}) },
      req.user,
    );

    const groupName = modelGroupMap[modelName || '']?.group;
    if (groupName && groupMap[groupName]) {
      clientOptions.addParams = groupMap[groupName]?.addParams;
      clientOptions.dropParams = groupMap[groupName]?.dropParams;
    }

    apiKey = azureOptions.azureOpenAIApiKey;
    clientOptions.azure = !serverless ? azureOptions : undefined;
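
    // Serverless deployments authenticate via the `api-key` header and pass the
    // API version as a query parameter instead of using structured azure options.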
    if (serverless === true) {
      clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
        ? { 'api-version': azureOptions.azureOpenAIApiVersion }
        : undefined;

      if (!clientOptions.headers) {
        clientOptions.headers = {};
      }
      clientOptions.headers['api-key'] = apiKey;
    }
  } else if (isAzureOpenAI) {
    clientOptions.azure =
      userProvidesKey && userValues?.apiKey ? JSON.parse(userValues.apiKey) : getAzureCredentials();
    apiKey = clientOptions.azure?.azureOpenAIApiKey;
  }
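
  // Distinguish a missing user-supplied key (typed NO_USER_KEY error) from a
  // missing server-side key.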
  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }

  if (!apiKey) {
    throw new Error(`${endpoint} API Key not provided.`);
  }
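
  // Merge request-level model parameters with the resolved model name and user id.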
  const modelOptions = {
    ...endpointOption.model_parameters,
    model: modelName,
    user: req.user.id,
  };

  const finalClientOptions: OpenAIConfigOptions = {
    ...clientOptions,
    modelOptions,
  };

  const options = getOpenAIConfig(apiKey, finalClientOptions, endpoint);
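
  // Resolve the token stream rate: endpoint config first (Azure falls back to a
  // model-based default), with the `all` endpoint config overriding both.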
  const openAIConfig = req.app.locals[EModelEndpoint.openAI];
  const allConfig = req.app.locals.all;
  const azureRate = modelName?.includes('gpt-4') ? 30 : 17;

  let streamRate: number | undefined;

  if (isAzureOpenAI && azureConfig) {
    streamRate = azureConfig.streamRate ?? azureRate;
  } else if (!isAzureOpenAI && openAIConfig) {
    streamRate = openAIConfig.streamRate;
  }

  if (allConfig?.streamRate) {
    streamRate = allConfig.streamRate;
  }
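
  // Throttle token emission by attaching a stream callback when a rate is set.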
  if (streamRate) {
    options.llmConfig.callbacks = [
      {
        handleLLMNewToken: createHandleLLMNewToken(streamRate),
      },
    ];
  }

  const result: OpenAIOptionsResult = {
    ...options,
    streamRate,
  };

  return result;
};
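
/*
 * Example usage (an illustrative sketch; the request shape and the
 * `getUserKeyValues`/`checkUserKeyExpiry` stubs are assumptions, not this
 * module's API):
 *
 *   const options = await initializeOpenAI({
 *     req, // Express-style request with `user`, `body`, and `app.locals`
 *     endpointOption: { model_parameters: { temperature: 0.7 } },
 *     getUserKeyValues: async ({ userId, name }) => loadUserKeys(userId, name), // hypothetical helper
 *     checkUserKeyExpiry: (expiresAt, endpoint) => {
 *       if (new Date(expiresAt) < new Date()) {
 *         throw new Error(`${endpoint} key expired`);
 *       }
 *     },
 *   });
 *   // `options.llmConfig` holds the resolved model/client configuration.
 */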