mirror of https://github.com/danny-avila/LibreChat.git
synced 2025-09-22 06:00:56 +02:00

* chore: rename dir from `assistant` to plural
* feat: `assistants` field for azure config, spread options in AppService
* refactor: rename constructAzureURL param for azure as `azureOptions`
* chore: bump openai and bun
* chore(loadDefaultModels): change naming of assistant -> assistants
* feat: load azure settings with correct baseURL for assistants' initializeClient
* refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig
* feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled
* feat(AppService): determine assistant models on startup, throw Error if none
* refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations
* feat: support listing and deleting assistants with azure
* feat: add model query to assistant avatar upload
* feat: add azure support for retrieveRun method
* refactor: update OpenAIClient initialization
* chore: update README
* fix(ci): tests passing
* refactor(uploadOpenAIFile): improve logging and use more efficient REST API method
* refactor(useFileHandling): add model to metadata to target Azure region compatible with current model
* chore(files): add azure naming pattern for valid file id recognition
* fix(assistants): initialize openai with first available assistant model if none provided
* refactor(uploadOpenAIFile): add content type for azure, initialize formdata before azure options
* refactor(sleep): move sleep function out of Runs and into `~/server/utils`
* fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled
* refactor(uploadOpenAIFile): revert to old method
* chore(uploadOpenAIFile): use enum for file purpose
* docs: azureOpenAI update guide with more info, examples
* feat: enable/disable assistant capabilities and specify retrieval models
* refactor: optional chain conditional statement in loadConfigModels.js
* docs: add assistants examples
* chore: update librechat.example.yaml
* docs(azure): update note of file upload behavior in Azure OpenAI Assistants
* chore: update docs and add descriptive message about assistant errors
* fix: prevent message submission with invalid assistant or if files loading
* style: update Landing icon & text when assistant is not selected
* chore: bump librechat-data-provider to 0.4.8
* fix(assistants/azure): assign req.body.model for proper azure init to abort runs
81 lines
2.7 KiB
JavaScript
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { sanitizeModelName, constructAzureURL } = require('~/utils');
const { isEnabled } = require('~/server/utils');

/**
 * Creates a new instance of a language model (LLM) for chat interactions.
 *
 * @param {Object} options - The options for creating the LLM.
 * @param {ModelOptions} options.modelOptions - The options specific to the model, including modelName, temperature, presence_penalty, frequency_penalty, and other model-related settings.
 * @param {ConfigOptions} options.configOptions - Configuration options for the API requests, including proxy settings and custom headers.
 * @param {Callbacks} options.callbacks - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count.
 * @param {boolean} [options.streaming=false] - Determines if the LLM should operate in streaming mode.
 * @param {string} options.openAIApiKey - The API key for OpenAI, used for authentication.
 * @param {AzureOptions} [options.azure={}] - Optional Azure-specific configurations. If provided, Azure configurations take precedence over OpenAI configurations.
 *
 * @returns {ChatOpenAI} An instance of the ChatOpenAI class, configured with the provided options.
 *
 * @example
 * const llm = createLLM({
 *   modelOptions: { modelName: 'gpt-3.5-turbo', temperature: 0.2 },
 *   configOptions: { basePath: 'https://example.api/path' },
 *   callbacks: { onMessage: handleMessage },
 *   openAIApiKey: 'your-api-key'
 * });
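 *
 * @example
 * // A sketch of Azure usage (values are illustrative, not from the source).
 * // The `azure` object is spread into the ChatOpenAI constructor below, so
 * // its fields follow langchain's Azure option names:
 * const azureLLM = createLLM({
 *   modelOptions: { modelName: 'gpt-4', temperature: 0.2 },
 *   configOptions: {},
 *   callbacks: { onMessage: handleMessage },
 *   azure: {
 *     azureOpenAIApiKey: 'your-azure-key',
 *     azureOpenAIApiInstanceName: 'your-instance',
 *     azureOpenAIApiDeploymentName: 'your-deployment',
 *     azureOpenAIApiVersion: '2023-07-01-preview',
 *   },
 * });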
 */
function createLLM({
  modelOptions,
  configOptions,
  callbacks,
  streaming = false,
  openAIApiKey,
  azure = {},
}) {
  let credentials = { openAIApiKey };
  let configuration = {
    apiKey: openAIApiKey,
  };

  /** @type {AzureOptions} */
  let azureOptions = {};
  if (azure) {
    // Optionally derive the Azure deployment name from the (sanitized) model name
    const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);

    // Azure credentials are carried in azureOptions, so clear the OpenAI ones
    credentials = {};
    configuration = {};
    azureOptions = azure;

    azureOptions.azureOpenAIApiDeploymentName = useModelName
      ? sanitizeModelName(modelOptions.modelName)
      : azureOptions.azureOpenAIApiDeploymentName;
  }

  // An environment-level default model overrides whatever the request specified
  if (azure && process.env.AZURE_OPENAI_DEFAULT_MODEL) {
    modelOptions.modelName = process.env.AZURE_OPENAI_DEFAULT_MODEL;
  }

  if (azure && configOptions.basePath) {
    const azureURL = constructAzureURL({
      baseURL: configOptions.basePath,
      azureOptions,
    });
    // Keep everything before the deployment segment; langchain re-appends the
    // deployment name to `azureOpenAIBasePath` when building request URLs
    azureOptions.azureOpenAIBasePath = azureURL.split(
      `/${azureOptions.azureOpenAIApiDeploymentName}`,
    )[0];
  }

  return new ChatOpenAI(
    {
      streaming,
      credentials,
      configuration,
      ...azureOptions,
      ...modelOptions,
      // spread credentials last so `openAIApiKey` is not overridden by the spreads above
      ...credentials,
      callbacks,
    },
    configOptions,
  );
}

module.exports = createLLM;
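
/*
 * Illustrative sketch (not part of the module): what the basePath trimming in
 * createLLM produces. The URL below is hypothetical; `constructAzureURL` is
 * assumed to return a full endpoint URL ending in the deployment segment.
 *
 *   const azureURL =
 *     'https://eastus.api.cognitive.microsoft.com/openai/deployments/my-deployment';
 *   const basePath = azureURL.split('/my-deployment')[0];
 *   // basePath === 'https://eastus.api.cognitive.microsoft.com/openai/deployments'
 *
 * langchain then appends the deployment name back onto `azureOpenAIBasePath`
 * when constructing request URLs.
 */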