mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-09-21 21:50:49 +02:00

* feat: update PaLM icons * feat: add additional google models * POC: formatting inputs for Vertex AI streaming * refactor: move endpoints services outside of /routes dir to /services/Endpoints * refactor: shorten schemas import * refactor: rename PALM to GOOGLE * feat: make Google editable endpoint * feat: reusable Ask and Edit controllers based off Anthropic * chore: organize imports/logic * fix(parseConvo): include examples in googleSchema * fix: google only allows odd number of messages to be sent * fix: pass proxy to AnthropicClient * refactor: change `google` altName to `Google` * refactor: update getModelMaxTokens and related functions to handle maxTokensMap with nested endpoint model key/values * refactor: google Icon and response sender changes (Codey and Google logo instead of PaLM in all cases) * feat: google support for maxTokensMap * feat: google updated endpoints with Ask/Edit controllers, buildOptions, and initializeClient * feat(GoogleClient): now builds prompt for text models and supports real streaming from Vertex AI through langchain * chore(GoogleClient): remove comments, left before for reference in git history * docs: update google instructions (WIP) * docs(apis_and_tokens.md): add images to google instructions * docs: remove typo apis_and_tokens.md * Update apis_and_tokens.md * feat(Google): use default settings map, fully support context for both text and chat models, fully support examples for chat models * chore: update more PaLM references to Google * chore: move playwright out of workflows to avoid failing tests
52 lines
1.7 KiB
JavaScript
const { EModelEndpoint } = require('~/server/services/Endpoints');
const loadAsyncEndpoints = require('./loadAsyncEndpoints');
const { config } = require('./EndpointService');

/**
 * Load async endpoints and return a configuration object.
 *
 * The set of enabled endpoints can be restricted via the `ENDPOINTS`
 * environment variable (a comma-separated list of endpoint names); when it is
 * unset or empty, all known endpoints are enabled in their default order.
 * Endpoints named in `ENDPOINTS` but missing from the known configuration are
 * silently dropped.
 *
 * @function loadDefaultEndpointsConfig
 * @returns {Promise<Object.<string, EndpointWithOrder>>} An object whose keys are endpoint
 *   names and values are objects that contain the endpoint configuration and an `order`
 *   reflecting the endpoint's position in the enabled list.
 */
async function loadDefaultEndpointsConfig() {
  // `google` and `gptPlugins` need async initialization; the rest come from static config.
  const { google, gptPlugins } = await loadAsyncEndpoints();
  const { openAI, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config;

  // Default ordering used when ENDPOINTS is not set.
  let enabledEndpoints = [
    EModelEndpoint.openAI,
    EModelEndpoint.azureOpenAI,
    EModelEndpoint.google,
    EModelEndpoint.bingAI,
    EModelEndpoint.chatGPTBrowser,
    EModelEndpoint.gptPlugins,
    EModelEndpoint.anthropic,
  ];

  const endpointsEnv = process.env.ENDPOINTS || '';
  if (endpointsEnv) {
    enabledEndpoints = endpointsEnv
      .split(',')
      .filter((endpoint) => endpoint?.trim()) // drop empty/whitespace-only entries
      .map((endpoint) => endpoint.trim());
  }

  const endpointConfig = {
    [EModelEndpoint.openAI]: openAI,
    [EModelEndpoint.azureOpenAI]: azureOpenAI,
    [EModelEndpoint.google]: google,
    [EModelEndpoint.bingAI]: bingAI,
    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
    [EModelEndpoint.gptPlugins]: gptPlugins,
    [EModelEndpoint.anthropic]: anthropic,
  };

  // Keep only endpoints that have a (truthy) configuration; `order` records the
  // endpoint's position in the enabled list. The accumulator is named `acc`
  // (not `config`) to avoid shadowing the module-level `config` import above.
  const orderedAndFilteredEndpoints = enabledEndpoints.reduce((acc, key, index) => {
    if (endpointConfig[key]) {
      // The truthiness check above guarantees endpointConfig[key] is non-nullish,
      // so no `?? {}` fallback is needed when spreading.
      acc[key] = { ...endpointConfig[key], order: index };
    }
    return acc;
  }, {});

  return orderedAndFilteredEndpoints;
}

module.exports = loadDefaultEndpointsConfig;
|