Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-09-22 06:00:56 +02:00

* feat: update PaLM icons
* feat: add additional google models
* POC: formatting inputs for Vertex AI streaming
* refactor: move endpoints services outside of /routes dir to /services/Endpoints
* refactor: shorten schemas import
* refactor: rename PALM to GOOGLE
* feat: make Google editable endpoint
* feat: reusable Ask and Edit controllers based off Anthropic
* chore: organize imports/logic
* fix(parseConvo): include examples in googleSchema
* fix: google only allows odd number of messages to be sent
* fix: pass proxy to AnthropicClient
* refactor: change `google` altName to `Google`
* refactor: update getModelMaxTokens and related functions to handle maxTokensMap with nested endpoint model key/values (a sketch follows this list)
* refactor: google Icon and response sender changes (Codey and Google logo instead of PaLM in all cases)
* feat: google support for maxTokensMap
* feat: google updated endpoints with Ask/Edit controllers, buildOptions, and initializeClient
* feat(GoogleClient): now builds prompt for text models and supports real streaming from Vertex AI through langchain
* chore(GoogleClient): remove comments, left before for reference in git history
* docs: update google instructions (WIP)
* docs(apis_and_tokens.md): add images to google instructions
* docs: remove typo in apis_and_tokens.md
* Update apis_and_tokens.md
* feat(Google): use default settings map, fully support context for both text and chat models, fully support examples for chat models
* chore: update more PaLM references to Google
* chore: move playwright out of workflows to avoid failing tests
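The list above mentions updating getModelMaxTokens and related functions so that maxTokensMap can be keyed by endpoint, with model names nested underneath. The sketch below only illustrates that shape; the map values, the lookup rules, and this version of getModelMaxTokens are assumptions for illustration, not the repository's actual implementation.

// Illustrative sketch only: a token map nested by endpoint, then by model key.
// The real values and matching rules live in the repository's token utilities.
const maxTokensMap = {
  openAI: { 'gpt-4': 8191, 'gpt-3.5-turbo': 4095 },
  google: { 'chat-bison': 4096, 'chat-bison-32k': 32768 },
  anthropic: { 'claude-2': 100000 },
};

function getModelMaxTokens(modelName, endpoint = 'openAI') {
  const endpointMap = maxTokensMap[endpoint] ?? {};
  // Exact match first, then fall back to the longest key the model name starts with.
  if (endpointMap[modelName] != null) {
    return endpointMap[modelName];
  }
  const match = Object.keys(endpointMap)
    .filter((key) => modelName.startsWith(key))
    .sort((a, b) => b.length - a.length)[0];
  return match ? endpointMap[match] : undefined;
}

// Example: getModelMaxTokens('chat-bison-32k', 'google') -> 32768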
43 lines · 1.4 KiB · JavaScript
const {
  getOpenAIModels,
  getChatGPTBrowserModels,
  getAnthropicModels,
} = require('~/server/services/ModelService');
const { EModelEndpoint } = require('~/server/services/Endpoints');
const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;

// Keep only GPT-4 / GPT-3.5 models for the Assistants endpoint,
// excluding vision and instruct variants.
const filterAssistantModels = (str) => {
  return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
};

// Builds the default model list for every supported endpoint.
// OpenAI-compatible lists come from ModelService; Google and Bing defaults are hardcoded here.
async function loadDefaultModels() {
  const openAI = await getOpenAIModels();
  const anthropic = getAnthropicModels();
  const chatGPTBrowser = getChatGPTBrowserModels();
  const azureOpenAI = await getOpenAIModels({ azure: true });
  const gptPlugins = await getOpenAIModels({ azure: useAzurePlugins, plugins: true });

  return {
    [EModelEndpoint.openAI]: openAI,
    [EModelEndpoint.azureOpenAI]: azureOpenAI,
    [EModelEndpoint.assistant]: openAI.filter(filterAssistantModels),
    [EModelEndpoint.google]: [
      'chat-bison',
      'chat-bison-32k',
      'codechat-bison',
      'codechat-bison-32k',
      'text-bison',
      'text-bison-32k',
      'text-unicorn',
      'code-gecko',
      'code-bison',
      'code-bison-32k',
    ],
    [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
    [EModelEndpoint.gptPlugins]: gptPlugins,
    [EModelEndpoint.anthropic]: anthropic,
  };
}

module.exports = loadDefaultModels;
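For context, a minimal usage sketch follows: it shows how the exported loader might be called to serve per-endpoint model lists. The '/api/models' path and the Express wiring are assumptions for illustration, not code from the repository.

// Hypothetical usage sketch: expose the default model lists from an Express route.
const express = require('express');
const loadDefaultModels = require('~/server/services/Config/loadDefaultModels');

const router = express.Router();

router.get('/api/models', async (req, res) => {
  // Resolves to an object keyed by EModelEndpoint values,
  // e.g. { openAI: [...], google: ['chat-bison', ...], ... }
  const modelsConfig = await loadDefaultModels();
  res.json(modelsConfig);
});

module.exports = router;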