mirror of https://github.com/danny-avila/LibreChat.git
synced 2025-09-22 08:12:00 +02:00
feat(Google): Support all Text/Chat Models, Response streaming, PaLM -> Google 🤖 (#1316)
* feat: update PaLM icons
* feat: add additional google models
* POC: formatting inputs for Vertex AI streaming
* refactor: move endpoints services outside of /routes dir to /services/Endpoints
* refactor: shorten schemas import
* refactor: rename PALM to GOOGLE
* feat: make Google editable endpoint
* feat: reusable Ask and Edit controllers based off Anthropic
* chore: organize imports/logic
* fix(parseConvo): include examples in googleSchema
* fix: google only allows odd number of messages to be sent (see the sketch below)
* fix: pass proxy to AnthropicClient
* refactor: change `google` altName to `Google`
* refactor: update getModelMaxTokens and related functions to handle maxTokensMap with nested endpoint model key/values
* refactor: google Icon and response sender changes (Codey and Google logo instead of PaLM in all cases)
* feat: google support for maxTokensMap
* feat: google updated endpoints with Ask/Edit controllers, buildOptions, and initializeClient
* feat(GoogleClient): now builds prompt for text models and supports real streaming from Vertex AI through langchain
* chore(GoogleClient): remove comments, left before for reference in git history
* docs: update google instructions (WIP)
* docs(apis_and_tokens.md): add images to google instructions
* docs: remove typo apis_and_tokens.md
* Update apis_and_tokens.md
* feat(Google): use default settings map, fully support context for both text and chat models, fully support examples for chat models
* chore: update more PaLM references to Google
* chore: move playwright out of workflows to avoid failing tests
parent 8a1968b2f8
commit 583e978a82
90 changed files with 1613 additions and 784 deletions
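A note on one fix in the list above: Google's chat models expect alternating user/model turns that start and end with a user message, which implies an odd message count. The commit's actual guard is not shown in the diff excerpt below; the following is only a hypothetical sketch of the idea:

// Hypothetical sketch, not the commit's code: keep the payload at an odd
// message count so an alternating user/model sequence ends on a user turn.
function ensureOddMessageCount(messages) {
  return messages.length % 2 === 0 ? messages.slice(1) : messages;
}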
@@ -1,3 +1,5 @@
+const { EModelEndpoint } = require('~/server/services/Endpoints');
+
 const models = [
   'text-davinci-003',
   'text-davinci-002',
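The added import pulls in EModelEndpoint, which the reworked map keys on. Its definition is not part of this diff; a hypothetical shape, inferred only from the keys used below:

// Hypothetical sketch of EModelEndpoint, inferred from the keys used in this
// diff; the real definition lives in ~/server/services/Endpoints.
const EModelEndpoint = {
  openAI: 'openAI',
  google: 'google',
  anthropic: 'anthropic',
};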
@@ -39,20 +41,37 @@ const models = [
 
 // Order is important here: by model series and context size (gpt-4 then gpt-3, ascending)
 const maxTokensMap = {
-  'gpt-4': 8191,
-  'gpt-4-0613': 8191,
-  'gpt-4-32k': 32767,
-  'gpt-4-32k-0314': 32767,
-  'gpt-4-32k-0613': 32767,
-  'gpt-3.5-turbo': 4095,
-  'gpt-3.5-turbo-0613': 4095,
-  'gpt-3.5-turbo-0301': 4095,
-  'gpt-3.5-turbo-16k': 15999,
-  'gpt-3.5-turbo-16k-0613': 15999,
-  'gpt-3.5-turbo-1106': 16380, // -5 from max
-  'gpt-4-1106': 127995, // -5 from max
-  'claude-2.1': 200000,
-  'claude-': 100000,
+  [EModelEndpoint.openAI]: {
+    'gpt-4': 8191,
+    'gpt-4-0613': 8191,
+    'gpt-4-32k': 32767,
+    'gpt-4-32k-0314': 32767,
+    'gpt-4-32k-0613': 32767,
+    'gpt-3.5-turbo': 4095,
+    'gpt-3.5-turbo-0613': 4095,
+    'gpt-3.5-turbo-0301': 4095,
+    'gpt-3.5-turbo-16k': 15999,
+    'gpt-3.5-turbo-16k-0613': 15999,
+    'gpt-3.5-turbo-1106': 16380, // -5 from max
+    'gpt-4-1106': 127995, // -5 from max
+  },
+  [EModelEndpoint.google]: {
+    /* Max I/O is 32k combined, so -1000 to leave room for response */
+    'text-bison-32k': 31000,
+    'chat-bison-32k': 31000,
+    'code-bison-32k': 31000,
+    'codechat-bison-32k': 31000,
+    /* Codey, -5 from max: 6144 */
+    'code-': 6139,
+    'codechat-': 6139,
+    /* PaLM2, -5 from max: 8192 */
+    'text-': 8187,
+    'chat-': 8187,
+  },
+  [EModelEndpoint.anthropic]: {
+    'claude-2.1': 200000,
+    'claude-': 100000,
+  },
 };
 
 /**
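Two conventions govern the nested map, and both matter for the lookup functions below: an exact model name resolves directly, while series keys such as 'code-' or 'claude-' act as substring fallbacks, checked in reverse key order so entries later in a sub-map take precedence. A self-contained sketch of that resolution, using a trimmed copy of the google sub-map (values copied from the diff above):

// Self-contained sketch of the exact-then-partial resolution; the sub-map
// below is a trimmed copy of the google entries in the diff above.
const googleTokens = {
  'code-bison-32k': 31000, // exact name: 32k combined I/O, minus 1000 for the response
  'code-': 6139, // series fallback: Codey max 6144, minus 5
};

function lookupSketch(modelName, tokensMap) {
  if (tokensMap[modelName]) {
    return tokensMap[modelName]; // exact key wins
  }
  const keys = Object.keys(tokensMap);
  for (let i = keys.length - 1; i >= 0; i--) {
    // Reverse order: keys listed later in the sub-map are tried first.
    if (modelName.includes(keys[i])) {
      return tokensMap[keys[i]];
    }
  }
  return undefined;
}

console.log(lookupSketch('code-bison-32k', googleTokens)); // 31000 (exact)
console.log(lookupSketch('code-gecko@001', googleTokens)); // 6139 (matches 'code-')
console.log(lookupSketch('gemini-pro', googleTokens)); // undefined (no match)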
@@ -60,6 +79,7 @@ const maxTokensMap = {
  * it searches for partial matches within the model name, checking keys in reverse order.
  *
  * @param {string} modelName - The name of the model to look up.
+ * @param {string} endpoint - The endpoint (default is 'openAI').
  * @returns {number|undefined} The maximum tokens for the given model or undefined if no match is found.
  *
  * @example
@@ -67,19 +87,24 @@ const maxTokensMap = {
  * getModelMaxTokens('gpt-4-32k-unknown'); // Returns 32767
  * getModelMaxTokens('unknown-model'); // Returns undefined
  */
-function getModelMaxTokens(modelName) {
+function getModelMaxTokens(modelName, endpoint = EModelEndpoint.openAI) {
   if (typeof modelName !== 'string') {
     return undefined;
   }
 
-  if (maxTokensMap[modelName]) {
-    return maxTokensMap[modelName];
+  const tokensMap = maxTokensMap[endpoint];
+  if (!tokensMap) {
+    return undefined;
+  }
+
+  if (tokensMap[modelName]) {
+    return tokensMap[modelName];
   }
 
-  const keys = Object.keys(maxTokensMap);
+  const keys = Object.keys(tokensMap);
   for (let i = keys.length - 1; i >= 0; i--) {
     if (modelName.includes(keys[i])) {
-      return maxTokensMap[keys[i]];
+      return tokensMap[keys[i]];
     }
   }
 
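Callers now scope the lookup by passing the endpoint; leaving it off preserves the previous OpenAI-only behavior. A usage sketch with hypothetical model names; the require paths are assumptions, not shown in this diff:

// Usage sketch; both require paths are assumptions, not confirmed by this diff.
const { EModelEndpoint } = require('~/server/services/Endpoints');
const { getModelMaxTokens } = require('~/utils/tokens');

getModelMaxTokens('gpt-4-32k-unknown'); // 32767, endpoint defaults to openAI
getModelMaxTokens('chat-bison-32k', EModelEndpoint.google); // 31000, exact key
getModelMaxTokens('text-unicorn@001', EModelEndpoint.google); // 8187, partial match on 'text-'
getModelMaxTokens('gpt-4', 'no-such-endpoint'); // undefined, no sub-map for the endpoint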
@@ -91,6 +116,7 @@ function getModelMaxTokens(modelName) {
  * it searches for partial matches within the model name, checking keys in reverse order.
  *
  * @param {string} modelName - The name of the model to look up.
+ * @param {string} endpoint - The endpoint (default is 'openAI').
  * @returns {string|undefined} The model name key for the given model; returns input if no match is found and is string.
  *
  * @example
@@ -98,16 +124,21 @@ function getModelMaxTokens(modelName) {
  * matchModelName('gpt-4-32k-unknown'); // Returns 'gpt-4-32k'
  * matchModelName('unknown-model'); // Returns undefined
  */
-function matchModelName(modelName) {
+function matchModelName(modelName, endpoint = EModelEndpoint.openAI) {
   if (typeof modelName !== 'string') {
     return undefined;
   }
 
-  if (maxTokensMap[modelName]) {
+  const tokensMap = maxTokensMap[endpoint];
+  if (!tokensMap) {
+    return modelName;
+  }
+
+  if (tokensMap[modelName]) {
     return modelName;
   }
 
-  const keys = Object.keys(maxTokensMap);
+  const keys = Object.keys(tokensMap);
   for (let i = keys.length - 1; i >= 0; i--) {
     if (modelName.includes(keys[i])) {
       return keys[i];
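matchModelName applies the same resolution but returns the matched key rather than a token count, which normalizes unknown dated variants to a known model family; note that per the new guard, an unrecognized endpoint falls back to returning the input name. A usage sketch under the same path assumptions as above:

// Usage sketch; require paths are assumptions, as above.
const { EModelEndpoint } = require('~/server/services/Endpoints');
const { matchModelName } = require('~/utils/tokens');

matchModelName('gpt-4-32k-unknown'); // 'gpt-4-32k', partial match
matchModelName('code-gecko@001', EModelEndpoint.google); // 'code-', series key
matchModelName('gpt-4', 'no-such-endpoint'); // 'gpt-4', input returned unchanged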