feat(Google): Support all Text/Chat Models, Response streaming, PaLM -> Google 🤖 (#1316)

* feat: update PaLM icons

* feat: add additional google models

* POC: formatting inputs for Vertex AI streaming

* refactor: move endpoints services outside of /routes dir to /services/Endpoints

* refactor: shorten schemas import

* refactor: rename PALM to GOOGLE

* feat: make Google editable endpoint

* feat: reusable Ask and Edit controllers based off Anthropic

* chore: organize imports/logic

* fix(parseConvo): include examples in googleSchema

* fix: google only allows an odd number of messages to be sent

* fix: pass proxy to AnthropicClient

* refactor: change `google` altName to `Google`

* refactor: update getModelMaxTokens and related functions to handle maxTokensMap with nested endpoint model key/values

* refactor: google Icon and response sender changes (Codey and Google logo instead of PaLM in all cases)

* feat: google support for maxTokensMap

* feat: google updated endpoints with Ask/Edit controllers, buildOptions, and initializeClient

* feat(GoogleClient): now builds prompt for text models and supports real streaming from Vertex AI through langchain

* chore(GoogleClient): remove comments that were left in for reference (still available in git history)

* docs: update google instructions (WIP)

* docs(apis_and_tokens.md): add images to google instructions

* docs: remove typo in apis_and_tokens.md

* Update apis_and_tokens.md

* feat(Google): use default settings map, fully support context for both text and chat models, fully support examples for chat models

* chore: update more PaLM references to Google

* chore: move playwright out of workflows to avoid failing tests
Author: Danny Avila, 2023-12-10 14:54:13 -05:00 (committed by GitHub)
parent 8a1968b2f8
commit 583e978a82
90 changed files with 1613 additions and 784 deletions
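Of the 90 changed files, the excerpt below covers the shared token-limit utility and its tests. The core change nests `maxTokensMap` under endpoint keys, so lookups now take an endpoint alongside the model name. A minimal sketch of the new call shape (the import paths are assumptions for illustration; the values come straight from the diff that follows):

```js
const { EModelEndpoint } = require('~/server/services/Endpoints');
const { getModelMaxTokens } = require('~/utils/tokens'); // path assumed for illustration

// OpenAI remains the default endpoint, so existing call sites are unchanged:
getModelMaxTokens('gpt-4-32k-0613'); // 32767

// Google and Anthropic models are looked up under their own endpoint keys:
getModelMaxTokens('chat-bison-32k', EModelEndpoint.google); // 31000
getModelMaxTokens('claude-2.1', EModelEndpoint.anthropic); // 200000
```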


@@ -1,3 +1,5 @@
+const { EModelEndpoint } = require('~/server/services/Endpoints');
+
 const models = [
   'text-davinci-003',
   'text-davinci-002',
@@ -39,20 +41,37 @@ const models = [
 // Order is important here: by model series and context size (gpt-4 then gpt-3, ascending)
 const maxTokensMap = {
-  'gpt-4': 8191,
-  'gpt-4-0613': 8191,
-  'gpt-4-32k': 32767,
-  'gpt-4-32k-0314': 32767,
-  'gpt-4-32k-0613': 32767,
-  'gpt-3.5-turbo': 4095,
-  'gpt-3.5-turbo-0613': 4095,
-  'gpt-3.5-turbo-0301': 4095,
-  'gpt-3.5-turbo-16k': 15999,
-  'gpt-3.5-turbo-16k-0613': 15999,
-  'gpt-3.5-turbo-1106': 16380, // -5 from max
-  'gpt-4-1106': 127995, // -5 from max
-  'claude-2.1': 200000,
-  'claude-': 100000,
+  [EModelEndpoint.openAI]: {
+    'gpt-4': 8191,
+    'gpt-4-0613': 8191,
+    'gpt-4-32k': 32767,
+    'gpt-4-32k-0314': 32767,
+    'gpt-4-32k-0613': 32767,
+    'gpt-3.5-turbo': 4095,
+    'gpt-3.5-turbo-0613': 4095,
+    'gpt-3.5-turbo-0301': 4095,
+    'gpt-3.5-turbo-16k': 15999,
+    'gpt-3.5-turbo-16k-0613': 15999,
+    'gpt-3.5-turbo-1106': 16380, // -5 from max
+    'gpt-4-1106': 127995, // -5 from max
+  },
+  [EModelEndpoint.google]: {
+    /* Max I/O is 32k combined, so -1000 to leave room for response */
+    'text-bison-32k': 31000,
+    'chat-bison-32k': 31000,
+    'code-bison-32k': 31000,
+    'codechat-bison-32k': 31000,
+    /* Codey, -5 from max: 6144 */
+    'code-': 6139,
+    'codechat-': 6139,
+    /* PaLM2, -5 from max: 8192 */
+    'text-': 8187,
+    'chat-': 8187,
+  },
+  [EModelEndpoint.anthropic]: {
+    'claude-2.1': 200000,
+    'claude-': 100000,
+  },
 };

 /**
@@ -60,6 +79,7 @@ const maxTokensMap = {
  * it searches for partial matches within the model name, checking keys in reverse order.
  *
  * @param {string} modelName - The name of the model to look up.
+ * @param {string} endpoint - The endpoint (default is 'openAI').
  * @returns {number|undefined} The maximum tokens for the given model or undefined if no match is found.
  *
  * @example
@@ -67,19 +87,24 @@
  * getModelMaxTokens('gpt-4-32k-unknown'); // Returns 32767
  * getModelMaxTokens('unknown-model'); // Returns undefined
  */
-function getModelMaxTokens(modelName) {
+function getModelMaxTokens(modelName, endpoint = EModelEndpoint.openAI) {
   if (typeof modelName !== 'string') {
     return undefined;
   }

-  if (maxTokensMap[modelName]) {
-    return maxTokensMap[modelName];
+  const tokensMap = maxTokensMap[endpoint];
+  if (!tokensMap) {
+    return undefined;
   }

-  const keys = Object.keys(maxTokensMap);
+  if (tokensMap[modelName]) {
+    return tokensMap[modelName];
+  }
+
+  const keys = Object.keys(tokensMap);
   for (let i = keys.length - 1; i >= 0; i--) {
     if (modelName.includes(keys[i])) {
-      return maxTokensMap[keys[i]];
+      return tokensMap[keys[i]];
     }
   }
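To make the lookup order concrete: the function tries an exact key first, then scans the endpoint's keys in reverse insertion order for substring matches, which is why the comment on the map stresses that key order matters. A few illustrative calls, with values taken from the new map (a sketch of expected behavior, not additional tests from the commit):

```js
// Exact keys win immediately:
getModelMaxTokens('text-bison-32k', EModelEndpoint.google); // 31000

// Otherwise keys are scanned in reverse, so the generic prefixes
// at the bottom of each endpoint map act as fallbacks:
getModelMaxTokens('code-bison-unknown', EModelEndpoint.google); // 6139, via 'code-'

// Substring matching also covers provider-prefixed names (e.g. OpenRouter):
getModelMaxTokens('openai/gpt-4-32k'); // 32767, via 'gpt-4-32k'

// An unknown model, or an endpoint missing from the map, yields undefined:
getModelMaxTokens('unknown-model', EModelEndpoint.google); // undefined
```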
@@ -91,6 +116,7 @@ function getModelMaxTokens(modelName) {
  * it searches for partial matches within the model name, checking keys in reverse order.
  *
  * @param {string} modelName - The name of the model to look up.
+ * @param {string} endpoint - The endpoint (default is 'openAI').
  * @returns {string|undefined} The model name key for the given model; returns input if no match is found and is string.
  *
  * @example
@@ -98,16 +124,21 @@
  * matchModelName('gpt-4-32k-unknown'); // Returns 'gpt-4-32k'
  * matchModelName('unknown-model'); // Returns undefined
  */
-function matchModelName(modelName) {
+function matchModelName(modelName, endpoint = EModelEndpoint.openAI) {
   if (typeof modelName !== 'string') {
     return undefined;
   }

-  if (maxTokensMap[modelName]) {
+  const tokensMap = maxTokensMap[endpoint];
+  if (!tokensMap) {
     return modelName;
   }

-  const keys = Object.keys(maxTokensMap);
+  if (tokensMap[modelName]) {
+    return modelName;
+  }
+
+  const keys = Object.keys(tokensMap);
   for (let i = keys.length - 1; i >= 0; i--) {
     if (modelName.includes(keys[i])) {
       return keys[i];
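`matchModelName` applies the same precedence but returns the normalized key rather than a token count, and falls back to returning the input string when nothing matches. A short sketch, mirroring the updated tests further below:

```js
matchModelName('gpt-4-32k-unknown'); // 'gpt-4-32k'
matchModelName('text-bison-32k', EModelEndpoint.google); // 'text-bison-32k'
matchModelName('unknown-google-model', EModelEndpoint.google); // 'unknown-google-model'
```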


@@ -1,16 +1,23 @@
+const { EModelEndpoint } = require('~/server/services/Endpoints');
 const { getModelMaxTokens, matchModelName, maxTokensMap } = require('./tokens');

 describe('getModelMaxTokens', () => {
   test('should return correct tokens for exact match', () => {
-    expect(getModelMaxTokens('gpt-4-32k-0613')).toBe(maxTokensMap['gpt-4-32k-0613']);
+    expect(getModelMaxTokens('gpt-4-32k-0613')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-4-32k-0613'],
+    );
   });

   test('should return correct tokens for partial match', () => {
-    expect(getModelMaxTokens('gpt-4-32k-unknown')).toBe(maxTokensMap['gpt-4-32k']);
+    expect(getModelMaxTokens('gpt-4-32k-unknown')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-4-32k'],
+    );
   });

   test('should return correct tokens for partial match (OpenRouter)', () => {
-    expect(getModelMaxTokens('openai/gpt-4-32k')).toBe(maxTokensMap['gpt-4-32k']);
+    expect(getModelMaxTokens('openai/gpt-4-32k')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-4-32k'],
+    );
   });

   test('should return undefined for no match', () => {
@@ -19,12 +26,14 @@ describe('getModelMaxTokens', () => {
   test('should return correct tokens for another exact match', () => {
     expect(getModelMaxTokens('gpt-3.5-turbo-16k-0613')).toBe(
-      maxTokensMap['gpt-3.5-turbo-16k-0613'],
+      maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-16k-0613'],
     );
   });

   test('should return correct tokens for another partial match', () => {
-    expect(getModelMaxTokens('gpt-3.5-turbo-unknown')).toBe(maxTokensMap['gpt-3.5-turbo']);
+    expect(getModelMaxTokens('gpt-3.5-turbo-unknown')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo'],
+    );
   });

   test('should return undefined for undefined input', () => {
@@ -41,26 +50,34 @@ describe('getModelMaxTokens', () => {
   // 11/06 Update
   test('should return correct tokens for gpt-3.5-turbo-1106 exact match', () => {
-    expect(getModelMaxTokens('gpt-3.5-turbo-1106')).toBe(maxTokensMap['gpt-3.5-turbo-1106']);
+    expect(getModelMaxTokens('gpt-3.5-turbo-1106')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-1106'],
+    );
   });

   test('should return correct tokens for gpt-4-1106 exact match', () => {
-    expect(getModelMaxTokens('gpt-4-1106')).toBe(maxTokensMap['gpt-4-1106']);
+    expect(getModelMaxTokens('gpt-4-1106')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4-1106']);
   });

   test('should return correct tokens for gpt-3.5-turbo-1106 partial match', () => {
     expect(getModelMaxTokens('something-/gpt-3.5-turbo-1106')).toBe(
-      maxTokensMap['gpt-3.5-turbo-1106'],
+      maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-1106'],
     );
     expect(getModelMaxTokens('gpt-3.5-turbo-1106/something-/')).toBe(
-      maxTokensMap['gpt-3.5-turbo-1106'],
+      maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-1106'],
     );
   });

   test('should return correct tokens for gpt-4-1106 partial match', () => {
-    expect(getModelMaxTokens('gpt-4-1106/something')).toBe(maxTokensMap['gpt-4-1106']);
-    expect(getModelMaxTokens('gpt-4-1106-preview')).toBe(maxTokensMap['gpt-4-1106']);
-    expect(getModelMaxTokens('gpt-4-1106-vision-preview')).toBe(maxTokensMap['gpt-4-1106']);
+    expect(getModelMaxTokens('gpt-4-1106/something')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-4-1106'],
+    );
+    expect(getModelMaxTokens('gpt-4-1106-preview')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-4-1106'],
+    );
+    expect(getModelMaxTokens('gpt-4-1106-vision-preview')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-4-1106'],
+    );
   });

   test('should return correct tokens for Anthropic models', () => {
@@ -74,13 +91,36 @@ describe('getModelMaxTokens', () => {
       'claude-instant-1-100k',
     ];

-    const claude21MaxTokens = maxTokensMap['claude-2.1'];
-    const claudeMaxTokens = maxTokensMap['claude-'];
+    const claudeMaxTokens = maxTokensMap[EModelEndpoint.anthropic]['claude-'];
+    const claude21MaxTokens = maxTokensMap[EModelEndpoint.anthropic]['claude-2.1'];

     models.forEach((model) => {
       const expectedTokens = model === 'claude-2.1' ? claude21MaxTokens : claudeMaxTokens;
-      expect(getModelMaxTokens(model)).toEqual(expectedTokens);
+      expect(getModelMaxTokens(model, EModelEndpoint.anthropic)).toEqual(expectedTokens);
     });
   });
+
+  // Tests for Google models
+  test('should return correct tokens for exact match - Google models', () => {
+    expect(getModelMaxTokens('text-bison-32k', EModelEndpoint.google)).toBe(
+      maxTokensMap[EModelEndpoint.google]['text-bison-32k'],
+    );
+    expect(getModelMaxTokens('codechat-bison-32k', EModelEndpoint.google)).toBe(
+      maxTokensMap[EModelEndpoint.google]['codechat-bison-32k'],
+    );
+  });
+
+  test('should return undefined for no match - Google models', () => {
+    expect(getModelMaxTokens('unknown-google-model', EModelEndpoint.google)).toBeUndefined();
+  });
+
+  test('should return correct tokens for partial match - Google models', () => {
+    expect(getModelMaxTokens('code-', EModelEndpoint.google)).toBe(
+      maxTokensMap[EModelEndpoint.google]['code-'],
+    );
+    expect(getModelMaxTokens('chat-', EModelEndpoint.google)).toBe(
+      maxTokensMap[EModelEndpoint.google]['chat-'],
+    );
+  });
 });

 describe('matchModelName', () => {
@@ -122,4 +162,21 @@ describe('matchModelName', () => {
     expect(matchModelName('gpt-4-1106-preview')).toBe('gpt-4-1106');
     expect(matchModelName('gpt-4-1106-vision-preview')).toBe('gpt-4-1106');
   });
+
+  // Tests for Google models
+  it('should return the exact model name if it exists in maxTokensMap - Google models', () => {
+    expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k');
+    expect(matchModelName('codechat-bison-32k', EModelEndpoint.google)).toBe('codechat-bison-32k');
+  });
+
+  it('should return the input model name if no match is found - Google models', () => {
+    expect(matchModelName('unknown-google-model', EModelEndpoint.google)).toBe(
+      'unknown-google-model',
+    );
+  });
+
+  it('should return the closest matching key for partial matches - Google models', () => {
+    expect(matchModelName('code-', EModelEndpoint.google)).toBe('code-');
+    expect(matchModelName('chat-', EModelEndpoint.google)).toBe('chat-');
+  });
 });