Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-09-22 06:00:56 +02:00)

* feat: update PaLM icons
* feat: add additional google models
* POC: formatting inputs for Vertex AI streaming
* refactor: move endpoints services outside of /routes dir to /services/Endpoints
* refactor: shorten schemas import
* refactor: rename PALM to GOOGLE
* feat: make Google editable endpoint
* feat: reusable Ask and Edit controllers based off Anthropic
* chore: organize imports/logic
* fix(parseConvo): include examples in googleSchema
* fix: google only allows odd number of messages to be sent
* fix: pass proxy to AnthropicClient
* refactor: change `google` altName to `Google`
* refactor: update getModelMaxTokens and related functions to handle maxTokensMap with nested endpoint model key/values
* refactor: google Icon and response sender changes (Codey and Google logo instead of PaLM in all cases)
* feat: google support for maxTokensMap
* feat: google updated endpoints with Ask/Edit controllers, buildOptions, and initializeClient
* feat(GoogleClient): now builds prompt for text models and supports real streaming from Vertex AI through langchain
* chore(GoogleClient): remove comments, left before for reference in git history
* docs: update google instructions (WIP)
* docs(apis_and_tokens.md): add images to google instructions
* docs: remove typo apis_and_tokens.md
* Update apis_and_tokens.md
* feat(Google): use default settings map, fully support context for both text and chat models, fully support examples for chat models
* chore: update more PaLM references to Google
* chore: move playwright out of workflows to avoid failing tests
156 lines
4.1 KiB
JavaScript
const { EModelEndpoint } = require('~/server/services/Endpoints');

const models = [
  'text-davinci-003',
  'text-davinci-002',
  'text-davinci-001',
  'text-curie-001',
  'text-babbage-001',
  'text-ada-001',
  'davinci',
  'curie',
  'babbage',
  'ada',
  'code-davinci-002',
  'code-davinci-001',
  'code-cushman-002',
  'code-cushman-001',
  'davinci-codex',
  'cushman-codex',
  'text-davinci-edit-001',
  'code-davinci-edit-001',
  'text-embedding-ada-002',
  'text-similarity-davinci-001',
  'text-similarity-curie-001',
  'text-similarity-babbage-001',
  'text-similarity-ada-001',
  'text-search-davinci-doc-001',
  'text-search-curie-doc-001',
  'text-search-babbage-doc-001',
  'text-search-ada-doc-001',
  'code-search-babbage-code-001',
  'code-search-ada-code-001',
  'gpt2',
  'gpt-4',
  'gpt-4-0314',
  'gpt-4-32k',
  'gpt-4-32k-0314',
  'gpt-3.5-turbo',
  'gpt-3.5-turbo-0301',
];

// Order is important here: by model series and context size (gpt-4 then gpt-3, ascending)
const maxTokensMap = {
  [EModelEndpoint.openAI]: {
    'gpt-4': 8191,
    'gpt-4-0613': 8191,
    'gpt-4-32k': 32767,
    'gpt-4-32k-0314': 32767,
    'gpt-4-32k-0613': 32767,
    'gpt-3.5-turbo': 4095,
    'gpt-3.5-turbo-0613': 4095,
    'gpt-3.5-turbo-0301': 4095,
    'gpt-3.5-turbo-16k': 15999,
    'gpt-3.5-turbo-16k-0613': 15999,
    'gpt-3.5-turbo-1106': 16380, // -5 from max
    'gpt-4-1106': 127995, // -5 from max
  },
  [EModelEndpoint.google]: {
    /* Max I/O is 32k combined, so -1000 to leave room for response */
    'text-bison-32k': 31000,
    'chat-bison-32k': 31000,
    'code-bison-32k': 31000,
    'codechat-bison-32k': 31000,
    /* Codey, -5 from max: 6144 */
    'code-': 6139,
    'codechat-': 6139,
    /* PaLM2, -5 from max: 8192 */
    'text-': 8187,
    'chat-': 8187,
  },
  [EModelEndpoint.anthropic]: {
    'claude-2.1': 200000,
    'claude-': 100000,
  },
};
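
/*
 * Note: keys such as 'code-', 'chat-', 'text-', and 'claude-' above are prefixes rather than
 * full model names; they are resolved by the partial-match loops in getModelMaxTokens and
 * matchModelName below. For an illustrative name like 'claude-instant-1', the lookup falls
 * through to the 'claude-' entry (100000), while 'claude-2.1' matches its exact entry first.
 */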

/**
 * Retrieves the maximum tokens for a given model name. If the exact model name isn't found,
 * it searches for partial matches within the model name, checking keys in reverse order.
 *
 * @param {string} modelName - The name of the model to look up.
 * @param {string} endpoint - The endpoint (default is 'openAI').
 * @returns {number|undefined} The maximum tokens for the given model or undefined if no match is found.
 *
 * @example
 * getModelMaxTokens('gpt-4-32k-0613'); // Returns 32767
 * getModelMaxTokens('gpt-4-32k-unknown'); // Returns 32767
 * getModelMaxTokens('unknown-model'); // Returns undefined
 */
function getModelMaxTokens(modelName, endpoint = EModelEndpoint.openAI) {
  if (typeof modelName !== 'string') {
    return undefined;
  }

  const tokensMap = maxTokensMap[endpoint];
  if (!tokensMap) {
    return undefined;
  }

  if (tokensMap[modelName]) {
    return tokensMap[modelName];
  }

  const keys = Object.keys(tokensMap);
  for (let i = keys.length - 1; i >= 0; i--) {
    if (modelName.includes(keys[i])) {
      return tokensMap[keys[i]];
    }
  }

  return undefined;
}
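
/*
 * Endpoint-aware usage (illustrative model/endpoint names, values taken from maxTokensMap above):
 *   getModelMaxTokens('chat-bison', EModelEndpoint.google);    // 8187, via the 'chat-' prefix
 *   getModelMaxTokens('claude-2.1', EModelEndpoint.anthropic); // 200000, exact key
 *   getModelMaxTokens('gpt-4', 'some-other-endpoint');         // undefined, endpoint not in maxTokensMap
 */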

/**
 * Retrieves the model name key for a given model name input. If the exact model name isn't found,
 * it searches for partial matches within the model name, checking keys in reverse order.
 *
 * @param {string} modelName - The name of the model to look up.
 * @param {string} endpoint - The endpoint (default is 'openAI').
 * @returns {string|undefined} The model name key for the given model; returns the input if no match is found and the input is a string.
 *
 * @example
 * matchModelName('gpt-4-32k-0613'); // Returns 'gpt-4-32k-0613'
 * matchModelName('gpt-4-32k-unknown'); // Returns 'gpt-4-32k'
 * matchModelName('unknown-model'); // Returns 'unknown-model' (no match; string input is returned as-is)
 */
function matchModelName(modelName, endpoint = EModelEndpoint.openAI) {
  if (typeof modelName !== 'string') {
    return undefined;
  }

  const tokensMap = maxTokensMap[endpoint];
  if (!tokensMap) {
    return modelName;
  }

  if (tokensMap[modelName]) {
    return modelName;
  }

  const keys = Object.keys(tokensMap);
  for (let i = keys.length - 1; i >= 0; i--) {
    if (modelName.includes(keys[i])) {
      return keys[i];
    }
  }

  return modelName;
}
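
/*
 * Illustrative endpoint-aware usage, mirroring the partial-match behavior above:
 *   matchModelName('gpt-3.5-turbo-16k-unknown');          // 'gpt-3.5-turbo-16k'
 *   matchModelName('claude-2', EModelEndpoint.anthropic); // 'claude-' (prefix key)
 *   matchModelName(42);                                   // undefined (non-string input)
 */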

module.exports = {
  tiktokenModels: new Set(models),
  maxTokensMap,
  getModelMaxTokens,
  matchModelName,
};
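
/*
 * A minimal consumer sketch (illustrative only; the require path below is an assumption and
 * depends on how this module is exposed in the codebase):
 *
 *   const { getModelMaxTokens, matchModelName } = require('./tokens');
 *   const max = getModelMaxTokens('gpt-4-32k-unknown'); // 32767
 *   const key = matchModelName('gpt-4-32k-unknown');    // 'gpt-4-32k'
 */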