Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-09-22 08:12:00 +02:00

* feat: bare bones implementation of claude client (WIP)
* feat: client implementation of Claude (WIP)
* fix: add claude to store
* feat: bare bones implementation of claude client (WIP)
* switch eventsource
* Try new method of calling claude with anthropic sdk
* (WIP) Finish initial claude client implementation and api
* debugging update
* fix(ClaudeClient.js): fix prompt prefixes for HUMAN_PROMPT and AI_PROMPT
  fix(ClaudeClient.js): refactor buildMessages logic for correct handling of messages
  refactor(ClaudeClient.js): refactor buildPrompt method to buildMessages for use in BaseClient sendMessage method
  refactor(ClaudeClient.js): refactor getCompletion method to sendCompletion for use in BaseClient sendMessage method
  refactor(ClaudeClient.js): omit getMessageMapMethod method for future refactoring
  refactor(ClaudeClient.js): remove unused sendMessage method to prefer BaseClient message
  fix(askClaude.js): error in getIds method was causing a frontend crash; userMessage was not defined
  fix(askClaude.js): import abortMessage function from utils module
  feat(askClaude.js): add /abort route to handle message abort requests
  feat(askClaude.js): create abortControllers map to store abort controllers (pattern sketched after this log)
  feat(askClaude.js): implement abortAsk function to handle message abort logic
  feat(askClaude.js): add onStart callback to handle message start logic
  feat(HoverButtons.jsx): add 'claude' as a supported endpoint for branching
* fix(ClaudeClient.js): update defaultPrefix and promptPrefix messages to include 'Remember your instructions', as Claude is trained to recognize labels preceding colons as participants of a conversation
* Change name from claude to anthropic
* add settings to handleSubmit and models to endpoints
* Implement Claude settings
* use svg for anthropic icon
* Implement abort
* Implement reverse proxy
* remove png icons
* replace web browser plugin
* remove default prefix
* fix styling of claude icon
* fix console error from svg properties
* remove single quote requirement from eslintrc
* fix(AnthropicClient.js): fix labels for HUMAN_PROMPT and AI_PROMPT
  feat(AnthropicClient.js): add support for custom userLabel and modelLabel options
  feat(AnthropicClient.js): add user_id metadata to requestOptions in getCompletion method
  feat(anthropic, AnthropicClient.js): add debug logging
* refactor(AnthropicClient.js): change promptSuffix variable declaration from let to const
* fix(EndpointOptionsDialog.jsx): remove unnecessary code that changes endpointName from 'anthropic' to 'Claude'
  fix(utils/index.jsx): fix alternateName value for 'anthropic' from 'Claude' to 'Anthropic'
* fix(AnthropicIcon): fix sizing/rendering/name of anthropic icon
* fix(AnthropicClient.js): change maxContextTokens default value to 99999
  fix(AnthropicClient.js): change maxResponseTokens default value to 1500
  fix(AnthropicClient.js): remove unnecessary code for setting maxContextTokens and maxResponseTokens based on modelOptions
  fix(AnthropicClient.js): change max_tokens_to_sample default value to 1500
  fix(anthropic.js): pass endpointOption.token to AnthropicClient constructor
* Update .env.example
* fix(AnthropicClient.js): remove exceeding message when it puts us over the token limit
  fix(AnthropicClient.js): handle case when the first message exceeds the token limit
  fix(AnthropicClient.js): throw error when prompt is too long
  fix(AnthropicClient.js): adjust max tokens calculation to use maxOutputTokens
  fix(anthropic.js): remove console.log statement in ask route
* feat(server/index): increase incoming json payload allowed size

---------

Co-authored-by: Danny Avila <messagedaniel@protonmail.com>
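For context, the abort flow described in the askClaude.js entries above pairs an in-memory map of AbortControllers with a dedicated /abort route. The following is a minimal sketch of that pattern only; the names abortKey and sendCompletion, the route paths, request shapes, and status codes are illustrative assumptions, not LibreChat's actual askClaude.js.

const express = require('express');

const router = express.Router();
// abortKey -> AbortController for each in-flight ask request.
// The app is expected to mount express.json() so req.body is parsed
// (see the server/index payload-size entry in the log above).
const abortControllers = new Map();

// Stand-in for the Anthropic client's completion call: resolves after a delay
// unless the abort signal fires first.
const sendCompletion = (text, { signal }) =>
  new Promise((resolve, reject) => {
    const timer = setTimeout(() => resolve(`echo: ${text}`), 5000);
    signal.addEventListener('abort', () => {
      clearTimeout(timer);
      reject(Object.assign(new Error('Request aborted'), { name: 'AbortError' }));
    });
  });

// Ask route: register an AbortController keyed by the message, then await the completion.
router.post('/', async (req, res) => {
  const { abortKey, text } = req.body; // assumed request shape
  const controller = new AbortController();
  abortControllers.set(abortKey, controller);

  try {
    const response = await sendCompletion(text, { signal: controller.signal });
    res.json({ text: response });
  } catch (err) {
    const status = err.name === 'AbortError' ? 409 : 500;
    res.status(status).json({ message: err.message });
  } finally {
    abortControllers.delete(abortKey);
  }
});

// Abort route: look up the controller for the given key and cancel generation.
router.post('/abort', (req, res) => {
  const controller = abortControllers.get(req.body.abortKey);
  if (!controller) return res.status(404).json({ message: 'No active request for this key' });
  controller.abort();
  res.json({ aborted: true });
});

module.exports = router;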
88 lines · 3.2 KiB · JavaScript
const express = require('express');

const router = express.Router();
const { availableTools } = require('../../app/clients/tools');

// Each getter returns the default model list for an endpoint; a comma-separated
// environment variable (e.g. OPENAI_MODELS) overrides the hard-coded defaults.
const getOpenAIModels = (opts = { azure: false }) => {
  let models = ['gpt-4', 'gpt-4-0613', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0301', 'text-davinci-003'];
  const key = opts.azure ? 'AZURE_OPENAI_MODELS' : 'OPENAI_MODELS';
  if (process.env[key]) models = String(process.env[key]).split(',');

  return models;
};

const getChatGPTBrowserModels = () => {
  let models = ['text-davinci-002-render-sha', 'gpt-4'];
  if (process.env.CHATGPT_MODELS) models = String(process.env.CHATGPT_MODELS).split(',');

  return models;
};

const getAnthropicModels = () => {
  let models = ['claude-1', 'claude-1-100k', 'claude-instant-1', 'claude-instant-1-100k', 'claude-2'];
  if (process.env.ANTHROPIC_MODELS) models = String(process.env.ANTHROPIC_MODELS).split(',');

  return models;
};

const getPluginModels = () => {
  let models = ['gpt-4', 'gpt-4-0613', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0301'];
  if (process.env.PLUGIN_MODELS) models = String(process.env.PLUGIN_MODELS).split(',');

  return models;
};

// Counter used to avoid repeating the PaLM key notices on every request.
let i = 0;
router.get('/', async function (req, res) {
  let key, palmUser;
  try {
    key = require('../../data/auth.json');
  } catch (e) {
    if (i === 0) {
      console.log('No \'auth.json\' file (service account key) found in /api/data/ for PaLM models');
      i++;
    }
  }

  if (process.env.PALM_KEY === 'user_provided') {
    palmUser = true;
    if (i <= 1) {
      console.log('User will provide key for PaLM models');
      i++;
    }
  }

  // Each endpoint below is either `false` (not configured) or a config object
  // describing its available models and whether the user supplies the key.
  const google =
    key || palmUser
      ? { userProvide: palmUser, availableModels: ['chat-bison', 'text-bison', 'codechat-bison'] }
      : false;

  const openAIApiKey = process.env.OPENAI_API_KEY;
  const azureOpenAIApiKey = process.env.AZURE_API_KEY;
  const userProvidedOpenAI = openAIApiKey ? openAIApiKey === 'user_provided' : azureOpenAIApiKey === 'user_provided';

  const openAI = openAIApiKey
    ? { availableModels: getOpenAIModels(), userProvide: openAIApiKey === 'user_provided' }
    : false;

  const azureOpenAI = azureOpenAIApiKey
    ? { availableModels: getOpenAIModels({ azure: true }), userProvide: azureOpenAIApiKey === 'user_provided' }
    : false;

  const gptPlugins =
    openAIApiKey || azureOpenAIApiKey
      ? { availableModels: getPluginModels(), availableTools, availableAgents: ['classic', 'functions'], userProvide: userProvidedOpenAI }
      : false;

  const bingAI = process.env.BINGAI_TOKEN
    ? { userProvide: process.env.BINGAI_TOKEN === 'user_provided' }
    : false;

  const chatGPTBrowser = process.env.CHATGPT_TOKEN
    ? {
      userProvide: process.env.CHATGPT_TOKEN === 'user_provided',
      availableModels: getChatGPTBrowserModels()
    }
    : false;

  const anthropic = process.env.ANTHROPIC_API_KEY
    ? {
      userProvide: process.env.ANTHROPIC_API_KEY === 'user_provided',
      availableModels: getAnthropicModels()
    }
    : false;

  res.send(JSON.stringify({ azureOpenAI, openAI, google, bingAI, chatGPTBrowser, gptPlugins, anthropic }));
});

module.exports = { router, getOpenAIModels, getChatGPTBrowserModels };
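A brief usage sketch of the module above; the require path, mount path, and port are assumptions for illustration, not taken from this file.

const express = require('express');
// Assumed path; the file's own relative requires suggest it lives under api/server/routes/.
const { router: endpointsRouter, getOpenAIModels } = require('./routes/endpoints');

// Comma-separated environment variables override the default model lists:
process.env.OPENAI_MODELS = 'gpt-4,gpt-3.5-turbo';
console.log(getOpenAIModels()); // ['gpt-4', 'gpt-3.5-turbo']
console.log(getOpenAIModels({ azure: true })); // defaults, unless AZURE_OPENAI_MODELS is set

const app = express();
app.use(express.json());
app.use('/api/endpoints', endpointsRouter); // assumed mount path

// GET /api/endpoints returns { azureOpenAI, openAI, google, bingAI, chatGPTBrowser, gptPlugins, anthropic },
// where each entry is either false or an object with availableModels / userProvide.
app.listen(3080);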