Mirror of https://github.com/danny-avila/LibreChat.git, synced 2026-01-05 01:58:50 +01:00
Merge branch 'main' into feat/Custom-Token-Rates-for-Endpoints

commit 59a232812d
27 changed files with 568 additions and 244 deletions
@@ -209,12 +209,6 @@ ASSISTANTS_API_KEY=user_provided
 # More info, including how to enable use of Assistants with Azure here:
 # https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
 
-#============#
-# OpenRouter #
-#============#
-# !!!Warning: Use the variable above instead of this one. Using this one will override the OpenAI endpoint
-# OPENROUTER_API_KEY=
-
 #============#
 # Plugins    #
 #============#
@@ -7,8 +7,7 @@ const {
   getResponseSender,
   validateVisionModel,
 } = require('librechat-data-provider');
-const { SplitStreamHandler, GraphEvents } = require('@librechat/agents');
-const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const { SplitStreamHandler: _Handler, GraphEvents } = require('@librechat/agents');
 const {
   truncateText,
   formatMessage,
@@ -24,6 +23,7 @@ const {
 } = require('~/server/services/Endpoints/anthropic/helpers');
 const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
 const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
+const { encodeAndFormat } = require('~/server/services/Files/images/encode');
 const Tokenizer = require('~/server/services/Tokenizer');
 const { logger, sendEvent } = require('~/config');
 const { sleep } = require('~/server/utils');
@@ -32,6 +32,15 @@ const BaseClient = require('./BaseClient');
 const HUMAN_PROMPT = '\n\nHuman:';
 const AI_PROMPT = '\n\nAssistant:';
 
+class SplitStreamHandler extends _Handler {
+  getDeltaContent(chunk) {
+    return (chunk?.delta?.text ?? chunk?.completion) || '';
+  }
+  getReasoningDelta(chunk) {
+    return chunk?.delta?.thinking || '';
+  }
+}
+
 /** Helper function to introduce a delay before retrying */
 function delayBeforeRetry(attempts, baseDelay = 1000) {
   return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
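Aside (illustration, not part of the diff): the subclass above lets the client feed Anthropic's native stream events straight into the handler; the two overridden accessors decide which part of a chunk counts as visible text versus reasoning. A minimal sketch of that mapping, using a stand-in class since the real dispatching lives in @librechat/agents' SplitStreamHandler:

    // Stand-in for the overridden accessors; chunk shapes mirror those in the diff.
    class DemoSplitHandler {
      getDeltaContent(chunk) {
        // Messages API chunks carry delta.text; the legacy Completions API uses completion.
        return (chunk?.delta?.text ?? chunk?.completion) || '';
      }
      getReasoningDelta(chunk) {
        // Extended-thinking chunks carry delta.thinking.
        return chunk?.delta?.thinking || '';
      }
    }

    const h = new DemoSplitHandler();
    console.log(h.getDeltaContent({ delta: { text: 'Hello' } }));        // 'Hello'
    console.log(h.getDeltaContent({ completion: 'legacy chunk' }));      // 'legacy chunk'
    console.log(h.getReasoningDelta({ delta: { thinking: 'step 1' } })); // 'step 1'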
@@ -105,7 +114,9 @@ class AnthropicClient extends BaseClient {
 
     const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
     this.isClaude3 = modelMatch.includes('claude-3');
-    this.isLegacyOutput = !modelMatch.includes('claude-3-5-sonnet');
+    this.isLegacyOutput = !(
+      /claude-3[-.]5-sonnet/.test(modelMatch) || /claude-3[-.]7/.test(modelMatch)
+    );
     this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);
 
     if (
@@ -733,10 +744,17 @@ class AnthropicClient extends BaseClient {
       stop_sequences,
       temperature,
       metadata,
-      top_p,
-      top_k,
     };
 
+    if (!/claude-3[-.]7/.test(model)) {
+      if (top_p !== undefined) {
+        requestOptions.top_p = top_p;
+      }
+      if (top_k !== undefined) {
+        requestOptions.top_k = top_k;
+      }
+    }
+
     if (this.useMessages) {
       requestOptions.messages = payload;
      requestOptions.max_tokens =
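Worth noting: the `[-.]` character class makes this guard cover both model-name notations, so the sampling parameters are withheld only for Claude 3.7 (presumably because its extended-thinking mode does not accept them). A quick sketch, assuming nothing beyond the regex itself:

    const isClaude37 = (model) => /claude-3[-.]7/.test(model);

    console.log(isClaude37('claude-3-7-sonnet')); // true  (hyphen notation)
    console.log(isClaude37('claude-3.7-sonnet')); // true  (decimal notation)
    console.log(isClaude37('claude-3-5-sonnet')); // false (top_p/top_k still sent)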
@@ -798,50 +816,16 @@ class AnthropicClient extends BaseClient {
         }
       });
 
-      /** @param {string} chunk */
-      const handleChunk = (chunk) => {
-        this.streamHandler.handle({
-          choices: [
-            {
-              delta: {
-                content: chunk,
-              },
-            },
-          ],
-        });
-      };
-      /** @param {string} chunk */
-      const handleReasoningChunk = (chunk) => {
-        this.streamHandler.handle({
-          choices: [
-            {
-              delta: {
-                reasoning_content: chunk,
-              },
-            },
-          ],
-        });
-      };
-
       for await (const completion of response) {
-        // Handle each completion as before
         const type = completion?.type ?? '';
         if (tokenEventTypes.has(type)) {
           logger.debug(`[AnthropicClient] ${type}`, completion);
           this[type] = completion;
         }
-        if (completion?.delta?.thinking) {
-          handleReasoningChunk(completion.delta.thinking);
-        } else if (completion?.delta?.text) {
-          handleChunk(completion.delta.text);
-        } else if (completion.completion) {
-          handleChunk(completion.completion);
-        }
 
+        this.streamHandler.handle(completion);
         await sleep(streamRate);
       }
 
       // Successful processing, exit loop
       break;
     } catch (error) {
       attempts += 1;
@@ -5,10 +5,11 @@ const {
   isAgentsEndpoint,
   isParamEndpoint,
   EModelEndpoint,
+  excludedKeys,
   ErrorTypes,
   Constants,
 } = require('librechat-data-provider');
-const { getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
+const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
 const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
 const { truncateToolCallOutputs } = require('./prompts');
 const checkBalance = require('~/models/checkBalance');
@@ -55,6 +56,10 @@ class BaseClient {
      * Flag to determine if the client re-submitted the latest assistant message.
      * @type {boolean | undefined} */
     this.continued;
+    /**
+     * Flag to determine if the client has already fetched the conversation while saving new messages.
+     * @type {boolean | undefined} */
+    this.fetchedConvo;
     /** @type {TMessage[]} */
     this.currentMessages = [];
     /** @type {import('librechat-data-provider').VisionModes | undefined} */
@@ -863,16 +868,39 @@ class BaseClient {
       return { message: savedMessage };
     }
 
-    const conversation = await saveConvo(
-      this.options.req,
-      {
-        conversationId: message.conversationId,
-        endpoint: this.options.endpoint,
-        endpointType: this.options.endpointType,
-        ...endpointOptions,
-      },
-      { context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' },
-    );
+    const fieldsToKeep = {
+      conversationId: message.conversationId,
+      endpoint: this.options.endpoint,
+      endpointType: this.options.endpointType,
+      ...endpointOptions,
+    };
+
+    const existingConvo =
+      this.fetchedConvo === true
+        ? null
+        : await getConvo(this.options.req?.user?.id, message.conversationId);
+
+    const unsetFields = {};
+    if (existingConvo != null) {
+      this.fetchedConvo = true;
+      for (const key in existingConvo) {
+        if (!key) {
+          continue;
+        }
+        if (excludedKeys.has(key)) {
+          continue;
+        }
+
+        if (endpointOptions?.[key] === undefined) {
+          unsetFields[key] = 1;
+        }
+      }
+    }
+
+    const conversation = await saveConvo(this.options.req, fieldsToKeep, {
+      context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
+      unsetFields,
+    });
 
     return { message: savedMessage, conversation };
   }
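Aside (illustration, not part of the diff): the loop above flags every field that exists on the stored conversation but is absent from the incoming endpoint options, so a parameter the user reset client-side actually disappears from the document instead of silently surviving. A self-contained sketch with hypothetical data:

    // Hypothetical values, for illustration only.
    const excludedKeys = new Set(['conversationId', 'endpoint', 'messages', 'user']);
    const existingConvo = { conversationId: 'abc', endpoint: 'anthropic', topP: 0.9, topK: 40, user: 'u1' };
    const endpointOptions = { topP: 0.7 }; // topK was reset on the client

    const unsetFields = {};
    for (const key in existingConvo) {
      if (excludedKeys.has(key)) continue; // structural keys are never unset
      if (endpointOptions[key] === undefined) {
        unsetFields[key] = 1;
      }
    }
    console.log(unsetFields); // { topK: 1 }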
@@ -109,12 +109,7 @@ class OpenAIClient extends BaseClient {
     const omniPattern = /\b(o1|o3)\b/i;
     this.isOmni = omniPattern.test(this.modelOptions.model);
 
-    const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
-    if (OPENROUTER_API_KEY && !this.azure) {
-      this.apiKey = OPENROUTER_API_KEY;
-      this.useOpenRouter = true;
-    }
-
+    const { OPENAI_FORCE_PROMPT } = process.env ?? {};
     const { reverseProxyUrl: reverseProxy } = this.options;
 
     if (!this.useOpenRouter && reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) {
@@ -1,3 +1,4 @@
+const { SplitStreamHandler } = require('@librechat/agents');
 const { anthropicSettings } = require('librechat-data-provider');
 const AnthropicClient = require('~/app/clients/AnthropicClient');
 
@@ -405,4 +406,278 @@ describe('AnthropicClient', () => {
       expect(Number.isNaN(result)).toBe(false);
     });
   });
+
+  describe('maxOutputTokens handling for different models', () => {
+    it('should not cap maxOutputTokens for Claude 3.5 Sonnet models', () => {
+      const client = new AnthropicClient('test-api-key');
+      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 10;
+
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3-5-sonnet',
+          maxOutputTokens: highTokenValue,
+        },
+      });
+
+      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+
+      // Test with decimal notation
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3.5-sonnet',
+          maxOutputTokens: highTokenValue,
+        },
+      });
+
+      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+    });
+
+    it('should not cap maxOutputTokens for Claude 3.7 models', () => {
+      const client = new AnthropicClient('test-api-key');
+      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
+
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3-7-sonnet',
+          maxOutputTokens: highTokenValue,
+        },
+      });
+
+      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+
+      // Test with decimal notation
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3.7-sonnet',
+          maxOutputTokens: highTokenValue,
+        },
+      });
+
+      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+    });
+
+    it('should cap maxOutputTokens for Claude 3.5 Haiku models', () => {
+      const client = new AnthropicClient('test-api-key');
+      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
+
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3-5-haiku',
+          maxOutputTokens: highTokenValue,
+        },
+      });
+
+      expect(client.modelOptions.maxOutputTokens).toBe(
+        anthropicSettings.legacy.maxOutputTokens.default,
+      );
+
+      // Test with decimal notation
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3.5-haiku',
+          maxOutputTokens: highTokenValue,
+        },
+      });
+
+      expect(client.modelOptions.maxOutputTokens).toBe(
+        anthropicSettings.legacy.maxOutputTokens.default,
+      );
+    });
+
+    it('should cap maxOutputTokens for Claude 3 Haiku and Opus models', () => {
+      const client = new AnthropicClient('test-api-key');
+      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
+
+      // Test haiku
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3-haiku',
+          maxOutputTokens: highTokenValue,
+        },
+      });
+
+      expect(client.modelOptions.maxOutputTokens).toBe(
+        anthropicSettings.legacy.maxOutputTokens.default,
+      );
+
+      // Test opus
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3-opus',
+          maxOutputTokens: highTokenValue,
+        },
+      });
+
+      expect(client.modelOptions.maxOutputTokens).toBe(
+        anthropicSettings.legacy.maxOutputTokens.default,
+      );
+    });
+  });
+
+  describe('topK/topP parameters for different models', () => {
+    beforeEach(() => {
+      // Mock the SplitStreamHandler
+      jest.spyOn(SplitStreamHandler.prototype, 'handle').mockImplementation(() => {});
+    });
+
+    afterEach(() => {
+      jest.restoreAllMocks();
+    });
+
+    it('should include top_k and top_p parameters for non-claude-3.7 models', async () => {
+      const client = new AnthropicClient('test-api-key');
+
+      // Create a mock async generator function
+      async function* mockAsyncGenerator() {
+        yield { type: 'message_start', message: { usage: {} } };
+        yield { delta: { text: 'Test response' } };
+        yield { type: 'message_delta', usage: {} };
+      }
+
+      // Mock createResponse to return the async generator
+      jest.spyOn(client, 'createResponse').mockImplementation(() => {
+        return mockAsyncGenerator();
+      });
+
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3-opus',
+          temperature: 0.7,
+          topK: 10,
+          topP: 0.9,
+        },
+      });
+
+      // Mock getClient to capture the request options
+      let capturedOptions = null;
+      jest.spyOn(client, 'getClient').mockImplementation((options) => {
+        capturedOptions = options;
+        return {};
+      });
+
+      const payload = [{ role: 'user', content: 'Test message' }];
+      await client.sendCompletion(payload, {});
+
+      // Check the options passed to getClient
+      expect(capturedOptions).toHaveProperty('top_k', 10);
+      expect(capturedOptions).toHaveProperty('top_p', 0.9);
+    });
+
+    it('should include top_k and top_p parameters for claude-3-5-sonnet models', async () => {
+      const client = new AnthropicClient('test-api-key');
+
+      // Create a mock async generator function
+      async function* mockAsyncGenerator() {
+        yield { type: 'message_start', message: { usage: {} } };
+        yield { delta: { text: 'Test response' } };
+        yield { type: 'message_delta', usage: {} };
+      }
+
+      // Mock createResponse to return the async generator
+      jest.spyOn(client, 'createResponse').mockImplementation(() => {
+        return mockAsyncGenerator();
+      });
+
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3-5-sonnet',
+          temperature: 0.7,
+          topK: 10,
+          topP: 0.9,
+        },
+      });
+
+      // Mock getClient to capture the request options
+      let capturedOptions = null;
+      jest.spyOn(client, 'getClient').mockImplementation((options) => {
+        capturedOptions = options;
+        return {};
+      });
+
+      const payload = [{ role: 'user', content: 'Test message' }];
+      await client.sendCompletion(payload, {});
+
+      // Check the options passed to getClient
+      expect(capturedOptions).toHaveProperty('top_k', 10);
+      expect(capturedOptions).toHaveProperty('top_p', 0.9);
+    });
+
+    it('should not include top_k and top_p parameters for claude-3-7-sonnet models', async () => {
+      const client = new AnthropicClient('test-api-key');
+
+      // Create a mock async generator function
+      async function* mockAsyncGenerator() {
+        yield { type: 'message_start', message: { usage: {} } };
+        yield { delta: { text: 'Test response' } };
+        yield { type: 'message_delta', usage: {} };
+      }
+
+      // Mock createResponse to return the async generator
+      jest.spyOn(client, 'createResponse').mockImplementation(() => {
+        return mockAsyncGenerator();
+      });
+
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3-7-sonnet',
+          temperature: 0.7,
+          topK: 10,
+          topP: 0.9,
+        },
+      });
+
+      // Mock getClient to capture the request options
+      let capturedOptions = null;
+      jest.spyOn(client, 'getClient').mockImplementation((options) => {
+        capturedOptions = options;
+        return {};
+      });
+
+      const payload = [{ role: 'user', content: 'Test message' }];
+      await client.sendCompletion(payload, {});
+
+      // Check the options passed to getClient
+      expect(capturedOptions).not.toHaveProperty('top_k');
+      expect(capturedOptions).not.toHaveProperty('top_p');
+    });
+
+    it('should not include top_k and top_p parameters for models with decimal notation (claude-3.7)', async () => {
+      const client = new AnthropicClient('test-api-key');
+
+      // Create a mock async generator function
+      async function* mockAsyncGenerator() {
+        yield { type: 'message_start', message: { usage: {} } };
+        yield { delta: { text: 'Test response' } };
+        yield { type: 'message_delta', usage: {} };
+      }
+
+      // Mock createResponse to return the async generator
+      jest.spyOn(client, 'createResponse').mockImplementation(() => {
+        return mockAsyncGenerator();
+      });
+
+      client.setOptions({
+        modelOptions: {
+          model: 'claude-3.7-sonnet',
+          temperature: 0.7,
+          topK: 10,
+          topP: 0.9,
+        },
+      });
+
+      // Mock getClient to capture the request options
+      let capturedOptions = null;
+      jest.spyOn(client, 'getClient').mockImplementation((options) => {
+        capturedOptions = options;
+        return {};
+      });
+
+      const payload = [{ role: 'user', content: 'Test message' }];
+      await client.sendCompletion(payload, {});
+
+      // Check the options passed to getClient
+      expect(capturedOptions).not.toHaveProperty('top_k');
+      expect(capturedOptions).not.toHaveProperty('top_p');
+    });
+  });
 });
@@ -202,14 +202,6 @@ describe('OpenAIClient', () => {
       expect(client.modelOptions.temperature).toBe(0.7);
     });
 
-    it('should set apiKey and useOpenRouter if OPENROUTER_API_KEY is present', () => {
-      process.env.OPENROUTER_API_KEY = 'openrouter-key';
-      client.setOptions({});
-      expect(client.apiKey).toBe('openrouter-key');
-      expect(client.useOpenRouter).toBe(true);
-      delete process.env.OPENROUTER_API_KEY; // Cleanup
-    });
-
     it('should set FORCE_PROMPT based on OPENAI_FORCE_PROMPT or reverseProxyUrl', () => {
       process.env.OPENAI_FORCE_PROMPT = 'true';
       client.setOptions({});
@@ -534,7 +526,6 @@
     afterEach(() => {
       delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
       delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
-      delete process.env.OPENROUTER_API_KEY;
     });
 
     it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {
@@ -104,10 +104,16 @@ module.exports = {
       update.expiredAt = null;
     }
 
+    /** @type {{ $set: Partial<TConversation>; $unset?: Record<keyof TConversation, number> }} */
+    const updateOperation = { $set: update };
+    if (metadata && metadata.unsetFields && Object.keys(metadata.unsetFields).length > 0) {
+      updateOperation.$unset = metadata.unsetFields;
+    }
+
     /** Note: the resulting Model object is necessary for Meilisearch operations */
     const conversation = await Conversation.findOneAndUpdate(
       { conversationId, user: req.user.id },
-      update,
+      updateOperation,
       {
         new: true,
         upsert: true,
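Illustration (not part of the diff): when unsetFields arrives via metadata, the update becomes a combined $set/$unset document, which is what lets saveConvo drop stale parameters in a single round trip. A sketch with hypothetical values:

    const update = { title: 'New title' };
    const metadata = { unsetFields: { topK: 1 } };

    const updateOperation = { $set: update };
    if (metadata && metadata.unsetFields && Object.keys(metadata.unsetFields).length > 0) {
      updateOperation.$unset = metadata.unsetFields;
    }
    // Passed to findOneAndUpdate: sets the title and removes topK from the document.
    console.log(updateOperation); // { $set: { title: 'New title' }, $unset: { topK: 1 } }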
@@ -13,6 +13,13 @@ const Token = mongoose.model('Token', tokenSchema);
  */
 async function fixIndexes() {
   try {
+    if (
+      process.env.NODE_ENV === 'CI' ||
+      process.env.NODE_ENV === 'development' ||
+      process.env.NODE_ENV === 'test'
+    ) {
+      return;
+    }
     const indexes = await Token.collection.indexes();
     logger.debug('Existing Token Indexes:', JSON.stringify(indexes, null, 2));
     const unwantedTTLIndexes = indexes.filter(
@@ -20,8 +20,6 @@ const convoSchema = mongoose.Schema(
       index: true,
     },
     messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }],
-    // google only
-    examples: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
     agentOptions: {
       type: mongoose.Schema.Types.Mixed,
     },
@@ -48,12 +46,12 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
   convoSchema.plugin(mongoMeili, {
     host: process.env.MEILI_HOST,
     apiKey: process.env.MEILI_MASTER_KEY,
-    indexName: 'convos', // Will get created automatically if it doesn't exist already
+    /** Note: Will get created automatically if it doesn't exist already */
+    indexName: 'convos',
     primaryKey: 'conversationId',
   });
 }
 
 // Create TTL index
 convoSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });
 convoSchema.index({ createdAt: 1, updatedAt: 1 });
 convoSchema.index({ conversationId: 1, user: 1 }, { unique: true });
@@ -1,3 +1,5 @@
+const mongoose = require('mongoose');
+
 const conversationPreset = {
   // endpoint: [azureOpenAI, openAI, anthropic, chatGPTBrowser]
   endpoint: {
@@ -24,6 +26,7 @@ const conversationPreset = {
     required: false,
   },
+  // for google only
+  examples: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
   modelLabel: {
     type: String,
     required: false,
@@ -129,56 +132,6 @@ const conversationPreset = {
   },
 };
 
-const agentOptions = {
-  model: {
-    type: String,
-    required: false,
-  },
-  // for azureOpenAI, openAI only
-  chatGptLabel: {
-    type: String,
-    required: false,
-  },
-  modelLabel: {
-    type: String,
-    required: false,
-  },
-  promptPrefix: {
-    type: String,
-    required: false,
-  },
-  temperature: {
-    type: Number,
-    required: false,
-  },
-  top_p: {
-    type: Number,
-    required: false,
-  },
-  // for google only
-  topP: {
-    type: Number,
-    required: false,
-  },
-  topK: {
-    type: Number,
-    required: false,
-  },
-  maxOutputTokens: {
-    type: Number,
-    required: false,
-  },
-  presence_penalty: {
-    type: Number,
-    required: false,
-  },
-  frequency_penalty: {
-    type: Number,
-    required: false,
-  },
-};
-
 module.exports = {
   conversationPreset,
-  agentOptions,
 };
@@ -23,8 +23,6 @@ const presetSchema = mongoose.Schema(
     order: {
       type: Number,
     },
-    // google only
-    examples: [{ type: mongoose.Schema.Types.Mixed }],
     ...conversationPreset,
     agentOptions: {
       type: mongoose.Schema.Types.Mixed,
@@ -43,14 +43,21 @@ function getLLMConfig(apiKey, options = {}) {
     model: mergedOptions.model,
     stream: mergedOptions.stream,
     temperature: mergedOptions.temperature,
-    topP: mergedOptions.topP,
-    topK: mergedOptions.topK,
     stopSequences: mergedOptions.stop,
     maxTokens:
       mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
     clientOptions: {},
   };
 
+  if (!/claude-3[-.]7/.test(mergedOptions.model)) {
+    if (mergedOptions.topP !== undefined) {
+      requestOptions.topP = mergedOptions.topP;
+    }
+    if (mergedOptions.topK !== undefined) {
+      requestOptions.topK = mergedOptions.topK;
+    }
+  }
+
   const supportsCacheControl =
     systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model);
   const headers = getClaudeHeaders(requestOptions.model, supportsCacheControl);
api/server/services/Endpoints/anthropic/llm.spec.js (new file, 112 additions)
@@ -0,0 +1,112 @@
+const { anthropicSettings } = require('librechat-data-provider');
+const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');
+
+jest.mock('https-proxy-agent', () => ({
+  HttpsProxyAgent: jest.fn().mockImplementation((proxy) => ({ proxy })),
+}));
+
+describe('getLLMConfig', () => {
+  it('should create a basic configuration with default values', () => {
+    const result = getLLMConfig('test-api-key', { modelOptions: {} });
+
+    expect(result.llmConfig).toHaveProperty('apiKey', 'test-api-key');
+    expect(result.llmConfig).toHaveProperty('model', anthropicSettings.model.default);
+    expect(result.llmConfig).toHaveProperty('stream', true);
+    expect(result.llmConfig).toHaveProperty('maxTokens');
+  });
+
+  it('should include proxy settings when provided', () => {
+    const result = getLLMConfig('test-api-key', {
+      modelOptions: {},
+      proxy: 'http://proxy:8080',
+    });
+
+    expect(result.llmConfig.clientOptions).toHaveProperty('httpAgent');
+    expect(result.llmConfig.clientOptions.httpAgent).toHaveProperty('proxy', 'http://proxy:8080');
+  });
+
+  it('should include reverse proxy URL when provided', () => {
+    const result = getLLMConfig('test-api-key', {
+      modelOptions: {},
+      reverseProxyUrl: 'http://reverse-proxy',
+    });
+
+    expect(result.llmConfig.clientOptions).toHaveProperty('baseURL', 'http://reverse-proxy');
+  });
+
+  it('should include topK and topP for non-Claude-3.7 models', () => {
+    const result = getLLMConfig('test-api-key', {
+      modelOptions: {
+        model: 'claude-3-opus',
+        topK: 10,
+        topP: 0.9,
+      },
+    });
+
+    expect(result.llmConfig).toHaveProperty('topK', 10);
+    expect(result.llmConfig).toHaveProperty('topP', 0.9);
+  });
+
+  it('should include topK and topP for Claude-3.5 models', () => {
+    const result = getLLMConfig('test-api-key', {
+      modelOptions: {
+        model: 'claude-3-5-sonnet',
+        topK: 10,
+        topP: 0.9,
+      },
+    });
+
+    expect(result.llmConfig).toHaveProperty('topK', 10);
+    expect(result.llmConfig).toHaveProperty('topP', 0.9);
+  });
+
+  it('should NOT include topK and topP for Claude-3-7 models (hyphen notation)', () => {
+    const result = getLLMConfig('test-api-key', {
+      modelOptions: {
+        model: 'claude-3-7-sonnet',
+        topK: 10,
+        topP: 0.9,
+      },
+    });
+
+    expect(result.llmConfig).not.toHaveProperty('topK');
+    expect(result.llmConfig).not.toHaveProperty('topP');
+  });
+
+  it('should NOT include topK and topP for Claude-3.7 models (decimal notation)', () => {
+    const result = getLLMConfig('test-api-key', {
+      modelOptions: {
+        model: 'claude-3.7-sonnet',
+        topK: 10,
+        topP: 0.9,
+      },
+    });
+
+    expect(result.llmConfig).not.toHaveProperty('topK');
+    expect(result.llmConfig).not.toHaveProperty('topP');
+  });
+
+  it('should handle custom maxOutputTokens', () => {
+    const result = getLLMConfig('test-api-key', {
+      modelOptions: {
+        model: 'claude-3-opus',
+        maxOutputTokens: 2048,
+      },
+    });
+
+    expect(result.llmConfig).toHaveProperty('maxTokens', 2048);
+  });
+
+  it('should handle promptCache setting', () => {
+    const result = getLLMConfig('test-api-key', {
+      modelOptions: {
+        model: 'claude-3-5-sonnet',
+        promptCache: true,
+      },
+    });
+
+    // We're not checking specific header values since that depends on the actual helper function
+    // Just verifying that the promptCache setting is processed
+    expect(result.llmConfig).toBeDefined();
+  });
+});
@@ -129,9 +129,6 @@ const fetchOpenAIModels = async (opts, _models = []) => {
     //   .split('/deployments')[0]
     //   .concat(`/models?api-version=${azure.azureOpenAIApiVersion}`);
     // apiKey = azureOpenAIApiKey;
-  } else if (process.env.OPENROUTER_API_KEY) {
-    reverseProxyUrl = 'https://openrouter.ai/api/v1';
-    apiKey = process.env.OPENROUTER_API_KEY;
   }
 
   if (reverseProxyUrl) {
@@ -218,7 +215,7 @@ const getOpenAIModels = async (opts) => {
     return models;
   }
 
-  if (userProvidedOpenAI && !process.env.OPENROUTER_API_KEY) {
+  if (userProvidedOpenAI) {
     return models;
   }
 
@@ -161,22 +161,6 @@ describe('getOpenAIModels', () => {
     expect(models).toEqual(expect.arrayContaining(['openai-model', 'openai-model-2']));
   });
 
-  it('attempts to use OPENROUTER_API_KEY if set', async () => {
-    process.env.OPENROUTER_API_KEY = 'test-router-key';
-    const expectedModels = ['model-router-1', 'model-router-2'];
-
-    axios.get.mockResolvedValue({
-      data: {
-        data: expectedModels.map((id) => ({ id })),
-      },
-    });
-
-    const models = await getOpenAIModels({ user: 'user456' });
-
-    expect(models).toEqual(expect.arrayContaining(expectedModels));
-    expect(axios.get).toHaveBeenCalled();
-  });
-
   it('utilizes proxy configuration when PROXY is set', async () => {
     axios.get.mockResolvedValue({
       data: {
@@ -1,5 +1,6 @@
+import { RotateCcw } from 'lucide-react';
 import React, { useMemo, useState, useEffect, useCallback } from 'react';
-import { getSettingsKeys, tConvoUpdateSchema } from 'librechat-data-provider';
+import { excludedKeys, getSettingsKeys, tConvoUpdateSchema } from 'librechat-data-provider';
 import type { TPreset } from 'librechat-data-provider';
 import { SaveAsPresetDialog } from '~/components/Endpoints';
 import { useSetIndexOptions, useLocalize } from '~/hooks';
@@ -9,23 +10,6 @@ import { componentMapping } from './components';
 import { useChatContext } from '~/Providers';
 import { settings } from './settings';
 
-const excludedKeys = new Set([
-  'conversationId',
-  'title',
-  'endpoint',
-  'endpointType',
-  'createdAt',
-  'updatedAt',
-  'messages',
-  'isArchived',
-  'tags',
-  'user',
-  '__v',
-  '_id',
-  'tools',
-  'model',
-]);
-
 export default function Parameters() {
   const localize = useLocalize();
   const { conversation, setConversation } = useChatContext();
@@ -105,6 +89,31 @@ export default function Parameters() {
     });
   }, [parameters, setConversation]);
 
+  const resetParameters = useCallback(() => {
+    setConversation((prev) => {
+      if (!prev) {
+        return prev;
+      }
+
+      const updatedConversation = { ...prev };
+      const resetKeys: string[] = [];
+
+      Object.keys(updatedConversation).forEach((key) => {
+        if (excludedKeys.has(key)) {
+          return;
+        }
+
+        if (updatedConversation[key] !== undefined) {
+          resetKeys.push(key);
+          delete updatedConversation[key];
+        }
+      });
+
+      logger.log('parameters', 'parameters reset, affected keys:', resetKeys);
+      return updatedConversation;
+    });
+  }, [setConversation]);
+
   const openDialog = useCallback(() => {
     const newPreset = tConvoUpdateSchema.parse({
       ...conversation,
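Aside (sketch, not part of the diff): resetParameters keeps only the structural keys in the shared excludedKeys set and deletes everything else from the conversation object; the server-side unsetFields logic earlier in this commit then mirrors that deletion in the database. With hypothetical data:

    // Trimmed stand-in for the excludedKeys set exported by librechat-data-provider.
    const excludedKeys = new Set(['conversationId', 'endpoint', 'model']);
    const conversation = { conversationId: 'abc', endpoint: 'anthropic', model: 'claude-3-opus', topP: 0.9, temperature: 0.7 };

    const updated = { ...conversation };
    const resetKeys = [];
    Object.keys(updated).forEach((key) => {
      if (excludedKeys.has(key)) return;   // structural keys survive the reset
      if (updated[key] !== undefined) {
        resetKeys.push(key);
        delete updated[key];
      }
    });
    console.log(resetKeys); // ['topP', 'temperature']
    console.log(updated);   // { conversationId: 'abc', endpoint: 'anthropic', model: 'claude-3-opus' }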
@@ -146,7 +155,17 @@ export default function Parameters() {
           );
         })}
       </div>
-      <div className="mt-6 flex justify-center">
+      <div className="mt-4 flex justify-center">
+        <button
+          type="button"
+          onClick={resetParameters}
+          className="btn btn-neutral flex w-full items-center justify-center gap-2 px-4 py-2 text-sm"
+        >
+          <RotateCcw className="h-4 w-4" aria-hidden="true" />
+          {localize('com_ui_reset_var', { 0: localize('com_ui_model_parameters') })}
+        </button>
+      </div>
+      <div className="mt-2 flex justify-center">
         <button
           onClick={openDialog}
           className="btn btn-primary focus:shadow-outline flex w-full items-center justify-center px-4 py-2 font-semibold text-white hover:bg-green-600 focus:border-green-500"
@@ -140,11 +140,9 @@
   "com_endpoint_ai": "AI",
   "com_endpoint_anthropic_maxoutputtokens": "Maximum number of tokens that can be generated in the response. Specify a lower value for shorter responses and a higher value for longer responses. Note: models may stop before reaching this maximum.",
   "com_endpoint_anthropic_prompt_cache": "Prompt caching allows reusing large context or instructions across API calls, reducing costs and latency",
-  "com_endpoint_thinking": "Thinking",
-  "com_endpoint_thinking_budget": "Thinking Budget",
-  "com_endpoint_anthropic_thinking": "Enables internal reasoning for supported Claude models (3.7 Sonnet). Note: requires \"Thinking Budget\" to be set and lower than \"Max Output Tokens\"",
-  "com_endpoint_anthropic_thinking_budget": "Determines the max number of tokens Claude is allowed to use for its internal reasoning process. Larger budgets can improve response quality by enabling more thorough analysis for complex problems, although Claude may not use the entire budget allocated, especially at ranges above 32K. This setting must be lower than \"Max Output Tokens.\"",
   "com_endpoint_anthropic_temp": "Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and closer to 1 for creative and generative tasks. We recommend altering this or Top P but not both.",
+  "com_endpoint_anthropic_thinking": "Enables internal reasoning for supported Claude models (3.7 Sonnet). Note: requires \"Thinking Budget\" to be set and lower than \"Max Output Tokens\"",
+  "com_endpoint_anthropic_thinking_budget": "Determines the max number of tokens Claude is allowed to use for its internal reasoning process. Larger budgets can improve response quality by enabling more thorough analysis for complex problems, although Claude may not use the entire budget allocated, especially at ranges above 32K. This setting must be lower than \"Max Output Tokens.\"",
   "com_endpoint_anthropic_topk": "Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature).",
   "com_endpoint_anthropic_topp": "Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value.",
   "com_endpoint_assistant": "Assistant",
@@ -250,6 +248,8 @@
   "com_endpoint_stop": "Stop Sequences",
   "com_endpoint_stop_placeholder": "Separate values by pressing `Enter`",
   "com_endpoint_temperature": "Temperature",
+  "com_endpoint_thinking": "Thinking",
+  "com_endpoint_thinking_budget": "Thinking Budget",
   "com_endpoint_top_k": "Top K",
   "com_endpoint_top_p": "Top P",
   "com_endpoint_use_active_assistant": "Use Active Assistant",
@@ -357,12 +357,12 @@
   "com_nav_lang_estonian": "Eesti keel",
   "com_nav_lang_finnish": "Suomi",
   "com_nav_lang_french": "Français ",
+  "com_nav_lang_georgian": "ქართული",
   "com_nav_lang_german": "Deutsch",
   "com_nav_lang_hebrew": "עברית",
   "com_nav_lang_indonesia": "Indonesia",
   "com_nav_lang_italian": "Italiano",
   "com_nav_lang_japanese": "日本語",
-  "com_nav_lang_georgian": "ქართული",
   "com_nav_lang_korean": "한국어",
   "com_nav_lang_polish": "Polski",
   "com_nav_lang_portuguese": "Português",
@@ -834,4 +834,4 @@
   "com_ui_zoom": "Zoom",
   "com_user_message": "You",
   "com_warning_resubmit_unsupported": "Resubmitting the AI message is not supported for this endpoint."
-}
+}
@@ -720,6 +720,7 @@
   "com_ui_stop": "停止",
   "com_ui_storage": "存储",
   "com_ui_submit": "提交",
+  "com_ui_teach_or_explain": "学习中",
   "com_ui_temporary_chat": "临时对话",
   "com_ui_terms_and_conditions": "条款和条件",
   "com_ui_terms_of_service": "服务政策",
@@ -35,10 +35,5 @@
     "test/setupTests.js",
     "env.d.ts",
     "../config/translations/**/*.ts"
   ],
-  "references": [
-    {
-      "path": "./tsconfig.node.json"
-    }
-  ]
 }
@@ -1,9 +0,0 @@
-{
-  "compilerOptions": {
-    "composite": true,
-    "module": "ESNext",
-    "moduleResolution": "Node",
-    "allowSyntheticDefaultImports": true
-  },
-  "include": ["vite.config.ts"]
-}
@@ -37,13 +37,12 @@ export default defineConfig({
       },
       useCredentials: true,
       workbox: {
-        globPatterns: [
-          'assets/**/*.{png,jpg,svg,ico}',
-          '**/*.{js,css,html,ico,woff2}',
-        ],
+        globPatterns: ['**/*'],
+        globIgnores: ['images/**/*'],
         maximumFileSizeToCacheInBytes: 4 * 1024 * 1024,
         navigateFallbackDenylist: [/^\/oauth/],
       },
       includeAssets: ['**/*'],
       manifest: {
         name: 'LibreChat',
         short_name: 'LibreChat',
@@ -70,7 +69,7 @@ export default defineConfig({
         {
           src: '/assets/icon-192x192.png',
           sizes: '192x192',
-          type: 'image/png'
+          type: 'image/png',
         },
         {
           src: '/assets/maskable-icon.png',
@@ -113,10 +112,7 @@ export default defineConfig({
           if (id.includes('node_modules/highlight.js')) {
             return 'markdown_highlight';
           }
-          if (
-            id.includes('node_modules/hast-util-raw') ||
-            id.includes('node_modules/katex')
-          ) {
+          if (id.includes('node_modules/hast-util-raw') || id.includes('node_modules/katex')) {
             return 'markdown_large';
           }
           // Group TanStack libraries together.
@@ -141,10 +137,7 @@ export default defineConfig({
         entryFileNames: 'assets/[name].[hash].js',
         chunkFileNames: 'assets/[name].[hash].js',
         assetFileNames: (assetInfo) => {
-          if (
-            assetInfo.names &&
-            /\.(woff|woff2|eot|ttf|otf)$/.test(assetInfo.names)
-          ) {
+          if (assetInfo.names && /\.(woff|woff2|eot|ttf|otf)$/.test(assetInfo.names)) {
             return 'assets/fonts/[name][extname]';
           }
           return 'assets/[name].[hash][extname]';
@@ -187,4 +180,4 @@ export function sourcemapExclude(opts?: SourcemapExclude): Plugin {
       }
     },
   };
-}
+}
@@ -25,6 +25,7 @@ const compat = new FlatCompat({
 export default [
   {
     ignores: [
+      'client/vite.config.ts',
       'client/dist/**/*',
       'client/public/**/*',
       'client/coverage/**/*',
@@ -235,7 +235,6 @@ endpoints:
     - name: 'OpenRouter'
       # For `apiKey` and `baseURL`, you can use environment variables that you define.
-      # recommended environment variables:
       # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
       apiKey: '${OPENROUTER_KEY}'
       baseURL: 'https://openrouter.ai/api/v1'
       models:
package-lock.json (generated, 2 changes)
@@ -41798,7 +41798,7 @@
     },
     "packages/data-provider": {
       "name": "librechat-data-provider",
-      "version": "0.7.6992",
+      "version": "0.7.6993",
       "license": "ISC",
       "dependencies": {
         "axios": "^1.7.7",
@@ -1,6 +1,6 @@
 {
   "name": "librechat-data-provider",
-  "version": "0.7.6992",
+  "version": "0.7.6993",
   "description": "data services for librechat apps",
   "main": "dist/index.js",
   "module": "dist/index.es.js",
@@ -2,8 +2,8 @@ import { z } from 'zod';
 import type { ZodError } from 'zod';
 import type { TModelsConfig } from './types';
 import { EModelEndpoint, eModelEndpointSchema } from './schemas';
-import { fileConfigSchema } from './file-config';
 import { specsConfigSchema, TSpecsConfig } from './models';
+import { fileConfigSchema } from './file-config';
 import { FileSources } from './types/files';
 import { MCPServersSchema } from './mcp';
 
@@ -31,6 +31,27 @@ export const defaultRetrievalModels = [
   'gpt-4-1106',
 ];
 
+export const excludedKeys = new Set([
+  'conversationId',
+  'title',
+  'iconURL',
+  'greeting',
+  'endpoint',
+  'endpointType',
+  'createdAt',
+  'updatedAt',
+  'expiredAt',
+  'messages',
+  'isArchived',
+  'tags',
+  'user',
+  '__v',
+  '_id',
+  'tools',
+  'model',
+  'files',
+]);
+
 export enum SettingsViews {
   default = 'default',
   advanced = 'advanced',
@@ -252,7 +252,8 @@ export const googleSettings = {
   },
 };
 
-const ANTHROPIC_MAX_OUTPUT = 8192;
+const ANTHROPIC_MAX_OUTPUT = 128000;
+const DEFAULT_MAX_OUTPUT = 8192;
 const LEGACY_ANTHROPIC_MAX_OUTPUT = 4096;
 export const anthropicSettings = {
   model: {
@@ -280,16 +281,19 @@ export const anthropicSettings = {
     min: 1,
     max: ANTHROPIC_MAX_OUTPUT,
     step: 1,
-    default: ANTHROPIC_MAX_OUTPUT,
+    default: DEFAULT_MAX_OUTPUT,
     reset: (modelName: string) => {
-      if (modelName.includes('claude-3-5-sonnet') || modelName.includes('claude-3-7-sonnet')) {
-        return ANTHROPIC_MAX_OUTPUT;
+      if (/claude-3[-.]5-sonnet/.test(modelName) || /claude-3[-.]7/.test(modelName)) {
+        return DEFAULT_MAX_OUTPUT;
       }
 
       return 4096;
     },
     set: (value: number, modelName: string) => {
-      if (!modelName.includes('claude-3-5-sonnet') && value > LEGACY_ANTHROPIC_MAX_OUTPUT) {
+      if (
+        !(/claude-3[-.]5-sonnet/.test(modelName) || /claude-3[-.]7/.test(modelName)) &&
+        value > LEGACY_ANTHROPIC_MAX_OUTPUT
+      ) {
         return LEGACY_ANTHROPIC_MAX_OUTPUT;
       }
 
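Illustration (not part of the diff): taken together, reset() and set() mean legacy models keep the 4096-token output cap while 3.5 Sonnet and 3.7 accept the larger default. A standalone sketch of the two branches:

    const DEFAULT_MAX_OUTPUT = 8192;
    const LEGACY_ANTHROPIC_MAX_OUTPUT = 4096;

    const reset = (modelName) =>
      /claude-3[-.]5-sonnet/.test(modelName) || /claude-3[-.]7/.test(modelName)
        ? DEFAULT_MAX_OUTPUT
        : 4096;

    const set = (value, modelName) =>
      !(/claude-3[-.]5-sonnet/.test(modelName) || /claude-3[-.]7/.test(modelName)) &&
      value > LEGACY_ANTHROPIC_MAX_OUTPUT
        ? LEGACY_ANTHROPIC_MAX_OUTPUT
        : value;

    console.log(set(8192, 'claude-3-opus'));     // 4096 — legacy models stay capped
    console.log(set(8192, 'claude-3.7-sonnet')); // 8192 — newer models keep the request
    console.log(reset('claude-3-7-sonnet'));     // 8192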
@@ -760,37 +764,8 @@ export const googleSchema = tConversationSchema
     spec: true,
     maxContextTokens: true,
   })
-  .transform((obj) => {
-    return {
-      ...obj,
-      model: obj.model ?? google.model.default,
-      modelLabel: obj.modelLabel ?? null,
-      promptPrefix: obj.promptPrefix ?? null,
-      examples: obj.examples ?? [{ input: { content: '' }, output: { content: '' } }],
-      temperature: obj.temperature ?? google.temperature.default,
-      maxOutputTokens: obj.maxOutputTokens ?? google.maxOutputTokens.default,
-      topP: obj.topP ?? google.topP.default,
-      topK: obj.topK ?? google.topK.default,
-      iconURL: obj.iconURL ?? undefined,
-      greeting: obj.greeting ?? undefined,
-      spec: obj.spec ?? undefined,
-      maxContextTokens: obj.maxContextTokens ?? undefined,
-    };
-  })
-  .catch(() => ({
-    model: google.model.default,
-    modelLabel: null,
-    promptPrefix: null,
-    examples: [{ input: { content: '' }, output: { content: '' } }],
-    temperature: google.temperature.default,
-    maxOutputTokens: google.maxOutputTokens.default,
-    topP: google.topP.default,
-    topK: google.topK.default,
-    iconURL: undefined,
-    greeting: undefined,
-    spec: undefined,
-    maxContextTokens: undefined,
-  }));
+  .transform((obj: Partial<TConversation>) => removeNullishValues(obj))
+  .catch(() => ({}));
 
 /**
  * TODO: Map the following fields: