mirror of https://github.com/danny-avila/LibreChat.git
synced 2026-01-10 04:28:50 +01:00

Merge branch 'main' into feat/Custom-Token-Rates-for-Endpoints
commit 9486599268
588 changed files with 35845 additions and 13907 deletions
@@ -1,6 +1,8 @@
 const mongoose = require('mongoose');
-const { SystemRoles } = require('librechat-data-provider');
-const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
+const { agentSchema } = require('@librechat/data-schemas');
+const { SystemRoles, Tools } = require('librechat-data-provider');
+const { GLOBAL_PROJECT_NAME, EPHEMERAL_AGENT_ID, mcp_delimiter } =
+  require('librechat-data-provider').Constants;
+const { CONFIG_STORE, STARTUP_CONFIG } = require('librechat-data-provider').CacheKeys;
 const {
   getProjectByName,
@@ -9,7 +11,6 @@ const {
   removeAgentFromAllProjects,
 } = require('./Project');
 const getLogStores = require('~/cache/getLogStores');
-const { agentSchema } = require('@librechat/data-schemas');
 
 const Agent = mongoose.model('agent', agentSchema);
 
@@ -39,13 +40,69 @@ const getAgent = async (searchParameter) => await Agent.findOne(searchParameter)
  * @param {Object} params
  * @param {ServerRequest} params.req
  * @param {string} params.agent_id
+ * @param {string} params.endpoint
+ * @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
+ * @returns {Agent|null} The agent document as a plain object, or null if not found.
+ */
+const loadEphemeralAgent = ({ req, agent_id, endpoint, model_parameters: _m }) => {
+  const { model, ...model_parameters } = _m;
+  /** @type {Record<string, FunctionTool>} */
+  const availableTools = req.app.locals.availableTools;
+  const mcpServers = new Set(req.body.ephemeralAgent?.mcp);
+  /** @type {string[]} */
+  const tools = [];
+  if (req.body.ephemeralAgent?.execute_code === true) {
+    tools.push(Tools.execute_code);
+  }
+
+  if (mcpServers.size > 0) {
+    for (const toolName of Object.keys(availableTools)) {
+      if (!toolName.includes(mcp_delimiter)) {
+        continue;
+      }
+      const mcpServer = toolName.split(mcp_delimiter)?.[1];
+      if (mcpServer && mcpServers.has(mcpServer)) {
+        tools.push(toolName);
+      }
+    }
+  }
+
+  const instructions = req.body.promptPrefix;
+  return {
+    id: agent_id,
+    instructions,
+    provider: endpoint,
+    model_parameters,
+    model,
+    tools,
+  };
+};
+
+/**
+ * Load an agent based on the provided ID
+ *
+ * @param {Object} params
+ * @param {ServerRequest} params.req
+ * @param {string} params.agent_id
+ * @param {string} params.endpoint
+ * @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
  * @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
  */
-const loadAgent = async ({ req, agent_id }) => {
+const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
   if (!agent_id) {
     return null;
   }
+  if (agent_id === EPHEMERAL_AGENT_ID) {
+    return loadEphemeralAgent({ req, agent_id, endpoint, model_parameters });
+  }
   const agent = await getAgent({
     id: agent_id,
   });
 
   if (!agent) {
     return null;
   }
 
   if (agent.author.toString() === req.user.id) {
     return agent;
   }
 
@@ -96,9 +153,11 @@ const updateAgent = async (searchParameter, updateData) => {
  */
 const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
   const searchParameter = { id: agent_id };
 
+  let agent = await getAgent(searchParameter);
+  if (!agent) {
+    throw new Error('Agent not found for adding resource file');
+  }
   const fileIdsPath = `tool_resources.${tool_resource}.file_ids`;
 
   await Agent.updateOne(
     {
       id: agent_id,
@@ -111,7 +170,12 @@ const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
     },
   );
 
-  const updateData = { $addToSet: { [fileIdsPath]: file_id } };
+  const updateData = {
+    $addToSet: {
+      tools: tool_resource,
+      [fileIdsPath]: file_id,
+    },
+  };
 
   const updatedAgent = await updateAgent(searchParameter, updateData);
   if (updatedAgent) {
@@ -122,16 +186,17 @@ const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
 };
 
 /**
- * Removes multiple resource files from an agent in a single update.
+ * Removes multiple resource files from an agent using atomic operations.
  * @param {object} params
 * @param {string} params.agent_id
  * @param {Array<{tool_resource: string, file_id: string}>} params.files
  * @returns {Promise<Agent>} The updated agent.
+ * @throws {Error} If the agent is not found or update fails.
  */
 const removeAgentResourceFiles = async ({ agent_id, files }) => {
   const searchParameter = { id: agent_id };
 
-  // associate each tool resource with the respective file ids array
+  // Group files to remove by resource
   const filesByResource = files.reduce((acc, { tool_resource, file_id }) => {
     if (!acc[tool_resource]) {
       acc[tool_resource] = [];
@@ -140,42 +205,35 @@ const removeAgentResourceFiles = async ({ agent_id, files }) => {
     return acc;
   }, {});
 
-  // build the update aggregation pipeline wich removes file ids from tool resources array
-  // and eventually deletes empty tool resources
-  const updateData = [];
-  Object.entries(filesByResource).forEach(([resource, fileIds]) => {
-    const toolResourcePath = `tool_resources.${resource}`;
-    const fileIdsPath = `${toolResourcePath}.file_ids`;
-
-    // file ids removal stage
-    updateData.push({
-      $set: {
-        [fileIdsPath]: {
-          $filter: {
-            input: `$${fileIdsPath}`,
-            cond: { $not: [{ $in: ['$$this', fileIds] }] },
-          },
-        },
-      },
-    });
-
-    // empty tool resource deletion stage
-    updateData.push({
-      $set: {
-        [toolResourcePath]: {
-          $cond: [{ $eq: [`$${fileIdsPath}`, []] }, '$$REMOVE', `$${toolResourcePath}`],
-        },
-      },
-    });
-  });
-
-  // return the updated agent or throw if no agent matches
-  const updatedAgent = await updateAgent(searchParameter, updateData);
-  if (updatedAgent) {
-    return updatedAgent;
-  } else {
-    throw new Error('Agent not found for removing resource files');
+  // Step 1: Atomically remove file IDs using $pull
+  const pullOps = {};
+  const resourcesToCheck = new Set();
+  for (const [resource, fileIds] of Object.entries(filesByResource)) {
+    const fileIdsPath = `tool_resources.${resource}.file_ids`;
+    pullOps[fileIdsPath] = { $in: fileIds };
+    resourcesToCheck.add(resource);
+  }
+
+  const updatePullData = { $pull: pullOps };
+  const agentAfterPull = await Agent.findOneAndUpdate(searchParameter, updatePullData, {
+    new: true,
+  }).lean();
+
+  if (!agentAfterPull) {
+    // Agent might have been deleted concurrently, or never existed.
+    // Check if it existed before trying to throw.
+    const agentExists = await getAgent(searchParameter);
+    if (!agentExists) {
+      throw new Error('Agent not found for removing resource files');
+    }
+    // If it existed but findOneAndUpdate returned null, something else went wrong.
+    throw new Error('Failed to update agent during file removal (pull step)');
+  }
+
+  // Return the agent state directly after the $pull operation.
+  // Skipping the $unset step for now to simplify and test core $pull atomicity.
+  // Empty arrays might remain, but the removal itself should be correct.
+  return agentAfterPull;
 };
 
 /**
@@ -250,7 +308,7 @@ const getListAgents = async (searchParameter) => {
  * This function also updates the corresponding projects to include or exclude the agent ID.
  *
 * @param {Object} params - Parameters for updating the agent's projects.
- * @param {import('librechat-data-provider').TUser} params.user - Parameters for updating the agent's projects.
+ * @param {MongoUser} params.user - Parameters for updating the agent's projects.
 * @param {string} params.agentId - The ID of the agent to update.
 * @param {string[]} [params.projectIds] - Array of project IDs to add to the agent.
 * @param {string[]} [params.removeProjectIds] - Array of project IDs to remove from the agent.
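
Illustration (not part of the diff): with the change above, an ephemeral agent is assembled entirely from the request, with no database read. A minimal sketch of a call site; the require path, endpoint name, and model parameters here are assumptions for illustration:

const { Constants } = require('librechat-data-provider');
const { loadAgent } = require('~/models/Agent'); // assumed module path

// Resolves to a transient agent object: provider/model from the caller,
// instructions from req.body.promptPrefix, tools from the ephemeralAgent flags.
async function resolveEphemeralAgent(req) {
  return await loadAgent({
    req,
    agent_id: Constants.EPHEMERAL_AGENT_ID,
    endpoint: 'openAI', // hypothetical endpoint name
    model_parameters: { model: 'gpt-4o', temperature: 0.7 }, // hypothetical values
  });
}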
@@ -33,6 +33,50 @@ describe('Agent Resource File Operations', () => {
     return agent;
   };
 
+  test('should add tool_resource to tools if missing', async () => {
+    const agent = await createBasicAgent();
+    const fileId = uuidv4();
+    const toolResource = 'file_search';
+
+    const updatedAgent = await addAgentResourceFile({
+      agent_id: agent.id,
+      tool_resource: toolResource,
+      file_id: fileId,
+    });
+
+    expect(updatedAgent.tools).toContain(toolResource);
+    expect(Array.isArray(updatedAgent.tools)).toBe(true);
+    // Should not duplicate
+    const count = updatedAgent.tools.filter((t) => t === toolResource).length;
+    expect(count).toBe(1);
+  });
+
+  test('should not duplicate tool_resource in tools if already present', async () => {
+    const agent = await createBasicAgent();
+    const fileId1 = uuidv4();
+    const fileId2 = uuidv4();
+    const toolResource = 'file_search';
+
+    // First add
+    await addAgentResourceFile({
+      agent_id: agent.id,
+      tool_resource: toolResource,
+      file_id: fileId1,
+    });
+
+    // Second add (should not duplicate)
+    const updatedAgent = await addAgentResourceFile({
+      agent_id: agent.id,
+      tool_resource: toolResource,
+      file_id: fileId2,
+    });
+
+    expect(updatedAgent.tools).toContain(toolResource);
+    expect(Array.isArray(updatedAgent.tools)).toBe(true);
+    const count = updatedAgent.tools.filter((t) => t === toolResource).length;
+    expect(count).toBe(1);
+  });
+
   test('should handle concurrent file additions', async () => {
     const agent = await createBasicAgent();
     const fileIds = Array.from({ length: 10 }, () => uuidv4());
@@ -157,4 +201,134 @@ describe('Agent Resource File Operations', () => {
     expect(updatedAgent.tool_resources[tool].file_ids).toHaveLength(5);
   });
-});
+
+  test('should handle concurrent duplicate additions', async () => {
+    const agent = await createBasicAgent();
+    const fileId = uuidv4();
+
+    // Concurrent additions of the same file
+    const additionPromises = Array.from({ length: 5 }).map(() =>
+      addAgentResourceFile({
+        agent_id: agent.id,
+        tool_resource: 'test_tool',
+        file_id: fileId,
+      }),
+    );
+
+    await Promise.all(additionPromises);
+
+    const updatedAgent = await Agent.findOne({ id: agent.id });
+    expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
+    // Should only contain one instance of the fileId
+    expect(updatedAgent.tool_resources.test_tool.file_ids).toHaveLength(1);
+    expect(updatedAgent.tool_resources.test_tool.file_ids[0]).toBe(fileId);
+  });
+
+  test('should handle concurrent add and remove of the same file', async () => {
+    const agent = await createBasicAgent();
+    const fileId = uuidv4();
+
+    // First, ensure the file exists (or test might be trivial if remove runs first)
+    await addAgentResourceFile({
+      agent_id: agent.id,
+      tool_resource: 'test_tool',
+      file_id: fileId,
+    });
+
+    // Concurrent add (which should be ignored) and remove
+    const operations = [
+      addAgentResourceFile({
+        agent_id: agent.id,
+        tool_resource: 'test_tool',
+        file_id: fileId,
+      }),
+      removeAgentResourceFiles({
+        agent_id: agent.id,
+        files: [{ tool_resource: 'test_tool', file_id: fileId }],
+      }),
+    ];
+
+    await Promise.all(operations);
+
+    const updatedAgent = await Agent.findOne({ id: agent.id });
+    // The final state should ideally be that the file is removed,
+    // but the key point is consistency (not duplicated or error state).
+    // Depending on execution order, the file might remain if the add operation's
+    // findOneAndUpdate runs after the remove operation completes.
+    // A more robust check might be that the length is <= 1.
+    // Given the remove uses an update pipeline, it might be more likely to win.
+    // The final state depends on race condition timing (add or remove might "win").
+    // The critical part is that the state is consistent (no duplicates, no errors).
+    // Assert that the fileId is either present exactly once or not present at all.
+    expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
+    const finalFileIds = updatedAgent.tool_resources.test_tool.file_ids;
+    const count = finalFileIds.filter((id) => id === fileId).length;
+    expect(count).toBeLessThanOrEqual(1); // Should be 0 or 1, never more
+    // Optional: Check overall length is consistent with the count
+    if (count === 0) {
+      expect(finalFileIds).toHaveLength(0);
+    } else {
+      expect(finalFileIds).toHaveLength(1);
+      expect(finalFileIds[0]).toBe(fileId);
+    }
+  });
+
+  test('should handle concurrent duplicate removals', async () => {
+    const agent = await createBasicAgent();
+    const fileId = uuidv4();
+
+    // Add the file first
+    await addAgentResourceFile({
+      agent_id: agent.id,
+      tool_resource: 'test_tool',
+      file_id: fileId,
+    });
+
+    // Concurrent removals of the same file
+    const removalPromises = Array.from({ length: 5 }).map(() =>
+      removeAgentResourceFiles({
+        agent_id: agent.id,
+        files: [{ tool_resource: 'test_tool', file_id: fileId }],
+      }),
+    );
+
+    await Promise.all(removalPromises);
+
+    const updatedAgent = await Agent.findOne({ id: agent.id });
+    // Check if the array is empty or the tool resource itself is removed
+    const fileIds = updatedAgent.tool_resources?.test_tool?.file_ids ?? [];
+    expect(fileIds).toHaveLength(0);
+    expect(fileIds).not.toContain(fileId);
+  });
+
+  test('should handle concurrent removals of different files', async () => {
+    const agent = await createBasicAgent();
+    const fileIds = Array.from({ length: 10 }, () => uuidv4());
+
+    // Add all files first
+    await Promise.all(
+      fileIds.map((fileId) =>
+        addAgentResourceFile({
+          agent_id: agent.id,
+          tool_resource: 'test_tool',
+          file_id: fileId,
+        }),
+      ),
+    );
+
+    // Concurrently remove all files
+    const removalPromises = fileIds.map((fileId) =>
+      removeAgentResourceFiles({
+        agent_id: agent.id,
+        files: [{ tool_resource: 'test_tool', file_id: fileId }],
+      }),
+    );
+
+    await Promise.all(removalPromises);
+
+    const updatedAgent = await Agent.findOne({ id: agent.id });
+    // Check if the array is empty or the tool resource itself is removed
+    const finalFileIds = updatedAgent.tool_resources?.test_tool?.file_ids ?? [];
+    expect(finalFileIds).toHaveLength(0);
+  });
+});
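
Note: the concurrency tests above lean on MongoDB's single-document atomicity: $addToSet cannot introduce a duplicate array entry and $pull removes every matching entry, so any interleaving of the concurrent calls leaves tool_resources consistent. A sketch of the invariant being asserted (helper name is illustrative):

// After any mix of concurrent add/remove calls for one fileId, the array
// holds zero or one copy of it, never more.
async function countCopies(Agent, agentId, fileId) {
  const doc = await Agent.findOne({ id: agentId }).lean();
  const ids = doc?.tool_resources?.test_tool?.file_ids ?? [];
  return ids.filter((id) => id === fileId).length; // expected: 0 or 1
}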
@@ -1,44 +1,4 @@
 const mongoose = require('mongoose');
 const { balanceSchema } = require('@librechat/data-schemas');
-const { getMultiplier } = require('./tx');
-const { logger } = require('~/config');
-
-balanceSchema.statics.check = async function ({
-  user,
-  model,
-  endpoint,
-  valueKey,
-  tokenType,
-  amount,
-  endpointTokenConfig,
-}) {
-  const multiplier = getMultiplier({ valueKey, tokenType, model, endpoint, endpointTokenConfig });
-  const tokenCost = amount * multiplier;
-  const { tokenCredits: balance } = (await this.findOne({ user }, 'tokenCredits').lean()) ?? {};
-
-  logger.debug('[Balance.check]', {
-    user,
-    model,
-    endpoint,
-    valueKey,
-    tokenType,
-    amount,
-    balance,
-    multiplier,
-    endpointTokenConfig: !!endpointTokenConfig,
-  });
-
-  if (!balance) {
-    return {
-      canSpend: false,
-      balance: 0,
-      tokenCost,
-    };
-  }
-
-  logger.debug('[Balance.check]', { tokenCost });
-
-  return { canSpend: balance >= tokenCost, balance, tokenCost };
-};
 
 module.exports = mongoose.model('Balance', balanceSchema);
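
For reference, the removed static priced a request as amount * multiplier and compared that cost against the stored credits. A standalone restatement of that arithmetic (the function name is illustrative; the original lived on balanceSchema.statics.check):

// canSpend is true only when tokenCredits cover amount * multiplier.
function checkBalance({ balance, amount, multiplier }) {
  const tokenCost = amount * multiplier;
  if (!balance) {
    return { canSpend: false, balance: 0, tokenCost };
  }
  return { canSpend: balance >= tokenCost, balance, tokenCost };
}

// e.g. checkBalance({ balance: 1000, amount: 120, multiplier: 15 })
// => { canSpend: false, balance: 1000, tokenCost: 1800 }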
@@ -28,4 +28,4 @@ const getBanner = async (user) => {
   }
 };
 
-module.exports = { getBanner };
+module.exports = { Banner, getBanner };
@@ -15,19 +15,6 @@ const searchConversation = async (conversationId) => {
     throw new Error('Error searching conversation');
   }
 };
-/**
- * Searches for a conversation by conversationId and returns associated file ids.
- * @param {string} conversationId - The conversation's ID.
- * @returns {Promise<string[] | null>}
- */
-const getConvoFiles = async (conversationId) => {
-  try {
-    return (await Conversation.findOne({ conversationId }, 'files').lean())?.files ?? [];
-  } catch (error) {
-    logger.error('[getConvoFiles] Error getting conversation files', error);
-    throw new Error('Error getting conversation files');
-  }
-};
 
 /**
  * Retrieves a single conversation for a given user and conversation ID.
@@ -73,6 +60,20 @@ const deleteNullOrEmptyConversations = async () => {
   }
 };
 
+/**
+ * Searches for a conversation by conversationId and returns associated file ids.
+ * @param {string} conversationId - The conversation's ID.
+ * @returns {Promise<string[] | null>}
+ */
+const getConvoFiles = async (conversationId) => {
+  try {
+    return (await Conversation.findOne({ conversationId }, 'files').lean())?.files ?? [];
+  } catch (error) {
+    logger.error('[getConvoFiles] Error getting conversation files', error);
+    throw new Error('Error getting conversation files');
+  }
+};
+
 module.exports = {
   Conversation,
   getConvoFiles,
@@ -87,11 +88,13 @@ module.exports = {
    */
   saveConvo: async (req, { conversationId, newConversationId, ...convo }, metadata) => {
     try {
-      if (metadata && metadata?.context) {
+      if (metadata?.context) {
         logger.debug(`[saveConvo] ${metadata.context}`);
       }
+
       const messages = await getMessages({ conversationId }, '_id');
       const update = { ...convo, messages, user: req.user.id };
+
       if (newConversationId) {
         update.conversationId = newConversationId;
       }
@@ -147,75 +150,102 @@ module.exports = {
       throw new Error('Failed to save conversations in bulk.');
     }
   },
-  getConvosByPage: async (user, pageNumber = 1, pageSize = 25, isArchived = false, tags) => {
-    const query = { user };
+  getConvosByCursor: async (
+    user,
+    { cursor, limit = 25, isArchived = false, tags, search, order = 'desc' } = {},
+  ) => {
+    const filters = [{ user }];
 
     if (isArchived) {
-      query.isArchived = true;
+      filters.push({ isArchived: true });
     } else {
-      query.$or = [{ isArchived: false }, { isArchived: { $exists: false } }];
-    }
-    if (Array.isArray(tags) && tags.length > 0) {
-      query.tags = { $in: tags };
+      filters.push({ $or: [{ isArchived: false }, { isArchived: { $exists: false } }] });
     }
 
-    query.$and = [{ $or: [{ expiredAt: null }, { expiredAt: { $exists: false } }] }];
+    if (Array.isArray(tags) && tags.length > 0) {
+      filters.push({ tags: { $in: tags } });
+    }
+
+    filters.push({ $or: [{ expiredAt: null }, { expiredAt: { $exists: false } }] });
+
+    if (search) {
+      try {
+        const meiliResults = await Conversation.meiliSearch(search);
+        const matchingIds = Array.isArray(meiliResults.hits)
+          ? meiliResults.hits.map((result) => result.conversationId)
+          : [];
+        if (!matchingIds.length) {
+          return { conversations: [], nextCursor: null };
+        }
+        filters.push({ conversationId: { $in: matchingIds } });
+      } catch (error) {
+        logger.error('[getConvosByCursor] Error during meiliSearch', error);
+        return { message: 'Error during meiliSearch' };
+      }
+    }
+
+    if (cursor) {
+      filters.push({ updatedAt: { $lt: new Date(cursor) } });
+    }
+
+    const query = filters.length === 1 ? filters[0] : { $and: filters };
 
     try {
-      const totalConvos = (await Conversation.countDocuments(query)) || 1;
-      const totalPages = Math.ceil(totalConvos / pageSize);
       const convos = await Conversation.find(query)
-        .sort({ updatedAt: -1 })
-        .skip((pageNumber - 1) * pageSize)
-        .limit(pageSize)
+        .select(
+          'conversationId endpoint title createdAt updatedAt user model agent_id assistant_id spec iconURL',
+        )
+        .sort({ updatedAt: order === 'asc' ? 1 : -1 })
+        .limit(limit + 1)
         .lean();
-      return { conversations: convos, pages: totalPages, pageNumber, pageSize };
+
+      let nextCursor = null;
+      if (convos.length > limit) {
+        const lastConvo = convos.pop();
+        nextCursor = lastConvo.updatedAt.toISOString();
+      }
+
+      return { conversations: convos, nextCursor };
     } catch (error) {
-      logger.error('[getConvosByPage] Error getting conversations', error);
+      logger.error('[getConvosByCursor] Error getting conversations', error);
      return { message: 'Error getting conversations' };
    }
  },
-  getConvosQueried: async (user, convoIds, pageNumber = 1, pageSize = 25) => {
+  getConvosQueried: async (user, convoIds, cursor = null, limit = 25) => {
     try {
-      if (!convoIds || convoIds.length === 0) {
-        return { conversations: [], pages: 1, pageNumber, pageSize };
+      if (!convoIds?.length) {
+        return { conversations: [], nextCursor: null, convoMap: {} };
       }
 
+      const conversationIds = convoIds.map((convo) => convo.conversationId);
+
+      const results = await Conversation.find({
+        user,
+        conversationId: { $in: conversationIds },
+        $or: [{ expiredAt: { $exists: false } }, { expiredAt: null }],
+      }).lean();
+
+      results.sort((a, b) => new Date(b.updatedAt) - new Date(a.updatedAt));
+
+      let filtered = results;
+      if (cursor && cursor !== 'start') {
+        const cursorDate = new Date(cursor);
+        filtered = results.filter((convo) => new Date(convo.updatedAt) < cursorDate);
+      }
+
+      const limited = filtered.slice(0, limit + 1);
+      let nextCursor = null;
+      if (limited.length > limit) {
+        const lastConvo = limited.pop();
+        nextCursor = lastConvo.updatedAt.toISOString();
+      }
+
-      const cache = {};
       const convoMap = {};
-      const promises = [];
-
-      convoIds.forEach((convo) =>
-        promises.push(
-          Conversation.findOne({
-            user,
-            conversationId: convo.conversationId,
-            $or: [{ expiredAt: { $exists: false } }, { expiredAt: null }],
-          }).lean(),
-        ),
-      );
-
-      const results = (await Promise.all(promises)).filter(Boolean);
-
-      results.forEach((convo, i) => {
-        const page = Math.floor(i / pageSize) + 1;
-        if (!cache[page]) {
-          cache[page] = [];
-        }
-        cache[page].push(convo);
+      limited.forEach((convo) => {
         convoMap[convo.conversationId] = convo;
       });
 
-      const totalPages = Math.ceil(results.length / pageSize);
-      cache.pages = totalPages;
-      cache.pageSize = pageSize;
-      return {
-        cache,
-        conversations: cache[pageNumber] || [],
-        pages: totalPages || 1,
-        pageNumber,
-        pageSize,
-        convoMap,
-      };
+      return { conversations: limited, nextCursor, convoMap };
     } catch (error) {
       logger.error('[getConvosQueried] Error getting conversations', error);
       return { message: 'Error fetching conversations' };
@@ -256,10 +286,26 @@ module.exports = {
    * logger.error(result); // { n: 5, ok: 1, deletedCount: 5, messages: { n: 10, ok: 1, deletedCount: 10 } }
    */
   deleteConvos: async (user, filter) => {
-    let toRemove = await Conversation.find({ ...filter, user }).select('conversationId');
-    const ids = toRemove.map((instance) => instance.conversationId);
-    let deleteCount = await Conversation.deleteMany({ ...filter, user });
-    deleteCount.messages = await deleteMessages({ conversationId: { $in: ids } });
-    return deleteCount;
+    try {
+      const userFilter = { ...filter, user };
+
+      const conversations = await Conversation.find(userFilter).select('conversationId');
+      const conversationIds = conversations.map((c) => c.conversationId);
+
+      if (!conversationIds.length) {
+        throw new Error('Conversation not found or already deleted.');
+      }
+
+      const deleteConvoResult = await Conversation.deleteMany(userFilter);
+
+      const deleteMessagesResult = await deleteMessages({
+        conversationId: { $in: conversationIds },
+      });
+
+      return { ...deleteConvoResult, messages: deleteMessagesResult };
+    } catch (error) {
+      logger.error('[deleteConvos] Error deleting conversations and messages', error);
+      throw error;
+    }
   },
 };
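
Note: getConvosByCursor replaces page/pageSize pagination with an updatedAt cursor. It fetches limit + 1 rows, pops the overflow row, and returns that row's timestamp as nextCursor. A sketch of how a caller would drain the full list (the require path is assumed):

const { getConvosByCursor } = require('~/models/Convo'); // assumed path

async function getAllConversations(user) {
  const all = [];
  let cursor = null;
  do {
    // Each call returns up to `limit` conversations plus the cursor for the next page.
    const { conversations, nextCursor } = await getConvosByCursor(user, { cursor, limit: 25 });
    all.push(...(conversations ?? []));
    cursor = nextCursor;
  } while (cursor != null);
  return all;
}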
@@ -1,5 +1,7 @@
 const mongoose = require('mongoose');
+const { EToolResources } = require('librechat-data-provider');
 const { fileSchema } = require('@librechat/data-schemas');
+const { logger } = require('~/config');
 
 const File = mongoose.model('File', fileSchema);
 
@@ -7,7 +9,7 @@ const File = mongoose.model('File', fileSchema);
  * Finds a file by its file_id with additional query options.
  * @param {string} file_id - The unique identifier of the file.
  * @param {object} options - Query options for filtering, projection, etc.
- * @returns {Promise<IMongoFile>} A promise that resolves to the file document or null.
+ * @returns {Promise<MongoFile>} A promise that resolves to the file document or null.
  */
 const findFileById = async (file_id, options = {}) => {
   return await File.findOne({ file_id, ...options }).lean();
@@ -17,18 +19,57 @@ const findFileById = async (file_id, options = {}) => {
  * Retrieves files matching a given filter, sorted by the most recently updated.
  * @param {Object} filter - The filter criteria to apply.
  * @param {Object} [_sortOptions] - Optional sort parameters.
- * @returns {Promise<Array<IMongoFile>>} A promise that resolves to an array of file documents.
+ * @param {Object|String} [selectFields={ text: 0 }] - Fields to include/exclude in the query results.
+ *                                                     Default excludes the 'text' field.
+ * @returns {Promise<Array<MongoFile>>} A promise that resolves to an array of file documents.
  */
-const getFiles = async (filter, _sortOptions) => {
+const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }) => {
   const sortOptions = { updatedAt: -1, ..._sortOptions };
-  return await File.find(filter).sort(sortOptions).lean();
+  return await File.find(filter).select(selectFields).sort(sortOptions).lean();
 };
 
+/**
+ * Retrieves tool files (files that are embedded or have a fileIdentifier) from an array of file IDs
+ * @param {string[]} fileIds - Array of file_id strings to search for
+ * @param {Set<EToolResources>} toolResourceSet - Optional filter for tool resources
+ * @returns {Promise<Array<MongoFile>>} Files that match the criteria
+ */
+const getToolFilesByIds = async (fileIds, toolResourceSet) => {
+  if (!fileIds || !fileIds.length) {
+    return [];
+  }
+
+  try {
+    const filter = {
+      file_id: { $in: fileIds },
+    };
+
+    if (toolResourceSet.size) {
+      filter.$or = [];
+    }
+
+    if (toolResourceSet.has(EToolResources.file_search)) {
+      filter.$or.push({ embedded: true });
+    }
+    if (toolResourceSet.has(EToolResources.execute_code)) {
+      filter.$or.push({ 'metadata.fileIdentifier': { $exists: true } });
+    }
+
+    const selectFields = { text: 0 };
+    const sortOptions = { updatedAt: -1 };
+
+    return await getFiles(filter, sortOptions, selectFields);
+  } catch (error) {
+    logger.error('[getToolFilesByIds] Error retrieving tool files:', error);
+    throw new Error('Error retrieving tool files');
+  }
+};
+
 /**
  * Creates a new file with a TTL of 1 hour.
- * @param {IMongoFile} data - The file data to be created, must contain file_id.
+ * @param {MongoFile} data - The file data to be created, must contain file_id.
  * @param {boolean} disableTTL - Whether to disable the TTL.
- * @returns {Promise<IMongoFile>} A promise that resolves to the created file document.
+ * @returns {Promise<MongoFile>} A promise that resolves to the created file document.
  */
 const createFile = async (data, disableTTL) => {
   const fileData = {
@@ -48,8 +89,8 @@ const createFile = async (data, disableTTL) => {
 
 /**
  * Updates a file identified by file_id with new data and removes the TTL.
- * @param {IMongoFile} data - The data to update, must contain file_id.
- * @returns {Promise<IMongoFile>} A promise that resolves to the updated file document.
+ * @param {MongoFile} data - The data to update, must contain file_id.
+ * @returns {Promise<MongoFile>} A promise that resolves to the updated file document.
 */
 const updateFile = async (data) => {
   const { file_id, ...update } = data;
@@ -62,8 +103,8 @@ const updateFile = async (data) => {
 
 /**
  * Increments the usage of a file identified by file_id.
- * @param {IMongoFile} data - The data to update, must contain file_id and the increment value for usage.
- * @returns {Promise<IMongoFile>} A promise that resolves to the updated file document.
+ * @param {MongoFile} data - The data to update, must contain file_id and the increment value for usage.
+ * @returns {Promise<MongoFile>} A promise that resolves to the updated file document.
 */
 const updateFileUsage = async (data) => {
   const { file_id, inc = 1 } = data;
@@ -77,7 +118,7 @@ const updateFileUsage = async (data) => {
 /**
  * Deletes a file identified by file_id.
  * @param {string} file_id - The unique identifier of the file to delete.
- * @returns {Promise<IMongoFile>} A promise that resolves to the deleted file document or null.
+ * @returns {Promise<MongoFile>} A promise that resolves to the deleted file document or null.
 */
 const deleteFile = async (file_id) => {
   return await File.findOneAndDelete({ file_id }).lean();
@@ -86,7 +127,7 @@ const deleteFile = async (file_id) => {
 /**
  * Deletes a file identified by a filter.
  * @param {object} filter - The filter criteria to apply.
- * @returns {Promise<IMongoFile>} A promise that resolves to the deleted file document or null.
+ * @returns {Promise<MongoFile>} A promise that resolves to the deleted file document or null.
 */
 const deleteFileByFilter = async (filter) => {
   return await File.findOneAndDelete(filter).lean();
@@ -105,14 +146,38 @@ const deleteFiles = async (file_ids, user) => {
   return await File.deleteMany(deleteQuery);
 };
 
+/**
+ * Batch updates files with new signed URLs in MongoDB
+ *
+ * @param {MongoFile[]} updates - Array of updates in the format { file_id, filepath }
+ * @returns {Promise<void>}
+ */
+async function batchUpdateFiles(updates) {
+  if (!updates || updates.length === 0) {
+    return;
+  }
+
+  const bulkOperations = updates.map((update) => ({
+    updateOne: {
+      filter: { file_id: update.file_id },
+      update: { $set: { filepath: update.filepath } },
+    },
+  }));
+
+  const result = await File.bulkWrite(bulkOperations);
+  logger.info(`Updated ${result.modifiedCount} files with new S3 URLs`);
+}
+
 module.exports = {
   File,
   findFileById,
   getFiles,
+  getToolFilesByIds,
   createFile,
   updateFile,
   updateFileUsage,
   deleteFile,
   deleteFiles,
   deleteFileByFilter,
+  batchUpdateFiles,
 };
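
Note: batchUpdateFiles collapses N filepath updates into a single bulkWrite round trip; each entry becomes one updateOne op with a file_id filter and a $set on filepath. An assumed call site (the require path, IDs, and URLs are placeholders):

const { batchUpdateFiles } = require('~/models/File'); // assumed path

async function refreshSignedUrls() {
  await batchUpdateFiles([
    { file_id: 'file-123', filepath: 'https://bucket.s3.amazonaws.com/file-123?X-Amz-Signature=abc' },
    { file_id: 'file-456', filepath: 'https://bucket.s3.amazonaws.com/file-456?X-Amz-Signature=def' },
  ]);
}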
@@ -61,6 +61,14 @@ async function saveMessage(req, params, metadata) {
     update.expiredAt = null;
   }
 
+  if (update.tokenCount != null && isNaN(update.tokenCount)) {
+    logger.warn(
+      `Resetting invalid \`tokenCount\` for message \`${params.messageId}\`: ${update.tokenCount}`,
+    );
+    logger.info(`---\`saveMessage\` context: ${metadata?.context}`);
+    update.tokenCount = 0;
+  }
+
   const message = await Message.findOneAndUpdate(
     { messageId: params.messageId, user: req.user.id },
     update,
@@ -71,7 +79,44 @@ async function saveMessage(req, params, metadata) {
   } catch (err) {
     logger.error('Error saving message:', err);
     logger.info(`---\`saveMessage\` context: ${metadata?.context}`);
-    throw err;
+
+    // Check if this is a duplicate key error (MongoDB error code 11000)
+    if (err.code === 11000 && err.message.includes('duplicate key error')) {
+      // Log the duplicate key error but don't crash the application
+      logger.warn(`Duplicate messageId detected: ${params.messageId}. Continuing execution.`);
+
+      try {
+        // Try to find the existing message with this ID
+        const existingMessage = await Message.findOne({
+          messageId: params.messageId,
+          user: req.user.id,
+        });
+
+        // If we found it, return it
+        if (existingMessage) {
+          return existingMessage.toObject();
+        }
+
+        // If we can't find it (unlikely but possible in race conditions)
+        return {
+          ...params,
+          messageId: params.messageId,
+          user: req.user.id,
+        };
+      } catch (findError) {
+        // If the findOne also fails, log it but don't crash
+        logger.warn(
+          `Could not retrieve existing message with ID ${params.messageId}: ${findError.message}`,
+        );
+        return {
+          ...params,
+          messageId: params.messageId,
+          user: req.user.id,
+        };
+      }
+    }
+
+    throw err; // Re-throw other errors
   }
 }
 
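
Note: the new catch branch treats MongoDB's E11000 duplicate-key error as a benign race (two writers saving the same messageId) and resolves to the existing document instead of throwing; every other error still propagates. The guard matches an error of roughly this shape (values illustrative):

const err = new Error(
  'E11000 duplicate key error collection: test.messages index: messageId_1 dup key: { ... }',
);
err.code = 11000;

const isDuplicate = err.code === 11000 && err.message.includes('duplicate key error');
console.log(isDuplicate); // true, so saveMessage returns the existing message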
@@ -4,13 +4,8 @@ const {
   SystemRoles,
   roleDefaults,
   PermissionTypes,
+  permissionsSchema,
   removeNullishValues,
-  agentPermissionsSchema,
-  promptPermissionsSchema,
-  runCodePermissionsSchema,
-  bookmarkPermissionsSchema,
-  multiConvoPermissionsSchema,
-  temporaryChatPermissionsSchema,
 } = require('librechat-data-provider');
 const getLogStores = require('~/cache/getLogStores');
 const { roleSchema } = require('@librechat/data-schemas');
@@ -20,15 +15,16 @@ const Role = mongoose.model('Role', roleSchema);
 
 /**
  * Retrieve a role by name and convert the found role document to a plain object.
- * If the role with the given name doesn't exist and the name is a system defined role, create it and return the lean version.
+ * If the role with the given name doesn't exist and the name is a system defined role,
+ * create it and return the lean version.
  *
 * @param {string} roleName - The name of the role to find or create.
 * @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
 * @returns {Promise<Object>} A plain object representing the role document.
  */
 const getRoleByName = async function (roleName, fieldsToSelect = null) {
-  const cache = getLogStores(CacheKeys.ROLES);
   try {
+    const cache = getLogStores(CacheKeys.ROLES);
     const cachedRole = await cache.get(roleName);
     if (cachedRole) {
       return cachedRole;
@@ -40,8 +36,7 @@ const getRoleByName = async function (roleName, fieldsToSelect = null) {
     let role = await query.lean().exec();
 
     if (!role && SystemRoles[roleName]) {
-      role = roleDefaults[roleName];
-      role = await new Role(role).save();
+      role = await new Role(roleDefaults[roleName]).save();
       await cache.set(roleName, role);
       return role.toObject();
     }
@@ -60,8 +55,8 @@ const getRoleByName = async function (roleName, fieldsToSelect = null) {
  * @returns {Promise<TRole>} Updated role document.
  */
 const updateRoleByName = async function (roleName, updates) {
-  const cache = getLogStores(CacheKeys.ROLES);
   try {
+    const cache = getLogStores(CacheKeys.ROLES);
     const role = await Role.findOneAndUpdate(
       { name: roleName },
       { $set: updates },
@@ -77,29 +72,20 @@ const updateRoleByName = async function (roleName, updates) {
   }
 };
 
-const permissionSchemas = {
-  [PermissionTypes.AGENTS]: agentPermissionsSchema,
-  [PermissionTypes.PROMPTS]: promptPermissionsSchema,
-  [PermissionTypes.BOOKMARKS]: bookmarkPermissionsSchema,
-  [PermissionTypes.MULTI_CONVO]: multiConvoPermissionsSchema,
-  [PermissionTypes.TEMPORARY_CHAT]: temporaryChatPermissionsSchema,
-  [PermissionTypes.RUN_CODE]: runCodePermissionsSchema,
-};
-
 /**
  * Updates access permissions for a specific role and multiple permission types.
- * @param {SystemRoles} roleName - The role to update.
+ * @param {string} roleName - The role to update.
  * @param {Object.<PermissionTypes, Object.<Permissions, boolean>>} permissionsUpdate - Permissions to update and their values.
  */
 async function updateAccessPermissions(roleName, permissionsUpdate) {
-  // Filter and clean the permission updates based on our schema definition.
   const updates = {};
   for (const [permissionType, permissions] of Object.entries(permissionsUpdate)) {
-    if (permissionSchemas[permissionType]) {
+    if (permissionsSchema.shape && permissionsSchema.shape[permissionType]) {
       updates[permissionType] = removeNullishValues(permissions);
     }
   }
 
-  if (Object.keys(updates).length === 0) {
+  if (!Object.keys(updates).length) {
     return;
   }
 
@@ -109,26 +95,75 @@ async function updateAccessPermissions(roleName, permissionsUpdate) {
     return;
   }
 
-  const updatedPermissions = {};
+  const currentPermissions = role.permissions || {};
+  const updatedPermissions = { ...currentPermissions };
   let hasChanges = false;
 
+  const unsetFields = {};
+  const permissionTypes = Object.keys(permissionsSchema.shape || {});
+  for (const permType of permissionTypes) {
+    if (role[permType] && typeof role[permType] === 'object') {
+      logger.info(
+        `Migrating '${roleName}' role from old schema: found '${permType}' at top level`,
+      );
+
+      updatedPermissions[permType] = {
+        ...updatedPermissions[permType],
+        ...role[permType],
+      };
+
+      unsetFields[permType] = 1;
+      hasChanges = true;
+    }
+  }
+
+  // Process the current updates
   for (const [permissionType, permissions] of Object.entries(updates)) {
-    const currentPermissions = role[permissionType] || {};
-    updatedPermissions[permissionType] = { ...currentPermissions };
+    const currentTypePermissions = currentPermissions[permissionType] || {};
+    updatedPermissions[permissionType] = { ...currentTypePermissions };
 
     for (const [permission, value] of Object.entries(permissions)) {
-      if (currentPermissions[permission] !== value) {
+      if (currentTypePermissions[permission] !== value) {
         updatedPermissions[permissionType][permission] = value;
         hasChanges = true;
         logger.info(
-          `Updating '${roleName}' role ${permissionType} '${permission}' permission from ${currentPermissions[permission]} to: ${value}`,
+          `Updating '${roleName}' role permission '${permissionType}' '${permission}' from ${currentTypePermissions[permission]} to: ${value}`,
         );
       }
     }
   }
 
   if (hasChanges) {
-    await updateRoleByName(roleName, updatedPermissions);
+    const updateObj = { permissions: updatedPermissions };
+
+    if (Object.keys(unsetFields).length > 0) {
+      logger.info(
+        `Unsetting old schema fields for '${roleName}' role: ${Object.keys(unsetFields).join(', ')}`,
+      );
+
+      try {
+        await Role.updateOne(
+          { name: roleName },
+          {
+            $set: updateObj,
+            $unset: unsetFields,
+          },
+        );
+
+        const cache = getLogStores(CacheKeys.ROLES);
+        const updatedRole = await Role.findOne({ name: roleName }).select('-__v').lean().exec();
+        await cache.set(roleName, updatedRole);
+
+        logger.info(`Updated role '${roleName}' and removed old schema fields`);
+      } catch (updateError) {
+        logger.error(`Error during role migration update: ${updateError.message}`);
+        throw updateError;
+      }
+    } else {
+      // Standard update if no migration needed
+      await updateRoleByName(roleName, updateObj);
+    }
+
     logger.info(`Updated '${roleName}' role permissions`);
   } else {
     logger.info(`No changes needed for '${roleName}' role permissions`);
@@ -146,34 +181,111 @@ async function updateAccessPermissions(roleName, permissionsUpdate) {
  * @returns {Promise<void>}
  */
 const initializeRoles = async function () {
-  const defaultRoles = [SystemRoles.ADMIN, SystemRoles.USER];
-
-  for (const roleName of defaultRoles) {
+  for (const roleName of [SystemRoles.ADMIN, SystemRoles.USER]) {
     let role = await Role.findOne({ name: roleName });
+    const defaultPerms = roleDefaults[roleName].permissions;
 
     if (!role) {
-      // Create new role if it doesn't exist
+      // Create new role if it doesn't exist.
       role = new Role(roleDefaults[roleName]);
     } else {
-      // Add missing permission types
-      let isUpdated = false;
-      for (const permType of Object.values(PermissionTypes)) {
-        if (!role[permType]) {
-          role[permType] = roleDefaults[roleName][permType];
-          isUpdated = true;
+      // Ensure role.permissions is defined.
+      role.permissions = role.permissions || {};
+      // For each permission type in defaults, add it if missing.
+      for (const permType of Object.keys(defaultPerms)) {
+        if (role.permissions[permType] == null) {
+          role.permissions[permType] = defaultPerms[permType];
         }
       }
-      if (isUpdated) {
-        await role.save();
-      }
     }
+    await role.save();
   }
 };
 
+/**
+ * Migrates roles from old schema to new schema structure.
+ * This can be called directly to fix existing roles.
+ *
+ * @param {string} [roleName] - Optional specific role to migrate. If not provided, migrates all roles.
+ * @returns {Promise<number>} Number of roles migrated.
+ */
+const migrateRoleSchema = async function (roleName) {
+  try {
+    // Get roles to migrate
+    let roles;
+    if (roleName) {
+      const role = await Role.findOne({ name: roleName });
+      roles = role ? [role] : [];
+    } else {
+      roles = await Role.find({});
+    }
+
+    logger.info(`Migrating ${roles.length} roles to new schema structure`);
+    let migratedCount = 0;
+
+    for (const role of roles) {
+      const permissionTypes = Object.keys(permissionsSchema.shape || {});
+      const unsetFields = {};
+      let hasOldSchema = false;
+
+      // Check for old schema fields
+      for (const permType of permissionTypes) {
+        if (role[permType] && typeof role[permType] === 'object') {
+          hasOldSchema = true;
+
+          // Ensure permissions object exists
+          role.permissions = role.permissions || {};
+
+          // Migrate permissions from old location to new
+          role.permissions[permType] = {
+            ...role.permissions[permType],
+            ...role[permType],
+          };
+
+          // Mark field for removal
+          unsetFields[permType] = 1;
+        }
+      }
+
+      if (hasOldSchema) {
+        try {
+          logger.info(`Migrating role '${role.name}' from old schema structure`);
+
+          // Simple update operation
+          await Role.updateOne(
+            { _id: role._id },
+            {
+              $set: { permissions: role.permissions },
+              $unset: unsetFields,
+            },
+          );
+
+          // Refresh cache
+          const cache = getLogStores(CacheKeys.ROLES);
+          const updatedRole = await Role.findById(role._id).lean().exec();
+          await cache.set(role.name, updatedRole);
+
+          migratedCount++;
+          logger.info(`Migrated role '${role.name}'`);
+        } catch (error) {
+          logger.error(`Failed to migrate role '${role.name}': ${error.message}`);
+        }
+      }
+    }
+
+    logger.info(`Migration complete: ${migratedCount} roles migrated`);
+    return migratedCount;
+  } catch (error) {
+    logger.error(`Role schema migration failed: ${error.message}`);
+    throw error;
+  }
+};
+
 module.exports = {
   Role,
   getRoleByName,
   initializeRoles,
   updateRoleByName,
   updateAccessPermissions,
+  migrateRoleSchema,
 };
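
Note: updateAccessPermissions and migrateRoleSchema both normalize the same shape change: permission maps that previously sat at the top level of the role document now nest under a single permissions field, and the old top-level keys are $unset in the same update. Roughly:

// Before (old schema):
//   { name: 'USER', PROMPTS: { USE: true }, BOOKMARKS: { USE: true } }
// After (new schema):
//   { name: 'USER', permissions: { PROMPTS: { USE: true }, BOOKMARKS: { USE: true } } }

// One-off migration using the new export; resolves to the number of roles migrated:
const { migrateRoleSchema } = require('~/models/Role');
migrateRoleSchema().then((count) => console.log(`Migrated ${count} roles`));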
@@ -2,22 +2,21 @@ const mongoose = require('mongoose');
 const { MongoMemoryServer } = require('mongodb-memory-server');
 const {
   SystemRoles,
-  PermissionTypes,
-  roleDefaults,
   Permissions,
+  roleDefaults,
+  PermissionTypes,
 } = require('librechat-data-provider');
-const { updateAccessPermissions, initializeRoles } = require('~/models/Role');
+const { Role, getRoleByName, updateAccessPermissions, initializeRoles } = require('~/models/Role');
 const getLogStores = require('~/cache/getLogStores');
-const { Role } = require('~/models/Role');
 
 // Mock the cache
-jest.mock('~/cache/getLogStores', () => {
-  return jest.fn().mockReturnValue({
+jest.mock('~/cache/getLogStores', () =>
+  jest.fn().mockReturnValue({
     get: jest.fn(),
     set: jest.fn(),
     del: jest.fn(),
-  });
-});
+  }),
+);
 
 let mongoServer;
 
@@ -41,10 +40,12 @@ describe('updateAccessPermissions', () => {
   it('should update permissions when changes are needed', async () => {
     await new Role({
       name: SystemRoles.USER,
-      [PermissionTypes.PROMPTS]: {
-        CREATE: true,
-        USE: true,
-        SHARED_GLOBAL: false,
+      permissions: {
+        [PermissionTypes.PROMPTS]: {
+          CREATE: true,
+          USE: true,
+          SHARED_GLOBAL: false,
+        },
       },
     }).save();
 
@@ -56,8 +57,8 @@ describe('updateAccessPermissions', () => {
       },
     });
 
-    const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-    expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
+    const updatedRole = await getRoleByName(SystemRoles.USER);
+    expect(updatedRole.permissions[PermissionTypes.PROMPTS]).toEqual({
       CREATE: true,
       USE: true,
       SHARED_GLOBAL: true,
@@ -67,10 +68,12 @@ describe('updateAccessPermissions', () => {
   it('should not update permissions when no changes are needed', async () => {
     await new Role({
       name: SystemRoles.USER,
-      [PermissionTypes.PROMPTS]: {
-        CREATE: true,
-        USE: true,
-        SHARED_GLOBAL: false,
+      permissions: {
+        [PermissionTypes.PROMPTS]: {
+          CREATE: true,
+          USE: true,
+          SHARED_GLOBAL: false,
+        },
       },
     }).save();
 
@@ -82,8 +85,8 @@ describe('updateAccessPermissions', () => {
       },
     });
 
-    const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-    expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
+    const updatedRole = await getRoleByName(SystemRoles.USER);
+    expect(updatedRole.permissions[PermissionTypes.PROMPTS]).toEqual({
      CREATE: true,
      USE: true,
      SHARED_GLOBAL: false,
@@ -92,11 +95,8 @@ describe('updateAccessPermissions', () => {
 
   it('should handle non-existent roles', async () => {
     await updateAccessPermissions('NON_EXISTENT_ROLE', {
-      [PermissionTypes.PROMPTS]: {
-        CREATE: true,
-      },
+      [PermissionTypes.PROMPTS]: { CREATE: true },
     });
 
     const role = await Role.findOne({ name: 'NON_EXISTENT_ROLE' });
     expect(role).toBeNull();
   });
@@ -104,21 +104,21 @@ describe('updateAccessPermissions', () => {
   it('should update only specified permissions', async () => {
     await new Role({
       name: SystemRoles.USER,
-      [PermissionTypes.PROMPTS]: {
-        CREATE: true,
-        USE: true,
-        SHARED_GLOBAL: false,
+      permissions: {
+        [PermissionTypes.PROMPTS]: {
+          CREATE: true,
+          USE: true,
+          SHARED_GLOBAL: false,
+        },
       },
     }).save();
 
     await updateAccessPermissions(SystemRoles.USER, {
-      [PermissionTypes.PROMPTS]: {
-        SHARED_GLOBAL: true,
-      },
+      [PermissionTypes.PROMPTS]: { SHARED_GLOBAL: true },
     });
 
-    const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-    expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
+    const updatedRole = await getRoleByName(SystemRoles.USER);
+    expect(updatedRole.permissions[PermissionTypes.PROMPTS]).toEqual({
       CREATE: true,
       USE: true,
       SHARED_GLOBAL: true,
@@ -128,21 +128,21 @@ describe('updateAccessPermissions', () => {
   it('should handle partial updates', async () => {
     await new Role({
       name: SystemRoles.USER,
-      [PermissionTypes.PROMPTS]: {
-        CREATE: true,
-        USE: true,
-        SHARED_GLOBAL: false,
+      permissions: {
+        [PermissionTypes.PROMPTS]: {
+          CREATE: true,
+          USE: true,
+          SHARED_GLOBAL: false,
+        },
       },
     }).save();
 
     await updateAccessPermissions(SystemRoles.USER, {
-      [PermissionTypes.PROMPTS]: {
-        USE: false,
-      },
+      [PermissionTypes.PROMPTS]: { USE: false },
     });
 
-    const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-    expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
+    const updatedRole = await getRoleByName(SystemRoles.USER);
+    expect(updatedRole.permissions[PermissionTypes.PROMPTS]).toEqual({
       CREATE: true,
       USE: false,
       SHARED_GLOBAL: false,
@@ -152,13 +152,9 @@ describe('updateAccessPermissions', () => {
   it('should update multiple permission types at once', async () => {
     await new Role({
       name: SystemRoles.USER,
-      [PermissionTypes.PROMPTS]: {
-        CREATE: true,
-        USE: true,
-        SHARED_GLOBAL: false,
-      },
-      [PermissionTypes.BOOKMARKS]: {
-        USE: true,
+      permissions: {
+        [PermissionTypes.PROMPTS]: { CREATE: true, USE: true, SHARED_GLOBAL: false },
+        [PermissionTypes.BOOKMARKS]: { USE: true },
       },
     }).save();
 
@@ -167,24 +163,20 @@ describe('updateAccessPermissions', () => {
       [PermissionTypes.BOOKMARKS]: { USE: false },
     });
 
-    const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-    expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
+    const updatedRole = await getRoleByName(SystemRoles.USER);
+    expect(updatedRole.permissions[PermissionTypes.PROMPTS]).toEqual({
       CREATE: true,
       USE: false,
       SHARED_GLOBAL: true,
     });
-    expect(updatedRole[PermissionTypes.BOOKMARKS]).toEqual({
-      USE: false,
-    });
+    expect(updatedRole.permissions[PermissionTypes.BOOKMARKS]).toEqual({ USE: false });
   });
 
   it('should handle updates for a single permission type', async () => {
     await new Role({
       name: SystemRoles.USER,
-      [PermissionTypes.PROMPTS]: {
-        CREATE: true,
-        USE: true,
-        SHARED_GLOBAL: false,
+      permissions: {
+        [PermissionTypes.PROMPTS]: { CREATE: true, USE: true, SHARED_GLOBAL: false },
       },
     }).save();
 
@@ -192,8 +184,8 @@ describe('updateAccessPermissions', () => {
       [PermissionTypes.PROMPTS]: { USE: false, SHARED_GLOBAL: true },
     });
 
-    const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-    expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
+    const updatedRole = await getRoleByName(SystemRoles.USER);
+    expect(updatedRole.permissions[PermissionTypes.PROMPTS]).toEqual({
       CREATE: true,
       USE: false,
       SHARED_GLOBAL: true,
@@ -203,33 +195,25 @@ describe('updateAccessPermissions', () => {
   it('should update MULTI_CONVO permissions', async () => {
     await new Role({
       name: SystemRoles.USER,
-      [PermissionTypes.MULTI_CONVO]: {
-        USE: false,
+      permissions: {
+        [PermissionTypes.MULTI_CONVO]: { USE: false },
       },
     }).save();
 
     await updateAccessPermissions(SystemRoles.USER, {
-      [PermissionTypes.MULTI_CONVO]: {
-        USE: true,
-      },
+      [PermissionTypes.MULTI_CONVO]: { USE: true },
     });
 
-    const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-    expect(updatedRole[PermissionTypes.MULTI_CONVO]).toEqual({
-      USE: true,
-    });
+    const updatedRole = await getRoleByName(SystemRoles.USER);
+    expect(updatedRole.permissions[PermissionTypes.MULTI_CONVO]).toEqual({ USE: true });
   });
 
   it('should update MULTI_CONVO permissions along with other permission types', async () => {
     await new Role({
       name: SystemRoles.USER,
-      [PermissionTypes.PROMPTS]: {
-        CREATE: true,
-        USE: true,
-        SHARED_GLOBAL: false,
-      },
-      [PermissionTypes.MULTI_CONVO]: {
-        USE: false,
+      permissions: {
+        [PermissionTypes.PROMPTS]: { CREATE: true, USE: true, SHARED_GLOBAL: false },
+        [PermissionTypes.MULTI_CONVO]: { USE: false },
       },
     }).save();
 
@@ -238,35 +222,29 @@ describe('updateAccessPermissions', () => {
       [PermissionTypes.MULTI_CONVO]: { USE: true },
     });
 
-    const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-    expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
+    const updatedRole = await getRoleByName(SystemRoles.USER);
+    expect(updatedRole.permissions[PermissionTypes.PROMPTS]).toEqual({
       CREATE: true,
       USE: true,
       SHARED_GLOBAL: true,
     });
-    expect(updatedRole[PermissionTypes.MULTI_CONVO]).toEqual({
-      USE: true,
-    });
+    expect(updatedRole.permissions[PermissionTypes.MULTI_CONVO]).toEqual({ USE: true });
   });
 
   it('should not update MULTI_CONVO permissions when no changes are needed', async () => {
     await new Role({
       name: SystemRoles.USER,
-      [PermissionTypes.MULTI_CONVO]: {
-        USE: true,
+      permissions: {
+        [PermissionTypes.MULTI_CONVO]: { USE: true },
      },
     }).save();
 
     await updateAccessPermissions(SystemRoles.USER, {
-      [PermissionTypes.MULTI_CONVO]: {
-        USE: true,
-      },
+      [PermissionTypes.MULTI_CONVO]: { USE: true },
     });
 
-    const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-    expect(updatedRole[PermissionTypes.MULTI_CONVO]).toEqual({
-      USE: true,
-    });
+    const updatedRole = await getRoleByName(SystemRoles.USER);
+    expect(updatedRole.permissions[PermissionTypes.MULTI_CONVO]).toEqual({ USE: true });
   });
 });
 
@@ -278,65 +256,69 @@ describe('initializeRoles', () => {
   it('should create default roles if they do not exist', async () => {
     await initializeRoles();
 
-    const adminRole = await Role.findOne({ name: SystemRoles.ADMIN }).lean();
-    const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
+    const adminRole = await getRoleByName(SystemRoles.ADMIN);
+    const userRole = await getRoleByName(SystemRoles.USER);
 
     expect(adminRole).toBeTruthy();
     expect(userRole).toBeTruthy();
 
-    // Check if all permission types exist
+    // Check if all permission types exist in the permissions field
     Object.values(PermissionTypes).forEach((permType) => {
-      expect(adminRole[permType]).toBeDefined();
-      expect(userRole[permType]).toBeDefined();
+      expect(adminRole.permissions[permType]).toBeDefined();
+      expect(userRole.permissions[permType]).toBeDefined();
     });
 
-    // Check if permissions match defaults (example for ADMIN role)
-    expect(adminRole[PermissionTypes.PROMPTS].SHARED_GLOBAL).toBe(true);
-    expect(adminRole[PermissionTypes.BOOKMARKS].USE).toBe(true);
-    expect(adminRole[PermissionTypes.AGENTS].CREATE).toBe(true);
+    // Example: Check default values for ADMIN role
+    expect(adminRole.permissions[PermissionTypes.PROMPTS].SHARED_GLOBAL).toBe(true);
+    expect(adminRole.permissions[PermissionTypes.BOOKMARKS].USE).toBe(true);
+    expect(adminRole.permissions[PermissionTypes.AGENTS].CREATE).toBe(true);
   });
 
   it('should not modify existing permissions for existing roles', async () => {
     const customUserRole = {
       name: SystemRoles.USER,
-      [PermissionTypes.PROMPTS]: {
-        [Permissions.USE]: false,
-        [Permissions.CREATE]: true,
-        [Permissions.SHARED_GLOBAL]: true,
-      },
-      [PermissionTypes.BOOKMARKS]: {
-        [Permissions.USE]: false,
+      permissions: {
+        [PermissionTypes.PROMPTS]: {
+          [Permissions.USE]: false,
+          [Permissions.CREATE]: true,
+          [Permissions.SHARED_GLOBAL]: true,
+        },
+        [PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
       },
     };
 
     await new Role(customUserRole).save();
 
     await initializeRoles();
 
-    const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
-
-    expect(userRole[PermissionTypes.PROMPTS]).toEqual(customUserRole[PermissionTypes.PROMPTS]);
-    expect(userRole[PermissionTypes.BOOKMARKS]).toEqual(customUserRole[PermissionTypes.BOOKMARKS]);
-    expect(userRole[PermissionTypes.AGENTS]).toBeDefined();
+    const userRole = await getRoleByName(SystemRoles.USER);
+    expect(userRole.permissions[PermissionTypes.PROMPTS]).toEqual(
|
||||
customUserRole.permissions[PermissionTypes.PROMPTS],
|
||||
);
|
||||
expect(userRole.permissions[PermissionTypes.BOOKMARKS]).toEqual(
|
||||
customUserRole.permissions[PermissionTypes.BOOKMARKS],
|
||||
);
|
||||
expect(userRole.permissions[PermissionTypes.AGENTS]).toBeDefined();
|
||||
});
|
||||
|
||||
it('should add new permission types to existing roles', async () => {
|
||||
const partialUserRole = {
|
||||
name: SystemRoles.USER,
|
||||
[PermissionTypes.PROMPTS]: roleDefaults[SystemRoles.USER][PermissionTypes.PROMPTS],
|
||||
[PermissionTypes.BOOKMARKS]: roleDefaults[SystemRoles.USER][PermissionTypes.BOOKMARKS],
|
||||
permissions: {
|
||||
[PermissionTypes.PROMPTS]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.PROMPTS],
|
||||
[PermissionTypes.BOOKMARKS]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.BOOKMARKS],
|
||||
},
|
||||
};
|
||||
|
||||
await new Role(partialUserRole).save();
|
||||
|
||||
await initializeRoles();
|
||||
|
||||
const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
|
||||
|
||||
expect(userRole[PermissionTypes.AGENTS]).toBeDefined();
|
||||
expect(userRole[PermissionTypes.AGENTS].CREATE).toBeDefined();
|
||||
expect(userRole[PermissionTypes.AGENTS].USE).toBeDefined();
|
||||
expect(userRole[PermissionTypes.AGENTS].SHARED_GLOBAL).toBeDefined();
|
||||
const userRole = await getRoleByName(SystemRoles.USER);
|
||||
expect(userRole.permissions[PermissionTypes.AGENTS]).toBeDefined();
|
||||
expect(userRole.permissions[PermissionTypes.AGENTS].CREATE).toBeDefined();
|
||||
expect(userRole.permissions[PermissionTypes.AGENTS].USE).toBeDefined();
|
||||
expect(userRole.permissions[PermissionTypes.AGENTS].SHARED_GLOBAL).toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle multiple runs without duplicating or modifying data', async () => {
|
||||
|
|
@ -349,72 +331,73 @@ describe('initializeRoles', () => {
|
|||
expect(adminRoles).toHaveLength(1);
|
||||
expect(userRoles).toHaveLength(1);
|
||||
|
||||
const adminRole = adminRoles[0].toObject();
|
||||
const userRole = userRoles[0].toObject();
|
||||
|
||||
// Check if all permission types exist
|
||||
const adminPerms = adminRoles[0].toObject().permissions;
|
||||
const userPerms = userRoles[0].toObject().permissions;
|
||||
Object.values(PermissionTypes).forEach((permType) => {
|
||||
expect(adminRole[permType]).toBeDefined();
|
||||
expect(userRole[permType]).toBeDefined();
|
||||
expect(adminPerms[permType]).toBeDefined();
|
||||
expect(userPerms[permType]).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
it('should update roles with missing permission types from roleDefaults', async () => {
|
||||
const partialAdminRole = {
|
||||
name: SystemRoles.ADMIN,
|
||||
[PermissionTypes.PROMPTS]: {
|
||||
[Permissions.USE]: false,
|
||||
[Permissions.CREATE]: false,
|
||||
[Permissions.SHARED_GLOBAL]: false,
|
||||
permissions: {
|
||||
[PermissionTypes.PROMPTS]: {
|
||||
[Permissions.USE]: false,
|
||||
[Permissions.CREATE]: false,
|
||||
[Permissions.SHARED_GLOBAL]: false,
|
||||
},
|
||||
[PermissionTypes.BOOKMARKS]:
|
||||
roleDefaults[SystemRoles.ADMIN].permissions[PermissionTypes.BOOKMARKS],
|
||||
},
|
||||
[PermissionTypes.BOOKMARKS]: roleDefaults[SystemRoles.ADMIN][PermissionTypes.BOOKMARKS],
|
||||
};
|
||||
|
||||
await new Role(partialAdminRole).save();
|
||||
|
||||
await initializeRoles();
|
||||
|
||||
const adminRole = await Role.findOne({ name: SystemRoles.ADMIN }).lean();
|
||||
|
||||
expect(adminRole[PermissionTypes.PROMPTS]).toEqual(partialAdminRole[PermissionTypes.PROMPTS]);
|
||||
expect(adminRole[PermissionTypes.AGENTS]).toBeDefined();
|
||||
expect(adminRole[PermissionTypes.AGENTS].CREATE).toBeDefined();
|
||||
expect(adminRole[PermissionTypes.AGENTS].USE).toBeDefined();
|
||||
expect(adminRole[PermissionTypes.AGENTS].SHARED_GLOBAL).toBeDefined();
|
||||
const adminRole = await getRoleByName(SystemRoles.ADMIN);
|
||||
expect(adminRole.permissions[PermissionTypes.PROMPTS]).toEqual(
|
||||
partialAdminRole.permissions[PermissionTypes.PROMPTS],
|
||||
);
|
||||
expect(adminRole.permissions[PermissionTypes.AGENTS]).toBeDefined();
|
||||
expect(adminRole.permissions[PermissionTypes.AGENTS].CREATE).toBeDefined();
|
||||
expect(adminRole.permissions[PermissionTypes.AGENTS].USE).toBeDefined();
|
||||
expect(adminRole.permissions[PermissionTypes.AGENTS].SHARED_GLOBAL).toBeDefined();
|
||||
});
|
||||
|
||||
it('should include MULTI_CONVO permissions when creating default roles', async () => {
|
||||
await initializeRoles();
|
||||
|
||||
const adminRole = await Role.findOne({ name: SystemRoles.ADMIN }).lean();
|
||||
const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
|
||||
const adminRole = await getRoleByName(SystemRoles.ADMIN);
|
||||
const userRole = await getRoleByName(SystemRoles.USER);
|
||||
|
||||
expect(adminRole[PermissionTypes.MULTI_CONVO]).toBeDefined();
|
||||
expect(userRole[PermissionTypes.MULTI_CONVO]).toBeDefined();
|
||||
|
||||
// Check if MULTI_CONVO permissions match defaults
|
||||
expect(adminRole[PermissionTypes.MULTI_CONVO].USE).toBe(
|
||||
roleDefaults[SystemRoles.ADMIN][PermissionTypes.MULTI_CONVO].USE,
|
||||
expect(adminRole.permissions[PermissionTypes.MULTI_CONVO]).toBeDefined();
|
||||
expect(userRole.permissions[PermissionTypes.MULTI_CONVO]).toBeDefined();
|
||||
expect(adminRole.permissions[PermissionTypes.MULTI_CONVO].USE).toBe(
|
||||
roleDefaults[SystemRoles.ADMIN].permissions[PermissionTypes.MULTI_CONVO].USE,
|
||||
);
|
||||
expect(userRole[PermissionTypes.MULTI_CONVO].USE).toBe(
|
||||
roleDefaults[SystemRoles.USER][PermissionTypes.MULTI_CONVO].USE,
|
||||
expect(userRole.permissions[PermissionTypes.MULTI_CONVO].USE).toBe(
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.MULTI_CONVO].USE,
|
||||
);
|
||||
});
|
||||
|
||||
it('should add MULTI_CONVO permissions to existing roles without them', async () => {
|
||||
const partialUserRole = {
|
||||
name: SystemRoles.USER,
|
||||
[PermissionTypes.PROMPTS]: roleDefaults[SystemRoles.USER][PermissionTypes.PROMPTS],
|
||||
[PermissionTypes.BOOKMARKS]: roleDefaults[SystemRoles.USER][PermissionTypes.BOOKMARKS],
|
||||
permissions: {
|
||||
[PermissionTypes.PROMPTS]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.PROMPTS],
|
||||
[PermissionTypes.BOOKMARKS]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.BOOKMARKS],
|
||||
},
|
||||
};
|
||||
|
||||
await new Role(partialUserRole).save();
|
||||
|
||||
await initializeRoles();
|
||||
|
||||
const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
|
||||
|
||||
expect(userRole[PermissionTypes.MULTI_CONVO]).toBeDefined();
|
||||
expect(userRole[PermissionTypes.MULTI_CONVO].USE).toBeDefined();
|
||||
const userRole = await getRoleByName(SystemRoles.USER);
|
||||
expect(userRole.permissions[PermissionTypes.MULTI_CONVO]).toBeDefined();
|
||||
expect(userRole.permissions[PermissionTypes.MULTI_CONVO].USE).toBeDefined();
|
||||
});
|
||||
});
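
Taken together, the edits in this spec follow one migration: permission flags move from top-level keys on the role document into a nested `permissions` object, and direct `Role.findOne(...).lean()` lookups are replaced by `getRoleByName(...)`. A minimal sketch of the before/after document shape (field values are illustrative only):

// Old shape: permission flags live at the top level of the role document.
const legacyRole = {
  name: SystemRoles.USER,
  [PermissionTypes.PROMPTS]: { CREATE: true, USE: true, SHARED_GLOBAL: false },
};

// New shape: the same flags nested under a `permissions` field.
const migratedRole = {
  name: SystemRoles.USER,
  permissions: {
    [PermissionTypes.PROMPTS]: { CREATE: true, USE: true, SHARED_GLOBAL: false },
  },
};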

@@ -52,6 +52,14 @@ function anonymizeMessages(messages, newConvoId) {
    const newMessageId = anonymizeMessageId(message.messageId);
    idMap.set(message.messageId, newMessageId);

    const anonymizedAttachments = message.attachments?.map((attachment) => {
      return {
        ...attachment,
        messageId: newMessageId,
        conversationId: newConvoId,
      };
    });

    return {
      ...message,
      messageId: newMessageId,

@@ -61,6 +69,7 @@ function anonymizeMessages(messages, newConvoId) {
      model: message.model?.startsWith('asst_')
        ? anonymizeAssistantId(message.model)
        : message.model,
      attachments: anonymizedAttachments,
    };
  });
}

@@ -1,11 +1,144 @@
const mongoose = require('mongoose');
const { isEnabled } = require('~/server/utils/handleText');
const { transactionSchema } = require('@librechat/data-schemas');
const { getBalanceConfig } = require('~/server/services/Config');
const { getMultiplier, getCacheMultiplier } = require('./tx');
const { logger } = require('~/config');
const Balance = require('./Balance');

const cancelRate = 1.15;

/**
 * Updates a user's token balance based on a transaction using optimistic concurrency control
 * without schema changes. Compatible with DocumentDB.
 * @async
 * @function
 * @param {Object} params - The function parameters.
 * @param {string|mongoose.Types.ObjectId} params.user - The user ID.
 * @param {number} params.incrementValue - The value to increment the balance by (can be negative).
 * @param {import('mongoose').UpdateQuery<import('@librechat/data-schemas').IBalance>['$set']} [params.setValues] - Optional additional fields to set.
 * @returns {Promise<Object>} Returns the updated balance document (lean).
 * @throws {Error} Throws an error if the update fails after multiple retries.
 */
const updateBalance = async ({ user, incrementValue, setValues }) => {
  const maxRetries = 10; // Number of times to retry on conflict
  let delay = 50; // Initial retry delay in ms
  let lastError = null;

  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    let currentBalanceDoc;
    try {
      // 1. Read the current document state
      currentBalanceDoc = await Balance.findOne({ user }).lean();
      const currentCredits = currentBalanceDoc ? currentBalanceDoc.tokenCredits : 0;

      // 2. Calculate the desired new state
      const potentialNewCredits = currentCredits + incrementValue;
      const newCredits = Math.max(0, potentialNewCredits); // Ensure balance doesn't go below zero

      // 3. Prepare the update payload
      const updatePayload = {
        $set: {
          tokenCredits: newCredits,
          ...(setValues || {}), // Merge other values to set
        },
      };

      // 4. Attempt the conditional update or upsert
      let updatedBalance = null;
      if (currentBalanceDoc) {
        // --- Document Exists: Perform Conditional Update ---
        // Update only if tokenCredits still match the value we just read (currentCredits)
        updatedBalance = await Balance.findOneAndUpdate(
          {
            user: user,
            tokenCredits: currentCredits, // Optimistic lock: condition based on the read value
          },
          updatePayload,
          {
            new: true, // Return the modified document
          },
        ).lean(); // Use lean() for a plain JS object

        if (updatedBalance) {
          // Success! The update was applied against the expected current state.
          return updatedBalance;
        }
        // If updatedBalance is null, tokenCredits changed between read and write (conflict).
        lastError = new Error(`Concurrency conflict for user ${user} on attempt ${attempt}.`);
        // Proceed to retry logic below.
      } else {
        // --- Document Does Not Exist: Perform Conditional Upsert ---
        // Use a simple { user } filter and rely on the retry loop for races; if another
        // process creates the document between our findOne and this upsert, the unique
        // index produces a duplicate key error that is handled below.
        try {
          updatedBalance = await Balance.findOneAndUpdate(
            {
              user: user,
            },
            updatePayload,
            {
              upsert: true, // Create if it doesn't exist
              new: true, // Return the created/updated document
            },
          ).lean();

          if (updatedBalance) {
            // Upsert succeeded (likely created the document)
            return updatedBalance;
          }
          // If null, potentially a rare race condition during upsert. Retry should handle it.
          lastError = new Error(
            `Upsert race condition suspected for user ${user} on attempt ${attempt}.`,
          );
        } catch (error) {
          if (error.code === 11000) {
            // E11000 duplicate key error: another process created the document *just*
            // before our upsert. Treat it as a concurrency conflict during creation and retry.
            lastError = error; // Store the error
            // Proceed to retry logic below.
          } else {
            // Different error, rethrow
            throw error;
          }
        }
      } // End if/else (document exists?)
    } catch (error) {
      // Catch errors from findOne or unexpected findOneAndUpdate errors
      logger.error(`[updateBalance] Error during attempt ${attempt} for user ${user}:`, error);
      lastError = error; // Store the error
      // Consider stopping retries for non-transient errors, but for now, we retry.
    }

    // If we reached here, the update failed (conflict or error): wait and retry
    if (attempt < maxRetries) {
      const jitter = Math.random() * delay * 0.5; // Add jitter to the delay
      await new Promise((resolve) => setTimeout(resolve, delay + jitter));
      delay = Math.min(delay * 2, 2000); // Exponential backoff with a cap
    }
  } // End for loop (retries)

  // If the loop finishes without success, throw the last encountered error or a generic one
  logger.error(
    `[updateBalance] Failed to update balance for user ${user} after ${maxRetries} attempts.`,
  );
  throw (
    lastError ||
    new Error(
      `Failed to update balance for user ${user} after maximum retries due to persistent conflicts.`,
    )
  );
};
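
A minimal usage sketch of the read–compare–swap loop above (the user ID and amounts are hypothetical; assumes a connected Mongoose instance):

// Deduct 500 credits, retrying internally if a concurrent writer wins the race.
const updated = await updateBalance({
  user: someUserId, // hypothetical ObjectId
  incrementValue: -500,
});
console.log(updated.tokenCredits); // floored at 0 if the deduction overshoots

Because the conditional filter pins tokenCredits to the value that was just read, two concurrent writers cannot both commit against the same snapshot; the loser simply retries with exponential backoff and jitter.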

/** Method to calculate and set the tokenValue for a transaction */
transactionSchema.methods.calculateTokenValue = function () {
  if (!this.valueKey || !this.tokenType) {

@@ -21,6 +154,39 @@ transactionSchema.methods.calculateTokenValue = function () {
  }
};

/**
 * New static method to create an auto-refill transaction. It credits the refill amount
 * directly via `updateBalance` (stamping `lastRefill`), bypassing the regular
 * spend-based balance update in `create`/`createStructured`.
 * @param {object} txData - Transaction data.
 * @param {string} txData.user - The user ID.
 * @param {string} txData.tokenType - The type of token.
 * @param {string} txData.context - The context of the transaction.
 * @param {number} txData.rawAmount - The raw amount of tokens.
 * @returns {Promise<object>} - The created transaction.
 */
transactionSchema.statics.createAutoRefillTransaction = async function (txData) {
  if (txData.rawAmount != null && isNaN(txData.rawAmount)) {
    return;
  }
  const transaction = new this(txData);
  transaction.endpointTokenConfig = txData.endpointTokenConfig;
  transaction.calculateTokenValue();
  await transaction.save();

  const balanceResponse = await updateBalance({
    user: transaction.user,
    incrementValue: txData.rawAmount,
    setValues: { lastRefill: new Date() },
  });
  const result = {
    rate: transaction.rate,
    user: transaction.user.toString(),
    balance: balanceResponse.tokenCredits,
  };
  logger.debug('[Balance.check] Auto-refill performed', result);
  result.transaction = transaction;
  return result;
};
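
For reference, this is how the balance check (further below) invokes the auto-refill path; the values here are illustrative:

// Credit the user's configured refill amount and stamp `lastRefill`.
const refill = await Transaction.createAutoRefillTransaction({
  user: userId, // hypothetical ObjectId
  tokenType: 'credits',
  context: 'autoRefill',
  rawAmount: 1000000, // illustrative; in practice this is record.refillAmount
});
console.log(refill.balance); // tokenCredits after the refill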

/**
 * Static method to create a transaction and update the balance
 * @param {txData} txData - Transaction data.

@@ -37,27 +203,22 @@ transactionSchema.statics.create = async function (txData) {

  await transaction.save();

  if (!isEnabled(process.env.CHECK_BALANCE)) {
  const balance = await getBalanceConfig();
  if (!balance?.enabled) {
    return;
  }

  let balance = await Balance.findOne({ user: transaction.user }).lean();
  let incrementValue = transaction.tokenValue;

  if (balance && balance?.tokenCredits + incrementValue < 0) {
    incrementValue = -balance.tokenCredits;
  }

  balance = await Balance.findOneAndUpdate(
    { user: transaction.user },
    { $inc: { tokenCredits: incrementValue } },
    { upsert: true, new: true },
  ).lean();
  const balanceResponse = await updateBalance({
    user: transaction.user,
    incrementValue,
  });

  return {
    rate: transaction.rate,
    user: transaction.user.toString(),
    balance: balance.tokenCredits,
    balance: balanceResponse.tokenCredits,
    [transaction.tokenType]: incrementValue,
  };
};

@@ -78,27 +239,22 @@ transactionSchema.statics.createStructured = async function (txData) {

  await transaction.save();

  if (!isEnabled(process.env.CHECK_BALANCE)) {
  const balance = await getBalanceConfig();
  if (!balance?.enabled) {
    return;
  }

  let balance = await Balance.findOne({ user: transaction.user }).lean();
  let incrementValue = transaction.tokenValue;

  if (balance && balance?.tokenCredits + incrementValue < 0) {
    incrementValue = -balance.tokenCredits;
  }

  balance = await Balance.findOneAndUpdate(
    { user: transaction.user },
    { $inc: { tokenCredits: incrementValue } },
    { upsert: true, new: true },
  ).lean();
  const balanceResponse = await updateBalance({
    user: transaction.user,
    incrementValue,
  });

  return {
    rate: transaction.rate,
    user: transaction.user.toString(),
    balance: balance.tokenCredits,
    balance: balanceResponse.tokenCredits,
    [transaction.tokenType]: incrementValue,
  };
};

@@ -1,9 +1,13 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { getBalanceConfig } = require('~/server/services/Config');
const { getMultiplier, getCacheMultiplier } = require('./tx');
const { Transaction } = require('./Transaction');
const Balance = require('./Balance');
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { getMultiplier, getCacheMultiplier } = require('./tx');

// Mock the custom config module so we can control the balance flag.
jest.mock('~/server/services/Config');

let mongoServer;

@@ -20,6 +24,8 @@ afterAll(async () => {

beforeEach(async () => {
  await mongoose.connection.dropDatabase();
  // Default: enable balance updates in tests.
  getBalanceConfig.mockResolvedValue({ enabled: true });
});

describe('Regular Token Spending Tests', () => {

@@ -44,34 +50,22 @@ describe('Regular Token Spending Tests', () => {
    };

    // Act
    process.env.CHECK_BALANCE = 'true';
    await spendTokens(txData, tokenUsage);

    // Assert
    console.log('Initial Balance:', initialBalance);

    const updatedBalance = await Balance.findOne({ user: userId });
    console.log('Updated Balance:', updatedBalance.tokenCredits);

    const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
    const completionMultiplier = getMultiplier({ model, tokenType: 'completion' });

    const expectedPromptCost = tokenUsage.promptTokens * promptMultiplier;
    const expectedCompletionCost = tokenUsage.completionTokens * completionMultiplier;
    const expectedTotalCost = expectedPromptCost + expectedCompletionCost;
    const expectedTotalCost = 100 * promptMultiplier + 50 * completionMultiplier;
    const expectedBalance = initialBalance - expectedTotalCost;

    expect(updatedBalance.tokenCredits).toBeLessThan(initialBalance);
    expect(updatedBalance.tokenCredits).toBeCloseTo(expectedBalance, 0);

    console.log('Expected Total Cost:', expectedTotalCost);
    console.log('Actual Balance Decrease:', initialBalance - updatedBalance.tokenCredits);
  });

  test('spendTokens should handle zero completion tokens', async () => {
    // Arrange
    const userId = new mongoose.Types.ObjectId();
    const initialBalance = 10000000; // $10.00
    const initialBalance = 10000000;
    await Balance.create({ user: userId, tokenCredits: initialBalance });

    const model = 'gpt-3.5-turbo';

@@ -89,24 +83,19 @@ describe('Regular Token Spending Tests', () => {
    };

    // Act
    process.env.CHECK_BALANCE = 'true';
    await spendTokens(txData, tokenUsage);

    // Assert
    const updatedBalance = await Balance.findOne({ user: userId });

    const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
    const expectedCost = tokenUsage.promptTokens * promptMultiplier;
    const expectedCost = 100 * promptMultiplier;
    expect(updatedBalance.tokenCredits).toBeCloseTo(initialBalance - expectedCost, 0);

    console.log('Initial Balance:', initialBalance);
    console.log('Updated Balance:', updatedBalance.tokenCredits);
    console.log('Expected Cost:', expectedCost);
  });

  test('spendTokens should handle undefined token counts', async () => {
    // Arrange
    const userId = new mongoose.Types.ObjectId();
    const initialBalance = 10000000; // $10.00
    const initialBalance = 10000000;
    await Balance.create({ user: userId, tokenCredits: initialBalance });

    const model = 'gpt-3.5-turbo';

@@ -120,14 +109,17 @@ describe('Regular Token Spending Tests', () => {

    const tokenUsage = {};

    // Act
    const result = await spendTokens(txData, tokenUsage);

    // Assert: No transaction should be created
    expect(result).toBeUndefined();
  });

  test('spendTokens should handle only prompt tokens', async () => {
    // Arrange
    const userId = new mongoose.Types.ObjectId();
    const initialBalance = 10000000; // $10.00
    const initialBalance = 10000000;
    await Balance.create({ user: userId, tokenCredits: initialBalance });

    const model = 'gpt-3.5-turbo';

@@ -141,14 +133,44 @@ describe('Regular Token Spending Tests', () => {

    const tokenUsage = { promptTokens: 100 };

    // Act
    await spendTokens(txData, tokenUsage);

    // Assert
    const updatedBalance = await Balance.findOne({ user: userId });

    const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
    const expectedCost = 100 * promptMultiplier;
    expect(updatedBalance.tokenCredits).toBeCloseTo(initialBalance - expectedCost, 0);
  });

  test('spendTokens should not update balance when balance feature is disabled', async () => {
    // Arrange: Override the config to disable balance updates.
    getBalanceConfig.mockResolvedValue({ balance: { enabled: false } });
    const userId = new mongoose.Types.ObjectId();
    const initialBalance = 10000000;
    await Balance.create({ user: userId, tokenCredits: initialBalance });

    const model = 'gpt-3.5-turbo';
    const txData = {
      user: userId,
      conversationId: 'test-conversation-id',
      model,
      context: 'test',
      endpointTokenConfig: null,
    };

    const tokenUsage = {
      promptTokens: 100,
      completionTokens: 50,
    };

    // Act
    await spendTokens(txData, tokenUsage);

    // Assert: Balance should remain unchanged.
    const updatedBalance = await Balance.findOne({ user: userId });
    expect(updatedBalance.tokenCredits).toBe(initialBalance);
  });
});

describe('Structured Token Spending Tests', () => {

@@ -164,7 +186,7 @@ describe('Structured Token Spending Tests', () => {
      conversationId: 'c23a18da-706c-470a-ac28-ec87ed065199',
      model,
      context: 'message',
      endpointTokenConfig: null, // We'll use the default rates
      endpointTokenConfig: null,
    };

    const tokenUsage = {

@@ -176,28 +198,15 @@ describe('Structured Token Spending Tests', () => {
      completionTokens: 5,
    };

    // Get the actual multipliers
    const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
    const completionMultiplier = getMultiplier({ model, tokenType: 'completion' });
    const writeMultiplier = getCacheMultiplier({ model, cacheType: 'write' });
    const readMultiplier = getCacheMultiplier({ model, cacheType: 'read' });

    console.log('Multipliers:', {
      promptMultiplier,
      completionMultiplier,
      writeMultiplier,
      readMultiplier,
    });

    // Act
    process.env.CHECK_BALANCE = 'true';
    const result = await spendStructuredTokens(txData, tokenUsage);

    // Assert
    console.log('Initial Balance:', initialBalance);
    console.log('Updated Balance:', result.completion.balance);
    console.log('Transaction Result:', result);

    // Calculate expected costs.
    const expectedPromptCost =
      tokenUsage.promptTokens.input * promptMultiplier +
      tokenUsage.promptTokens.write * writeMultiplier +

@@ -206,37 +215,21 @@ describe('Structured Token Spending Tests', () => {
    const expectedTotalCost = expectedPromptCost + expectedCompletionCost;
    const expectedBalance = initialBalance - expectedTotalCost;

    console.log('Expected Cost:', expectedTotalCost);
    console.log('Expected Balance:', expectedBalance);

    // Assert
    expect(result.completion.balance).toBeLessThan(initialBalance);

    // Allow for a small difference (e.g., 100 token credits, which is $0.0001)
    const allowedDifference = 100;
    expect(Math.abs(result.completion.balance - expectedBalance)).toBeLessThan(allowedDifference);

    // Check if the decrease is approximately as expected
    const balanceDecrease = initialBalance - result.completion.balance;
    expect(balanceDecrease).toBeCloseTo(expectedTotalCost, 0);

    // Check token values
    const expectedPromptTokenValue = -(
      tokenUsage.promptTokens.input * promptMultiplier +
      tokenUsage.promptTokens.write * writeMultiplier +
      tokenUsage.promptTokens.read * readMultiplier
    );
    const expectedCompletionTokenValue = -tokenUsage.completionTokens * completionMultiplier;

    const expectedPromptTokenValue = -expectedPromptCost;
    const expectedCompletionTokenValue = -expectedCompletionCost;
    expect(result.prompt.prompt).toBeCloseTo(expectedPromptTokenValue, 1);
    expect(result.completion.completion).toBe(expectedCompletionTokenValue);

    console.log('Expected prompt tokenValue:', expectedPromptTokenValue);
    console.log('Actual prompt tokenValue:', result.prompt.prompt);
    console.log('Expected completion tokenValue:', expectedCompletionTokenValue);
    console.log('Actual completion tokenValue:', result.completion.completion);
  });
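
For orientation: per the comments in these tests, one token credit corresponds to $0.000001, so the amounts in the assertions convert directly:

// 10,000,000 credits = $10.00; the 100-credit tolerance above = $0.0001.
const credits = 10000000;
console.log(credits * 0.000001); // 10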

  test('should handle zero completion tokens in structured spending', async () => {
    // Arrange
    const userId = new mongoose.Types.ObjectId();
    const initialBalance = 17613154.55;
    await Balance.create({ user: userId, tokenCredits: initialBalance });

@@ -258,15 +251,17 @@ describe('Structured Token Spending Tests', () => {
      completionTokens: 0,
    };

    process.env.CHECK_BALANCE = 'true';
    // Act
    const result = await spendStructuredTokens(txData, tokenUsage);

    // Assert
    expect(result.prompt).toBeDefined();
    expect(result.completion).toBeUndefined();
    expect(result.prompt.prompt).toBeLessThan(0);
  });

  test('should handle only prompt tokens in structured spending', async () => {
    // Arrange
    const userId = new mongoose.Types.ObjectId();
    const initialBalance = 17613154.55;
    await Balance.create({ user: userId, tokenCredits: initialBalance });

@@ -287,15 +282,17 @@ describe('Structured Token Spending Tests', () => {
      },
    };

    process.env.CHECK_BALANCE = 'true';
    // Act
    const result = await spendStructuredTokens(txData, tokenUsage);

    // Assert
    expect(result.prompt).toBeDefined();
    expect(result.completion).toBeUndefined();
    expect(result.prompt.prompt).toBeLessThan(0);
  });

  test('should handle undefined token counts in structured spending', async () => {
    // Arrange
    const userId = new mongoose.Types.ObjectId();
    const initialBalance = 17613154.55;
    await Balance.create({ user: userId, tokenCredits: initialBalance });

@@ -310,9 +307,10 @@ describe('Structured Token Spending Tests', () => {

    const tokenUsage = {};

    process.env.CHECK_BALANCE = 'true';
    // Act
    const result = await spendStructuredTokens(txData, tokenUsage);

    // Assert
    expect(result).toEqual({
      prompt: undefined,
      completion: undefined,

@@ -320,6 +318,7 @@ describe('Structured Token Spending Tests', () => {
  });

  test('should handle incomplete context for completion tokens', async () => {
    // Arrange
    const userId = new mongoose.Types.ObjectId();
    const initialBalance = 17613154.55;
    await Balance.create({ user: userId, tokenCredits: initialBalance });

@@ -341,15 +340,18 @@ describe('Structured Token Spending Tests', () => {
      completionTokens: 50,
    };

    process.env.CHECK_BALANCE = 'true';
    // Act
    const result = await spendStructuredTokens(txData, tokenUsage);

    expect(result.completion.completion).toBeCloseTo(-50 * 15 * 1.15, 0); // Assuming multiplier is 15 and cancelRate is 1.15
    // Assert:
    // (Assuming a multiplier for completion of 15 and a cancel rate of 1.15, as noted in the original test.)
    expect(result.completion.completion).toBeCloseTo(-50 * 15 * 1.15, 0);
  });
});

describe('NaN Handling Tests', () => {
  test('should skip transaction creation when rawAmount is NaN', async () => {
    // Arrange
    const userId = new mongoose.Types.ObjectId();
    const initialBalance = 10000000;
    await Balance.create({ user: userId, tokenCredits: initialBalance });

@@ -365,9 +367,11 @@ describe('NaN Handling Tests', () => {
      tokenType: 'prompt',
    };

    // Act
    const result = await Transaction.create(txData);
    expect(result).toBeUndefined();

    // Assert: No transaction should be created and balance remains unchanged.
    expect(result).toBeUndefined();
    const balance = await Balance.findOne({ user: userId });
    expect(balance.tokenCredits).toBe(initialBalance);
  });

156 api/models/balanceMethods.js Normal file

@@ -0,0 +1,156 @@
const { ViolationTypes } = require('librechat-data-provider');
const { Transaction } = require('./Transaction');
const { logViolation } = require('~/cache');
const { getMultiplier } = require('./tx');
const { logger } = require('~/config');
const Balance = require('./Balance');

function isInvalidDate(date) {
  return isNaN(date);
}

/**
 * Simple check method that calculates token cost and returns balance info.
 * The auto-refill logic now lives here in balanceMethods.js to prevent circular dependencies.
 */
const checkBalanceRecord = async function ({
  user,
  model,
  endpoint,
  valueKey,
  tokenType,
  amount,
  endpointTokenConfig,
}) {
  const multiplier = getMultiplier({ valueKey, tokenType, model, endpoint, endpointTokenConfig });
  const tokenCost = amount * multiplier;

  // Retrieve the balance record
  const record = await Balance.findOne({ user }).lean();
  if (!record) {
    logger.debug('[Balance.check] No balance record found for user', { user });
    return {
      canSpend: false,
      balance: 0,
      tokenCost,
    };
  }
  let balance = record.tokenCredits;

  logger.debug('[Balance.check] Initial state', {
    user,
    model,
    endpoint,
    valueKey,
    tokenType,
    amount,
    balance,
    multiplier,
    endpointTokenConfig: !!endpointTokenConfig,
  });

  // Only perform auto-refill if spending would bring the balance to 0 or below
  if (balance - tokenCost <= 0 && record.autoRefillEnabled && record.refillAmount > 0) {
    const lastRefillDate = new Date(record.lastRefill);
    const now = new Date();
    if (
      isInvalidDate(lastRefillDate) ||
      now >=
        addIntervalToDate(lastRefillDate, record.refillIntervalValue, record.refillIntervalUnit)
    ) {
      try {
        /** @type {{ rate: number, user: string, balance: number, transaction: import('@librechat/data-schemas').ITransaction}} */
        const result = await Transaction.createAutoRefillTransaction({
          user: user,
          tokenType: 'credits',
          context: 'autoRefill',
          rawAmount: record.refillAmount,
        });
        balance = result.balance;
      } catch (error) {
        logger.error('[Balance.check] Failed to record transaction for auto-refill', error);
      }
    }
  }

  logger.debug('[Balance.check] Token cost', { tokenCost });
  return { canSpend: balance >= tokenCost, balance, tokenCost };
};

/**
 * Adds a time interval to a given date.
 * @param {Date} date - The starting date.
 * @param {number} value - The numeric value of the interval.
 * @param {'seconds'|'minutes'|'hours'|'days'|'weeks'|'months'} unit - The unit of time.
 * @returns {Date} A new Date representing the starting date plus the interval.
 */
const addIntervalToDate = (date, value, unit) => {
  const result = new Date(date);
  switch (unit) {
    case 'seconds':
      result.setSeconds(result.getSeconds() + value);
      break;
    case 'minutes':
      result.setMinutes(result.getMinutes() + value);
      break;
    case 'hours':
      result.setHours(result.getHours() + value);
      break;
    case 'days':
      result.setDate(result.getDate() + value);
      break;
    case 'weeks':
      result.setDate(result.getDate() + value * 7);
      break;
    case 'months':
      result.setMonth(result.getMonth() + value);
      break;
    default:
      break;
  }
  return result;
};
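
A quick illustration of the helper and the refill-due condition it supports in `checkBalanceRecord` above (dates are illustrative):

const last = new Date('2024-01-01T00:00:00Z');
const due = addIntervalToDate(last, 2, 'weeks');
console.log(due.toISOString()); // 2024-01-15T00:00:00.000Z

// A refill is due when the stored date is invalid or the interval has elapsed:
const refillDue = isNaN(last) || new Date() >= due;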

/**
 * Checks the balance for a user and determines if they can spend a certain amount.
 * If the user cannot spend the amount, it logs a violation and denies the request.
 *
 * @async
 * @function
 * @param {Object} params - The function parameters.
 * @param {Express.Request} params.req - The Express request object.
 * @param {Express.Response} params.res - The Express response object.
 * @param {Object} params.txData - The transaction data.
 * @param {string} params.txData.user - The user ID or identifier.
 * @param {('prompt' | 'completion')} params.txData.tokenType - The type of token.
 * @param {number} params.txData.amount - The amount of tokens.
 * @param {string} params.txData.model - The model name or identifier.
 * @param {string} [params.txData.endpointTokenConfig] - The token configuration for the endpoint.
 * @returns {Promise<boolean>} Resolves to true if the user can spend the amount.
 * @throws {Error} Throws an error if the user cannot spend the amount or the balance check fails.
 */
const checkBalance = async ({ req, res, txData }) => {
  const { canSpend, balance, tokenCost } = await checkBalanceRecord(txData);
  if (canSpend) {
    return true;
  }

  const type = ViolationTypes.TOKEN_BALANCE;
  const errorMessage = {
    type,
    balance,
    tokenCost,
    promptTokens: txData.amount,
  };

  if (txData.generations && txData.generations.length > 0) {
    errorMessage.generations = txData.generations;
  }

  await logViolation(req, res, type, errorMessage, 0);
  throw new Error(JSON.stringify(errorMessage));
};

module.exports = {
  checkBalance,
};
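
A sketch of a caller, assuming an Express handler with `req` and `res` in scope (the txData values, including `req.user.id`, are hypothetical): on success `checkBalance` resolves to true; otherwise it logs a TOKEN_BALANCE violation and throws a JSON-stringified payload:

try {
  await checkBalance({
    req,
    res,
    txData: { user: req.user.id, tokenType: 'prompt', amount: 1200, model: 'gpt-4' },
  });
  // proceed with the request
} catch (err) {
  // { type, balance, tokenCost, promptTokens, generations? }
  const details = JSON.parse(err.message);
  console.warn('Insufficient balance:', details);
}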

@@ -1,45 +0,0 @@
const { ViolationTypes } = require('librechat-data-provider');
const { logViolation } = require('~/cache');
const Balance = require('./Balance');
/**
 * Checks the balance for a user and determines if they can spend a certain amount.
 * If the user cannot spend the amount, it logs a violation and denies the request.
 *
 * @async
 * @function
 * @param {Object} params - The function parameters.
 * @param {Express.Request} params.req - The Express request object.
 * @param {Express.Response} params.res - The Express response object.
 * @param {Object} params.txData - The transaction data.
 * @param {string} params.txData.user - The user ID or identifier.
 * @param {('prompt' | 'completion')} params.txData.tokenType - The type of token.
 * @param {number} params.txData.amount - The amount of tokens.
 * @param {string} params.txData.model - The model name or identifier.
 * @param {string} [params.txData.endpointTokenConfig] - The token configuration for the endpoint.
 * @returns {Promise<boolean>} Returns true if the user can spend the amount, otherwise denies the request.
 * @throws {Error} Throws an error if there's an issue with the balance check.
 */
const checkBalance = async ({ req, res, txData }) => {
  const { canSpend, balance, tokenCost } = await Balance.check(txData);

  if (canSpend) {
    return true;
  }

  const type = ViolationTypes.TOKEN_BALANCE;
  const errorMessage = {
    type,
    balance,
    tokenCost,
    promptTokens: txData.amount,
  };

  if (txData.generations && txData.generations.length > 0) {
    errorMessage.generations = txData.generations;
  }

  await logViolation(req, res, type, errorMessage, 0);
  throw new Error(JSON.stringify(errorMessage));
};

module.exports = checkBalance;

@@ -1,6 +1,7 @@
const _ = require('lodash');
const mongoose = require('mongoose');
const { MeiliSearch } = require('meilisearch');
const { parseTextParts, ContentTypes } = require('librechat-data-provider');
const { cleanUpPrimaryKeyValue } = require('~/lib/utils/misc');
const logger = require('~/config/meiliLogger');

@@ -238,10 +239,7 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
    }

    if (object.content && Array.isArray(object.content)) {
      object.text = object.content
        .filter((item) => item.type === 'text' && item.text && item.text.value)
        .map((item) => item.text.value)
        .join(' ');
      object.text = parseTextParts(object.content);
      delete object.content;
    }
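
Judging only from the replaced lines, `parseTextParts` folds the old inline extraction into the shared data-provider package; a rough sketch of what the removed code computed (an assumption about equivalence, not the library's actual implementation):

// Roughly what the deleted inline code produced for `object.text`:
const text = (object.content || [])
  .filter((item) => item.type === 'text' && item.text && item.text.value)
  .map((item) => item.text.value)
  .join(' ');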

@@ -36,7 +36,7 @@ const spendTokens = async (txData, tokenUsage) => {
    prompt = await Transaction.create({
      ...txData,
      tokenType: 'prompt',
      rawAmount: -Math.max(promptTokens, 0),
      rawAmount: promptTokens === 0 ? 0 : -Math.max(promptTokens, 0),
    });
  }

@@ -44,7 +44,7 @@ const spendTokens = async (txData, tokenUsage) => {
    completion = await Transaction.create({
      ...txData,
      tokenType: 'completion',
      rawAmount: -Math.max(completionTokens, 0),
      rawAmount: completionTokens === 0 ? 0 : -Math.max(completionTokens, 0),
    });
  }
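
The `=== 0` guard exists because negating zero in JavaScript yields negative zero, which compares and serializes confusingly even though it is numerically equal to zero:

console.log(-Math.max(0, 0)); // -0
console.log(-0 === 0); // true  (strict equality treats them as equal)
console.log(Object.is(-0, 0)); // false (Object.is distinguishes them)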

@@ -1,17 +1,10 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { Transaction } = require('./Transaction');
const Balance = require('./Balance');
const { spendTokens, spendStructuredTokens } = require('./spendTokens');

jest.mock('./Transaction', () => ({
  Transaction: {
    create: jest.fn(),
    createStructured: jest.fn(),
  },
}));

jest.mock('./Balance', () => ({
  findOne: jest.fn(),
  findOneAndUpdate: jest.fn(),
}));

// Mock the logger to prevent console output during tests
jest.mock('~/config', () => ({
  logger: {
    debug: jest.fn(),

@@ -19,19 +12,46 @@ jest.mock('~/config', () => ({
  },
}));

// Import after mocking
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { Transaction } = require('./Transaction');
const Balance = require('./Balance');
// Mock the Config service
const { getBalanceConfig } = require('~/server/services/Config');
jest.mock('~/server/services/Config');

describe('spendTokens', () => {
  beforeEach(() => {
    jest.clearAllMocks();
    process.env.CHECK_BALANCE = 'true';
  let mongoServer;
  let userId;

  beforeAll(async () => {
    mongoServer = await MongoMemoryServer.create();
    const mongoUri = mongoServer.getUri();
    await mongoose.connect(mongoUri);
  });

  afterAll(async () => {
    await mongoose.disconnect();
    await mongoServer.stop();
  });

  beforeEach(async () => {
    // Clear collections before each test
    await Transaction.deleteMany({});
    await Balance.deleteMany({});

    // Create a new user ID for each test
    userId = new mongoose.Types.ObjectId();

    // Mock the balance config to be enabled by default
    getBalanceConfig.mockResolvedValue({ enabled: true });
  });

  it('should create transactions for both prompt and completion tokens', async () => {
    // Create a balance for the user
    await Balance.create({
      user: userId,
      tokenCredits: 10000,
    });

    const txData = {
      user: new mongoose.Types.ObjectId(),
      user: userId,
      conversationId: 'test-convo',
      model: 'gpt-3.5-turbo',
      context: 'test',

@@ -41,31 +61,35 @@ describe('spendTokens', () => {
      completionTokens: 50,
    };

    Transaction.create.mockResolvedValueOnce({ tokenType: 'prompt', rawAmount: -100 });
    Transaction.create.mockResolvedValueOnce({ tokenType: 'completion', rawAmount: -50 });
    Balance.findOne.mockResolvedValue({ tokenCredits: 10000 });
    Balance.findOneAndUpdate.mockResolvedValue({ tokenCredits: 9850 });

    await spendTokens(txData, tokenUsage);

    expect(Transaction.create).toHaveBeenCalledTimes(2);
    expect(Transaction.create).toHaveBeenCalledWith(
      expect.objectContaining({
        tokenType: 'prompt',
        rawAmount: -100,
      }),
    );
    expect(Transaction.create).toHaveBeenCalledWith(
      expect.objectContaining({
        tokenType: 'completion',
        rawAmount: -50,
      }),
    );
    // Verify transactions were created
    const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
    expect(transactions).toHaveLength(2);

    // Check completion transaction
    expect(transactions[0].tokenType).toBe('completion');
    expect(transactions[0].rawAmount).toBe(-50);

    // Check prompt transaction
    expect(transactions[1].tokenType).toBe('prompt');
    expect(transactions[1].rawAmount).toBe(-100);

    // Verify balance was updated
    const balance = await Balance.findOne({ user: userId });
    expect(balance).toBeDefined();
    expect(balance.tokenCredits).toBeLessThan(10000); // Balance should be reduced
  });

  it('should handle zero completion tokens', async () => {
    // Create a balance for the user
    await Balance.create({
      user: userId,
      tokenCredits: 10000,
    });

    const txData = {
      user: new mongoose.Types.ObjectId(),
      user: userId,
      conversationId: 'test-convo',
      model: 'gpt-3.5-turbo',
      context: 'test',

@@ -75,31 +99,26 @@ describe('spendTokens', () => {
      completionTokens: 0,
    };

    Transaction.create.mockResolvedValueOnce({ tokenType: 'prompt', rawAmount: -100 });
    Transaction.create.mockResolvedValueOnce({ tokenType: 'completion', rawAmount: -0 });
    Balance.findOne.mockResolvedValue({ tokenCredits: 10000 });
    Balance.findOneAndUpdate.mockResolvedValue({ tokenCredits: 9850 });

    await spendTokens(txData, tokenUsage);

    expect(Transaction.create).toHaveBeenCalledTimes(2);
    expect(Transaction.create).toHaveBeenCalledWith(
      expect.objectContaining({
        tokenType: 'prompt',
        rawAmount: -100,
      }),
    );
    expect(Transaction.create).toHaveBeenCalledWith(
      expect.objectContaining({
        tokenType: 'completion',
        rawAmount: -0, // Changed from 0 to -0
      }),
    );
    // Verify transactions were created
    const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
    expect(transactions).toHaveLength(2);

    // Check completion transaction
    expect(transactions[0].tokenType).toBe('completion');
    // In JavaScript, -0 and 0 are distinct values but functionally equivalent;
    // use Math.abs to handle both 0 and -0
    expect(Math.abs(transactions[0].rawAmount)).toBe(0);

    // Check prompt transaction
    expect(transactions[1].tokenType).toBe('prompt');
    expect(transactions[1].rawAmount).toBe(-100);
  });

  it('should handle undefined token counts', async () => {
    const txData = {
      user: new mongoose.Types.ObjectId(),
      user: userId,
      conversationId: 'test-convo',
      model: 'gpt-3.5-turbo',
      context: 'test',

@@ -108,13 +127,22 @@ describe('spendTokens', () => {

    await spendTokens(txData, tokenUsage);

    expect(Transaction.create).not.toHaveBeenCalled();
    // Verify no transactions were created
    const transactions = await Transaction.find({ user: userId });
    expect(transactions).toHaveLength(0);
  });

  it('should not update balance when CHECK_BALANCE is false', async () => {
    process.env.CHECK_BALANCE = 'false';
  it('should not update balance when the balance feature is disabled', async () => {
    // Override configuration: disable balance updates
    getBalanceConfig.mockResolvedValue({ enabled: false });
    // Create a balance for the user
    await Balance.create({
      user: userId,
      tokenCredits: 10000,
    });

    const txData = {
      user: new mongoose.Types.ObjectId(),
      user: userId,
      conversationId: 'test-convo',
      model: 'gpt-3.5-turbo',
      context: 'test',

@@ -124,19 +152,529 @@ describe('spendTokens', () => {
      completionTokens: 50,
    };

    Transaction.create.mockResolvedValueOnce({ tokenType: 'prompt', rawAmount: -100 });
    Transaction.create.mockResolvedValueOnce({ tokenType: 'completion', rawAmount: -50 });
    await spendTokens(txData, tokenUsage);

    // Verify transactions were created
    const transactions = await Transaction.find({ user: userId });
    expect(transactions).toHaveLength(2);

    // Verify balance was not updated (should still be 10000)
    const balance = await Balance.findOne({ user: userId });
    expect(balance.tokenCredits).toBe(10000);
  });

  it('should not allow balance to go below zero when spending tokens', async () => {
    // Create a balance with a low amount
    await Balance.create({
      user: userId,
      tokenCredits: 5000,
    });

    const txData = {
      user: userId,
      conversationId: 'test-convo',
      model: 'gpt-4', // Using a more expensive model
      context: 'test',
    };

    // Spend more tokens than the user has balance for
    const tokenUsage = {
      promptTokens: 1000,
      completionTokens: 500,
    };

    await spendTokens(txData, tokenUsage);

    expect(Transaction.create).toHaveBeenCalledTimes(2);
    expect(Balance.findOne).not.toHaveBeenCalled();
    expect(Balance.findOneAndUpdate).not.toHaveBeenCalled();
    // Verify transactions were created
    const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
    expect(transactions).toHaveLength(2);

    // Verify balance was reduced to exactly 0, not negative
    const balance = await Balance.findOne({ user: userId });
    expect(balance).toBeDefined();
    expect(balance.tokenCredits).toBe(0);

    // Check that the transaction records show the adjusted values
    const transactionResults = await Promise.all(
      transactions.map((t) =>
        Transaction.create({
          ...txData,
          tokenType: t.tokenType,
          rawAmount: t.rawAmount,
        }),
      ),
    );

    // The second transaction should have an adjusted value since the balance is already 0
    expect(transactionResults[1]).toEqual(
      expect.objectContaining({
        balance: 0,
      }),
    );
  });
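
The floor asserted here comes from `updateBalance` in Transaction.js above, where `Math.max(0, currentCredits + incrementValue)` clamps the result; in miniature:

const currentCredits = 5000;
const incrementValue = -7500; // cost exceeds the remaining balance
console.log(Math.max(0, currentCredits + incrementValue)); // 0, never negative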
|
||||
|
||||
it('should handle multiple transactions in sequence with low balance and not increase balance', async () => {
|
||||
// This test is specifically checking for the issue reported in production
|
||||
// where the balance increases after a transaction when it should remain at 0
|
||||
// Create a balance with a very low amount
|
||||
await Balance.create({
|
||||
user: userId,
|
||||
tokenCredits: 100,
|
||||
});
|
||||
|
||||
// First transaction - should reduce balance to 0
|
||||
const txData1 = {
|
||||
user: userId,
|
||||
conversationId: 'test-convo-1',
|
||||
model: 'gpt-4',
|
||||
context: 'test',
|
||||
};
|
||||
|
||||
const tokenUsage1 = {
|
||||
promptTokens: 100,
|
||||
completionTokens: 50,
|
||||
};
|
||||
|
||||
await spendTokens(txData1, tokenUsage1);
|
||||
|
||||
// Check balance after first transaction
|
||||
let balance = await Balance.findOne({ user: userId });
|
||||
expect(balance.tokenCredits).toBe(0);
|
||||
|
||||
// Second transaction - should keep balance at 0, not make it negative or increase it
|
||||
const txData2 = {
|
||||
user: userId,
|
||||
conversationId: 'test-convo-2',
|
||||
model: 'gpt-4',
|
||||
context: 'test',
|
||||
};
|
||||
|
||||
const tokenUsage2 = {
|
||||
promptTokens: 200,
|
||||
completionTokens: 100,
|
||||
};
|
||||
|
||||
await spendTokens(txData2, tokenUsage2);
|
||||
|
||||
// Check balance after second transaction - should still be 0
|
||||
balance = await Balance.findOne({ user: userId });
|
||||
expect(balance.tokenCredits).toBe(0);
|
||||
|
||||
// Verify all transactions were created
|
||||
const transactions = await Transaction.find({ user: userId });
|
||||
expect(transactions).toHaveLength(4); // 2 transactions (prompt+completion) for each call
|
||||
|
||||
// Let's examine the actual transaction records to see what's happening
|
||||
const transactionDetails = await Transaction.find({ user: userId }).sort({ createdAt: 1 });
|
||||
|
||||
// Log the transaction details for debugging
|
||||
console.log('Transaction details:');
|
||||
transactionDetails.forEach((tx, i) => {
|
||||
console.log(`Transaction ${i + 1}:`, {
|
||||
tokenType: tx.tokenType,
|
||||
rawAmount: tx.rawAmount,
|
||||
tokenValue: tx.tokenValue,
|
||||
model: tx.model,
|
||||
});
|
||||
});
|
||||
|
||||
// Check the return values from Transaction.create directly
|
||||
// This is to verify that the incrementValue is not becoming positive
|
||||
const directResult = await Transaction.create({
|
||||
user: userId,
|
||||
conversationId: 'test-convo-3',
|
||||
model: 'gpt-4',
|
||||
tokenType: 'completion',
|
||||
rawAmount: -100,
|
||||
context: 'test',
|
||||
});
|
||||
|
||||
console.log('Direct Transaction.create result:', directResult);
|
||||
|
||||
// The completion value should never be positive
|
||||
expect(directResult.completion).not.toBeGreaterThan(0);
|
||||
});
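
  /*
   * Worked example of the clamping above (a sketch; the rates are the gpt-4
   * multipliers this suite assumes elsewhere: prompt = 30, completion = 60):
   * the first call would cost 100 * 30 + 50 * 60 = 6000 credits, far more
   * than the 100-credit balance, so the balance is clamped to 0 rather than
   * going to -5900, and the second call cannot push it negative either.
   */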

  it('should ensure tokenValue is always negative for spending tokens', async () => {
    // Create a balance for the user
    await Balance.create({
      user: userId,
      tokenCredits: 10000,
    });

    // Test with various models to check multiplier calculations
    const models = ['gpt-3.5-turbo', 'gpt-4', 'claude-3-5-sonnet'];

    for (const model of models) {
      const txData = {
        user: userId,
        conversationId: `test-convo-${model}`,
        model,
        context: 'test',
      };

      const tokenUsage = {
        promptTokens: 100,
        completionTokens: 50,
      };

      await spendTokens(txData, tokenUsage);

      // Get the transactions for this model
      const transactions = await Transaction.find({
        user: userId,
        model,
      });

      // Verify tokenValue is negative for all transactions
      transactions.forEach((tx) => {
        console.log(`Model ${model}, Type ${tx.tokenType}: tokenValue = ${tx.tokenValue}`);
        expect(tx.tokenValue).toBeLessThan(0);
      });
    }
  });
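
  /*
   * Presumably (a sketch, not verified against the implementation), a spend
   * transaction's tokenValue is the signed token count scaled by the model's
   * multiplier, e.g. tokenValue = rawAmount * multiplier, so a 50-token
   * gpt-4 completion (rawAmount -50 at rate 60) would yield -3000. The
   * assertion above only relies on the sign staying negative.
   */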

  it('should handle structured transactions in sequence with low balance', async () => {
    // Create a balance with a very low amount
    await Balance.create({
      user: userId,
      tokenCredits: 100,
    });

    // First transaction - should reduce balance to 0
    const txData1 = {
      user: userId,
      conversationId: 'test-convo-1',
      model: 'claude-3-5-sonnet',
      context: 'test',
    };

    const tokenUsage1 = {
      promptTokens: {
        input: 10,
        write: 100,
        read: 5,
      },
      completionTokens: 50,
    };

    await spendStructuredTokens(txData1, tokenUsage1);

    // Check balance after first transaction
    let balance = await Balance.findOne({ user: userId });
    expect(balance.tokenCredits).toBe(0);

    // Second transaction - should keep balance at 0, not make it negative or increase it
    const txData2 = {
      user: userId,
      conversationId: 'test-convo-2',
      model: 'claude-3-5-sonnet',
      context: 'test',
    };

    const tokenUsage2 = {
      promptTokens: {
        input: 20,
        write: 200,
        read: 10,
      },
      completionTokens: 100,
    };

    await spendStructuredTokens(txData2, tokenUsage2);

    // Check balance after second transaction - should still be 0
    balance = await Balance.findOne({ user: userId });
    expect(balance.tokenCredits).toBe(0);

    // Verify all transactions were created
    const transactions = await Transaction.find({ user: userId });
    expect(transactions).toHaveLength(4); // 2 transactions (prompt + completion) for each call

    // Examine the actual transaction records to see what's happening
    const transactionDetails = await Transaction.find({ user: userId }).sort({ createdAt: 1 });

    // Log the transaction details for debugging
    console.log('Structured transaction details:');
    transactionDetails.forEach((tx, i) => {
      console.log(`Transaction ${i + 1}:`, {
        tokenType: tx.tokenType,
        rawAmount: tx.rawAmount,
        tokenValue: tx.tokenValue,
        inputTokens: tx.inputTokens,
        writeTokens: tx.writeTokens,
        readTokens: tx.readTokens,
        model: tx.model,
      });
    });
  });

  it('should not allow balance to go below zero when spending structured tokens', async () => {
    // Create a balance with a low amount
    await Balance.create({
      user: userId,
      tokenCredits: 5000,
    });

    const txData = {
      user: userId,
      conversationId: 'test-convo',
      model: 'claude-3-5-sonnet', // Using a model that supports structured tokens
      context: 'test',
    };

    // Spending more tokens than the user has balance for
    const tokenUsage = {
      promptTokens: {
        input: 100,
        write: 1000,
        read: 50,
      },
      completionTokens: 500,
    };

    const result = await spendStructuredTokens(txData, tokenUsage);

    // Verify transactions were created
    const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
    expect(transactions).toHaveLength(2);

    // Verify balance was reduced to exactly 0, not negative
    const balance = await Balance.findOne({ user: userId });
    expect(balance).toBeDefined();
    expect(balance.tokenCredits).toBe(0);

    // The result should show the adjusted values
    expect(result).toEqual({
      prompt: expect.objectContaining({
        user: userId.toString(),
        balance: expect.any(Number),
      }),
      completion: expect.objectContaining({
        user: userId.toString(),
        balance: 0, // Final balance should be 0
      }),
    });
  });
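
  /*
   * Assumed cost formula for a structured spend (a sketch; the exact rates
   * live in the multiplier tables tested further below):
   *   cost = input * promptRate + write * writeRate + read * readRate
   *        + completionTokens * completionRate
   * Whatever the exact rates, the usage above exceeds the 5000-credit
   * balance, so the final balance is clamped to 0.
   */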

  it('should handle multiple concurrent transactions correctly with a high balance', async () => {
    // Create a balance with a high amount
    const initialBalance = 10000000;
    await Balance.create({
      user: userId,
      tokenCredits: initialBalance,
    });

    // Simulate the recordCollectedUsage function from the production code
    const conversationId = 'test-concurrent-convo';
    const context = 'message';
    const model = 'gpt-4';

    const amount = 50;
    // Create `amount` usage records to simulate multiple transactions
    const collectedUsage = Array.from({ length: amount }, (_, i) => ({
      model,
      input_tokens: 100 + i * 10, // Increasing input tokens
      output_tokens: 50 + i * 5, // Increasing output tokens
      input_token_details: {
        cache_creation: i % 2 === 0 ? 20 : 0, // Some have cache creation
        cache_read: i % 3 === 0 ? 10 : 0, // Some have cache read
      },
    }));

    // Process all transactions concurrently to simulate race conditions
    const promises = [];
    let expectedTotalSpend = 0;

    for (let i = 0; i < collectedUsage.length; i++) {
      const usage = collectedUsage[i];
      if (!usage) {
        continue;
      }

      const cache_creation = Number(usage.input_token_details?.cache_creation) || 0;
      const cache_read = Number(usage.input_token_details?.cache_read) || 0;

      const txMetadata = {
        context,
        conversationId,
        user: userId,
        model: usage.model,
      };

      // Calculate expected spend for this transaction
      const promptTokens = usage.input_tokens;
      const completionTokens = usage.output_tokens;

      // For regular transactions
      if (cache_creation === 0 && cache_read === 0) {
        // Add to expected spend using the correct multipliers from tx.js:
        // for gpt-4, the multipliers are prompt=30, completion=60
        expectedTotalSpend += promptTokens * 30; // gpt-4 prompt rate is 30
        expectedTotalSpend += completionTokens * 60; // gpt-4 completion rate is 60

        promises.push(
          spendTokens(txMetadata, {
            promptTokens,
            completionTokens,
          }),
        );
      } else {
        // For structured transactions with cache operations, the multipliers
        // for Claude models differ; since we're using gpt-4 in this test,
        // we need to use appropriate values.
        expectedTotalSpend += promptTokens * 30; // Base prompt rate for gpt-4
        // Since gpt-4 doesn't have cache multipliers defined, we'll use the prompt rate
        expectedTotalSpend += cache_creation * 30; // Write rate (using prompt rate as fallback)
        expectedTotalSpend += cache_read * 30; // Read rate (using prompt rate as fallback)
        expectedTotalSpend += completionTokens * 60; // Completion rate for gpt-4

        promises.push(
          spendStructuredTokens(txMetadata, {
            promptTokens: {
              input: promptTokens,
              write: cache_creation,
              read: cache_read,
            },
            completionTokens,
          }),
        );
      }
    }
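
    /*
     * Worked example under the rates assumed above: record i = 0 has
     * input 100, output 50, cache_creation 20, and cache_read 10, so it
     * takes the structured path and contributes
     * 100 * 30 + 20 * 30 + 10 * 30 + 50 * 60 = 6900 expected credits.
     */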

    // Wait for all transactions to complete
    await Promise.all(promises);

    // Verify final balance
    const finalBalance = await Balance.findOne({ user: userId });
    expect(finalBalance).toBeDefined();

    // The final balance should be the initial balance minus the expected total spend
    const expectedFinalBalance = initialBalance - expectedTotalSpend;

    console.log('Initial balance:', initialBalance);
    console.log('Expected total spend:', expectedTotalSpend);
    console.log('Expected final balance:', expectedFinalBalance);
    console.log('Actual final balance:', finalBalance.tokenCredits);

    // Allow for small rounding differences
    expect(finalBalance.tokenCredits).toBeCloseTo(expectedFinalBalance, 0);

    // Verify all transactions were created
    const transactions = await Transaction.find({
      user: userId,
      conversationId,
    });

    // We should have 2 transactions (prompt + completion) for each usage record;
    // some might be structured, some regular
    expect(transactions.length).toBeGreaterThanOrEqual(collectedUsage.length);

    // Log transaction details for debugging
    console.log('Transaction summary:');
    let totalTokenValue = 0;
    transactions.forEach((tx) => {
      console.log(`${tx.tokenType}: rawAmount=${tx.rawAmount}, tokenValue=${tx.tokenValue}`);
      totalTokenValue += tx.tokenValue;
    });
    console.log('Total token value from transactions:', totalTokenValue);

    // The difference between expected and actual can be significant, likely
    // because the multipliers differ in the test environment, so adjust the
    // expectation based on the actual transactions
    const actualSpend = initialBalance - finalBalance.tokenCredits;
    console.log('Actual spend:', actualSpend);

    // Instead of checking the exact balance, verify that:
    // 1. The balance was reduced (tokens were spent)
    expect(finalBalance.tokenCredits).toBeLessThan(initialBalance);
    // 2. The total token value from transactions matches the actual spend
    expect(Math.abs(totalTokenValue)).toBeCloseTo(actualSpend, -3); // Allow for larger differences
  });

  // Regression test for concurrent balance increases
  it('should handle multiple concurrent balance increases correctly', async () => {
    // Start with zero balance
    const initialBalance = 0;
    await Balance.create({
      user: userId,
      tokenCredits: initialBalance,
    });

    const numberOfRefills = 25;
    const refillAmount = 1000;

    const promises = [];
    for (let i = 0; i < numberOfRefills; i++) {
      promises.push(
        Transaction.createAutoRefillTransaction({
          user: userId,
          tokenType: 'credits',
          context: 'concurrent-refill-test',
          rawAmount: refillAmount,
        }),
      );
    }

    // Wait for all refill transactions to complete
    const results = await Promise.all(promises);

    // Verify final balance
    const finalBalance = await Balance.findOne({ user: userId });
    expect(finalBalance).toBeDefined();

    // The final balance should be the initial balance plus the sum of all refills
    const expectedFinalBalance = initialBalance + numberOfRefills * refillAmount;

    console.log('Initial balance (Increase Test):', initialBalance);
    console.log(`Performed ${numberOfRefills} refills of ${refillAmount} each.`);
    console.log('Expected final balance (Increase Test):', expectedFinalBalance);
    console.log('Actual final balance (Increase Test):', finalBalance.tokenCredits);

    // Use toBeCloseTo for safety, though toBe should work for integer math
    expect(finalBalance.tokenCredits).toBeCloseTo(expectedFinalBalance, 0);

    // Verify all transactions were created
    const transactions = await Transaction.find({
      user: userId,
      context: 'concurrent-refill-test',
    });

    // We should have one transaction for each refill attempt
    expect(transactions.length).toBe(numberOfRefills);

    // Optional: verify the sum of increments from the results matches the balance change.
    // This assumes createAutoRefillTransaction resolves to an object shaped like
    // { balance: newBalance, transaction: { rawAmount, ... } }; adjust if the
    // actual return structure differs.
    const totalIncrementReported = results.reduce((sum, result) => {
      return sum + (result?.transaction?.rawAmount || 0);
    }, 0);
    console.log('Total increment reported by results:', totalIncrementReported);
    expect(totalIncrementReported).toBe(expectedFinalBalance - initialBalance);

    // Optional: check the sum of increments from the saved transactions.
    // For refills, rawAmount is positive; if tokenValue is derived from it,
    // adjust this to sum tx.tokenValue instead.
    let totalTokenValueFromDb = 0;
    transactions.forEach((tx) => {
      totalTokenValueFromDb += tx.rawAmount;
    });
    console.log('Total rawAmount from DB transactions:', totalTokenValueFromDb);
    expect(totalTokenValueFromDb).toBeCloseTo(expectedFinalBalance - initialBalance, 0);
  });
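
  /*
   * The concurrency safety exercised above presumably comes from an atomic
   * MongoDB increment rather than a read-modify-write cycle. A minimal
   * sketch of that pattern (an assumption, not necessarily what
   * createAutoRefillTransaction does internally):
   *
   *   await Balance.findOneAndUpdate(
   *     { user: userId },
   *     { $inc: { tokenCredits: refillAmount } },
   *     { upsert: true, new: true },
   *   );
   */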

  it('should create structured transactions for both prompt and completion tokens', async () => {
    // Create a balance for the user
    await Balance.create({
      user: userId,
      tokenCredits: 10000,
    });

    const txData = {
      user: userId,
      conversationId: 'test-convo',
      model: 'claude-3-5-sonnet',
      context: 'test',

@@ -150,48 +688,37 @@ describe('spendTokens', () => {
      completionTokens: 50,
    };

    Transaction.createStructured.mockResolvedValueOnce({
      rate: 3.75,
      user: txData.user.toString(),
      balance: 9570,
      prompt: -430,
    });
    Transaction.create.mockResolvedValueOnce({
      rate: 15,
      user: txData.user.toString(),
      balance: 8820,
      completion: -750,
    });

    const result = await spendStructuredTokens(txData, tokenUsage);

    expect(Transaction.createStructured).toHaveBeenCalledWith(
      expect.objectContaining({
        tokenType: 'prompt',
        inputTokens: -10,
        writeTokens: -100,
        readTokens: -5,
      }),
    );
    expect(Transaction.create).toHaveBeenCalledWith(
      expect.objectContaining({
        tokenType: 'completion',
        rawAmount: -50,
      }),
    );
    // Verify transactions were created
    const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
    expect(transactions).toHaveLength(2);

    // Check completion transaction
    expect(transactions[0].tokenType).toBe('completion');
    expect(transactions[0].rawAmount).toBe(-50);

    // Check prompt transaction
    expect(transactions[1].tokenType).toBe('prompt');
    expect(transactions[1].inputTokens).toBe(-10);
    expect(transactions[1].writeTokens).toBe(-100);
    expect(transactions[1].readTokens).toBe(-5);

    // Verify result contains transaction info
    expect(result).toEqual({
      prompt: expect.objectContaining({
        user: userId.toString(),
        prompt: expect.any(Number),
      }),
      completion: expect.objectContaining({
        user: userId.toString(),
        completion: expect.any(Number),
      }),
    });

    // Verify balance was updated
    const balance = await Balance.findOne({ user: userId });
    expect(balance).toBeDefined();
    expect(balance.tokenCredits).toBeLessThan(10000); // Balance should be reduced
  });
});
@@ -104,6 +104,7 @@ const bedrockValues = {
  'amazon.nova-micro-v1:0': { prompt: 0.035, completion: 0.14 },
  'amazon.nova-lite-v1:0': { prompt: 0.06, completion: 0.24 },
  'amazon.nova-pro-v1:0': { prompt: 0.8, completion: 3.2 },
  'deepseek.r1': { prompt: 1.35, completion: 5.4 },
};

/**

@@ -118,10 +119,15 @@ const tokenValues = Object.assign(
    '4k': { prompt: 1.5, completion: 2 },
    '16k': { prompt: 3, completion: 4 },
    'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
    'o4-mini': { prompt: 1.1, completion: 4.4 },
    'o3-mini': { prompt: 1.1, completion: 4.4 },
    o3: { prompt: 10, completion: 40 },
    'o1-mini': { prompt: 1.1, completion: 4.4 },
    'o1-preview': { prompt: 15, completion: 60 },
    o1: { prompt: 15, completion: 60 },
    'gpt-4.1-nano': { prompt: 0.1, completion: 0.4 },
    'gpt-4.1-mini': { prompt: 0.4, completion: 1.6 },
    'gpt-4.1': { prompt: 2, completion: 8 },
    'gpt-4.5': { prompt: 75, completion: 150 },
    'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
    'gpt-4o': { prompt: 2.5, completion: 10 },

@@ -148,9 +154,16 @@ const tokenValues = Object.assign(
    /* cohere doesn't have rates for the older command models,
    so this was from https://artificialanalysis.ai/models/command-light/providers */
    command: { prompt: 0.38, completion: 0.38 },
    gemma: { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemma-2': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemma-3': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemma-3-27b': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemini-2.0-flash-lite': { prompt: 0.075, completion: 0.3 },
    'gemini-2.0-flash': { prompt: 0.1, completion: 0.4 },
    'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemini-2.5-pro': { prompt: 1.25, completion: 10 },
    'gemini-2.5-flash': { prompt: 0.15, completion: 3.5 },
    'gemini-2.5': { prompt: 0, completion: 0 }, // Free for a period of time
    'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 },
    'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 },
    'gemini-1.5': { prompt: 2.5, completion: 10 },

@@ -163,7 +176,17 @@ const tokenValues = Object.assign(
    'grok-2-1212': { prompt: 2.0, completion: 10.0 },
    'grok-2-latest': { prompt: 2.0, completion: 10.0 },
    'grok-2': { prompt: 2.0, completion: 10.0 },
    'grok-3-mini-fast': { prompt: 0.4, completion: 4 },
    'grok-3-mini': { prompt: 0.3, completion: 0.5 },
    'grok-3-fast': { prompt: 5.0, completion: 25.0 },
    'grok-3': { prompt: 3.0, completion: 15.0 },
    'grok-beta': { prompt: 5.0, completion: 15.0 },
    'mistral-large': { prompt: 2.0, completion: 6.0 },
    'pixtral-large': { prompt: 2.0, completion: 6.0 },
    'mistral-saba': { prompt: 0.2, completion: 0.6 },
    codestral: { prompt: 0.3, completion: 0.9 },
    'ministral-8b': { prompt: 0.1, completion: 0.1 },
    'ministral-3b': { prompt: 0.04, completion: 0.04 },
  },
  bedrockValues,
);
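
// Illustrative lookup (a sketch; the units appear to track USD per 1M tokens,
// e.g. 'gpt-4o' at 2.5 / 10, but treat that reading as an assumption):
//   const key = getValueKey('gpt-4.1-mini-2024-08-06'); // 'gpt-4.1-mini'
//   tokenValues[key].prompt; // 0.4
//   tokenValues[key].completion; // 1.6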

@@ -205,6 +228,14 @@ const getValueKey = (model, endpoint) => {
    return 'gpt-3.5-turbo-1106';
  } else if (modelName.includes('gpt-3.5')) {
    return '4k';
  } else if (modelName.includes('o4-mini')) {
    return 'o4-mini';
  } else if (modelName.includes('o4')) {
    return 'o4';
  } else if (modelName.includes('o3-mini')) {
    return 'o3-mini';
  } else if (modelName.includes('o3')) {
    return 'o3';
  } else if (modelName.includes('o1-preview')) {
    return 'o1-preview';
  } else if (modelName.includes('o1-mini')) {

@@ -213,6 +244,12 @@ const getValueKey = (model, endpoint) => {
    return 'o1';
  } else if (modelName.includes('gpt-4.5')) {
    return 'gpt-4.5';
  } else if (modelName.includes('gpt-4.1-nano')) {
    return 'gpt-4.1-nano';
  } else if (modelName.includes('gpt-4.1-mini')) {
    return 'gpt-4.1-mini';
  } else if (modelName.includes('gpt-4.1')) {
    return 'gpt-4.1';
  } else if (modelName.includes('gpt-4o-2024-05-13')) {
    return 'gpt-4o-2024-05-13';
  } else if (modelName.includes('gpt-4o-mini')) {
@@ -60,6 +60,30 @@ describe('getValueKey', () => {
    expect(getValueKey('gpt-4.5-0125')).toBe('gpt-4.5');
  });

  it('should return "gpt-4.1" for model type of "gpt-4.1"', () => {
    expect(getValueKey('gpt-4.1-preview')).toBe('gpt-4.1');
    expect(getValueKey('gpt-4.1-2024-08-06')).toBe('gpt-4.1');
    expect(getValueKey('gpt-4.1-2024-08-06-0718')).toBe('gpt-4.1');
    expect(getValueKey('openai/gpt-4.1')).toBe('gpt-4.1');
    expect(getValueKey('openai/gpt-4.1-2024-08-06')).toBe('gpt-4.1');
    expect(getValueKey('gpt-4.1-turbo')).toBe('gpt-4.1');
    expect(getValueKey('gpt-4.1-0125')).toBe('gpt-4.1');
  });

  it('should return "gpt-4.1-mini" for model type of "gpt-4.1-mini"', () => {
    expect(getValueKey('gpt-4.1-mini-preview')).toBe('gpt-4.1-mini');
    expect(getValueKey('gpt-4.1-mini-2024-08-06')).toBe('gpt-4.1-mini');
    expect(getValueKey('openai/gpt-4.1-mini')).toBe('gpt-4.1-mini');
    expect(getValueKey('gpt-4.1-mini-0125')).toBe('gpt-4.1-mini');
  });

  it('should return "gpt-4.1-nano" for model type of "gpt-4.1-nano"', () => {
    expect(getValueKey('gpt-4.1-nano-preview')).toBe('gpt-4.1-nano');
    expect(getValueKey('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano');
    expect(getValueKey('openai/gpt-4.1-nano')).toBe('gpt-4.1-nano');
    expect(getValueKey('gpt-4.1-nano-0125')).toBe('gpt-4.1-nano');
  });

  it('should return "gpt-4o" for model type of "gpt-4o"', () => {
    expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o');
    expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o');

@@ -141,6 +165,15 @@ describe('getMultiplier', () => {
    );
  });

  it('should return correct multipliers for o4-mini and o3', () => {
    ['o4-mini', 'o3'].forEach((model) => {
      const prompt = getMultiplier({ model, tokenType: 'prompt' });
      const completion = getMultiplier({ model, tokenType: 'completion' });
      expect(prompt).toBe(tokenValues[model].prompt);
      expect(completion).toBe(tokenValues[model].completion);
    });
  });

  it('should return defaultRate if tokenType is provided but not found in tokenValues', () => {
    expect(getMultiplier({ valueKey: '8k', tokenType: 'unknownType' })).toBe(defaultRate);
  });

@@ -185,6 +218,52 @@ describe('getMultiplier', () => {
    );
  });

  it('should return the correct multiplier for gpt-4.1', () => {
    const valueKey = getValueKey('gpt-4.1-2024-08-06');
    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4.1'].prompt);
    expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
      tokenValues['gpt-4.1'].completion,
    );
    expect(getMultiplier({ model: 'gpt-4.1-preview', tokenType: 'prompt' })).toBe(
      tokenValues['gpt-4.1'].prompt,
    );
    expect(getMultiplier({ model: 'openai/gpt-4.1', tokenType: 'completion' })).toBe(
      tokenValues['gpt-4.1'].completion,
    );
  });

  it('should return the correct multiplier for gpt-4.1-mini', () => {
    const valueKey = getValueKey('gpt-4.1-mini-2024-08-06');
    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(
      tokenValues['gpt-4.1-mini'].prompt,
    );
    expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
      tokenValues['gpt-4.1-mini'].completion,
    );
    expect(getMultiplier({ model: 'gpt-4.1-mini-preview', tokenType: 'prompt' })).toBe(
      tokenValues['gpt-4.1-mini'].prompt,
    );
    expect(getMultiplier({ model: 'openai/gpt-4.1-mini', tokenType: 'completion' })).toBe(
      tokenValues['gpt-4.1-mini'].completion,
    );
  });

  it('should return the correct multiplier for gpt-4.1-nano', () => {
    const valueKey = getValueKey('gpt-4.1-nano-2024-08-06');
    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(
      tokenValues['gpt-4.1-nano'].prompt,
    );
    expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
      tokenValues['gpt-4.1-nano'].completion,
    );
    expect(getMultiplier({ model: 'gpt-4.1-nano-preview', tokenType: 'prompt' })).toBe(
      tokenValues['gpt-4.1-nano'].prompt,
    );
    expect(getMultiplier({ model: 'openai/gpt-4.1-nano', tokenType: 'completion' })).toBe(
      tokenValues['gpt-4.1-nano'].completion,
    );
  });

  it('should return the correct multiplier for gpt-4o-mini', () => {
    const valueKey = getValueKey('gpt-4o-mini-2024-07-18');
    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(

@@ -288,7 +367,7 @@ describe('AWS Bedrock Model Tests', () => {
  });

  describe('Deepseek Model Tests', () => {
    const deepseekModels = ['deepseek-chat', 'deepseek-coder', 'deepseek-reasoner', 'deepseek.r1'];

    it('should return the correct prompt multipliers for all models', () => {
      const results = deepseekModels.map((model) => {

@@ -348,9 +427,11 @@ describe('getCacheMultiplier', () => {

  it('should derive the valueKey from the model if not provided', () => {
    expect(getCacheMultiplier({ cacheType: 'write', model: 'claude-3-5-sonnet-20240620' })).toBe(
      cacheTokenValues['claude-3-5-sonnet'].write,
    );
    expect(getCacheMultiplier({ cacheType: 'read', model: 'claude-3-haiku-20240307' })).toBe(
      cacheTokenValues['claude-3-haiku'].read,
    );
  });

  it('should return null if only model or cacheType is missing', () => {

@@ -371,10 +452,10 @@ describe('getCacheMultiplier', () => {
    };
    expect(
      getCacheMultiplier({ model: 'custom-model', cacheType: 'write', endpointTokenConfig }),
    ).toBe(endpointTokenConfig['custom-model'].write);
    expect(
      getCacheMultiplier({ model: 'custom-model', cacheType: 'read', endpointTokenConfig }),
    ).toBe(endpointTokenConfig['custom-model'].read);
  });

  it('should return null if model is not found in endpointTokenConfig', () => {

@@ -395,18 +476,21 @@ describe('getCacheMultiplier', () => {
        model: 'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0',
        cacheType: 'write',
      }),
    ).toBe(cacheTokenValues['claude-3-5-sonnet'].write);
    expect(
      getCacheMultiplier({
        model: 'bedrock/anthropic.claude-3-haiku-20240307-v1:0',
        cacheType: 'read',
      }),
    ).toBe(cacheTokenValues['claude-3-haiku'].read);
  });
});

describe('Google Model Tests', () => {
  const googleModels = [
    'gemini-2.5-pro-preview-05-06',
    'gemini-2.5-flash-preview-04-17',
    'gemini-2.5-exp',
    'gemini-2.0-flash-lite-preview-02-05',
    'gemini-2.0-flash-001',
    'gemini-2.0-flash-exp',

@@ -444,6 +528,9 @@ describe('Google Model Tests', () => {

  it('should map to the correct model keys', () => {
    const expected = {
      'gemini-2.5-pro-preview-05-06': 'gemini-2.5-pro',
      'gemini-2.5-flash-preview-04-17': 'gemini-2.5-flash',
      'gemini-2.5-exp': 'gemini-2.5',
      'gemini-2.0-flash-lite-preview-02-05': 'gemini-2.0-flash-lite',
      'gemini-2.0-flash-001': 'gemini-2.0-flash',
      'gemini-2.0-flash-exp': 'gemini-2.0-flash',

@@ -488,24 +575,92 @@ describe('Grok Model Tests - Pricing', () => {
  test('should return correct prompt and completion rates for Grok vision models', () => {
    const models = ['grok-2-vision-1212', 'grok-2-vision', 'grok-2-vision-latest'];
    models.forEach((model) => {
      expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(
        tokenValues['grok-2-vision'].prompt,
      );
      expect(getMultiplier({ model, tokenType: 'completion' })).toBe(
        tokenValues['grok-2-vision'].completion,
      );
    });
  });

  test('should return correct prompt and completion rates for Grok text models', () => {
    const models = ['grok-2-1212', 'grok-2', 'grok-2-latest'];
    models.forEach((model) => {
      expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(tokenValues['grok-2'].prompt);
      expect(getMultiplier({ model, tokenType: 'completion' })).toBe(
        tokenValues['grok-2'].completion,
      );
    });
  });

  test('should return correct prompt and completion rates for Grok beta models', () => {
    expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'prompt' })).toBe(
      tokenValues['grok-vision-beta'].prompt,
    );
    expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'completion' })).toBe(
      tokenValues['grok-vision-beta'].completion,
    );
    expect(getMultiplier({ model: 'grok-beta', tokenType: 'prompt' })).toBe(
      tokenValues['grok-beta'].prompt,
    );
    expect(getMultiplier({ model: 'grok-beta', tokenType: 'completion' })).toBe(
      tokenValues['grok-beta'].completion,
    );
  });

  test('should return correct prompt and completion rates for Grok 3 models', () => {
    expect(getMultiplier({ model: 'grok-3', tokenType: 'prompt' })).toBe(
      tokenValues['grok-3'].prompt,
    );
    expect(getMultiplier({ model: 'grok-3', tokenType: 'completion' })).toBe(
      tokenValues['grok-3'].completion,
    );
    expect(getMultiplier({ model: 'grok-3-fast', tokenType: 'prompt' })).toBe(
      tokenValues['grok-3-fast'].prompt,
    );
    expect(getMultiplier({ model: 'grok-3-fast', tokenType: 'completion' })).toBe(
      tokenValues['grok-3-fast'].completion,
    );
    expect(getMultiplier({ model: 'grok-3-mini', tokenType: 'prompt' })).toBe(
      tokenValues['grok-3-mini'].prompt,
    );
    expect(getMultiplier({ model: 'grok-3-mini', tokenType: 'completion' })).toBe(
      tokenValues['grok-3-mini'].completion,
    );
    expect(getMultiplier({ model: 'grok-3-mini-fast', tokenType: 'prompt' })).toBe(
      tokenValues['grok-3-mini-fast'].prompt,
    );
    expect(getMultiplier({ model: 'grok-3-mini-fast', tokenType: 'completion' })).toBe(
      tokenValues['grok-3-mini-fast'].completion,
    );
  });

  test('should return correct prompt and completion rates for Grok 3 models with prefixes', () => {
    expect(getMultiplier({ model: 'xai/grok-3', tokenType: 'prompt' })).toBe(
      tokenValues['grok-3'].prompt,
    );
    expect(getMultiplier({ model: 'xai/grok-3', tokenType: 'completion' })).toBe(
      tokenValues['grok-3'].completion,
    );
    expect(getMultiplier({ model: 'xai/grok-3-fast', tokenType: 'prompt' })).toBe(
      tokenValues['grok-3-fast'].prompt,
    );
    expect(getMultiplier({ model: 'xai/grok-3-fast', tokenType: 'completion' })).toBe(
      tokenValues['grok-3-fast'].completion,
    );
    expect(getMultiplier({ model: 'xai/grok-3-mini', tokenType: 'prompt' })).toBe(
      tokenValues['grok-3-mini'].prompt,
    );
    expect(getMultiplier({ model: 'xai/grok-3-mini', tokenType: 'completion' })).toBe(
      tokenValues['grok-3-mini'].completion,
    );
    expect(getMultiplier({ model: 'xai/grok-3-mini-fast', tokenType: 'prompt' })).toBe(
      tokenValues['grok-3-mini-fast'].prompt,
    );
    expect(getMultiplier({ model: 'xai/grok-3-mini-fast', tokenType: 'completion' })).toBe(
      tokenValues['grok-3-mini-fast'].completion,
    );
  });
});
});
@@ -1,6 +1,6 @@
const bcrypt = require('bcryptjs');
const { getBalanceConfig } = require('~/server/services/Config');
const signPayload = require('~/server/services/signPayload');
const { isEnabled } = require('~/server/utils/handleText');
const Balance = require('./Balance');
const User = require('./User');

@@ -13,11 +13,9 @@ const User = require('./User');
 */
const getUserById = async function (userId, fieldsToSelect = null) {
  const query = User.findById(userId);

  if (fieldsToSelect) {
    query.select(fieldsToSelect);
  }

  return await query.lean();
};

@@ -32,7 +30,6 @@ const findUser = async function (searchCriteria, fieldsToSelect = null) {
  if (fieldsToSelect) {
    query.select(fieldsToSelect);
  }

  return await query.lean();
};

@@ -58,11 +55,12 @@ const updateUser = async function (userId, updateData) {
 * Creates a new user, optionally with a TTL of 1 week.
 * @param {MongoUser} data - The user data to be created, must contain user_id.
 * @param {boolean} [disableTTL=true] - Whether to disable the TTL. Defaults to `true`.
 * @param {boolean} [returnUser=false] - Whether to return the created user object.
 * @returns {Promise<ObjectId|MongoUser>} A promise that resolves to the created user document ID or user object.
 * @throws {Error} If a user with the same user_id already exists.
 */
const createUser = async (data, disableTTL = true, returnUser = false) => {
  const balance = await getBalanceConfig();
  const userData = {
    ...data,
    expiresAt: disableTTL ? null : new Date(Date.now() + 604800 * 1000), // 1 week in milliseconds

@@ -74,13 +72,27 @@ const createUser = async (data, disableTTL = true, returnUser = false) => {

  const user = await User.create(userData);

  // If balance is enabled, create or update a balance record for the user
  // using the configured balance settings
  if (balance?.enabled && balance?.startBalance) {
    const update = {
      $inc: { tokenCredits: balance.startBalance },
    };

    if (
      balance.autoRefillEnabled &&
      balance.refillIntervalValue != null &&
      balance.refillIntervalUnit != null &&
      balance.refillAmount != null
    ) {
      update.$set = {
        autoRefillEnabled: true,
        refillIntervalValue: balance.refillIntervalValue,
        refillIntervalUnit: balance.refillIntervalUnit,
        refillAmount: balance.refillAmount,
      };
    }

    await Balance.findOneAndUpdate({ user: user._id }, update, { upsert: true, new: true }).lean();
  }
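
  // Illustrative shape of the balance config consumed above (field names are
  // the ones this function reads; the values are made up):
  // {
  //   enabled: true,
  //   startBalance: 20000,
  //   autoRefillEnabled: true,
  //   refillIntervalValue: 30,
  //   refillIntervalUnit: 'days',
  //   refillAmount: 10000,
  // }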

  if (returnUser) {

@@ -123,7 +135,7 @@ const expires = eval(SESSION_EXPIRY) ?? 1000 * 60 * 15;
/**
 * Generates a JWT token for a given user.
 *
 * @param {MongoUser} user - The user for whom the token is being generated.
 * @returns {Promise<string>} A promise that resolves to a JWT token.
 */
const generateToken = async (user) => {

@@ -146,7 +158,7 @@ const generateToken = async (user) => {
/**
 * Compares the provided password with the user's password.
 *
 * @param {MongoUser} user - The user to compare the password for.
 * @param {string} candidatePassword - The password to test against the user's password.
 * @returns {Promise<boolean>} A promise that resolves to a boolean indicating if the password matches.
 */