Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-26 21:28:50 +01:00
🤖 feat: Support o4-mini and o3 Models (#6928)
* feat: Add support for new OpenAI models (o4-mini, o3) and update related logic
* 🔧 fix: Rename 'resubmitFiles' to 'isResubmission' for consistency across types and hooks
* 🔧 fix: Replace hardcoded 'pending_req' with CacheKeys.PENDING_REQ for consistency in cache handling
* 🔧 fix: Update cache handling to use Time.ONE_MINUTE instead of hardcoded TTL and streamline imports
* 🔧 fix: Enhance message handling logic to correctly identify parent messages and streamline imports in useSSE
parent 88f4ad7c47
commit 52f146dd97
19 changed files with 69 additions and 53 deletions
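Two of the cache-related fixes in the commit message amount to swapping hardcoded values for named constants exported by librechat-data-provider. A minimal sketch of the presumed equivalence follows; the constant values are not shown in this commit and are inferred from the literals they replace in the diff below:

```js
// Sketch only: values inferred from the hardcoded literals this commit removes.
const { Time, CacheKeys } = require('librechat-data-provider');

// Replaces `const ttl = 1000 * 60 * 1;` (one minute in milliseconds).
console.log(Time.ONE_MINUTE); // presumably 60000

// Replaces the hardcoded namespace string 'pending_req'.
console.log(CacheKeys.PENDING_REQ); // presumably 'pending_req'
```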
api/cache/clearPendingReq.js (vendored): 9 changed lines
@@ -1,7 +1,8 @@
+const { Time, CacheKeys } = require('librechat-data-provider');
+const { isEnabled } = require('~/server/utils');
 const getLogStores = require('./getLogStores');
-const { isEnabled } = require('../server/utils');
+
 const { USE_REDIS, LIMIT_CONCURRENT_MESSAGES } = process.env ?? {};
-const ttl = 1000 * 60 * 1;
 
 /**
  * Clear or decrement pending requests from the cache.
@@ -28,7 +29,7 @@ const clearPendingReq = async ({ userId, cache: _cache }) => {
     return;
   }
 
-  const namespace = 'pending_req';
+  const namespace = CacheKeys.PENDING_REQ;
   const cache = _cache ?? getLogStores(namespace);
 
   if (!cache) {
@@ -39,7 +40,7 @@ const clearPendingReq = async ({ userId, cache: _cache }) => {
   const currentReq = +((await cache.get(key)) ?? 0);
 
   if (currentReq && currentReq >= 1) {
-    await cache.set(key, currentReq - 1, ttl);
+    await cache.set(key, currentReq - 1, Time.ONE_MINUTE);
   } else {
     await cache.delete(key);
   }
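For context, a hypothetical usage sketch of the updated helper: a concurrent-message limiter could bump the per-user counter before handling a request and call clearPendingReq when it finishes. Only the ({ userId, cache }) signature, the decrement-or-delete behavior, the getLogStores(CacheKeys.PENDING_REQ) store, and the one-minute TTL come from the diff above; the key shape, the module export, the path aliases, and the limiter wiring are assumptions for illustration (the helper also reads USE_REDIS and LIMIT_CONCURRENT_MESSAGES from the environment, and its guard logic is not shown in these hunks).

```js
// Hypothetical wiring, not part of this commit.
const { Time, CacheKeys } = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores'); // assumed path alias
const clearPendingReq = require('~/cache/clearPendingReq'); // assumed export

async function withPendingReq(userId, handler) {
  const cache = getLogStores(CacheKeys.PENDING_REQ);
  const key = `pending_req:${userId}`; // assumed key shape; not shown in the hunks

  // Count the in-flight request; the entry expires after one minute regardless.
  const current = +((await cache.get(key)) ?? 0);
  await cache.set(key, current + 1, Time.ONE_MINUTE);

  try {
    return await handler();
  } finally {
    // Decrement the counter (or delete it once it reaches zero).
    await clearPendingReq({ userId, cache });
  }
}
```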