Mirror of https://github.com/danny-avila/LibreChat.git
📶 fix: Mobile Stylings (#2639)
* chore: remove unused mobile nav
* fix: mobile nav for 'more' and 'archive' buttons div
* refactor(useTextarea): rewrite handleKeyUp for backwards compatibility
* experimental: add processing delay to azure streams for better performance/UX
* experimental: adjust gpt-3 azureDelay
* fix: perplexity titles
parent b6d6343f54
commit 3c5fa40435
7 changed files with 49 additions and 105 deletions
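The useTextarea refactor listed in the commit message is a client-side change whose hunk is not reproduced below; the hunks shown here come from the server-side API client. As a rough, hypothetical sketch of the backwards-compatibility pattern that bullet describes (not the actual LibreChat hook), a key-up handler can check both event.key and the deprecated event.keyCode:

```js
// Hypothetical sketch only; the real useTextarea change is not among the hunks on this page.
// Accepting both `event.key` and the deprecated `event.keyCode` keeps the handler working
// with older browsers and synthetic events that only populate `keyCode`.
function handleKeyUp(event, onSubmit) {
  const isEnter = event.key === 'Enter' || event.keyCode === 13;

  if (isEnter && !event.shiftKey) {
    onSubmit(); // e.g. send the message instead of inserting a newline
  }
}

// Usage with a plain DOM listener:
document.querySelector('textarea')?.addEventListener('keyup', (event) =>
  handleKeyUp(event, () => console.log('submit message')),
);
```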
@@ -26,11 +26,11 @@ const {
   createContextHandlers,
 } = require('./prompts');
 const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const { isEnabled, sleep } = require('~/server/utils');
 const { handleOpenAIErrors } = require('./tools/util');
 const spendTokens = require('~/models/spendTokens');
 const { createLLM, RunManager } = require('./llm');
 const ChatGPTClient = require('./ChatGPTClient');
-const { isEnabled } = require('~/server/utils');
 const { summaryBuffer } = require('./memory');
 const { runTitleChain } = require('./chains');
 const { tokenSplit } = require('./document');
@@ -1079,11 +1079,8 @@ ${convo}
       ...opts,
     });
 
-    /* hacky fixes for Mistral AI API:
-      - Re-orders system message to the top of the messages payload, as not allowed anywhere else
-      - If there is only one message and it's a system message, change the role to user
-    */
-    if (opts.baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
+    /* Re-orders system message to the top of the messages payload, as not allowed anywhere else */
+    if (opts.baseURL.includes('api.mistral.ai') && modelOptions.messages) {
       const { messages } = modelOptions;
 
       const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');
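The statements that actually move the system message sit in the lines elided between this hunk and the next. As a minimal standalone sketch of that kind of reordering (an assumption about the elided code, building on the findIndex call shown above; the helper name is hypothetical, not taken from the repo):

```js
// Sketch only: the real reordering lives in lines not shown in this diff.
// Moves a 'system' message to index 0, since the Mistral API only accepts it there.
function moveSystemMessageToTop(messages) {
  const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');
  if (systemMessageIndex > 0) {
    const [systemMessage] = messages.splice(systemMessageIndex, 1);
    messages.unshift(systemMessage);
  }
  return messages;
}

console.log(
  moveSystemMessageToTop([
    { role: 'user', content: 'Hi' },
    { role: 'system', content: 'You are helpful.' },
  ]),
);
// -> system message first, user message second
```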
@@ -1094,10 +1091,16 @@ ${convo}
       }
 
       modelOptions.messages = messages;
     }
 
-    if (messages.length === 1 && messages[0].role === 'system') {
-      modelOptions.messages[0].role = 'user';
-    }
+    /* If there is only one message and it's a system message, change the role to user */
+    if (
+      (opts.baseURL.includes('api.mistral.ai') || opts.baseURL.includes('api.perplexity.ai')) &&
+      modelOptions.messages &&
+      modelOptions.messages.length === 1 &&
+      modelOptions.messages[0]?.role === 'system'
+    ) {
+      modelOptions.messages[0].role = 'user';
+    }
 
     if (this.options.addParams && typeof this.options.addParams === 'object') {
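With the new guard, the lone-system-message fallback only applies to Mistral and Perplexity endpoints; this is likely the change behind the 'fix: perplexity titles' bullet, since title generation can produce exactly that kind of payload. A small self-contained illustration (hypothetical payload, condition copied from the hunk above):

```js
// Hypothetical title-request payload used to illustrate the guard above.
const opts = { baseURL: 'https://api.perplexity.ai' };
const modelOptions = {
  messages: [{ role: 'system', content: 'Write a short title for this conversation.' }],
};

if (
  (opts.baseURL.includes('api.mistral.ai') || opts.baseURL.includes('api.perplexity.ai')) &&
  modelOptions.messages &&
  modelOptions.messages.length === 1 &&
  modelOptions.messages[0]?.role === 'system'
) {
  modelOptions.messages[0].role = 'user';
}

console.log(modelOptions.messages[0].role); // 'user'
```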
@@ -1151,6 +1154,7 @@ ${convo}
         }
       });
 
+      const azureDelay = this.modelOptions.model?.includes('gpt-4') ? 30 : 17;
       for await (const chunk of stream) {
         const token = chunk.choices[0]?.delta?.content || '';
         intermediateReply += token;
@@ -1159,6 +1163,10 @@ ${convo}
           stream.controller.abort();
           break;
         }
+
+        if (this.azure) {
+          await sleep(azureDelay);
+        }
       }
 
       if (!UnexpectedRoleError) {
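Taken together, the last two hunks pace how quickly Azure stream chunks are consumed: roughly a 30 ms pause per chunk for gpt-4 models and 17 ms otherwise, using the sleep helper imported in the first hunk. A self-contained sketch of the same per-chunk throttling pattern (the mock stream and wrapper below are illustrative, not LibreChat's actual client code):

```js
// Standalone sketch of per-chunk throttling on a streamed completion.
// The mock stream and `drainStream` wrapper are illustrative only.
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

async function* mockStream() {
  for (const token of ['Hel', 'lo', ' wor', 'ld']) {
    yield { choices: [{ delta: { content: token } }] };
  }
}

async function drainStream(stream, { isAzure = false, model = 'gpt-4' } = {}) {
  // Mirrors the diff: a longer pause for gpt-4 models, a shorter one otherwise.
  const azureDelay = model?.includes('gpt-4') ? 30 : 17;
  let intermediateReply = '';

  for await (const chunk of stream) {
    const token = chunk.choices[0]?.delta?.content || '';
    intermediateReply += token;

    if (isAzure) {
      await sleep(azureDelay); // pace chunk handling for smoother streaming UX
    }
  }

  return intermediateReply;
}

drainStream(mockStream(), { isAzure: true }).then(console.log); // 'Hello world'
```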