🛠️ refactor: Model Loading and Custom Endpoint Error Handling (#1849)

* fix: handle non-assistant role ChatCompletionMessage error

* refactor(ModelController): decouple res.send from loading/caching models (sketched below)

* fix(custom/initializeClient): only fetch custom endpoint models if models.fetch is true (sketched below)

* refactor(validateModel): load models if modelsConfig is not yet cached (sketched below)

* docs: update on file upload rate limiting
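
For the ModelController refactor above, a minimal sketch of the decoupled shape, assuming hypothetical names (loadModels, modelController, fetchModelsConfig) and a plain Map standing in for the app's cache; the Express handler only awaits the loader and sends the result, so other code paths can reuse loadModels without an HTTP response in hand:

const cache = new Map(); // stand-in for the real config cache

async function fetchModelsConfig() {
  // placeholder for building the per-endpoint model lists
  return { openAI: ['gpt-3.5-turbo'], custom: [] };
}

async function loadModels() {
  const cached = cache.get('modelsConfig');
  if (cached) {
    return cached;
  }
  const modelsConfig = await fetchModelsConfig();
  cache.set('modelsConfig', modelsConfig);
  return modelsConfig;
}

async function modelController(req, res) {
  // the handler no longer knows how models are loaded or cached
  res.send(await loadModels());
}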
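
For the custom endpoint fix above, a sketch of the guard, assuming the endpoint config carries a models block with an optional fetch flag and a default list, and that fetchModels is whatever function actually calls the endpoint's /models route (names here are illustrative, not the project's exact schema):

async function resolveCustomModels(endpointConfig, fetchModels) {
  if (endpointConfig?.models?.fetch === true) {
    // only opt-in configs hit the remote /models route
    return await fetchModels(endpointConfig.baseURL);
  }
  // everyone else uses the statically configured list
  return endpointConfig?.models?.default ?? [];
}

When fetch is unset or false, no network request is made and the configured default list is returned as-is.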
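
For the validateModel refactor above, a sketch of the middleware fallback, reusing the hypothetical loadModels and cache from the first sketch; instead of rejecting a request that arrives before the models config has been cached, it loads the models on demand:

async function validateModel(req, res, next) {
  let modelsConfig = cache.get('modelsConfig'); // may be empty on a cold start
  if (!modelsConfig) {
    // load (and cache) instead of failing the request
    modelsConfig = await loadModels();
  }
  const { endpoint, model } = req.body;
  if (!modelsConfig[endpoint]?.includes(model)) {
    return res.status(400).send({ error: `Invalid model for ${endpoint}` });
  }
  next();
}
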
Danny Avila authored 2024-02-20 12:57:58 -05:00, committed by GitHub
parent 542494fad6
commit dd8038b375
7 changed files with 47 additions and 10 deletions

@@ -994,6 +994,7 @@ ${convo}
    }
    let chatCompletion;
    /** @type {OpenAI} */
    const openai = new OpenAI({
      apiKey: this.apiKey,
      ...opts,
@@ -1046,6 +1047,16 @@ ${convo}
      .on('error', (err) => {
        handleOpenAIErrors(err, errorCallback, 'stream');
      })
      .on('finalChatCompletion', (finalChatCompletion) => {
        const finalMessage = finalChatCompletion?.choices?.[0]?.message;
        if (finalMessage && finalMessage?.role !== 'assistant') {
          finalChatCompletion.choices[0].message.role = 'assistant';
        }
        if (finalMessage && !finalMessage?.content?.trim()) {
          finalChatCompletion.choices[0].message.content = intermediateReply;
        }
      })
      .on('finalMessage', (message) => {
        if (message?.role !== 'assistant') {
          stream.messages.push({ role: 'assistant', content: intermediateReply });
@@ -1117,6 +1128,9 @@ ${convo}
        err?.message?.includes(
          'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant',
        ) ||
        err?.message?.includes(
          'stream ended without producing a ChatCompletionMessage with role=assistant',
        ) ||
        err?.message?.includes('The server had an error processing your request') ||
        err?.message?.includes('missing finish_reason') ||
        err?.message?.includes('missing role') ||