diff --git a/api/app/clients/PluginsClient.js b/api/app/clients/PluginsClient.js
index 2e84304c3a..f0bf964b2c 100644
--- a/api/app/clients/PluginsClient.js
+++ b/api/app/clients/PluginsClient.js
@@ -132,14 +132,13 @@ Only respond with your conversational reply to the following User Message:
   }
 
   getFunctionModelName(input) {
-    const prefixMap = {
-      'gpt-4': 'gpt-4-0613',
-      'gpt-4-32k': 'gpt-4-32k-0613',
-      'gpt-3.5-turbo': 'gpt-3.5-turbo-0613',
-    };
-
-    const prefix = Object.keys(prefixMap).find((key) => input.startsWith(key));
-    return prefix ? prefixMap[prefix] : 'gpt-3.5-turbo-0613';
+    if (input.startsWith('gpt-3.5-turbo')) {
+      return 'gpt-3.5-turbo';
+    } else if (input.startsWith('gpt-4')) {
+      return 'gpt-4';
+    } else {
+      return 'gpt-3.5-turbo';
+    }
   }
 
   getBuildMessagesOptions(opts) {
@@ -184,7 +183,9 @@ Only respond with your conversational reply to the following User Message:
     const model = this.createLLM(modelOptions, configOptions);
 
     if (this.options.debug) {
-      console.debug(`<-----Agent Model: ${model.modelName} | Temp: ${model.temperature}----->`);
+      console.debug(
+        `<-----Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}----->`,
+      );
     }
 
     this.availableTools = await loadTools({
@@ -328,7 +329,12 @@ Only respond with your conversational reply to the following User Message:
       return;
     }
 
-    if (!responseMessage.text.includes(observation)) {
+    // Extract the image file path from the observation
+    const observedImagePath = observation.match(/\(\/images\/.*\.\w*\)/g)[0];
+
+    // Check if the responseMessage already includes the image file path
+    if (!responseMessage.text.includes(observedImagePath)) {
+      // If the image file path is not found, append the whole observation
       responseMessage.text += '\n' + observation;
       if (this.options.debug) {
         console.debug('added image from intermediateSteps');
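
Review note on the third hunk (not part of the diff): observation.match(/\(\/images\/.*\.\w*\)/g) returns null when the observation contains no "(/images/...)" link, so indexing [0] directly throws a TypeError unless earlier code already guarantees a match. A minimal defensive sketch, assuming no such guarantee exists; the variable names mirror the diff, but the guard itself is illustrative rather than part of the change:

    // Extract the image file path, tolerating observations that
    // contain no "(/images/...)" markdown link at all.
    const imagePathMatches = observation.match(/\(\/images\/.*\.\w*\)/g);
    const observedImagePath = imagePathMatches ? imagePathMatches[0] : null;

    // Append the whole observation when the image path is missing from
    // the response text, or could not be extracted in the first place.
    if (!observedImagePath || !responseMessage.text.includes(observedImagePath)) {
      responseMessage.text += '\n' + observation;
    }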