LibreChat/api/app/clients/output_parsers/handleOutputs.js
Danny Avila 4ca43fb53d
refactor: Encrypt & Expire User Provided Keys, feat: Rate Limiting (#874)
* docs: make_your_own.md formatting fix for mkdocs

* feat: add express-mongo-sanitize
feat: add login/registration rate limiting

* chore: remove unnecessary console log

* wip: remove token handling from localStorage to encrypted DB solution

* refactor: minor change to UserService

* fix mongo query and add keys route to server

* fix backend controllers and simplify schema/crud

* refactor: rename token to key to separate from access/refresh tokens, setTokenDialog -> setKeyDialog

* refactor(schemas): TEndpointOption token -> key

* refactor(api): use new encrypted key retrieval system

* fix(SetKeyDialog): fix key prop error

* fix(abortMiddleware): pass random UUID if messageId is not generated yet for proper error display on frontend

* fix(getUserKey): wrong prop passed in arg, adds error handling

* fix: prevent message without conversationId from saving to DB, prevents branching on the frontend to a new top-level branch

* refactor: change wording of multiple display messages

* refactor(checkExpiry -> checkUserKeyExpiry): move to UserService file

* fix: type imports from common

* refactor(SubmitButton): convert to TS

* refactor(key.ts): change localStorage map key name

* refactor: add new custom tailwind classes to better match openAI colors

* chore: remove unnecessary warning and catch ScreenShot error

* refactor: move userKey frontend logic to hooks and remove use of localStorage and instead query the DB

* refactor: invalidate correct query key, memoize userKey hook, conditionally render SetKeyDialog to avoid unnecessary calls, refactor SubmitButton props and useEffect for showing 'provide key first'

* fix(SetKeyDialog): use enum-like object for expiry values
feat(Dropdown): add optionsClassName to dynamically change dropdown options container classes

* fix: handle edge case where user had provided a key but the server changes to env variable for keys

* refactor(OpenAI/titleConvo): move titling to client to retain authorized credentials in message lifecycle for titling

* fix(azure): handle user_provided keys correctly for azure

* feat: send user Id to OpenAI to differentiate users in completion requests

* refactor(OpenAI/titleConvo): adding tokens helps keep the LLM from using the language in the title response

* feat: add delete endpoint for keys

* chore: remove throttling of title

* feat: add 'Data controls' to Settings, add 'Revoke' keys feature in Key Dialog and Data controls

* refactor: reorganize PluginsClient files in langchain format

* feat: use langchain for titling convos

* chore: cleanup titling convo, with fallback to original method, escape braces, use only snippet for language detection

* refactor: move helper functions to appropriate langchain folders for reusability

* fix: userProvidesKey handling for gptPlugins

* fix: frontend handling of plugins key

* chore: cleanup logging and ts-ignore SSE

* fix: forwardRef misuse in DangerButton

* fix(GoogleConfig/FileUpload): localize errors and simplify validation with zod

* fix: cleanup google logging and fix user provided key handling

* chore: remove titling from google

* chore: removing logging from browser endpoint

* wip: fix menu flicker

* feat: useLocalStorage hook

* feat: add Tooltip for UI

* refactor(EndpointMenu): utilize Tooltip and useLocalStorage, remove old 'New Chat' slide-over

* fix(e2e): use testId for endpoint menu trigger

* chore: final touches to EndpointMenu before future refactor to declutter component

* refactor(localization): change select endpoint to open menu and add translations

* chore: add final prop to error message response

* ci: minor edits to facilitate testing

* ci: new e2e test which tests for new key setting/revoking features
2023-09-06 10:46:27 -04:00

88 lines
3.1 KiB
JavaScript
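
/**
 * Helpers for turning an agent's intermediate steps and preliminary output into
 * follow-up prompts: an error-recovery input and a final-reply prompt prefix.
 */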

const { instructions, imageInstructions, errorInstructions } = require('../prompts');
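
/**
 * Serializes intermediate steps into a quoted "Internal thoughts & actions" log.
 * Each step is expected to look like a LangChain intermediate step ({ action, observation });
 * when `functionsAgent` is true, the tool name and input are read from `step.action`
 * rather than from the pre-built `step.action.log`.
 */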
function getActions(actions = [], functionsAgent = false) {
  let output = 'Internal thoughts & actions taken:\n"';

  if (actions[0]?.action && functionsAgent) {
    actions = actions.map((step) => ({
      log: `Action: ${step.action?.tool || ''}\nInput: ${
        JSON.stringify(step.action?.toolInput) || ''
      }\nObservation: ${step.observation}`,
    }));
  } else if (actions[0]?.action) {
    actions = actions.map((step) => ({
      log: `${step.action.log}\nObservation: ${step.observation}`,
    }));
  }

  actions.forEach((actionObj, index) => {
    output += `${actionObj.log}`;
    if (index < actions.length - 1) {
      output += '\n';
    }
  });

  return output + '"';
}
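
/**
 * Builds the follow-up input sent back to the model after an error, combining an
 * error-specific instruction with the serialized actions and the human's last message.
 */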
function buildErrorInput({ message, errorMessage, actions, functionsAgent }) {
  const log = errorMessage.includes('Could not parse LLM output:')
    ? `A formatting error occurred with your response to the human's last message. You didn't follow the formatting instructions. Remember to ${instructions}`
    : `You encountered an error while replying to the human's last message. Attempt to answer again or admit an answer cannot be given.\nError: ${errorMessage}`;

  return `
${log}
${getActions(actions, functionsAgent)}
Human's last message: ${message}
`;
}
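
/**
 * Builds the prompt prefix for the final conversational reply from the agent's
 * preliminary answer, intermediate steps, and any error message.
 * Returns null when there is no usable output (e.g. the output is 'N/A' or undefined).
 */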
function buildPromptPrefix({ result, message, functionsAgent }) {
  if ((result.output && result.output.includes('N/A')) || result.output === undefined) {
    return null;
  }

  if (
    result?.intermediateSteps?.length === 1 &&
    result?.intermediateSteps[0]?.action?.toolInput === 'N/A'
  ) {
    return null;
  }

  const internalActions =
    result?.intermediateSteps?.length > 0
      ? getActions(result.intermediateSteps, functionsAgent)
      : 'Internal Actions Taken: None';

  const toolBasedInstructions = internalActions.toLowerCase().includes('image')
    ? imageInstructions
    : '';

  const errorMessage = result.errorMessage ? `${errorInstructions} ${result.errorMessage}\n` : '';

  const preliminaryAnswer =
    result.output?.length > 0 ? `Preliminary Answer: "${result.output.trim()}"` : '';
  const prefix = preliminaryAnswer
    ? 'review and improve the answer you generated using plugins in response to the User Message below. The user hasn\'t seen your answer or thoughts yet.'
    : 'respond to the User Message below based on your preliminary thoughts & actions.';

  return `As a helpful AI Assistant, ${prefix}${errorMessage}\n${internalActions}
${preliminaryAnswer}
Reply conversationally to the User based on your ${
  preliminaryAnswer ? 'preliminary answer, ' : ''
}internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
${
  preliminaryAnswer
    ? ''
    : '\nIf there is an incomplete thought or action, you are expected to complete it in your response now.\n'
}You must cite sources if you are using any web links. ${toolBasedInstructions}
Only respond with your conversational reply to the following User Message:
"${message}"`;
}

module.exports = {
  buildErrorInput,
  buildPromptPrefix,
};
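
/**
 * Illustrative usage sketch (not part of the original module): the `sampleResult`
 * below is a hypothetical agent result with `output` and `intermediateSteps`;
 * all values are invented for demonstration only. The guard keeps this from
 * running when the module is required elsewhere.
 */
if (require.main === module) {
  const sampleResult = {
    output: 'The current weather in Paris is 21°C and sunny.',
    intermediateSteps: [
      {
        action: {
          tool: 'weather',
          toolInput: 'Paris',
          log: 'Action: weather\nAction Input: Paris',
        },
        observation: '21°C, sunny',
      },
    ],
  };

  // Prefix for the final conversational reply, built from the preliminary answer and steps.
  console.log(
    buildPromptPrefix({
      result: sampleResult,
      message: 'What is the weather in Paris?',
      functionsAgent: true,
    }),
  );

  // Follow-up input sent back to the model after a (hypothetical) output-parsing error.
  console.log(
    buildErrorInput({
      message: 'What is the weather in Paris?',
      errorMessage: 'Could not parse LLM output: missing Action header',
      actions: sampleResult.intermediateSteps,
      functionsAgent: true,
    }),
  );
}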