feat: Plugins endpoint - Reverse Engineering of official Plugins features (#197)
* components for plugins in progress
* WIP: add langchain client implementation for tools/plugins
feat(langchain): add loadHistory function for loading chat history from database
feat(langchain): add saveMessageToDatabase function for saving chat messages to database
* chore(Memory.js): remove Memory.js file from the project directory.
* WIP: adding plugin functionality
---
fix(eslintrc.js): change arrow-parens rule to always require parentheses
refactor(agent.js): reorganize imports and add new imports
feat(agent.js): add support for saving and loading chat history
feat(agent.js): add support for saving messages to database
feat(agent.js): add ChatAgent class with initialize and sendMessage methods
fix(langchain): use getConvo and saveMessage functions from models.js instead of Conversation and Message models
feat(langchain): add user parameter to loadHistory and saveMessageToDatabase functions
chore(package.json): update langchain package version to 0.0.59 and add langchain script to run test2.js file
---
* WIP: testing agent initialization
* WIP: testing various agent methods
feat(agent.js): add CustomChatAgent class and initializeAgentExecutorWithOptions method
feat(customChatAgent.js): add CustomPromptTemplate and CustomOutputParser classes
refactor(langchain): uncomment code for input2 and options
feat(langchain): add input1 to read comments on a youtube video
docs(langchain): remove commented code and add whitespace to package.json
* WIP: feat: plugin endpoint, backend class working
* feat(agent.js): add support for Zapier NLA API key
feat(agent.js): add ZapierToolKit to tools if zapierApiKey is provided
feat(customAgent.js): change prompt prefix and suffix to reflect new task-based prompt
feat(test4.js): add test for new task-based prompt
* style(langchain): improve readability and add comments to code
feat(langchain): update prompt message for custom agent
fix(langchain): update message format in test4.js
* style(customAgent.js): remove unnecessary capitalization and rephrase some sentences
test(langchain): add test2 and test3 scripts to package.json
* chore(customAgent.js): fix typo in comment, change "an" to "identical"
* WIP: gpt-4 testing
* feat(langchain): add AIPluginTool and HumanTool classes
fix(langchain): remove zapierApiKey option from ChatAgent constructor
refactor(langchain): update langchain package to v0.0.64
misc(langchain): update test2, test3, and test4 scripts to use --inspect flag
* feat(langchain): add GoogleSearchAPI tool for searching the web using Google Custom Search API
* feat(askGPTPlugins.js): add support for progress callback in ask function
fix(agent.js): pass progress callback to sendApiMessage function
* refactor(agent.js): load tools from options and initialize them in constructor
feat(agent.js): add support for environment variable SERPAPI_API_KEY
feat(agent.js): add support for environment variable ZAPIER_NLA_API_KEY
docs(agent.js): remove commented out code and add comments to clarify code
* chore(langchain): remove unused files loadHistory.js and saveMessage.js
* feat(validateTools.js): add function to validate API keys for supported tools
* feat(langchain): update langchain package to version 0.0.66
feat(langchain): add support for GPT-4 model
fix(server/index.js): fix uncaughtException handler to ignore 'fetch failed' errors
* refactor(agent.js): remove FORMAT_INSTRUCTIONS and replace with a more concise message
refactor(agent.js): remove unused variable 'errorMessage'
refactor(agent.js): change 'result' variable initialization to an empty object instead of null
refactor(agent.js): change error message when response generation fails
refactor(agent.js): change output message when response generation fails
refactor(agent.js): change output message when response generation succeeds
* chore(langchain): comment out unused model in ChatAgent constructor
feat(langchain): add test5 script to package.json for running test5.js script
* refactor(agent.js): change response to answer and update message
refactor(test3.js, test5.js): remove commented out code and add comments
The changes in agent.js are to improve the message that is returned to the user. The word "response" has been changed to "answer" to better reflect the output of the chatbot. The message has also been updated to provide clearer instructions to the user. The changes in test3.js and test5.js are to remove commented out code and add comments to improve readability.
* docs: update links to LOCAL_INSTALL.md and defaultSystemMessage.md
fix: fix typo in BingAI/Settings.jsx
feat: add Dockerfile for app containerization
docs(google_search.md): add guide for setting up Google Custom Search API key and ID
* docs: update link to system message guidelines in Bing AI Settings component
docs: update link to system message guidelines in GOOGLE_SEARCH.md
feat: add JAILBREAK_INFO.md guide for Bing AI jailbreak mode system message guidelines
* style(api): remove unnecessary quotes and empty values from .env.example
style(agent.js): refactor getActions method to accept an input parameter
feat(agent.js): add handleChainEnd method to CustomChatAgent class
style(customAgent.js): add a new line to the end of the file
style(test5.js): comment out unused variable and update input1 variable
style(googleSearch.js): change tool name to kebab-case
* chore(langchain): comment out handleChainEnd method in agent.js
feat(langchain): add browser tool to ChatAgent in test2.js
feat(langchain): add modelOptions to ChatAgent in test2.js
feat(langchain): change question in input1 and request article review summary in test5.js
* fix(askGPTPlugins.js): fix syntax error by removing extra comma in parentMessageId field
feat(askGPTPlugins.js): add default value of null to parentMessageId parameter in ask function
* fix(askGPTPlugins.js): change endpoint string from 'GPTPlugins' to 'gptPlugins'
feat(endpoints.js): add support for gptPlugins endpoint
feat(PresetItem.jsx): add support for gptPlugins endpoint
feat(HoverButtons.jsx): add support for gptPlugins endpoint
feat(createPayload.ts): add support for gptPlugins endpoint
feat(types.ts): add gptPlugins endpoint to EModelEndpoint enum
feat(endpoints.js): add gptPlugins endpoint to availableEndpoints selector
feat(cleanupPreset.js): add support for gptPlugins endpoint
feat(getDefaultConversation.js): add support for gptPlugins endpoint
feat(getIcon.jsx): add support for gptPlugins endpoint
feat(handleSubmit.js): add support for gptPlugins endpoint
* refactor(agent.js): remove debug option from options object
refactor(agent.js): change tool name from 'google-search' to 'google'
refactor(agent.js): update description for 'google' tool
feat(agent.js): add support for citing sources when using web links in response message
fix(agent.js): update error message to not mention error to user
feat(agent.js): add unique message ids for user message and response message
feat(agent.js): limit number of search results to 5 in 'google' tool
refactor(validateTools.js): add console log to show valid tools
* feat(askGPTPlugins.js): add support for GPT-3.5-turbo model and validate model option
refactor(askGPTPlugins.js): remove unused imports and variables
refactor(askGPTPlugins.js): remove commented code
refactor(askGPTPlugins.js): remove unused parameters in ask function
feat(ask/index.js): add askGPTPlugins route to router
* feat(NewConversationMenu): add alpha tag to gptPlugins endpoint and rename it to Plugins
* refactor(askGPTPlugins.js): remove commented code and unused imports
feat(askGPTPlugins.js): add support for debug option in endpointOption
feat(askGPTPlugins.js): add support for chatGptLabel, promptPrefix, temperature, top_p, presence_penalty, and frequency_penalty in endpointOption
feat(askGPTPlugins.js): add support for sending plugin and pluginend events
feat(askGPTPlugins.js): add onAgentAction and onChainEnd callbacks to ChatAgent.sendMessage
refactor(titleConvo.js): comment out unused imports
refactor(validateTools.js): comment out console.log statement
refactor(agent.js): change saveMessage to include unfinished property
feat(agent.js): add endpoint property to saveConvo call in saveMessageToDatabase
feat(askGPTPlugins.js): add validateTools import and use it to validate endpointOption.tools before passing to ChatAgent constructor
feat(askGPTPlugins.js
* refactor(MessageHeader.jsx): extract plugins section into a separate variable and add support for gptPlugins endpoint
fix(MessageHeader.jsx): disable clicking on non-clickable endpoints
* components for plugins in progress
* feat(Plugin.jsx): add plugin prop to Plugin component and display plugin name
feat(Plugin.jsx): add loading state and display loading spinner
feat(Plugin.jsx): add Disclosure component to Plugin component
feat(Plugin.jsx): add Disclosure.Panel to Plugin component to display team pricing information
feat(Spinner.jsx): add classProp prop to Spinner component to allow for custom styling
feat(Landing.jsx): add Plugin component to Landing page for testing
testing gpt plugins
feat(plugins): Milestone commit
- Add formatAction function to format plugin actions.
- Add prefix.js file to store the prefix message for ChatAgent.
- Update ask function to include plugin object to store plugin data.
- Update onAgentAction and onChainEnd functions to format plugin data and send intermediate messages.
- Update response object to include plugin data.
The `handlers.js` file now includes a `formatAction` function that formats the action object for display in the UI. The `createOnProgress` function now returns a `sendIntermediateMessage` function that sends intermediate messages to the client.
feat (client): add support for plugins in messages
This commit adds support for plugins in messages. It includes changes to the `handlers.js`, `index.jsx`, `CodeBlock.jsx`, `Message.jsx`, `MessageHeader.jsx`, and `Plugin.jsx` files.
The `index.jsx` file now includes a `plugin` property in the `messageHandler` function. The `CodeBlock.jsx` file now includes a `plugin` property that determines the language of the code block. The `Message.jsx` file now includes a `Plugin` component that displays the plugin used in the message. The `MessageHeader.jsx` file now includes a `Plugins` component that displays the enabled plugins.
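
A rough sketch of the formatAction helper described above, assuming langchain's AgentAction shape ({ tool, toolInput, log }); the output field names here are illustrative, not the committed ones:

// Hypothetical sketch: format an agent action for display in the UI.
const formatAction = (action) => ({
  plugin: action.tool, // name of the tool the agent chose
  input: action.toolInput, // input the agent passed to that tool
  thought: action.log.split('\n')[0] // first line of the agent's reasoning
});

// Per the message above, createOnProgress also returns a sendIntermediateMessage
// function that streams these formatted steps to the client as they occur.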
feat(langchain): add OpenAICreateImage tool for generating images based on user prompts
fix(langchain): update validateTools to include create-image tool
fix(langchain): save plugin data to messageSchema
fix(server/routes/askGPTPlugins.js): save userMessage and response to messageSchema
feat(langchain): add SelfReflectionTool
Add a new tool to the LangChain agent, SelfReflectionTool, which enhances the agent's self-awareness by reflecting on its thoughts before taking action. The tool provides a space for the agent to explore and organize its ideas in response to the user's message.
Also, update the prefix message to reflect the changes in the agent's behavior and the way it should engage with the user. The prefix message now emphasizes the use of tools when necessary, and relying on the agent's knowledge for creative requests. It also provides clear instructions on how to use the 'Action' input and how to carry out tasks in the sequence written by the human.
Finally, update the OpenAICreateImage tool to return the image URL in markdown format. The tool replaces newlines and spaces in the input text with hyphens to create a valid markdown link.
Milestone commit: better error handling with custom output parser, dir and file re-org
style(langchain): fix formatting and add comments to prefix.js
fix(langchain): remove commented out code in test6.js
feat(langchain): reduce maxAttempts from 3 to 2 in CustomChatAgent's buildPromptPrefix method
feat(langchain): add null check for result.output in CustomChatAgent's buildPromptPrefix method
style(langchain): improve consistency and readability of code
This commit improves the consistency and readability of the code in the langchain directory. Specifically, it:
- Changes the case of the "Thought" output in the CustomChatAgent class to match the "Thought" output in the SelfReflectionTool class.
- Adds a currentDateString property to the CustomChatAgent class to avoid repeating the same code in multiple places.
- Updates the prefix in the prefix.js file to match the current objectives of the ChatGPT model.
- Changes the description of the OpenAICreateImage tool to request a description of the image to be generated.
- Updates the tools used by the ChatAgent in the askGPTPlugins.js file to include the Google and Browser tools instead of the Calculator and Create-Image tools.
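
The SelfReflectionTool described above follows langchain's custom-tool pattern; a minimal sketch, with the description text and _call body as illustrative assumptions rather than the committed code:

const { Tool } = require('langchain/tools');

// Minimal custom langchain tool in the style of SelfReflectionTool (sketch).
class SelfReflection extends Tool {
  name = 'self-reflection';
  description = 'A space to explore and organize your thoughts before acting.';

  async _call(input) {
    // Return the reflection so the agent can act on its organized thoughts.
    return `Reflection: ${input}`;
  }
}

The OpenAICreateImage change mentioned above amounts to returning a markdown image like `![<hyphenated-input>](<image-url>)`, with newlines and spaces in the input replaced by hyphens so the link stays valid.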
feat: add wolfram, improve image creation, rename to dall-e
* refactor(langchain): update language and formatting in various files
- Update tool-based instructions to use proper Markdown syntax for image URLs
- Adjust temperature for modelOptions in CustomChatAgent class
- Comment out console.debug statement in CustomChatAgent class
- Update prefix in initializeCustomAgent function to use proper line breaks
- Update prefix in instructions.js to use proper line breaks and change "user" to "human"
- Update input in test6.js to use Ezra Pound instead of Hemingway
- Update return statement in OpenAICreateImage class to use "generated-image" as alt-text
- Update description in SelfReflectionTool class to provide clearer instructions
- Update tools in ask function in askGPTPlugins.js to use only the DALL-E tool and enable debug mode
feat(ask): add support for DALL-E tool in formatAction function
feat(ask): add support for self-reflection tool in formatAction function
feat(Plugin.jsx): add support for self-reflection tool in Plugin component
fix(Plugin.jsx): fix Plugin component to not display 'None' when latest is not available
* docs(openaiCreateImage.js): update tool description to clarify usage
* feat(agent.js): add message parameter to initialize function
feat(agent.js): pass message parameter to SelfReflectionTool constructor
feat(customAgent.js): add longestToolName variable to CustomOutputParser
feat(openaiCreateImage.js): replace new lines with spaces in prompt parameter
feat(selfReflection.js): add message parameter to SelfReflectionTool constructor
feat(selfReflection.js): add placeholder response to selfReflect function
* feat: frontend plugin selection
* fix: agent updates, available tools via endpoint config
* fix: improve frontend plugin selection
* feat: further customize agent and bypass executor when no tools are provided
* fix: key issue in multiselect and allow setting changes during convo in plugins endpoint
* fix: convo will save modelOptions, fix persistent errors with agent
* fix: add looser final answer parsing and edit action formatting
* fix: handle edge case where stop token is not hit and causes long parsing error
* feat: trying new prompt for image creation
* fix: improvements based on gpt-3.5
* feat: allow setting model options throughout plugin conversation
* fix: agent adjustments
* improve final reply for gpt-4, gpt-3.5 needs a more stable approach
* fix: better context output for gpt-3.5
* fix: added clarification for better context output for gpt-3.5
* feat(PluginsOptions): add advanced mode to show/hide options
style(PluginsOptions): add styles for advanced mode and show/hide options
* minor changes to styling
* refactor(langchain): add support for custom GPT-4 agent
This commit adds support for a custom GPT-4 agent in the langchain module. The `CustomGpt4Agent` class extends the `ZeroShotAgent` class and includes a new `createPrompt` method that generates a prompt template for the agent. The `initializeCustomAgent` function has been updated to use the `CustomGpt4Agent` class when the model is not GPT-3. The `instructions.js` file has also been updated to include new instructions for the GPT-4 agent. The `formatInstructions` method has been removed and replaced with `gpt4Instructions`, and `prefix2` and `suffix2` have been added to include the new instructions.
feat(langchain): add custom output parser for langchain agents
This commit adds a custom output parser for langchain agents.
The new parser is called CustomOutputParser and it extends ZeroShotAgentOutputParser. It takes a fields object as a parameter and sets the tools and longestToolName properties. It also sets the finishToolNameRegex property to match the final answer. The parse method of the CustomOutputParser class takes a text parameter and returns an object with returnValues, log, and toolInput properties.
This commit also adds a Gpt4OutputParser class that extends ZeroShotAgentOutputParser. It takes a fields object as a parameter and sets the tools and longestToolName properties. It also sets the finishToolNameRegex property to match the final answer. The parse method of the Gpt4OutputParser class takes a text parameter and returns an object with returnValues, log, and toolInput properties.
feat(langchain): add isGpt3 parameter to
* Stable Diffusion Plugin (#204)
* Added stable diffusion plugin
* Added example prompt
* Fixed naming
* Removed brackets in the prompt
* fix: improved agent for gpt-3.5
* fix: outparser, gpt3 instructions, and wolfram error handling
* chore: update langchain to 0.0.71
* fix: long parsing action input fix
* fix: make plugin select close on clicking label/button
* fix: make plugin select close on clicking label/button
* fix: wolfram input formatting and gpt-3 payload without plugins
* chore(api): update axios package version to 1.3.4
feat(api): add requireJwtAuth middleware to askGPTPlugins endpoint
fix(api): replace session user with user id in askGPTPlugins endpoint
docs(LOCAL_INSTALL.md): update guide for local installation and testing
This commit updates the guide for local installation and testing of the ChatGPT-Clone app. It includes instructions for locally running the app, updating the app version, and running tests. It also includes a new option for running the app using Docker. The commit also fixes some typos and formatting issues.
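
The finish-regex flow described above can be sketched as follows, assuming langchain's ZeroShotAgentOutputParser base class; the regex and the self-reflection fallback are simplified illustrations, not the committed code:

const { ZeroShotAgentOutputParser } = require('langchain/agents');

// Simplified sketch of the CustomOutputParser control flow described above.
class CustomOutputParser extends ZeroShotAgentOutputParser {
  constructor(fields) {
    super(fields);
    this.tools = fields.tools;
    this.longestToolName = Math.max(...this.tools.map((tool) => tool.name.length));
    this.finishToolNameRegex = /Final Answer:/i; // matches the final answer marker
  }

  async parse(text) {
    if (this.finishToolNameRegex.test(text)) {
      // AgentFinish: hand back the text after the marker as the output.
      return {
        returnValues: { output: text.split(this.finishToolNameRegex).pop().trim() },
        log: text
      };
    }
    // AgentAction fallback: a later commit routes unparseable tool choices
    // to 'self-reflection'.
    return { tool: 'self-reflection', toolInput: text, log: text };
  }
}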
* add reverseProxy to plugins client
* chore(Dockerfile-app): add Dockerfile for building and running the app in a container
docs: remove outdated guides on Google search and Bing jailbreak mode
docs(LOCAL_INSTALL.md): remove outdated Windows installation instructions and update MeiliSearch configuration file
* fix: handle n/a parsing error better, reduce token waste if no agentic behavior is needed
* style: fix formatting and add parentheses around arrow function parameter
style: change hover background color to white and dark hover background color to gray-700
* chore: re-organize agent dir and files
* feat(ChatAgent.js): add support for PlanAndExecuteAgentExecutor
feat(PlanAndExecuteAgentExecutor.js): add PlanAndExecuteAgentExecutor class
feat(planExecutor.js): add demo for PlanAndExecuteAgentExecutor
* feat: add azure support to plugins
* refactor(utils): add basePath endpoint for genAzureEndpoint
feat(api): add support for Azure OpenAI API in various modules and tools
* feat: add plugin api for fetching available tools
* feat: add data service for getting available plugins
* feat: first iteration plugin store UI
* refactor: rename files to follow proper naming convention
* feat: Plugin store UI components
* feat: create separate user routes, service, controller, and add plugins to user model
* feat: create data service for adding and removing plugins per user
* feat: UI for adding and removing plugins, displaying plugins in dropdown based on what user has installed
* fix: merge conflicts from main
* fix: fix plugin items titles
* fix: tool.value -> tool.pluginKey
* fix: testing returnDirect for self-reflection
* fix: add browser tool to manifest
* refactor(outputParser.js): remove commented out code
feat(outputParser.js): add support for thought input when there is no action input
* handling 'use tool' edge case
* merge main to langchain
* fix(User.js, auth.service.js, localStrategy.js): change deprecated Joi.validate() to schema.validate() method (#322)
* fix(auth.service.js): fixes deprecated error callback in mongoose save method (#323)
* chore: run formatting script with new rules
* refactor: add requiresAuth to manifest, fix uninstall button
* version with plugin auth as dialog modal
* feat: Complete frontend for plugin auth
* frontend styling updates
* feat: api for plugin auth
* feat: Add tooltip with field description to plugin auth form
* fix: issue with plugin that has no auth
* feat(tools): add support for user-specific API keys
This commit adds support for user-specific API keys for the following tools:
- Google Search API
- Web Browser
- SerpAPI
- Zapier
- DALL-E
- Wolfram Alpha API
It also adds support for OpenAI API key for the Web Browser tool.
The `validateTools` function now takes a `user` parameter and checks for user-specific API keys before falling back to environment variables. The `loadTools` function now takes a `user` parameter and initializes the tools with user-specific API keys if available. The `manifest.json` file has been updated to include the new `authConfig` fields for the tools that support user-specific API keys. The `askGPTPlugins.js` file has been updated to use the `validateTools` function with the `user` parameter.
refactor(ChatAgent.js): add user parameter to initialize function and pass it to loadTools function
refactor(tools/index.js): set default value for tools parameter in validateTools function
refactor(askGPTPlugins.js): remove duplicate user variable declaration and use the one from req object
* refactor(ChatAgent.js): await validTool() before pushing to this.tools array
refactor(tools/index.js): use Map instead of Set to store valid tools
refactor(tools/index.js): filter availableTools to only validate tools passed in
refactor(PluginController.js): filter out duplicate plugins by pluginKey
refactor(crypto.js): use environment variables for encryption key and initialization vector
feat(PluginService.js): add null check for pluginAuth in getUserPluginAuthValue()
* feat(api): add credentials key and IV to .env.example for securely storing credentials
* Adds testing for handling tools, introducing a test env to the backend
Fixes bugs & optimizes code as revealed through testing, including:
- wolfram.js: fixes bug where wolfram was not handling authentication
- ChatAgent.js: ChatAgent modified to reflect 'handleTools' changes
- handleTools.js: Moves logic out of index file
- handleTools.js: loadTools: returns only requested tools
- handleTools.js: validTools: correctly returns tools based on authentication
* test(index.test.js): add test to validate a tool from an environment variable
* test(tools): add test for initializing an authenticated tool through Environment Variables
* refactor(ChatAgent.js): remove commented out code and unused imports
* refactor(ChatAgent.js): move instructions to a separate file and import them
fix(ChatAgent.js): replace hardcoded instructions with imported ones
* refactor(ChatAgent.js): change import path for TextStream
refactor(stream.js): remove unused TextStream class
* chore(.gitignore): add .env.test to gitignore
refactor(ChatAgent.js): rename CustomChatAgent to ChatAgent
test(ChatAgent.test.js): add tests for ChatAgent class
refactor(outputParser.js): remove OldOutputParser class
refactor(outputParser.js): rename CustomOutputParser to OutputParser
docs(.env.test.example): add comment explaining how to use OPENAI_API_KEY
refactor(jestSetup.js): use dotenv to load environment variables from .env.test file
* Various optimizations and config, add tests for PluginStoreDialog
* test(ChatAgent.test.js): add test to check if chat history is returned correctly
* test: unit tests for plugin store
* test: add frontend-test script to root package.json
* feat(ChatAgent.js, askGPTPlugins.js): add support for aborting chat requests (in progress)
* test: add more client tests
* feat(ChatAgent): allow plugin requests to be cancelled
* feat(ChatAgent): allow message regeneration
* feat(ChatAgent): remember last selected tools
* Remove plugins we don't yet have from manifest.json
* fix(ChatAgent.js): increase maxAttempts from 1 to 2
fix(ChatAgent.js): change error message to 'Cancelled.' if message was aborted mid-generation
fix(openaiCreateImage.js): replace unwanted characters in input string
fix(handlers.js): compare action.tool in lowercase to 'self-reflection'
* fix(ChatAgent): Fix up plugin I/O formatting for n/a actions
* refactor(Plugin.jsx): remove unused import statement
feat(Plugin.jsx): add Plugin component with svg paths and styles
* refactor: simplify credential encryption/decryption by using a single key and IV for all environments. Update crypto.js and .env.example files accordingly.
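
A minimal sketch of the single key + IV scheme this last commit describes, using Node's built-in crypto; CREDS_KEY and CREDS_IV stand in for the variables added to .env.example (a hex-encoded 32-byte key and 16-byte IV), and the cipher choice is an assumption:

const crypto = require('crypto');

// One key and IV for all environments, read from the environment (sketch).
const key = Buffer.from(process.env.CREDS_KEY, 'hex'); // 32 bytes for aes-256
const iv = Buffer.from(process.env.CREDS_IV, 'hex'); // 16 bytes

const encrypt = (value) => {
  const cipher = crypto.createCipheriv('aes-256-cbc', key, iv);
  return cipher.update(value, 'utf8', 'hex') + cipher.final('hex');
};

const decrypt = (encrypted) => {
  const decipher = crypto.createDecipheriv('aes-256-cbc', key, iv);
  return decipher.update(encrypted, 'hex', 'utf8') + decipher.final('utf8');
};

The user-key lookup from the earlier tools commit then reduces to: decrypt the stored per-user credential if getUserPluginAuthValue() finds one, otherwise fall back to the matching environment variable.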
* fix(ChatAgent.js): reduce maxAttempts from 2 to 1
feat(ChatAgent.js): add model information to responseMessage object
feat(Message.js): add model field to messageSchema
feat(Message.js): add model field to message object
feat(Message.jsx): pass model information to getIcon function
feat(getIcon.jsx): add Plugin component and handle plugin messages differently
* feat(askGPTPlugins.js): add model property to the ask function response object
feat(EndpointItem.jsx): add message property to the EndpointItem component
feat(MessageHeader.jsx): add Plugin icon to the plugins section
feat(MessageHeader.jsx): change alpha to beta in the plugins section
feat(svg): add Plugin, GPTIcon, and BingIcon components to the svg folder
refactor(EndpointItems.jsx): remove unused import statement
* refactor(googleSearch.js, wolfram.js): change error handling to return a message instead of throwing an error
* refactor(CustomAgent): remove commented code and change return object to include returnValues property
* feat(CustomAgent.js): add currentDateString to createPrompt method options
deps(api/package.json): update langchain to v0.0.81
* fix: do not show pagination if the maxPage is 1
* Add Zapier back to manifest (accidentally removed)
* chore(api): update langchain dependency to version 0.0.84
* feat(DALL-E.js): add DALL-E tool for generating images using OpenAI's DALL-E API
refactor(handleTools.js): update import for DALL-E tool
refactor(index.test.js): update import for DALL-E tool
refactor(stablediffusion.js): add check for image directory existence before saving image
* refactor(CustomAgent): rename instructions prefix variable to gpt3 and add gpt4 instructions
feat(CustomAgent): add support for gpt-4 model
fix(initializeCustomAgent.js): pass model name to createPrompt method
fix(outputParser.js): set selectedTool to 'self-reflection' when tool parsing fails
* style(langchain/tools): update guidelines for image creation in DALL-E and StableDiffusion
- Update guidelines for image creation in DALL-E and StableDiffusion tools
- Emphasize the importance of "showing" and not "telling" the imagery in crafting input
- Update formatting for the example prompt for generating a realistic portrait photo of a man
- Generate images only once per human query unless explicitly requested by the user
* docs(tools): update tool descriptions for DALL-E and Stable Diffusion
- Update the description for DALL-E tool to indicate that it is exclusively for visual content and provide guidelines for generating images with a focus on visual attributes.
- Update the description for Stable Diffusion tool to indicate that it is exclusively for visual content and provide guidelines for generating images with a focus on visual attributes.
* chore(api): update "@waylaidwanderer/chatgpt-api" dependency to version "^1.36.3"
* refactor(ChatAgent.js): use environment variable for reverse proxy url
refactor(ChatAgent.js): use environment variable for openai base path
refactor(instructions.js): update gpt3 and gpt3-v2 instructions
refactor(outputParser.js): update finishToolNameRegex in CustomOutputParser class
* refactor(DALL-E.js): change apiKey and azureKey fields to uppercase
refactor(googleSearch.js): change cx and apiKey fields to uppercase
feat(manifest.json): add authConfig field for Stable Diffusion WebUI API URL
refactor(stablediffusion.js): add url field to constructor and change getServerURL() to this.url
refactor(wolfram.js): change apiKey field to uppercase WOLFRAM_APP_ID
* refactor(handleTools.js): simplify tool loading and add support for custom tool constructors and options
* refactor(handleTools.js): remove commented out code and unused imports
* refactor(handleTools.js, index.js): change file name from wolfram.js to Wolfram.js and selfReflection.js to SelfReflection.js to follow PascalCase convention
* refactor(outputParser.js, askGPTPlugins.js): improve code readability and remove unnecessary comments
* feat(GoogleSearch.js): add GoogleSearchAPI tool to allow agents to use the Google Custom Search API
feat(SelfReflection.js): add SelfReflectionTool to allow agents to reflect on their thoughts and actions
feat(StableDiffusion.js): add StableDiffusionAPI tool to allow agents to generate images using stable diffusion webui's api
feat(Wolfram.js): add WolframAlphaAPI tool for computation, math, curated knowledge & real-time data through WolframAlpha.
* testing openai specs
* doc: fix link in .env.example
* package-update
* fix(MultiSelectDropDown.jsx): handle null or undefined values in availableValues array
* refactor(DALL-E.js, StableDiffusion.js): remove 'dist/' from image path
feat(docker-compose.yml): add comments for reverse proxy configuration
* chore(.gitignore): ignore client/public/images/
fix(DALL-E.js, StableDiffusion.js): change image path from dist/ to public/
feat(index.js): add support for serving static files from client/public/ directory
* fix: remove selected tool when uninstalled
* plugin options in progress
* fix: fix issue with uninstalling a plugin that is in use and typescript errors
* feat(gptPlugins): add Preset support for GPT Plugins endpoint
feat(ChatAgent.js): add support for agentOptions object
feat(convoSchema.js): add agentOptions field to conversation schema
feat(defaults.js): add agentOptions object to defaults
feat(presetSchema.js): add agentOptions field to preset schema
feat(askGPTPlugins.js): add support for agentOptions object in request body
feat(EditPresetDialog.jsx): add support for showing/hiding GPT Plugins agent settings
feat(EditPresetDialog.jsx): add support for setting GPT Plugins agent options
fix(EndpointOptionsDialog.jsx): change endpoint name from 'gptPlugins' to 'Plugins'
feat(AgentSettings.jsx): add AgentSettings component for GPT plugins configuration
feat(client): add GPT Plugins settings component and endpoint to Settings component
fix(client): remove unused imports in GoogleOptions component
feat(PluginsOptions): add support for agent settings and refactor code
feat(PluginsOptions): add GPTIcon to show/hide agent settings button
feat(index.ts): export SVG components
feat(GPTIcon.jsx): add className prop to GPTIcon component
feat(GPTIcon.jsx): import cn function from utils
feat(BingIcon.tsx): export BingIcon component
feat(index.ts): export BingIcon component
feat(index.ts): export MessagesSquared component
refactor(cleanupPreset.js): add default values for agentOptions in gptPlugins endpoint
feat(getDefaultConversation.js, handleSubmit.js): add agentOptions object to conversation object for GPT plugins endpoint. Update default temperature value to 0.8. Add chatGptLabel and promptPrefix properties to conversation object.
* fix: set default convo back to null
* refactor(ChatAgent.js, askGPTPlugins.js, AgentSettings.jsx): change variable names for better readability and remove redundant code
* test: add RecoilRoot to layout-test-utils
* refactor(askGPTPlugins.js): remove redundant code and use endpointOption directly
feat(askGPTPlugins.js): add validation for tools in endpointOption before using it
* chore(ChatAgent.js, Settings.jsx): add agentOptions to saveConvo function and adjust Settings component height
The ChatAgent.js file was modified to include the agentOptions object in the saveConvo function. The Settings.jsx file was modified to adjust the height of the component to ensure that all content is visible.
* refactor(ChatAgent.js): extract reverseProxyUrl option to a class property and add support for it
feat(ChatAgent.js): add support for completionMode option in sendApiMessage method
feat(ChatAgent.js): add support for user-provided promptPrefix in buildPrompt method
* feat(plugins): allow preset change mid conversation
* chore: update OPENAI_KEY to OPENAI_API_KEY in .github/playwright.yml and api/.env.example
refactor(chatgpt-client.js): update OPENAI_KEY to OPENAI_API_KEY
feat(langchain): add demo-aiplugin.js and demo-yaml.js, remove test2.js, test3.js, and test4.js
chore: remove unused test files
fix(titleConvo.js): fix typo in environment variable name
fix(askGPTPlugins.js): fix typo in environment variable name
fix(endpoints.js): fix typo in environment variable name
docs: update installation guide to use OPENAI_API_KEY instead of OPENAI_KEY in .env file
* fix(index.test.js): change import of GoogleSearchAPI to use uppercase G in GoogleSearch
* chore(api): bump langchain version
* feat(PluginController.js): authenticate plugins from environment variables if they are set
feat(PluginStoreDialog.tsx): show plugin auth form only if plugin is not authenticated by env var and require authentication
feat(types.ts): add authenticated field to TPlugin type definition
* docs: update google_search.md and add stable_diffusion.md
* Update stable_diffusion.md
* refactor(Wolfram.js): remove newline characters from query before encoding
docs(wolfram.md): add instructions for setting WOLFRAM_APP_ID in api.env to bypass prompt for AppID in plugin
* refactor(Wolfram.js): replace deprecated replaceAll method with replace method
* Update wolfram.md
* fix(askGPTPlugins): error message will reference correct Parent Message
* refactor(chatgpt-client.js, ChatAgent.js): simplify maxContextTokens calculation and add promptPrefix parameter to buildPrompt method
* docs: initial draft of intro to plugins
* Update introduction.md
* Update introduction.md
* Feature: User/Reg cleanup + Install / Upgrade script for langchain (#427)
* test: login tests
* test: finish login tests
* test: initial tests for registration
* test: registration specs
* feature: Init an app config file
- Simplifies the ENV vars too
- Legacy fallbacks for older builds
* refactor(auth): Refactor log in/out controllers
- Moves both login and logout controllers to their own file
* chore(jwt): Throw warning if secret is default
* feature(frontend): Ability to disable registration
* feature(env): Env in the root + version support ie .env.prod, .env.dev, .env.test
* feature: Upgrade .env script for users
* chore(config): Refactor and remove legacy env refs
* feature(upgrade): Upgrade script for .env changes
* feature: Install script and upgrade script
* bugfix: Uncomment line to remove old .env file
* chore: rename OPENAI_KEY to OPENAI_API_KEY
* chore: Cleanup config changes/bugs
* bugfix: Fix config and node env issues
* bugfix: Config validation logic
* bugfix: Handle unusual env configs gracefully
* bugfix: Revert route changes and fix registration disable calling
* bugfix: Fix env issues in frontend
* bugfix: Fix login
* bugfix: Fix frontend envs
* bugfix: Fix frontend jest tests
* bugfix: Fix upgrade scripts
* bugfix: Allow install in non-tty envs
* bugfix(windows): Use cross-env to set for windows
* bugfix(env): Handle .env being incorrect to begin with for client domain
* chore(merge-conflict): Update to LibreChat
* chore(merge-conflict): Update to package-lock
---------
Co-authored-by: Daniel D Orlando <dan@danorlando.com>
* chore: comment out unused agent options
* Update langchain plugins docs (#461)
* Update: install docs (LibreChat) (#458)
* Release: rename project from ChatGPT Clone to LibreChat
Release: rename project from ChatGPT Clone to LibreChat
* Release: rename project from ChatGPT Clone to LibreChat
Release: rename project from ChatGPT Clone to LibreChat
* Release: rename project from ChatGPT Clone to LibreChat
Release: rename project from ChatGPT Clone to LibreChat
* Release: rename project from ChatGPT Clone to LibreChat
Release: rename project from ChatGPT Clone to LibreChat
* Update documentation_guidelines.md
* Update introduction.md
add link to readme
* Update stable_diffusion.md
add link back to readme
* Update wolfram.md
add link back to readme
* Update README.md
add Plugins to ToC
* feat(ChatAgent.js): add support for langchainProxy configuration option
Add a new configuration option `langchainProxy` to the ChatAgent class. If the option is set, the `basePath` configuration option of the `ChatOpenAI` instance is set to the base path of `langchainProxy`.
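
The base path is carved out of the reverse-proxy URL up to and including its 'v1' segment, as the ChatAgent.js diff below shows; for example:

// How setOptions() derives langchainProxy from a reverse proxy URL
// (the URL itself is illustrative):
const reverseProxyUrl = 'https://example.com/api/v1/chat/completions';
const langchainProxy = reverseProxyUrl.substring(0, reverseProxyUrl.indexOf('v1') + 'v1'.length);
console.log(langchainProxy); // https://example.com/api/v1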
* bugfix(errors): Possible workaround for error flashing (#463)
* Test/user auth system client tests (#462)
* test: login tests
* test: finish login tests
* test: initial tests for registration
* test: registration specs
* chore(api): update langchain dependency to version 0.0.91
* Update introduction.md
* Update introduction.md
* Update introduction.md
* fix: no longer renders html in markdown content
fix: patch XSS vulnerability completely by handling cursor on the frontend without css/html
* fix(Content.jsx): fix cursor logic so it never shows for static messages
* bugfix(langchain): Upgrade script, docker, env and docs (#465)
* bugfix(errors): Remove incorrect manual fix from misunderstanding
* chore(env): Lets not make a .env.prod and use the prod values in the default root .env
- .env.dev will still be created
* chore(upgrade.js): Lets tell the user about .env.dev if we create it
* bugfix(env): Move to full name environments for vite
- .env.prod => .env.production
- .env.dev => .env.development
* chore(env-example): Explain how to get google login working in production
* bugfix(oauth): Minor fix to point isProduction to a correct value
* bugfix: Typo in public
* chore(docs): Update docs to note the changes to .env
* chore(docs): Include note on how to get google auth working in dev and how to disable registration
* bugfix: Fix missing env changes
* bugfix: Fix up docker to work with new env / npm changes
* Update .env.example
Cleanup the env of the palm2 instruction and fix to formatting
* chore(docker): Simplify Docker deployments
- Needs work to support dev env/hotreload
* bugfix: Remove volume map for client dir
* chore(env-example): Change instructions to be more user centric
---------
Co-authored-by: Fuegovic <32828263+fuegovic@users.noreply.github.com>
* update: install docs (#466)
* Add files via upload
* Update apis-and-tokens.md
* Update apis-and-tokens.md
* Update docker_install.md
* Update linux_install.md
* Rename apis-and-tokens.md to apis_and_tokens.md
* Update docker_install.md
* Update linux_install.md
* Update mac_install.md
* Update linux_install.md
* Update docker_install.md
* Update windows_install.md
* Update apis_and_tokens.md
* Update mac_install.md
* Update linux_install.md
* Update docker_install.md
* Update README.md
* Update README.md : Breaking Changes
---------
Co-authored-by: Danny Avila <110412045+danny-avila@users.noreply.github.com>
* Update README.md (#468)
add new API/Token docs to Toc
* docs: guide on how to create your own plugin
* Update make_your_own.md
* Update make_your_own.md
* feat(docker): add build args for frontend variables in Dockerfile
feat(docker-compose): add build args for frontend variables in docker-compose.yml
* Update docker_install.md
* Update docker_install.md
* Update docker_install.md
* Update docker_install.md
* docs: update (#469)
* Update: make_your_own.md
* Update README.md
add `make_your_own.md` to ToC
* Update linux_install.md
* Update mac_install.md
* Update windows_install.md
* Update apis_and_tokens.md
* Update docker_install.md
* Update docker_install.md
* Update linux_install.md
* Update mac_install.md
* Update windows_install.md
* Update apis_and_tokens.md
* Update user_auth_system.md
* Update docker_install.md
clean up of repeated information
* Update docker_install.md
* Update docker_install.md
typo
* fix: fix issue with pluginstore next and prev buttons going out of bounds
* fix: add icon for web browser plugin
* docs(GoogleSearch.js): update description of GoogleSearchAPI class to be more descriptive of its functionality
* feat(ask/handlers.js): add cursor to indicate ongoing progress of a long-running task
fix(Content.jsx): handle null content in the message stream by replacing it with an empty string (with a space so a text space is rendered)
* Update README.md
* Update README.md
* fix: plugin option stacking order
* update: web browser icon (#470)
* Delete web-browser.png
* update: web browser icon
* Update readme (#472)
* Update README.md
Discord badge now displays the number of online users
Project description has been updated to reflect current status
Feature section has been updated to reflect current capabilities
Sponsors section is now located just above the contributors section
Roadmap has been removed as it was outdated.
* Delete roadmap.md
Roadmap has been removed to streamline document maintenance.
* Update README.md
* Update README.md
* Delete CHANGELOG.md
* fix: pluginstore in mobile view getting clipped and not scrolling
* docs(linux_install.md): remove duplicate git clone command
* chore(Dockerfile): comment out nginx-client build stage
docs(README.md): update installation instructions and mention docker-compose changes
docs(features/plugins/introduction.md): bold plugin names and add emphasis to notes
* feat: add superscript and subscript support to markdown rendering
refactor: support markdown citations for BingAI
* refactor: support markdown citations for BingAI
---------
Co-authored-by: David Shin <42793498+dncc89@users.noreply.github.com>
Co-authored-by: Daniel D Orlando <dan@danorlando.com>
Co-authored-by: LaraClara <2524209+ClaraLeigh@users.noreply.github.com>
Co-authored-by: Fuegovic <32828263+fuegovic@users.noreply.github.com>
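
Taken together, the commits above define the request the new endpoint consumes; a plausible sketch of the POST body for /api/ask/gptPlugins, where the field names come from the commit messages but the exact JSON shape and values are assumptions:

// Hypothetical request body for the gptPlugins endpoint (sketch).
const payload = {
  endpoint: 'gptPlugins',
  text: 'Generate an image of a lighthouse at dusk', // illustrative user message
  parentMessageId: null,
  endpointOption: {
    tools: ['dall-e', 'google'], // plugins the user has installed
    chatGptLabel: null,
    promptPrefix: null,
    temperature: 0.8, // default per the preset commits
    top_p: 1,
    presence_penalty: 0,
    frequency_penalty: 0,
    agentOptions: { model: 'gpt-3.5-turbo', temperature: 0 } // agent model set separately
  }
};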
This commit is contained in:
parent
aaa20309a0
commit
e4c91dfbea
194 changed files with 19251 additions and 5744 deletions
@@ -1,6 +1,6 @@
 require('dotenv').config();
 const { KeyvFile } = require('keyv-file');
-const { genAzureEndpoint } = require('../../utils/genAzureEndpoints');
+const { genAzureChatCompletion } = require('../../utils/genAzureEndpoints');
 const tiktoken = require('@dqbd/tiktoken');
 const tiktokenModels = require('../../utils/tiktokenModels');
 const encoding_for_model = tiktoken.encoding_for_model;
@@ -31,7 +31,7 @@ const askClient = async ({
   if (promptPrefix) {
     promptText = promptPrefix;
   }
-  const maxContextTokens = model === 'gpt-4' ? 8191 : model === 'gpt-4-32k' ? 32767 : 4095; // 1 less than maximum
+  const maxContextTokens = model === 'gpt-4-32k' ? 32767 : model.startsWith('gpt-4') ? 8191 : 4095; // 1 less than maximum
   const clientOptions = {
     reverseProxyUrl: process.env.OPENAI_REVERSE_PROXY || null,
     azure,
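
The reordered ternary matters for dated model names; a quick sketch of old versus new behavior, where 'gpt-4-0314' is an illustrative model name and not taken from the diff:

const oldMax = (model) =>
  model === 'gpt-4' ? 8191 : model === 'gpt-4-32k' ? 32767 : 4095;
const newMax = (model) =>
  model === 'gpt-4-32k' ? 32767 : model.startsWith('gpt-4') ? 8191 : 4095;

console.log(oldMax('gpt-4-0314')); // 4095: dated GPT-4 variants fell through to the 4k budget
console.log(newMax('gpt-4-0314')); // 8191
console.log(newMax('gpt-4-32k')); // 32767: checked first, since it also starts with 'gpt-4'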
@@ -49,11 +49,11 @@ const askClient = async ({
     // debug: true
   };

-  let apiKey = oaiApiKey ? oaiApiKey : process.env.OPENAI_KEY || null;
+  let apiKey = oaiApiKey ? oaiApiKey : process.env.OPENAI_API_KEY || null;

   if (azure) {
     apiKey = oaiApiKey ? oaiApiKey : process.env.AZURE_OPENAI_API_KEY || null;
-    clientOptions.reverseProxyUrl = genAzureEndpoint({
+    clientOptions.reverseProxyUrl = genAzureChatCompletion({
       azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME,
       azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME,
       azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION
@@ -31,7 +31,7 @@ const run = async () => {
     debug: true
   };

-  let apiKey = process.env.OPENAI_KEY;
+  let apiKey = process.env.OPENAI_API_KEY;

   const maxMemory = 0.05 * 1024 * 1024 * 1024;
api/app/langchain/ChatAgent.js (new file, 904 lines)

@@ -0,0 +1,904 @@
const crypto = require('crypto');
const { genAzureChatCompletion } = require('../../utils/genAzureEndpoints');
const {
  encoding_for_model: encodingForModel,
  get_encoding: getEncoding
} = require('@dqbd/tiktoken');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
const { Agent, ProxyAgent } = require('undici');
const TextStream = require('../stream');
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { CallbackManager } = require('langchain/callbacks');
const { HumanChatMessage, AIChatMessage } = require('langchain/schema');
const { initializeCustomAgent } = require('./agents/CustomAgent/initializeCustomAgent');
const { getMessages, saveMessage, saveConvo } = require('../../models');
const { loadTools, SelfReflectionTool } = require('./tools');
const {
  instructions,
  imageInstructions,
  errorInstructions,
  completionInstructions
} = require('./instructions');

const tokenizersCache = {};

class ChatAgent {
  constructor(apiKey, options = {}) {
    this.tools = [];
    this.actions = [];
    this.openAIApiKey = apiKey;
    this.azure = options.azure || false;
    if (this.azure) {
      const { azureOpenAIApiInstanceName, azureOpenAIApiDeploymentName, azureOpenAIApiVersion } =
        this.azure;
      this.azureEndpoint = genAzureChatCompletion({
        azureOpenAIApiInstanceName,
        azureOpenAIApiDeploymentName,
        azureOpenAIApiVersion
      });
    }
    this.setOptions(options);
    this.executor = null;
    this.currentDateString = new Date().toLocaleDateString('en-us', {
      year: 'numeric',
      month: 'long',
      day: 'numeric'
    });
  }

  getActions(input = null) {
    let output = 'Internal thoughts & actions taken:\n"';
    let actions = input || this.actions;

    if (actions[0]?.action) {
      actions = actions.map((step) => ({
        log: `${step.action.log}\nObservation: ${step.observation}`
      }));
    }

    actions.forEach((actionObj, index) => {
      output += `${actionObj.log}`;
      if (index < actions.length - 1) {
        output += '\n';
      }
    });

    return output + '"';
  }

  buildErrorInput(message, errorMessage) {
    const log = errorMessage.includes('Could not parse LLM output:')
      ? `A formatting error occurred with your response to the human's last message. You didn't follow the formatting instructions. Remember to ${instructions}`
      : `You encountered an error while replying to the human's last message. Attempt to answer again or admit an answer cannot be given.\nError: ${errorMessage}`;

    return `
${log}

${this.getActions()}

Human's last message: ${message}
`;
  }

  buildPromptPrefix(result, message) {
    if ((result.output && result.output.includes('N/A')) || result.output === undefined) {
      return null;
    }

    if (
      result?.intermediateSteps?.length === 1 &&
      result?.intermediateSteps[0]?.action?.toolInput === 'N/A'
    ) {
      return null;
    }

    const internalActions =
      result?.intermediateSteps?.length > 0
        ? this.getActions(result.intermediateSteps)
        : 'Internal Actions Taken: None';

    const toolBasedInstructions = internalActions.toLowerCase().includes('image')
      ? imageInstructions
      : '';

    const errorMessage = result.errorMessage ? `${errorInstructions} ${result.errorMessage}\n` : '';

    const preliminaryAnswer =
      result.output?.length > 0 ? `Preliminary Answer: "${result.output.trim()}"` : '';
    const prefix = preliminaryAnswer
      ? `review and improve the answer you generated using plugins in response to the User Message below. The answer hasn't been sent to the user yet.`
      : 'respond to the User Message below based on your preliminary thoughts & actions.';

    return `As ChatGPT, ${prefix}${errorMessage}\n${internalActions}
${preliminaryAnswer}
Reply conversationally to the User based on your ${
      preliminaryAnswer ? 'preliminary answer, ' : ''
    }internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
${
  preliminaryAnswer
    ? ''
    : '\nIf there is an incomplete thought or action, you are expected to complete it in your response now.\n'
}You must cite sources if you are using any web links. ${toolBasedInstructions}
Only respond with your conversational reply to the following User Message:
"${message}"`;
  }

  setOptions(options) {
    if (this.options && !this.options.replaceOptions) {
      // nested options aren't spread properly, so we need to do this manually
      this.options.modelOptions = {
        ...this.options.modelOptions,
        ...options.modelOptions
      };
      this.options.agentOptions = {
        ...this.options.agentOptions,
        ...options.agentOptions
      };
      delete options.modelOptions;
      delete options.agentOptions;
      // now we can merge options
      this.options = {
        ...this.options,
        ...options
      };
    } else {
      this.options = options;
    }

    this.agentOptions = this.options.agentOptions || {};
    this.agentIsGpt3 = this.agentOptions.model.startsWith('gpt-3');
    const modelOptions = this.options.modelOptions || {};
    this.modelOptions = {
      ...modelOptions,
      model: modelOptions.model || 'gpt-3.5-turbo',
      temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
      top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
      presence_penalty:
        typeof modelOptions.presence_penalty === 'undefined' ? 0 : modelOptions.presence_penalty,
      frequency_penalty:
        typeof modelOptions.frequency_penalty === 'undefined' ? 0 : modelOptions.frequency_penalty,
      stop: modelOptions.stop
    };

    this.isChatGptModel = this.modelOptions.model.startsWith('gpt-');
    this.isGpt3 = this.modelOptions.model.startsWith('gpt-3');
    this.maxContextTokens =
      this.modelOptions.model === 'gpt-4-32k'
        ? 32767
        : this.modelOptions.model.startsWith('gpt-4')
        ? 8191
        : 4095;

    // Reserve 1024 tokens for the response.
    // The max prompt tokens is determined by the max context tokens minus the max response tokens.
    // Earlier messages will be dropped until the prompt is within the limit.
    this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
    this.maxPromptTokens =
      this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;

    if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
      throw new Error(
        `maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
          this.maxPromptTokens + this.maxResponseTokens
        }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`
      );
    }

    this.userLabel = this.options.userLabel || 'User';
    this.chatGptLabel = this.options.chatGptLabel || 'ChatGPT';

    // Use these faux tokens to help the AI understand the context since we are building the chat log ourselves.
    // Trying to use "<|im_start|>" causes the AI to still generate "<" or "<|" at the end sometimes for some reason,
    // without tripping the stop sequences, so I'm using "||>" instead.
    this.startToken = '||>';
    this.endToken = '';
    this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
    this.completionsUrl = 'https://api.openai.com/v1/chat/completions';
    this.reverseProxyUrl = this.options.reverseProxyUrl || process.env.OPENAI_REVERSE_PROXY;

    if (this.reverseProxyUrl) {
      this.completionsUrl = this.reverseProxyUrl;
      this.langchainProxy = this.reverseProxyUrl.substring(
        0,
        this.reverseProxyUrl.indexOf('v1') + 'v1'.length
      );
    }

    if (this.azureEndpoint) {
      this.completionsUrl = this.azureEndpoint;
    }

    if (this.azureEndpoint && this.options.debug) {
      console.debug(`Using Azure endpoint: ${this.azureEndpoint}`, this.azure);
    }
  }

  static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
    if (tokenizersCache[encoding]) {
      return tokenizersCache[encoding];
    }
    let tokenizer;
    if (isModelName) {
      tokenizer = encodingForModel(encoding, extendSpecialTokens);
    } else {
      tokenizer = getEncoding(encoding, extendSpecialTokens);
    }
    tokenizersCache[encoding] = tokenizer;
    return tokenizer;
  }
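
  // Note on the cache above (added annotation, not part of the committed file):
  // getTokenizer() memoizes one tokenizer per encoding name, so the
  // 'cl100k_base' encoder created in setOptions() is built once and shared by
  // every ChatAgent instance in the process.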
|
||||
async getCompletion(input, onProgress, abortController = null) {
|
||||
if (!abortController) {
|
||||
abortController = new AbortController();
|
||||
}
|
||||
|
||||
const modelOptions = this.modelOptions;
|
||||
if (typeof onProgress === 'function') {
|
||||
modelOptions.stream = true;
|
||||
}
|
||||
if (this.isChatGptModel) {
|
||||
modelOptions.messages = input;
|
||||
} else {
|
||||
modelOptions.prompt = input;
|
||||
}
|
||||
const { debug } = this.options;
|
||||
const url = this.completionsUrl;
|
||||
if (debug) {
|
||||
console.debug();
|
||||
console.debug(url);
|
||||
console.debug(modelOptions);
|
||||
console.debug();
|
||||
}
|
||||
const opts = {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify(modelOptions),
|
||||
dispatcher: new Agent({
|
||||
bodyTimeout: 0,
|
||||
headersTimeout: 0
|
||||
})
|
||||
};
|
||||
|
||||
if (this.azureEndpoint) {
|
||||
opts.headers['api-key'] = this.azure.azureOpenAIApiKey;
|
||||
} else if (this.openAIApiKey) {
|
||||
opts.headers.Authorization = `Bearer ${this.openAIApiKey}`;
|
||||
}
|
||||
|
||||
if (this.options.proxy) {
|
||||
opts.dispatcher = new ProxyAgent(this.options.proxy);
|
||||
}
|
||||
|
||||
if (modelOptions.stream) {
|
||||
// eslint-disable-next-line no-async-promise-executor
|
||||
return new Promise(async (resolve, reject) => {
|
||||
try {
|
||||
let done = false;
|
||||
await fetchEventSource(url, {
|
||||
...opts,
|
||||
signal: abortController.signal,
|
||||
async onopen(response) {
|
||||
if (response.status === 200) {
|
||||
return;
|
||||
}
|
||||
if (debug) {
|
||||
// console.debug(response);
|
||||
}
|
||||
let error;
|
||||
try {
|
||||
const body = await response.text();
|
||||
error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
|
||||
error.status = response.status;
|
||||
error.json = JSON.parse(body);
|
||||
} catch {
|
||||
error = error || new Error(`Failed to send message. HTTP ${response.status}`);
|
||||
}
|
||||
throw error;
|
||||
},
|
||||
onclose() {
|
||||
if (debug) {
|
||||
console.debug('Server closed the connection unexpectedly, returning...');
|
||||
}
|
||||
// workaround for private API not sending [DONE] event
|
||||
if (!done) {
|
||||
onProgress('[DONE]');
|
||||
abortController.abort();
|
||||
resolve();
|
||||
}
|
||||
},
|
||||
onerror(err) {
|
||||
if (debug) {
|
||||
console.debug(err);
|
||||
}
|
||||
// rethrow to stop the operation
|
||||
throw err;
|
||||
},
|
||||
onmessage(message) {
|
||||
if (debug) {
|
||||
// console.debug(message);
|
||||
}
|
||||
if (!message.data || message.event === 'ping') {
|
||||
return;
|
||||
}
|
||||
if (message.data === '[DONE]') {
|
||||
onProgress('[DONE]');
|
||||
abortController.abort();
|
||||
resolve();
|
||||
done = true;
|
||||
return;
|
||||
}
|
||||
onProgress(JSON.parse(message.data));
|
||||
}
|
||||
});
|
||||
} catch (err) {
|
||||
reject(err);
|
||||
}
|
||||
});
|
||||
}
|
||||
const response = await fetch(url, {
|
||||
...opts,
|
||||
signal: abortController.signal
|
||||
});
|
||||
if (response.status !== 200) {
|
||||
const body = await response.text();
|
||||
const error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
|
||||
error.status = response.status;
|
||||
try {
|
||||
error.json = JSON.parse(body);
|
||||
} catch {
|
||||
error.body = body;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
return response.json();
|
||||
}

  async loadHistory(conversationId, parentMessageId = null) {
    if (this.options.debug) {
      console.debug('Loading history for conversation', conversationId, parentMessageId);
    }

    const messages = (await getMessages({ conversationId })) || [];

    if (messages.length === 0) {
      this.currentMessages = [];
      return [];
    }

    const orderedMessages = this.constructor.getMessagesForConversation(messages, parentMessageId);
    // Convert Message documents into appropriate ChatMessage instances
    // (optional chaining on `role` guards against documents without one)
    const chatMessages = orderedMessages.map((msg) =>
      msg?.isCreatedByUser || msg?.role?.toLowerCase() === 'user'
        ? new HumanChatMessage(msg.text)
        : new AIChatMessage(msg.text)
    );

    this.currentMessages = orderedMessages;

    return chatMessages;
  }

  async saveMessageToDatabase(message, user = null) {
    await saveMessage({ ...message, unfinished: false });
    await saveConvo(user, {
      conversationId: message.conversationId,
      endpoint: 'gptPlugins',
      chatGptLabel: this.options.chatGptLabel,
      promptPrefix: this.options.promptPrefix,
      ...this.modelOptions,
      agentOptions: this.agentOptions
    });
  }

  saveLatestAction(action) {
    this.actions.push(action);
  }

  async initialize({ user, message, onAgentAction, onChainEnd, signal }) {
    const modelOptions = {
      modelName: this.agentOptions.model,
      temperature: this.agentOptions.temperature
    };

    const configOptions = {};

    if (this.langchainProxy) {
      configOptions.basePath = this.langchainProxy;
    }

    const model = this.azure
      ? new ChatOpenAI({
        ...this.azure,
        ...modelOptions
      })
      : new ChatOpenAI(
        {
          openAIApiKey: this.openAIApiKey,
          ...modelOptions
        },
        configOptions
      );

    if (this.options.debug) {
      console.debug(`<-----Agent Model: ${model.modelName} | Temp: ${model.temperature}----->`);
    }

    this.availableTools = await loadTools({
      user,
      model,
      tools: this.options.tools,
      options: {
        openAIApiKey: this.openAIApiKey
      }
    });
    // load tools
    for (const tool of this.options.tools) {
      const validTool = this.availableTools[tool];

      if (tool === 'plugins') {
        const plugins = await validTool();
        this.tools = [...this.tools, ...plugins];
      } else if (validTool) {
        this.tools.push(await validTool());
      }
    }

    if (this.options.debug) {
      console.debug('Requested Tools');
      console.debug(this.options.tools);
      console.debug('Loaded Tools');
      console.debug(this.tools.map((tool) => tool.name));
    }

    if (this.tools.length === 0) {
      return;
    }

    this.tools.push(new SelfReflectionTool({ message, isGpt3: false }));

    const handleAction = (action, callback = null) => {
      this.saveLatestAction(action);

      if (this.options.debug) {
        console.debug('Latest Agent Action ', this.actions[this.actions.length - 1]);
      }

      if (typeof callback === 'function') {
        callback(action);
      }
    };

    // initialize agent
    this.executor = await initializeCustomAgent({
      model,
      signal,
      tools: this.tools,
      pastMessages: this.pastMessages,
      currentDateString: this.currentDateString,
      verbose: this.options.debug,
      returnIntermediateSteps: true,
      callbackManager: CallbackManager.fromHandlers({
        async handleAgentAction(action) {
          handleAction(action, onAgentAction);
        },
        async handleChainEnd(action) {
          if (typeof onChainEnd === 'function') {
            onChainEnd(action);
          }
        }
      })
    });

    if (this.options.debug) {
      console.debug('Loaded agent.');
    }
  }

  async sendApiMessage(messages, userMessage, opts = {}) {
    // Doing it this way instead of having each message be a separate element in the array seems to be more reliable,
    // especially when it comes to keeping the AI in character. It also seems to improve coherency and context retention.
    let payload = await this.buildPrompt({
      messages: [
        ...messages,
        {
          messageId: userMessage.messageId,
          parentMessageId: userMessage.parentMessageId,
          role: 'User',
          text: userMessage.text
        }
      ],
      ...opts
    });

    let reply = '';
    let result = {};
    if (typeof opts.onProgress === 'function') {
      await this.getCompletion(
        payload,
        (progressMessage) => {
          if (progressMessage === '[DONE]') {
            return;
          }
          const token = this.isChatGptModel
            ? progressMessage.choices[0].delta.content
            : progressMessage.choices[0].text;
          // first event's delta content is always undefined
          if (!token) {
            return;
          }

          if (token === this.endToken) {
            return;
          }
          opts.onProgress(token);
          reply += token;
        },
        opts.abortController || new AbortController()
      );
    } else {
      result = await this.getCompletion(
        payload,
        null,
        opts.abortController || new AbortController()
      );
      if (this.options.debug) {
        console.debug(JSON.stringify(result));
      }
      if (this.isChatGptModel) {
        reply = result.choices[0].message.content;
      } else {
        reply = result.choices[0].text.replace(this.endToken, '');
      }
    }

    if (this.options.debug) {
      console.debug();
    }

    return reply.trim();
  }

  async executorCall(message, signal) {
    let errorMessage = '';
    const maxAttempts = 1;

    for (let attempts = 1; attempts <= maxAttempts; attempts++) {
      const errorInput = this.buildErrorInput(message, errorMessage);
      const input = attempts > 1 ? errorInput : message;

      if (this.options.debug) {
        console.debug(`Attempt ${attempts} of ${maxAttempts}`);
      }

      if (this.options.debug && errorMessage.length > 0) {
        console.debug('Caught error, input:', input);
      }

      try {
        this.result = await this.executor.call({ input, signal });
        break; // Exit the loop if the function call is successful
      } catch (err) {
        console.error(err);
        errorMessage = err.message;
        if (attempts === maxAttempts) {
          this.result.output = `Encountered an error while attempting to respond. Error: ${err.message}`;
          this.result.intermediateSteps = this.actions;
          this.result.errorMessage = errorMessage;
          break;
        }
      }
    }
  }

  async sendMessage(message, opts = {}) {
    if (opts && typeof opts === 'object') {
      this.setOptions(opts);
    }
    console.log('sendMessage', message, opts);

    const user = opts.user || null;
    const { onAgentAction, onChainEnd, onProgress } = opts;
    const conversationId = opts.conversationId || crypto.randomUUID();
    const parentMessageId = opts.parentMessageId || '00000000-0000-0000-0000-000000000000';
    const userMessageId = opts.overrideParentMessageId || crypto.randomUUID();
    const responseMessageId = crypto.randomUUID();
    this.pastMessages = await this.loadHistory(conversationId, this.options?.parentMessageId);

    const userMessage = {
      messageId: userMessageId,
      parentMessageId,
      conversationId,
      sender: 'User',
      text: message,
      isCreatedByUser: true
    };

    if (typeof opts?.getIds === 'function') {
      opts.getIds({
        userMessage,
        conversationId,
        responseMessageId
      });
    }

    if (typeof opts?.onStart === 'function') {
      opts.onStart(userMessage);
    }

    await this.saveMessageToDatabase(userMessage, user);

    this.result = {};
    const responseMessage = {
      messageId: responseMessageId,
      conversationId,
      parentMessageId: userMessage.messageId,
      isCreatedByUser: false,
      model: this.modelOptions.model,
      sender: 'ChatGPT'
    };

    if (this.options.debug) {
      console.debug('options');
      console.debug(this.options);
    }

    const completionMode = this.options.tools.length === 0;
    if (!completionMode) {
      await this.initialize({
        user,
        message,
        onAgentAction,
        onChainEnd,
        signal: opts.abortController.signal
      });
      await this.executorCall(message, opts.abortController.signal);
    }

    // If message was aborted mid-generation
    if (this.result?.errorMessage?.length > 0 && this.result?.errorMessage?.includes('cancel')) {
      responseMessage.text = 'Cancelled.';
      await this.saveMessageToDatabase(responseMessage, user);
      return { ...responseMessage, ...this.result };
    }

    if (!this.agentIsGpt3 && this.result.output) {
      responseMessage.text = this.result.output;
      await this.saveMessageToDatabase(responseMessage, user);
      const textStream = new TextStream(this.result.output);
      await textStream.processTextStream(onProgress);
      return { ...responseMessage, ...this.result };
    }

    if (this.options.debug) {
      console.debug('this.result', this.result);
    }

    const userProvidedPrefix = completionMode && this.options?.promptPrefix?.length > 0;
    const promptPrefix = userProvidedPrefix
      ? this.options.promptPrefix
      : this.buildPromptPrefix(this.result, message);

    if (this.options.debug) {
      console.debug('promptPrefix', promptPrefix);
    }

    const finalReply = await this.sendApiMessage(this.currentMessages, userMessage, {
      ...opts,
      completionMode,
      promptPrefix
    });
    responseMessage.text = finalReply;
    await this.saveMessageToDatabase(responseMessage, user);
    return { ...responseMessage, ...this.result };
  }
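
  /*
   * Usage sketch (illustrative; tool keys and option values are placeholders,
   * mirroring how the test file below constructs the agent):
   *
   *   const agent = new ChatAgent(process.env.OPENAI_API_KEY, {
   *     tools: [],
   *     modelOptions: { model: 'gpt-3.5-turbo', temperature: 0.8 },
   *     agentOptions: { model: 'gpt-3.5-turbo', temperature: 0 }
   *   });
   *   const response = await agent.sendMessage('Hello!', {
   *     abortController: new AbortController(),
   *     onProgress: (token) => process.stdout.write(token)
   *   });
   *   // Continue the same conversation by threading the returned ids:
   *   await agent.sendMessage('And a follow-up.', {
   *     abortController: new AbortController(),
   *     conversationId: response.conversationId,
   *     parentMessageId: response.messageId
   *   });
   */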

  async buildPrompt({ messages, promptPrefix: _promptPrefix, completionMode = false, isChatGptModel = true }) {
    if (this.options.debug) {
      console.debug('buildPrompt messages', messages);
    }

    const orderedMessages = messages;
    let promptPrefix = _promptPrefix;
    if (promptPrefix) {
      promptPrefix = promptPrefix.trim();
      // If the prompt prefix doesn't end with the end token, add it.
      if (!promptPrefix.endsWith(`${this.endToken}`)) {
        promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
      }
      promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
    } else {
      promptPrefix = `${this.startToken}${completionInstructions} ${this.currentDateString}${this.endToken}\n\n`;
    }

    const promptSuffix = `${this.startToken}${this.chatGptLabel}:\n`; // Prompt ChatGPT to respond.

    const instructionsPayload = {
      role: 'system',
      name: 'instructions',
      content: promptPrefix
    };

    const messagePayload = {
      role: 'system',
      content: promptSuffix
    };

    if (this.isGpt3) {
      instructionsPayload.role = 'user';
      messagePayload.role = 'user';
    }

    if (this.isGpt3 && completionMode) {
      instructionsPayload.content += `\n${promptSuffix}`;
    }

    // testing if this works with browser endpoint
    if (!this.isGpt3 && this.reverseProxyUrl) {
      instructionsPayload.role = 'user';
    }

    let currentTokenCount;
    if (isChatGptModel) {
      currentTokenCount =
        this.getTokenCountForMessage(instructionsPayload) +
        this.getTokenCountForMessage(messagePayload);
    } else {
      currentTokenCount = this.getTokenCount(`${promptPrefix}${promptSuffix}`);
    }
    let promptBody = '';
    const maxTokenCount = this.maxPromptTokens;

    // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
    // Do this within a recursive async function so that it doesn't block the event loop for too long.
    const buildPromptBody = async () => {
      if (currentTokenCount < maxTokenCount && orderedMessages.length > 0) {
        const message = orderedMessages.pop();
        const roleLabel = message.role;
        const messageString = `${this.startToken}${roleLabel}:\n${message.text}${this.endToken}\n`;
        let newPromptBody;
        if (promptBody || isChatGptModel) {
          newPromptBody = `${messageString}${promptBody}`;
        } else {
          // Always insert prompt prefix before the last user message, if not gpt-3.5-turbo.
          // This makes the AI obey the prompt instructions better, which is important for custom instructions.
          // After a bunch of testing, it doesn't seem to cause the AI any confusion, even if you ask it things
          // like "what's the last thing I wrote?".
          newPromptBody = `${promptPrefix}${messageString}${promptBody}`;
        }

        const tokenCountForMessage = this.getTokenCount(messageString);
        const newTokenCount = currentTokenCount + tokenCountForMessage;
        if (newTokenCount > maxTokenCount) {
          if (promptBody) {
            // This message would put us over the token limit, so don't add it.
            return false;
          }
          // This is the first message, so we can't add it. Just throw an error.
          throw new Error(
            `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`
          );
        }
        promptBody = newPromptBody;
        currentTokenCount = newTokenCount;
        // wait for next tick to avoid blocking the event loop
        await new Promise((resolve) => setTimeout(resolve, 0));
        return buildPromptBody();
      }
      return true;
    };

    await buildPromptBody();

    const prompt = promptBody;
    if (isChatGptModel) {
      messagePayload.content = prompt;
      // Add 2 tokens for metadata after all messages have been counted.
      currentTokenCount += 2;
    }

    if (this.isGpt3 && messagePayload.content.length > 0) {
      const context = 'Chat History:\n';
      messagePayload.content = `${context}${prompt}`;
      currentTokenCount += this.getTokenCount(context);
    }

    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxResponseTokens` tokens for the response.
    this.modelOptions.max_tokens = Math.min(
      this.maxContextTokens - currentTokenCount,
      this.maxResponseTokens
    );

    if (this.isGpt3 && !completionMode) {
      messagePayload.content += promptSuffix;
      return [instructionsPayload, messagePayload];
    }

    if (isChatGptModel) {
      const result = [messagePayload, instructionsPayload];
      return result.filter((message) => message.content.length > 0);
    }

    this.completionPromptTokens = currentTokenCount;
    return prompt;
  }
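
  /*
   * Payload sketch (illustrative; the actual markers come from this.startToken
   * and this.endToken): for chat models buildPrompt resolves to the message
   * array handed to getCompletion, roughly
   *   [
   *     { role: 'system', content: '<chat history>' },
   *     { role: 'system', name: 'instructions', content: '<prompt prefix>' }
   *   ]
   * while non-chat completion models receive a single prompt string instead.
   */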

  getTokenCount(text) {
    return this.gptEncoder.encode(text, 'all').length;
  }

  /**
   * Algorithm adapted from "6. Counting tokens for chat API calls" of
   * https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
   *
   * An additional 2 tokens need to be added for metadata after all messages have been counted.
   *
   * @param {*} message
   */
  getTokenCountForMessage(message) {
    // Map each property of the message to the number of tokens it contains
    const propertyTokenCounts = Object.entries(message).map(([key, value]) => {
      // Count the number of tokens in the property value
      const numTokens = this.getTokenCount(value);

      // Subtract 1 token if the property key is 'name'
      const adjustment = key === 'name' ? 1 : 0;
      return numTokens - adjustment;
    });

    // Sum the number of tokens in all properties and add 4 for metadata
    return propertyTokenCounts.reduce((a, b) => a + b, 4);
  }
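
  /*
   * Worked example (illustrative; assumes the encoder yields one token each for
   * 'system', 'instructions', and 'Hi'): for
   *   { role: 'system', name: 'instructions', content: 'Hi' }
   * the count is 1 (role) + (1 - 1) (name, with its -1 adjustment) + 1 (content)
   * + 4 (per-message metadata) = 6 tokens.
   */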

  /**
   * Iterate through messages, building an array based on the parentMessageId.
   * Each message has an id and a parentMessageId. The parentMessageId is the id of the message that this message is a reply to.
   * @param messages
   * @param parentMessageId
   * @returns {*[]} An array containing the messages in the order they should be displayed, starting with the root message.
   */
  static getMessagesForConversation(messages, parentMessageId) {
    const orderedMessages = [];
    let currentMessageId = parentMessageId;
    while (currentMessageId) {
      // eslint-disable-next-line no-loop-func
      const message = messages.find((m) => m.messageId === currentMessageId);
      if (!message) {
        break;
      }
      orderedMessages.unshift(message);
      currentMessageId = message.parentMessageId;
    }

    if (orderedMessages.length === 0) {
      return [];
    }

    return orderedMessages.map((msg) => ({
      messageId: msg.messageId,
      parentMessageId: msg.parentMessageId,
      role: msg.isCreatedByUser ? 'User' : 'ChatGPT',
      text: msg.text
    }));
  }
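
  /*
   * Example (illustrative): given stored messages A <- B <- C, where each arrow
   * points from child to parent via parentMessageId, calling this with C's id
   * walks C, B, A and unshift() produces [A, B, C]; only the branch of the
   * conversation tree leading to the requested leaf is returned.
   */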

  /**
   * Extracts the action tool values from the intermediate steps array.
   * Each step object in the array contains an action object with a tool property.
   * This function returns a readable string of those tool values.
   *
   * @param {Object[]} intermediateSteps - An array of intermediate step objects.
   * @returns {string} A string of action tool values from each step.
   */
  extractToolValues(intermediateSteps) {
    const tools = intermediateSteps.map((step) => step.action.tool);

    if (tools.length === 0) {
      return '';
    }

    const uniqueTools = [...new Set(tools)];

    if (uniqueTools.length === 1) {
      return uniqueTools[0] + ' plugin';
    }

    // Append 'plugin' to every name so the last entry isn't left bare.
    return uniqueTools.map((tool) => `${tool} plugin`).join(', ');
  }
}

module.exports = ChatAgent;

api/app/langchain/ChatAgent.test.js (new file, 92 lines)
@@ -0,0 +1,92 @@
const mongoose = require('mongoose');
const ChatAgent = require('./ChatAgent');
const connectDb = require('../../lib/db/connectDb');
const Conversation = require('../../models/Conversation');

describe('ChatAgent', () => {
  let TestAgent;
  const options = {
    tools: [],
    modelOptions: {
      model: 'gpt-3.5-turbo',
      temperature: 0,
      max_tokens: 2
    },
    agentOptions: {
      model: 'gpt-3.5-turbo'
    }
  };
  let parentMessageId;
  let conversationId;
  const userMessage = 'Hello, ChatGPT!';
  const apiKey = process.env.OPENAI_API_KEY;

  beforeAll(async () => {
    await connectDb();
  });

  beforeEach(() => {
    TestAgent = new ChatAgent(apiKey, options);
  });

  afterAll(async () => {
    // Delete the messages and conversation created by the test
    await Conversation.deleteConvos(null, { conversationId });
    await mongoose.connection.close();
  });

  test('initializes ChatAgent without crashing', () => {
    expect(TestAgent).toBeInstanceOf(ChatAgent);
  });

  test('check setOptions function', () => {
    expect(TestAgent.agentIsGpt3).toBe(true);
  });

  describe('sendMessage', () => {
    test('sendMessage should return a response message', async () => {
      const expectedResult = expect.objectContaining({
        sender: 'ChatGPT',
        text: expect.any(String),
        isCreatedByUser: false,
        messageId: expect.any(String),
        parentMessageId: expect.any(String),
        conversationId: expect.any(String)
      });

      const response = await TestAgent.sendMessage(userMessage);
      console.log(response);
      parentMessageId = response.messageId;
      conversationId = response.conversationId;
      expect(response).toEqual(expectedResult);
    });

    test('sendMessage should work with provided conversationId and parentMessageId', async () => {
      const userMessage = 'Second message in the conversation';
      const opts = {
        conversationId,
        parentMessageId
      };

      const expectedResult = expect.objectContaining({
        sender: 'ChatGPT',
        text: expect.any(String),
        isCreatedByUser: false,
        messageId: expect.any(String),
        parentMessageId: expect.any(String),
        conversationId: opts.conversationId
      });

      const response = await TestAgent.sendMessage(userMessage, opts);
      parentMessageId = response.messageId;
      expect(response.conversationId).toEqual(conversationId);
      expect(response).toEqual(expectedResult);
    });

    test('should return chat history', async () => {
      const chatMessages = await TestAgent.loadHistory(conversationId, parentMessageId);
      expect(TestAgent.currentMessages).toHaveLength(4);
      expect(chatMessages[0].text).toEqual(userMessage);
    });
  });
});

api/app/langchain/agents/CustomAgent/CustomAgent.js (new file, 50 lines)
@@ -0,0 +1,50 @@
const { ZeroShotAgent } = require('langchain/agents');
const { PromptTemplate, renderTemplate } = require('langchain/prompts');
const { gpt3, gpt4 } = require('./instructions');

class CustomAgent extends ZeroShotAgent {
  constructor(input) {
    super(input);
  }

  _stop() {
    return ['\nObservation:', '\nObservation 1:'];
  }

  static createPrompt(tools, opts = {}) {
    const { currentDateString, model } = opts;
    const inputVariables = ['input', 'chat_history', 'agent_scratchpad'];

    let prefix, instructions, suffix;
    if (model.startsWith('gpt-3')) {
      prefix = gpt3.prefix;
      instructions = gpt3.instructions;
      suffix = gpt3.suffix;
    } else if (model.startsWith('gpt-4')) {
      prefix = gpt4.prefix;
      instructions = gpt4.instructions;
      suffix = gpt4.suffix;
    }

    const toolStrings = tools
      .filter((tool) => tool.name !== 'self-reflection')
      .map((tool) => `${tool.name}: ${tool.description}`)
      .join('\n');
    const toolNames = tools.map((tool) => tool.name);
    const formatInstructions = renderTemplate(instructions, 'f-string', {
      tool_names: toolNames
    });
    const template = [
      `Date: ${currentDateString}\n${prefix}`,
      toolStrings,
      formatInstructions,
      suffix
    ].join('\n\n');
    return new PromptTemplate({
      template,
      inputVariables
    });
  }
}

module.exports = CustomAgent;

@@ -0,0 +1,56 @@
const CustomAgent = require('./CustomAgent');
const { CustomOutputParser } = require('./outputParser');
const { AgentExecutor } = require('langchain/agents');
const { LLMChain } = require('langchain/chains');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const {
  ChatPromptTemplate,
  SystemMessagePromptTemplate,
  HumanMessagePromptTemplate
} = require('langchain/prompts');

const initializeCustomAgent = async ({
  tools,
  model,
  pastMessages,
  currentDateString,
  ...rest
}) => {
  const prompt = CustomAgent.createPrompt(tools, { currentDateString, model: model.modelName });

  const chatPrompt = ChatPromptTemplate.fromPromptMessages([
    new SystemMessagePromptTemplate(prompt),
    HumanMessagePromptTemplate.fromTemplate(`{chat_history}
Query: {input}
{agent_scratchpad}`)
  ]);

  const outputParser = new CustomOutputParser({ tools });

  const memory = new BufferMemory({
    chatHistory: new ChatMessageHistory(pastMessages),
    // returnMessages: true, // commenting this out retains memory
    memoryKey: 'chat_history',
    humanPrefix: 'User',
    aiPrefix: 'Assistant',
    inputKey: 'input',
    outputKey: 'output'
  });

  const llmChain = new LLMChain({
    prompt: chatPrompt,
    llm: model
  });

  const agent = new CustomAgent({
    llmChain,
    outputParser,
    allowedTools: tools.map((tool) => tool.name)
  });

  return AgentExecutor.fromAgentAndTools({ agent, tools, memory, ...rest });
};

module.exports = {
  initializeCustomAgent
};
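
// Usage sketch (illustrative; model and tools are placeholders): this is the
// executor that ChatAgent.initialize() stores as `this.executor`.
//
//   const executor = await initializeCustomAgent({
//     model: new ChatOpenAI({ openAIApiKey: process.env.OPENAI_API_KEY, temperature: 0 }),
//     tools,                      // array of langchain Tool instances
//     pastMessages: [],           // prior HumanChatMessage/AIChatMessage instances
//     currentDateString: new Date().toLocaleDateString(),
//     verbose: true,
//     returnIntermediateSteps: true
//   });
//   const { output } = await executor.call({ input: 'What is 2^8?' });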

api/app/langchain/agents/CustomAgent/instructions.js (new file, 203 lines)
@@ -0,0 +1,203 @@
/*
module.exports = `You are ChatGPT, a Large Language model with useful tools.

Talk to the human and provide meaningful answers when questions are asked.

Use the tools when you need them, but use your own knowledge if you are confident of the answer. Keep answers short and concise.

A tool is not usually needed for creative requests, so do your best to answer them without tools.

Avoid repeating identical answers if it appears before. Only fulfill the human's requests, do not create extra steps beyond what the human has asked for.

Your input for 'Action' should be the name of tool used only.

Be honest. If you can't answer something, or a tool is not appropriate, say you don't know or answer to the best of your ability.

Attempt to fulfill the human's requests in as few actions as possible`;
*/

// module.exports = `You are ChatGPT, a highly knowledgeable and versatile large language model.

// Engage with the Human conversationally, providing concise and meaningful answers to questions. Utilize built-in tools when necessary, except for creative requests, where relying on your own knowledge is preferred. Aim for variety and avoid repetitive answers.

// For your 'Action' input, state the name of the tool used only, and honor user requests without adding extra steps. Always be honest; if you cannot provide an appropriate answer or tool, admit that or do your best.

// Strive to meet the user's needs efficiently with minimal actions.`;

// import {
//   BasePromptTemplate,
//   BaseStringPromptTemplate,
//   SerializedBasePromptTemplate,
//   renderTemplate,
// } from "langchain/prompts";

// prefix: `You are ChatGPT, a highly knowledgeable and versatile large language model.
// Your objective is to help users by understanding their intent and choosing the best action. Prioritize direct, specific responses. Use concise, varied answers and rely on your knowledge for creative tasks. Utilize tools when needed, and structure results for machine compatibility.
// prefix: `Objective: to comprehend human intentions based on user input and available tools. Goal: identify the best action to directly address the human's query. In your subsequent steps, you will utilize the chosen action. You may select multiple actions and list them in a meaningful order. Prioritize actions that directly relate to the user's query over general ones. Ensure that the generated thought is highly specific and explicit to best match the user's expectations. Construct the result in a manner that an online open-API would most likely expect. Provide concise and meaningful answers to human queries. Utilize tools when necessary. Relying on your own knowledge is preferred for creative requests. Aim for variety and avoid repetitive answers.

// # Available Actions & Tools:
// N/A: no suitable action, use your own knowledge.`,
// suffix: `Remember, all your responses MUST adhere to the described format and only respond if the format is followed. Output exactly with the requested format, avoiding any other text as this will be parsed by a machine. Following 'Action:', provide only one of the actions listed above. If a tool is not necessary, deduce this quickly and finish your response. Honor the human's requests without adding extra steps. Carry out tasks in the sequence written by the human. Always be honest; if you cannot provide an appropriate answer or tool, do your best with your own knowledge. Strive to meet the user's needs efficiently with minimal actions.`;

module.exports = {
  'gpt3-v1': {
    prefix: `Objective: Understand human intentions using user input and available tools. Goal: Identify the most suitable actions to directly address user queries.

When responding:
- Choose actions relevant to the user's query, using multiple actions in a logical order if needed.
- Prioritize direct and specific thoughts to meet user expectations.
- Format results in a way compatible with open-API expectations.
- Offer concise, meaningful answers to user queries.
- Use tools when necessary but rely on your own knowledge for creative requests.
- Strive for variety, avoiding repetitive responses.

# Available Actions & Tools:
N/A: No suitable action; use your own knowledge.`,
    instructions: `Always adhere to the following format in your response to indicate actions taken:

Thought: Summarize your thought process.
Action: Select an action from [{tool_names}].
Action Input: Define the action's input.
Observation: Report the action's result.

Repeat steps 1-4 as needed, in order. When not using a tool, use N/A for Action, provide the result as Action Input, and include an Observation.

Upon reaching the final answer, use this format after completing all necessary actions:

Thought: Indicate that you've determined the final answer.
Final Answer: Present the answer to the user's query.`,
    suffix: `Keep these guidelines in mind when crafting your response:
- Strictly adhere to the Action format for all responses, as they will be machine-parsed.
- If a tool is unnecessary, quickly move to the Thought/Final Answer format.
- Follow the logical sequence provided by the user without adding extra steps.
- Be honest; if you can't provide an appropriate answer using the given tools, use your own knowledge.
- Aim for efficiency and minimal actions to meet the user's needs effectively.`,
  },
  'gpt3-v2': {
    prefix: `Objective: Understand the human's query with available actions & tools. Let's work this out in a step by step way to be sure we fulfill the query.

When responding:
- Choose actions relevant to the user's query, using multiple actions in a logical order if needed.
- Prioritize direct and specific thoughts to meet user expectations.
- Format results in a way compatible with open-API expectations.
- Offer concise, meaningful answers to user queries.
- Use tools when necessary but rely on your own knowledge for creative requests.
- Strive for variety, avoiding repetitive responses.

# Available Actions & Tools:
N/A: No suitable action; use your own knowledge.`,
    instructions: `I want you to respond with this format and this format only, without comments or explanations, to indicate actions taken:
\`\`\`
Thought: Summarize your thought process.
Action: Select an action from [{tool_names}].
Action Input: Define the action's input.
Observation: Report the action's result.
\`\`\`

Repeat the format for each action as needed. When not using a tool, use N/A for Action, provide the result as Action Input, and include an Observation.

Upon reaching the final answer, use this format after completing all necessary actions:
\`\`\`
Thought: Indicate that you've determined the final answer.
Final Answer: A conversational reply to the user's query as if you were answering them directly.
\`\`\``,
    suffix: `Keep these guidelines in mind when crafting your response:
- Strictly adhere to the Action format for all responses, as they will be machine-parsed.
- If a tool is unnecessary, quickly move to the Thought/Final Answer format.
- Follow the logical sequence provided by the user without adding extra steps.
- Be honest; if you can't provide an appropriate answer using the given tools, use your own knowledge.
- Aim for efficiency and minimal actions to meet the user's needs effectively.`,
  },
  gpt3: {
    prefix: `Objective: Understand the human's query with available actions & tools. Let's work this out in a step by step way to be sure we fulfill the query.

Use available actions and tools judiciously.

# Available Actions & Tools:
N/A: No suitable action; use your own knowledge.`,
    instructions: `I want you to respond with this format and this format only, without comments or explanations, to indicate actions taken:
\`\`\`
Thought: Your thought process.
Action: Action from [{tool_names}].
Action Input: Action's input.
Observation: Action's result.
\`\`\`

For each action, repeat the format. If no tool is used, use N/A for Action, and provide the result as Action Input.

Finally, complete with:
\`\`\`
Thought: Convey final answer determination.
Final Answer: Reply to user's query conversationally.
\`\`\``,
    suffix: `Remember:
- Adhere to the Action format strictly for parsing.
- Transition quickly to Thought/Final Answer format when a tool isn't needed.
- Follow user's logic without superfluous steps.
- If unable to use tools for a fitting answer, use your knowledge.
- Strive for efficient, minimal actions.`,
  },
  'gpt4-v1': {
    prefix: `Objective: Understand the human's query with available actions & tools. Let's work this out in a step by step way to be sure we fulfill the query.

When responding:
- Choose actions relevant to the query, using multiple actions in a step by step way.
- Prioritize direct and specific thoughts to meet user expectations.
- Be precise and offer meaningful answers to user queries.
- Use tools when necessary but rely on your own knowledge for creative requests.
- Strive for variety, avoiding repetitive responses.

# Available Actions & Tools:
N/A: No suitable action; use your own knowledge.`,
    instructions: `I want you to respond with this format and this format only, without comments or explanations, to indicate actions taken:
\`\`\`
Thought: Summarize your thought process.
Action: Select an action from [{tool_names}].
Action Input: Define the action's input.
Observation: Report the action's result.
\`\`\`

Repeat the format for each action as needed. When not using a tool, use N/A for Action, provide the result as Action Input, and include an Observation.

Upon reaching the final answer, use this format after completing all necessary actions:
\`\`\`
Thought: Indicate that you've determined the final answer.
Final Answer: A conversational reply to the user's query as if you were answering them directly.
\`\`\``,
    suffix: `Keep these guidelines in mind when crafting your final response:
- Strictly adhere to the Action format for all responses.
- If a tool is unnecessary, quickly move to the Thought/Final Answer format, only if no further actions are possible or necessary.
- Follow the logical sequence provided by the user without adding extra steps.
- Be honest: if you can't provide an appropriate answer using the given tools, use your own knowledge.
- Aim for efficiency and minimal actions to meet the user's needs effectively.`,
  },
  gpt4: {
    prefix: `Objective: Understand the human's query with available actions & tools. Let's work this out in a step by step way to be sure we fulfill the query.

Use available actions and tools judiciously.

# Available Actions & Tools:
N/A: No suitable action; use your own knowledge.`,
    instructions: `Respond in this specific format without extraneous comments:
\`\`\`
Thought: Your thought process.
Action: Action from [{tool_names}].
Action Input: Action's input.
Observation: Action's result.
\`\`\`

For each action, repeat the format. If no tool is used, use N/A for Action, and provide the result as Action Input.

Finally, complete with:
\`\`\`
Thought: Indicate that you've determined the final answer.
Final Answer: A conversational reply to the user's query, including your full answer.
\`\`\``,
    suffix: `Remember:
- Adhere to the Action format strictly for parsing.
- Transition quickly to Thought/Final Answer format when a tool isn't needed.
- Follow user's logic without superfluous steps.
- If unable to use tools for a fitting answer, use your knowledge.
- Strive for efficient, minimal actions.`,
  },
};

api/app/langchain/agents/CustomAgent/outputParser.js (new file, 218 lines)
@@ -0,0 +1,218 @@
const { ZeroShotAgentOutputParser } = require('langchain/agents');

class CustomOutputParser extends ZeroShotAgentOutputParser {
  constructor(fields) {
    super(fields);
    this.tools = fields.tools;
    this.longestToolName = '';
    for (const tool of this.tools) {
      if (tool.name.length > this.longestToolName.length) {
        this.longestToolName = tool.name;
      }
    }
    this.finishToolNameRegex = /(?:the\s+)?final\s+answer:\s*/i;
    this.actionValues =
      /(?:Action(?: [1-9])?:) ([\s\S]*?)(?:\n(?:Action Input(?: [1-9])?:) ([\s\S]*?))?$/i;
    this.actionInputRegex = /(?:Action Input(?: *\d*):) ?([\s\S]*?)$/i;
    this.thoughtRegex = /(?:Thought(?: *\d*):) ?([\s\S]*?)$/i;
  }

  getValidTool(text) {
    let result = false;
    for (const tool of this.tools) {
      const { name } = tool;
      const toolIndex = text.indexOf(name);
      if (toolIndex !== -1) {
        result = name;
        break;
      }
    }
    return result;
  }

  checkIfValidTool(text) {
    let isValidTool = false;
    for (const tool of this.tools) {
      const { name } = tool;
      if (text === name) {
        isValidTool = true;
        break;
      }
    }
    return isValidTool;
  }

  async parse(text) {
    const finalMatch = text.match(this.finishToolNameRegex);

    if (finalMatch) {
      const output = text.substring(finalMatch.index + finalMatch[0].length).trim();
      return {
        returnValues: { output },
        log: text
      };
    }

    const match = this.actionValues.exec(text);

    if (!match) {
      console.log(
        '\n\n<----------------------HIT NO MATCH PARSING ERROR---------------------->\n\n',
        match
      );
      const thoughts = text.replace(/[tT]hought:/, '').split('\n');

      return {
        returnValues: { output: thoughts[0] },
        log: thoughts.slice(1).join('\n')
      };
    }

    let selectedTool = match?.[1].trim().toLowerCase();

    if (match && selectedTool === 'n/a') {
      console.log(
        '\n\n<----------------------HIT N/A PARSING ERROR---------------------->\n\n',
        match
      );
      return {
        tool: 'self-reflection',
        toolInput: match[2]?.trim().replace(/^"+|"+$/g, '') ?? '',
        log: text
      };
    }

    const toolIsValid = this.checkIfValidTool(selectedTool);
    if (match && !toolIsValid) {
      console.log(
        '\n\n<----------------Tool invalid: Re-assigning Selected Tool---------------->\n\n',
        match
      );
      selectedTool = this.getValidTool(selectedTool);
    }

    if (match && !selectedTool) {
      console.log(
        '\n\n<----------------------HIT INVALID TOOL PARSING ERROR---------------------->\n\n',
        match
      );
      selectedTool = 'self-reflection';
    }

    if (match && !match[2]) {
      console.log(
        '\n\n<----------------------HIT NO ACTION INPUT PARSING ERROR---------------------->\n\n',
        match
      );

      // In case there is no action input, let's double-check if there is an action input in 'text' variable
      const actionInputMatch = this.actionInputRegex.exec(text);
      const thoughtMatch = this.thoughtRegex.exec(text);
      if (actionInputMatch) {
        return {
          tool: selectedTool,
          toolInput: actionInputMatch[1].trim(),
          log: text
        };
      }

      if (thoughtMatch && !actionInputMatch) {
        return {
          tool: selectedTool,
          toolInput: thoughtMatch[1].trim(),
          log: text
        };
      }
    }

    if (match && selectedTool.length > this.longestToolName.length) {
      console.log('\n\n<----------------------HIT LONG PARSING ERROR---------------------->\n\n');

      let action, input, thought;
      let firstIndex = Infinity;

      for (const tool of this.tools) {
        const { name } = tool;
        const toolIndex = text.indexOf(name);
        if (toolIndex !== -1 && toolIndex < firstIndex) {
          firstIndex = toolIndex;
          action = name;
        }
      }

      // In case there is no action input, let's double-check if there is an action input in 'text' variable
      const actionInputMatch = this.actionInputRegex.exec(text);
      if (action && actionInputMatch) {
        console.log(
          '\n\n<------Matched Action Input in Long Parsing Error------>\n\n',
          actionInputMatch
        );
        return {
          tool: action,
          toolInput: actionInputMatch[1].trim().replaceAll('"', ''),
          log: text
        };
      }

      if (action) {
        const actionEndIndex = text.indexOf('Action:', firstIndex + action.length);
        const inputText = text
          .slice(firstIndex + action.length, actionEndIndex !== -1 ? actionEndIndex : undefined)
          .trim();
        const inputLines = inputText.split('\n');
        input = inputLines[0];
        if (inputLines.length > 1) {
          thought = inputLines.slice(1).join('\n');
        }
        const returnValues = {
          tool: action,
          toolInput: input,
          log: thought || inputText
        };

        const inputMatch = this.actionValues.exec(returnValues.log);
        if (inputMatch) {
          console.log('inputMatch');
          console.dir(inputMatch, { depth: null });
          returnValues.toolInput = inputMatch[1].replaceAll('"', '').trim();
          returnValues.log = returnValues.log.replace(this.actionValues, '');
        }

        return returnValues;
      } else {
        console.log('No valid tool mentioned.', this.tools, text);
        return {
          tool: 'self-reflection',
          toolInput: 'Hypothetical actions: \n"' + text + '"\n',
          log: 'Thought: I need to look at my hypothetical actions and try one'
        };
      }
    }

    return {
      tool: selectedTool,
      toolInput: match[2]?.trim()?.replace(/^"+|"+$/g, '') ?? '',
      log: text
    };
  }
}

module.exports = { CustomOutputParser };
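
// Parsing sketch (illustrative; assumes 'google' is among the configured tools):
// given a model response such as
//
//   Thought: I should search for this.
//   Action: google
//   Action Input: LibreChat plugins
//
// parse() yields { tool: 'google', toolInput: 'LibreChat plugins', log: <full text> },
// while any response containing 'Final Answer:' short-circuits to
// { returnValues: { output: <text after the marker> }, log: <full text> }.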

@@ -0,0 +1,77 @@
const {
  ChainStepExecutor,
  LLMPlanner,
  PlanOutputParser,
  PlanAndExecuteAgentExecutor
} = require('langchain/experimental/plan_and_execute');
const { LLMChain } = require('langchain/chains');
const { ChatAgent, AgentExecutor } = require('langchain/agents');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const {
  ChatPromptTemplate,
  SystemMessagePromptTemplate,
  HumanMessagePromptTemplate
} = require('langchain/prompts');

const DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = `{chat_history}

Previous steps: {previous_steps}
Current objective: {current_step}
{agent_scratchpad}
You may extract and combine relevant data from your previous steps when responding to me.`;

const PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
  `Let's first understand the problem and devise a plan to solve the problem.`,
  `Please output the plan starting with the header "Plan:"`,
  `and then followed by a numbered list of steps.`,
  `Please make the plan the minimum number of steps required`,
  `to answer the query or complete the task accurately and precisely.`,
  `Your steps should be general, and should not require a specific method to solve a step. If the task is a question,`,
  `the final step in the plan must be the following: "Given the above steps taken,`,
  `please respond to the original query."`,
  `At the end of your plan, say "<END_OF_PLAN>"`
].join(' ');

const PLANNER_CHAT_PROMPT = /* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
  /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate(PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
  /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(`{input}`)
]);

const initializePAEAgent = async ({ tools: _tools, model: llm, pastMessages, ...rest }) => {
  const tools = _tools.filter((tool) => tool.name !== 'self-reflection');

  const memory = new BufferMemory({
    chatHistory: new ChatMessageHistory(pastMessages),
    // returnMessages: true, // commenting this out retains memory
    memoryKey: 'chat_history',
    humanPrefix: 'User',
    aiPrefix: 'Assistant',
    inputKey: 'input',
    outputKey: 'output'
  });

  const plannerLlmChain = new LLMChain({
    llm,
    prompt: PLANNER_CHAT_PROMPT,
    memory
  });
  const planner = new LLMPlanner(plannerLlmChain, new PlanOutputParser());

  const agent = ChatAgent.fromLLMAndTools(llm, tools, {
    humanMessageTemplate: DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE
  });

  const stepExecutor = new ChainStepExecutor(
    AgentExecutor.fromAgentAndTools({ agent, tools, memory, ...rest })
  );

  return new PlanAndExecuteAgentExecutor({
    planner,
    stepExecutor
  });
};

module.exports = {
  initializePAEAgent
};
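
// Usage sketch (illustrative; values are placeholders): the returned executor
// first asks the planner LLM for a numbered plan, then runs each step through
// the ChatAgent-backed step executor.
//
//   const paeExecutor = await initializePAEAgent({
//     model: new ChatOpenAI({ openAIApiKey: process.env.OPENAI_API_KEY, temperature: 0 }),
//     tools,
//     pastMessages: []
//   });
//   const { output } = await paeExecutor.call({ input: 'Research a topic in three steps.' });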

api/app/langchain/demos/demo-aiplugin.js (new file, 31 lines)
@@ -0,0 +1,31 @@
require('dotenv').config();
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { initializeAgentExecutorWithOptions } = require('langchain/agents');
const HttpRequestTool = require('../tools/HttpRequestTool');
const AIPluginTool = require('../tools/AIPluginTool');

const run = async () => {
  const openAIApiKey = process.env.OPENAI_API_KEY;
  const tools = [
    new HttpRequestTool(),
    await AIPluginTool.fromPluginUrl(
      'https://www.klarna.com/.well-known/ai-plugin.json',
      new ChatOpenAI({ temperature: 0, openAIApiKey })
    )
  ];
  const agent = await initializeAgentExecutorWithOptions(
    tools,
    new ChatOpenAI({ temperature: 0, openAIApiKey }),
    { agentType: 'chat-zero-shot-react-description', verbose: true }
  );

  const result = await agent.call({
    input: 'what t shirts are available in klarna?'
  });

  console.log({ result });
};

(async () => {
  await run();
})();

api/app/langchain/demos/demo-yaml.js (new file, 47 lines)
@@ -0,0 +1,47 @@
require('dotenv').config();

const fs = require('fs');
const yaml = require('js-yaml');
const { OpenAI } = require('langchain/llms/openai');
const { JsonSpec } = require('langchain/tools');
const { createOpenApiAgent, OpenApiToolkit } = require('langchain/agents');

const run = async () => {
  let data;
  try {
    const yamlFile = fs.readFileSync('./app/langchain/demos/klarna.yaml', 'utf8');
    data = yaml.load(yamlFile);
    if (!data) {
      throw new Error('Failed to load OpenAPI spec');
    }
  } catch (e) {
    console.error(e);
    return;
  }

  const headers = {
    'Content-Type': 'application/json'
    // Authorization: `Bearer ${process.env.OPENAI_API_KEY}`
  };
  const model = new OpenAI({ temperature: 0 });
  const toolkit = new OpenApiToolkit(new JsonSpec(data), model, headers);
  const executor = createOpenApiAgent(model, toolkit, { verbose: true });

  const input = `Find me some medium sized blue shirts`;
  console.log(`Executing with input "${input}"...`);

  const result = await executor.call({ input });
  console.log(`Got output ${result.output}`);

  console.log(
    `Got intermediate steps ${JSON.stringify(
      result.intermediateSteps,
      null,
      2
    )}`
  );
};

(async () => {
  await run();
})();

api/app/langchain/demos/klarna.yaml (new file, 79 lines)
@@ -0,0 +1,79 @@
openapi: 3.0.1
servers:
  - url: https://www.klarna.com/us/shopping
info:
  title: Open AI Klarna product Api
  version: v0
  x-apisguru-categories:
    - ecommerce
  x-logo:
    url: https://www.klarna.com/static/img/social-prod-imagery-blinds-beauty-default.jpg
  x-origin:
    - format: openapi
      url: https://www.klarna.com/us/shopping/public/openai/v0/api-docs/
      version: "3.0"
  x-providerName: klarna.com
  x-serviceName: openai
tags:
  - description: Open AI Product Endpoint. Query for products.
    name: open-ai-product-endpoint
paths:
  /public/openai/v0/products:
    get:
      deprecated: false
      operationId: productsUsingGET
      parameters:
        - description: A precise query that matches one very small category or product that needs to be searched for to find the products the user is looking for. If the user explicitly stated what they want, use that as a query. The query is as specific as possible to the product name or category mentioned by the user in its singular form, and don't contain any clarifiers like latest, newest, cheapest, budget, premium, expensive or similar. The query is always taken from the latest topic, if there is a new topic a new query is started.
          in: query
          name: q
          required: true
          schema:
            type: string
        - description: number of products returned
          in: query
          name: size
          required: false
          schema:
            type: integer
        - description: maximum price of the matching product in local currency, filters results
          in: query
          name: budget
          required: false
          schema:
            type: integer
      responses:
        "200":
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/ProductResponse"
          description: Products found
        "503":
          description: one or more services are unavailable
      summary: API for fetching Klarna product information
      tags:
        - open-ai-product-endpoint
components:
  schemas:
    Product:
      properties:
        attributes:
          items:
            type: string
          type: array
        name:
          type: string
        price:
          type: string
        url:
          type: string
      title: Product
      type: object
    ProductResponse:
      properties:
        products:
          items:
            $ref: "#/components/schemas/Product"
          type: array
      title: ProductResponse
      type: object
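
# Example request against this spec (illustrative values):
#   GET https://www.klarna.com/us/shopping/public/openai/v0/products?q=t%20shirt&size=3
# returns a ProductResponse of the form:
#   { "products": [{ "name": "...", "price": "...", "url": "...", "attributes": ["..."] }] }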

api/app/langchain/demos/planExecutor.js (new file, 32 lines)
@@ -0,0 +1,32 @@
require('dotenv').config();
const { Calculator } = require('langchain/tools/calculator');
const { SerpAPI } = require('langchain/tools');
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { PlanAndExecuteAgentExecutor } = require('langchain/experimental/plan_and_execute');

const tools = [
  new Calculator(),
  new SerpAPI(process.env.SERPAPI_API_KEY || '', {
    location: 'Austin,Texas,United States',
    hl: 'en',
    gl: 'us'
  })
];
const model = new ChatOpenAI({
  temperature: 0,
  modelName: 'gpt-3.5-turbo',
  verbose: true,
  openAIApiKey: process.env.OPENAI_API_KEY
});
const executor = PlanAndExecuteAgentExecutor.fromLLMAndTools({
  llm: model,
  tools
});

(async () => {
  const result = await executor.call({
    input: `Who is the current president of the United States? What is their current age raised to the second power?`
  });

  console.log({ result });
})();

api/app/langchain/demos/spotify.yaml (new file, 7305 lines; diff suppressed because it is too large)

api/app/langchain/instructions.js (new file, 6 lines)
@@ -0,0 +1,6 @@
module.exports = {
  instructions: `Remember, all your responses MUST be in the format described. Do not respond unless it's in the format described, using the structure of Action, Action Input, etc.`,
  errorInstructions: `\nYou encountered an error in attempting a response. The user is not aware of the error so you shouldn't mention it.\nReview the actions taken carefully in case there is a partial or complete answer within them.\nError Message:`,
  imageInstructions: 'You must include the exact image paths from above, formatted in Markdown syntax: ',
  completionInstructions: `Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date:`
};

api/app/langchain/tools/AIPluginTool.js (new file, 237 lines)
@@ -0,0 +1,237 @@
const { Tool } = require('langchain/tools');
const yaml = require('js-yaml');

/*
export interface AIPluginToolParams {
  name: string;
  description: string;
  apiSpec: string;
  openaiSpec: string;
  model: BaseLanguageModel;
}

export interface PathParameter {
  name: string;
  description: string;
}

export interface Info {
  title: string;
  description: string;
  version: string;
}

export interface PathMethod {
  summary: string;
  operationId: string;
  parameters?: PathParameter[];
}

interface ApiSpec {
  openapi: string;
  info: Info;
  paths: { [key: string]: { [key: string]: PathMethod } };
}
*/

function isJson(str) {
  try {
    JSON.parse(str);
  } catch (e) {
    return false;
  }
  return true;
}

function convertJsonToYamlIfApplicable(spec) {
  if (isJson(spec)) {
    const jsonData = JSON.parse(spec);
    return yaml.dump(jsonData);
  }
  return spec;
}

function extractShortVersion(openapiSpec) {
  openapiSpec = convertJsonToYamlIfApplicable(openapiSpec);
  try {
    const fullApiSpec = yaml.load(openapiSpec);
    const shortApiSpec = {
      openapi: fullApiSpec.openapi,
      info: fullApiSpec.info,
      paths: {}
    };

    for (let path in fullApiSpec.paths) {
      shortApiSpec.paths[path] = {};
      for (let method in fullApiSpec.paths[path]) {
        shortApiSpec.paths[path][method] = {
          summary: fullApiSpec.paths[path][method].summary,
          operationId: fullApiSpec.paths[path][method].operationId,
          parameters: fullApiSpec.paths[path][method].parameters?.map((parameter) => ({
            name: parameter.name,
            description: parameter.description
          }))
        };
      }
    }

    return yaml.dump(shortApiSpec);
  } catch (e) {
    console.log(e);
    return '';
  }
}

function printOperationDetails(operationId, openapiSpec) {
  openapiSpec = convertJsonToYamlIfApplicable(openapiSpec);
  let returnText = '';
  try {
    let doc = yaml.load(openapiSpec);
    let servers = doc.servers;
    let paths = doc.paths;
    let components = doc.components;

    for (let path in paths) {
      for (let method in paths[path]) {
        let operation = paths[path][method];
        if (operation.operationId === operationId) {
          returnText += `The API request to do for operationId "${operationId}" is:\n`;
          returnText += `Method: ${method.toUpperCase()}\n`;

          let url = servers[0].url + path;
          returnText += `Path: ${url}\n`;

          returnText += 'Parameters:\n';
          if (operation.parameters) {
            for (let param of operation.parameters) {
              let required = param.required ? '' : ' (optional),';
              returnText += `- ${param.name} (${param.in},${required} ${param.schema.type}): ${param.description}\n`;
            }
          } else {
            returnText += ' None\n';
          }
          returnText += '\n';

          let responseSchema = operation.responses['200'].content['application/json'].schema;

          // Check if schema is a reference
          if (responseSchema.$ref) {
            // Extract schema name from reference
            let schemaName = responseSchema.$ref.split('/').pop();
            // Look up schema in components
            responseSchema = components.schemas[schemaName];
          }

          returnText += 'Response schema:\n';
          returnText += '- Type: ' + responseSchema.type + '\n';
          returnText += '- Additional properties:\n';
          returnText += ' - Type: ' + responseSchema.additionalProperties?.type + '\n';
          if (responseSchema.additionalProperties?.properties) {
            returnText += ' - Properties:\n';
            for (let prop in responseSchema.additionalProperties.properties) {
              returnText += ` - ${prop} (${responseSchema.additionalProperties.properties[prop].type}): Description not provided in OpenAPI spec\n`;
            }
          }
        }
      }
    }
    if (returnText === '') {
      returnText += `No operation with operationId "${operationId}" found.`;
    }
    return returnText;
  } catch (e) {
    console.log(e);
    return '';
  }
}

class AIPluginTool extends Tool {
  /*
  private _name: string;
  private _description: string;
  apiSpec: string;
  openaiSpec: string;
  model: BaseLanguageModel;
  */

  get name() {
    return this._name;
  }

  get description() {
    return this._description;
  }

  constructor(params) {
    super();
    this._name = params.name;
    this._description = params.description;
    this.apiSpec = params.apiSpec;
    this.openaiSpec = params.openaiSpec;
    this.model = params.model;
  }

  async _call(input) {
    let date = new Date();
    let fullDate = `Date: ${date.getDate()}/${
      date.getMonth() + 1
    }/${date.getFullYear()}, Time: ${date.getHours()}:${date.getMinutes()}:${date.getSeconds()}`;
    const prompt = `${fullDate}\nQuestion: ${input} \n${this.apiSpec}.`;
    console.log(prompt);
    const gptResponse = await this.model.predict(prompt);
    let operationId = gptResponse.match(/operationId: (.*)/)?.[1];
    if (!operationId) {
      return 'No operationId found in the response';
    }
    if (operationId == 'No API path found to answer the question') {
      return 'No API path found to answer the question';
    }

    let openApiData = printOperationDetails(operationId, this.openaiSpec);

    return openApiData;
  }

  static async fromPluginUrl(url, model) {
    const aiPluginRes = await fetch(url, {});
    if (!aiPluginRes.ok) {
      throw new Error(`Failed to fetch plugin from ${url} with status ${aiPluginRes.status}`);
    }
    const aiPluginJson = await aiPluginRes.json();
    const apiUrlRes = await fetch(aiPluginJson.api.url, {});
    if (!apiUrlRes.ok) {
      throw new Error(
        `Failed to fetch API spec from ${aiPluginJson.api.url} with status ${apiUrlRes.status}`
      );
    }
    const apiUrlJson = await apiUrlRes.text();
    const shortApiSpec = extractShortVersion(apiUrlJson);
    return new AIPluginTool({
      name: aiPluginJson.name_for_model.toLowerCase(),
      description: `A \`tool\` to learn the API documentation for ${aiPluginJson.name_for_model.toLowerCase()}, after which you can use 'http_request' to make the actual API call. Short description of how to use the API's results: ${aiPluginJson.description_for_model})`,
      apiSpec: `
As an AI, your task is to identify the operationId of the relevant API path based on the condensed OpenAPI specifications provided.

Please note:

1. Do not imagine URLs. Only use the information provided in the condensed OpenAPI specifications.

2. Do not guess the operationId. Identify it strictly based on the API paths and their descriptions.

Your output should only include:
- operationId: The operationId of the relevant API path

If you cannot find a suitable API path based on the OpenAPI specifications, please answer only "operationId: No API path found to answer the question".

Now, based on the question above and the condensed OpenAPI specifications given below, identify the operationId:

\`\`\`
${shortApiSpec}
\`\`\`
`,
      openaiSpec: apiUrlJson,
      model: model
    });
  }
}

module.exports = AIPluginTool;
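A minimal usage sketch, mirroring how the `plugins` loader in handleTools.js (further down in this diff) wires this tool together with `http_request`; the Klarna manifest URL and model settings are the same values used there:

const { ChatOpenAI } = require('langchain/chat_models/openai');
const AIPluginTool = require('./AIPluginTool');
const HttpRequestTool = require('./HttpRequestTool');

(async () => {
  const model = new ChatOpenAI({ temperature: 0, openAIApiKey: process.env.OPENAI_API_KEY });
  // fromPluginUrl fetches ai-plugin.json, then the OpenAPI spec it points to.
  const plugin = await AIPluginTool.fromPluginUrl(
    'https://www.klarna.com/.well-known/ai-plugin.json',
    model
  );
  // The plugin tool only *describes* the API call; 'http_request' executes it.
  const tools = [new HttpRequestTool(), plugin];
  console.log(tools.map((t) => t.name));
})();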
api/app/langchain/tools/DALL-E.js (Normal file, 111 lines)
@@ -0,0 +1,111 @@
// From https://platform.openai.com/docs/api-reference/images/create
// To use this tool, you must pass in a configured OpenAIApi object.
const fs = require('fs');
const { Configuration, OpenAIApi } = require('openai');
const { genAzureEndpoint } = require('../../../utils/genAzureEndpoints');
const { Tool } = require('langchain/tools');
const saveImageFromUrl = require('./saveImageFromUrl');
const path = require('path');

class OpenAICreateImage extends Tool {
  constructor(fields = {}) {
    super();

    let apiKey = fields.OPENAI_API_KEY || process.env.OPENAI_API_KEY;
    let azureKey = fields.AZURE_OPENAI_API_KEY || process.env.AZURE_OPENAI_API_KEY;
    let config = { apiKey };

    if (azureKey) {
      apiKey = azureKey;
      const azureConfig = {
        apiKey,
        azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME || fields.azureOpenAIApiInstanceName,
        azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME || fields.azureOpenAIApiDeploymentName,
        azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION || fields.azureOpenAIApiVersion
      };
      config = {
        apiKey,
        basePath: genAzureEndpoint({
          ...azureConfig,
        }),
        baseOptions: {
          headers: { 'api-key': apiKey },
          params: {
            'api-version': azureConfig.azureOpenAIApiVersion // this might change. I got the current value from the sample code at https://oai.azure.com/portal/chat
          }
        }
      };
    }
    this.openaiApi = new OpenAIApi(new Configuration(config));
    this.name = 'dall-e';
    this.description = `You can generate images with 'dall-e'. This tool is exclusively for visual content.
Guidelines:
- Visually describe the moods, details, structures, styles, and/or proportions of the image. Remember, the focus is on visual attributes.
- Craft your input by "showing" and not "telling" the imagery. Think in terms of what you'd want to see in a photograph or a painting.
- It's best to follow this format for image creation. Come up with the optional inputs yourself if none are given:
"Subject: [subject], Style: [style], Color: [color], Details: [details], Emotion: [emotion]"
- Generate images only once per human query unless explicitly requested by the user`;
  }
  // "Subject": "Mona Lisa",
  // "Style": "Chinese traditional painting",
  // "Color": "Mainly wash tones of ink, with small color blocks in some parts",
  // "Details": "Mona Lisa should have long hair, a silk dress, holding a fan. The background should have mountains and trees.",
  // "Emotion": "Serene and elegant"

  replaceUnwantedChars(inputString) {
    return inputString.replace(/\r\n|\r|\n/g, ' ').replace('"', '').trim();
  }

  getMarkdownImageUrl(imageName) {
    const imageUrl = path.join(this.relativeImageUrl, imageName).replace(/\\/g, '/').replace('public/', '');
    return `![generated image](/${imageUrl})`;
  }

  async _call(input) {
    const resp = await this.openaiApi.createImage({
      prompt: this.replaceUnwantedChars(input),
      // TODO: Future idea -- could we ask an LLM to extract these arguments from an input that might contain them?
      n: 1,
      // size: '1024x1024'
      size: '512x512'
    });

    const theImageUrl = resp.data.data[0].url;

    if (!theImageUrl) {
      throw new Error(`No image URL returned from OpenAI API.`);
    }

    const regex = /img-[\w\d]+.png/;
    const match = theImageUrl.match(regex);
    let imageName = '1.png';

    if (match) {
      imageName = match[0];
      console.log(imageName); // Output: img-lgCf7ppcbhqQrz6a5ear6FOb.png
    } else {
      console.log('No image name found in the string.');
    }

    this.outputPath = path.resolve(__dirname, '..', '..', '..', '..', 'client', 'public', 'images');
    const appRoot = path.resolve(__dirname, '..', '..', '..', '..', 'client');
    this.relativeImageUrl = path.relative(appRoot, this.outputPath);

    // Check if directory exists, if not create it
    if (!fs.existsSync(this.outputPath)) {
      fs.mkdirSync(this.outputPath, { recursive: true });
    }

    try {
      await saveImageFromUrl(theImageUrl, this.outputPath, imageName);
      this.result = this.getMarkdownImageUrl(imageName);
    } catch (error) {
      console.error('Error while saving the image:', error);
      this.result = theImageUrl;
    }

    return this.result;
  }
}

module.exports = OpenAICreateImage;
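A minimal sketch of calling the tool directly, assuming only that OPENAI_API_KEY is set; `call()` is the public wrapper that langchain's `Tool` base class provides around `_call`:

const OpenAICreateImage = require('./DALL-E');

(async () => {
  const dalle = new OpenAICreateImage(); // falls back to process.env.OPENAI_API_KEY
  const markdown = await dalle.call(
    'Subject: a lighthouse at dusk, Style: oil painting, Color: warm oranges, Details: crashing waves, Emotion: calm'
  );
  console.log(markdown); // e.g. ![generated image](/images/img-....png)
})();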
api/app/langchain/tools/GoogleSearch.js (Normal file, 117 lines)
@@ -0,0 +1,117 @@
const { Tool } = require('langchain/tools');
const { google } = require('googleapis');

/**
 * Represents a tool that allows an agent to use the Google Custom Search API.
 * @extends Tool
 */
class GoogleSearchAPI extends Tool {
  constructor(fields = {}) {
    super();
    this.cx = fields.GOOGLE_CSE_ID || this.getCx();
    this.apiKey = fields.GOOGLE_API_KEY || this.getApiKey();
    this.customSearch = undefined;
  }

  /**
   * The name of the tool.
   * @type {string}
   */
  name = 'google';

  /**
   * A description for the agent to use
   * @type {string}
   */
  description = `Use the 'google' tool to retrieve internet search results relevant to your input. The results will return links and snippets of text from the webpages`;

  getCx() {
    const cx = process.env.GOOGLE_CSE_ID || '';
    if (!cx) {
      throw new Error('Missing GOOGLE_CSE_ID environment variable.');
    }
    return cx;
  }

  getApiKey() {
    const apiKey = process.env.GOOGLE_API_KEY || '';
    if (!apiKey) {
      throw new Error('Missing GOOGLE_API_KEY environment variable.');
    }
    return apiKey;
  }

  getCustomSearch() {
    if (!this.customSearch) {
      const version = 'v1';
      this.customSearch = google.customsearch(version);
    }
    return this.customSearch;
  }

  resultsToReadableFormat(results) {
    let output = 'Results:\n';

    results.forEach((resultObj, index) => {
      output += `Title: ${resultObj.title}\n`;
      output += `Link: ${resultObj.link}\n`;
      if (resultObj.snippet) {
        output += `Snippet: ${resultObj.snippet}\n`;
      }

      if (index < results.length - 1) {
        output += '\n';
      }
    });

    return output;
  }

  /**
   * Calls the tool with the provided input and returns a promise that resolves with a response from the Google Custom Search API.
   * @param {string} input - The input to provide to the API.
   * @returns {Promise<String>} A promise that resolves with a response from the Google Custom Search API.
   */
  async _call(input) {
    try {
      const metadataResults = [];
      const response = await this.getCustomSearch().cse.list({
        q: input,
        cx: this.cx,
        auth: this.apiKey,
        num: 5 // Limit the number of results to 5
      });

      // return response.data;
      // console.log(response.data);

      if (!response.data.items || response.data.items.length === 0) {
        return this.resultsToReadableFormat([
          { title: 'No good Google Search Result was found', link: '' }
        ]);
      }

      // const results = response.items.slice(0, numResults);
      const results = response.data.items;

      for (const result of results) {
        const metadataResult = {
          title: result.title || '',
          link: result.link || ''
        };
        if (result.snippet) {
          metadataResult.snippet = result.snippet;
        }
        metadataResults.push(metadataResult);
      }

      return this.resultsToReadableFormat(metadataResults);
    } catch (error) {
      console.log(`Error searching Google: ${error}`);
      // throw error;
      return 'There was an error searching Google.';
    }
  }
}

module.exports = GoogleSearchAPI;
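A minimal usage sketch, assuming GOOGLE_CSE_ID and GOOGLE_API_KEY are set in the environment; the query string is illustrative:

const GoogleSearchAPI = require('./GoogleSearch');

(async () => {
  const search = new GoogleSearchAPI(); // falls back to env vars via getCx()/getApiKey()
  const results = await search.call('LibreChat plugins endpoint');
  console.log(results); // "Results:\nTitle: ...\nLink: ...\nSnippet: ..."
})();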
api/app/langchain/tools/HttpRequestTool.js (Normal file, 107 lines)
@@ -0,0 +1,107 @@
const { Tool } = require('langchain/tools');

// class RequestsGetTool extends Tool {
//   constructor(headers = {}, { maxOutputLength } = {}) {
//     super();
//     this.name = 'requests_get';
//     this.headers = headers;
//     this.maxOutputLength = maxOutputLength || 2000;
//     this.description = `A portal to the internet. Use this when you need to get specific content from a website.
//   - Input should be a url (i.e. https://www.google.com). The output will be the text response of the GET request.`;
//   }

//   async _call(input) {
//     const res = await fetch(input, {
//       headers: this.headers
//     });
//     const text = await res.text();
//     return text.slice(0, this.maxOutputLength);
//   }
// }

// class RequestsPostTool extends Tool {
//   constructor(headers = {}, { maxOutputLength } = {}) {
//     super();
//     this.name = 'requests_post';
//     this.headers = headers;
//     this.maxOutputLength = maxOutputLength || Infinity;
//     this.description = `Use this when you want to POST to a website.
//   - Input should be a json string with two keys: "url" and "data".
//   - The value of "url" should be a string, and the value of "data" should be a dictionary of
//   - key-value pairs you want to POST to the url as a JSON body.
//   - Be careful to always use double quotes for strings in the json string
//   - The output will be the text response of the POST request.`;
//   }

//   async _call(input) {
//     try {
//       const { url, data } = JSON.parse(input);
//       const res = await fetch(url, {
//         method: 'POST',
//         headers: this.headers,
//         body: JSON.stringify(data)
//       });
//       const text = await res.text();
//       return text.slice(0, this.maxOutputLength);
//     } catch (error) {
//       return `${error}`;
//     }
//   }
// }

class HttpRequestTool extends Tool {
  constructor(headers = {}, { maxOutputLength = Infinity } = {}) {
    super();
    this.headers = headers;
    this.name = 'http_request';
    this.maxOutputLength = maxOutputLength;
    this.description = `Executes HTTP methods (GET, POST, PUT, DELETE, etc.). The input is an object with three keys: "url", "method", and "data". Even for GET or DELETE, include "data" key as an empty string. "method" is the HTTP method, and "url" is the desired endpoint. If POST or PUT, "data" should contain a stringified JSON representing the body to send. Only one url per use.`;
  }

  async _call(input) {
    try {
      const urlPattern = /"url":\s*"([^"]*)"/;
      const methodPattern = /"method":\s*"([^"]*)"/;
      const dataPattern = /"data":\s*"([^"]*)"/;

      const url = input.match(urlPattern)[1];
      const method = input.match(methodPattern)[1];
      let data = input.match(dataPattern)[1];

      // Parse 'data' back to JSON if possible
      try {
        data = JSON.parse(data);
      } catch (e) {
        // If it's not a JSON string, keep it as is
      }

      let options = {
        method: method,
        headers: this.headers
      };

      if (['POST', 'PUT', 'PATCH'].includes(method.toUpperCase()) && data) {
        if (typeof data === 'object') {
          options.body = JSON.stringify(data);
        } else {
          options.body = data;
        }
        options.headers['Content-Type'] = 'application/json';
      }

      const res = await fetch(url, options);

      const text = await res.text();
      if (text.includes('<html')) {
        return 'This tool is not designed to browse web pages. Only use it for API calls.';
      }

      return text.slice(0, this.maxOutputLength);
    } catch (error) {
      console.log(error);
      return `${error}`;
    }
  }
}

module.exports = HttpRequestTool;
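Because `_call` extracts "url", "method", and "data" with regexes rather than a single JSON.parse, the input is a plain string in exactly the shape the description asks for. A minimal sketch; the GitHub endpoint is just an illustrative public URL:

const HttpRequestTool = require('./HttpRequestTool');

(async () => {
  const http = new HttpRequestTool();
  // "data" stays an empty string for GET, per the tool description.
  const input = '{"url": "https://api.github.com/zen", "method": "GET", "data": ""}';
  console.log(await http.call(input));
})();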
api/app/langchain/tools/HumanTool.js (Normal file, 30 lines)
@@ -0,0 +1,30 @@
const { Tool } = require('langchain/tools');
/**
 * Represents a tool that allows an agent to ask a human for guidance when they are stuck
 * or unsure of what to do next.
 * @extends Tool
 */
class HumanTool extends Tool {
  /**
   * The name of the tool.
   * @type {string}
   */
  name = 'Human';

  /**
   * A description for the agent to use
   * @type {string}
   */
  description = `You can ask a human for guidance when you think you
got stuck or you are not sure what to do next.
The input should be a question for the human.`;

  /**
   * Calls the tool with the provided input and returns a promise that resolves with a response from the human.
   * @param {string} input - The input to provide to the human.
   * @returns {Promise<string>} A promise that resolves with a response from the human.
   */
  _call(input) {
    return Promise.resolve(`${input}`);
  }
}

// The file is CommonJS (it uses require above), so an ESM `export class`
// would throw; export via module.exports instead.
module.exports = { HumanTool };
api/app/langchain/tools/SelfReflection.js (Normal file, 27 lines)
@@ -0,0 +1,27 @@
const { Tool } = require('langchain/tools');

class SelfReflectionTool extends Tool {
  constructor({ message, isGpt3 }) {
    super();
    this.reminders = 0;
    this.name = 'self-reflection';
    this.description = `Take this action to reflect on your thoughts & actions. For your input, provide answers for self-evaluation as part of one input, using this space as a canvas to explore and organize your ideas in response to the user's message. You can use multiple lines for your input. Perform this action sparingly and only when you are stuck.`;
    this.message = message;
    this.isGpt3 = isGpt3;
    // this.returnDirect = true;
  }

  async _call(input) {
    return this.selfReflect(input);
  }

  async selfReflect() {
    if (this.isGpt3) {
      return `I should finalize my reply as soon as I have satisfied the user's query.`;
    } else {
      return ``;
    }
  }
}

module.exports = SelfReflectionTool;
api/app/langchain/tools/StableDiffusion.js (Normal file, 85 lines)
@@ -0,0 +1,85 @@
// Generates image using stable diffusion webui's api (automatic1111)
const fs = require('fs');
const { Tool } = require('langchain/tools');
const path = require('path');
const axios = require('axios');
const sharp = require('sharp');

class StableDiffusionAPI extends Tool {
  constructor(fields) {
    super();
    this.name = 'stable-diffusion';
    this.url = fields.SD_WEBUI_URL || this.getServerURL();
    this.description = `You can generate images with 'stable-diffusion'. This tool is exclusively for visual content.
Guidelines:
- Visually describe the moods, details, structures, styles, and/or proportions of the image. Remember, the focus is on visual attributes.
- Craft your input by "showing" and not "telling" the imagery. Think in terms of what you'd want to see in a photograph or a painting.
- It's best to follow this format for image creation:
"detailed keywords to describe the subject, separated by comma | keywords we want to exclude from the final image"
- Here's an example prompt for generating a realistic portrait photo of a man:
"photo of a man in black clothes, half body, high detailed skin, coastline, overcast weather, wind, waves, 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3 | semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, out of frame, low quality, ugly, mutation, deformed"
- Generate images only once per human query unless explicitly requested by the user`;
  }

  replaceNewLinesWithSpaces(inputString) {
    return inputString.replace(/\r\n|\r|\n/g, ' ');
  }

  getMarkdownImageUrl(imageName) {
    const imageUrl = path.join(this.relativeImageUrl, imageName).replace(/\\/g, '/').replace('public/', '');
    return `![generated image](/${imageUrl})`;
  }

  getServerURL() {
    const url = process.env.SD_WEBUI_URL || '';
    if (!url) {
      throw new Error('Missing SD_WEBUI_URL environment variable.');
    }
    return url;
  }

  async _call(input) {
    const url = this.url;
    const payload = {
      prompt: input.split('|')[0],
      negative_prompt: input.split('|')[1],
      steps: 20
    };
    const response = await axios.post(`${url}/sdapi/v1/txt2img`, payload);
    const image = response.data.images[0];

    const pngPayload = { image: `data:image/png;base64,${image}` };
    const response2 = await axios.post(`${url}/sdapi/v1/png-info`, pngPayload);
    const info = response2.data.info;

    // Generate unique name
    const imageName = `${Date.now()}.png`;
    this.outputPath = path.resolve(__dirname, '..', '..', '..', '..', 'client', 'public', 'images');
    const appRoot = path.resolve(__dirname, '..', '..', '..', '..', 'client');
    this.relativeImageUrl = path.relative(appRoot, this.outputPath);

    // Check if directory exists, if not create it
    if (!fs.existsSync(this.outputPath)) {
      fs.mkdirSync(this.outputPath, { recursive: true });
    }

    try {
      const buffer = Buffer.from(image.split(',', 1)[0], 'base64');
      await sharp(buffer)
        .withMetadata({
          iptcpng: {
            parameters: info
          }
        })
        .toFile(this.outputPath + '/' + imageName);
      this.result = this.getMarkdownImageUrl(imageName);
    } catch (error) {
      console.error('Error while saving the image:', error);
      // this.result = theImageUrl;
    }

    return this.result;
  }
}

module.exports = StableDiffusionAPI;
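A minimal usage sketch, assuming an automatic1111 WebUI running locally with its API enabled; the URL and prompt are illustrative:

const StableDiffusionAPI = require('./StableDiffusion');

(async () => {
  const sd = new StableDiffusionAPI({ SD_WEBUI_URL: 'http://127.0.0.1:7860' });
  // Positive keywords before the '|', negative keywords after it.
  const markdown = await sd.call(
    'photo of a red fox in snow, sharp focus, 8k | blurry, low quality, cartoon'
  );
  console.log(markdown);
})();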
api/app/langchain/tools/Wolfram.js (Normal file, 82 lines)
@@ -0,0 +1,82 @@
/* eslint-disable no-useless-escape */
const axios = require('axios');
const { Tool } = require('langchain/tools');

class WolframAlphaAPI extends Tool {
  constructor(fields) {
    super();
    this.name = 'wolfram';
    this.apiKey = fields.WOLFRAM_APP_ID || this.getAppId();
    this.description = `Access computation, math, curated knowledge & real-time data through wolframAlpha.
- Understands natural language queries about entities in chemistry, physics, geography, history, art, astronomy, and more.
- Performs mathematical calculations, date and unit conversions, formula solving, etc.
General guidelines:
- Make natural-language queries in English; translate non-English queries before sending, then respond in the original language.
- Inform users if information is not from wolfram.
- ALWAYS use this exponent notation: "6*10^14", NEVER "6e14".
- Your input must ONLY be a single-line string.
- ALWAYS use proper Markdown formatting for all math, scientific, and chemical formulas, symbols, etc.: '$$\n[expression]\n$$' for standalone cases and '\( [expression] \)' when inline.
- Format inline wolfram Language code with Markdown code formatting.
- Convert inputs to simplified keyword queries whenever possible (e.g. convert "how many people live in France" to "France population").
- Use ONLY single-letter variable names, with or without integer subscript (e.g., n, n1, n_1).
- Use named physical constants (e.g., 'speed of light') without numerical substitution.
- Include a space between compound units (e.g., "Ω m" for "ohm*meter").
- To solve for a variable in an equation with units, consider solving a corresponding equation without units; exclude counting units (e.g., books), include genuine units (e.g., kg).
- If data for multiple properties is needed, make separate calls for each property.
- If a wolfram Alpha result is not relevant to the query:
 -- If wolfram provides multiple 'Assumptions' for a query, choose the more relevant one(s) without explaining the initial result. If you are unsure, ask the user to choose.
- Performs complex calculations, data analysis, plotting, data import, and information retrieval.`;
    // - Please ensure your input is properly formatted for wolfram Alpha.
    // -- Re-send the exact same 'input' with NO modifications, and add the 'assumption' parameter, formatted as a list, with the relevant values.
    // -- ONLY simplify or rephrase the initial query if a more relevant 'Assumption' or other input suggestions are not provided.
    // -- Do not explain each step unless user input is needed. Proceed directly to making a better input based on the available assumptions.
    // - wolfram Language code is accepted, but accepts only syntactically correct wolfram Language code.
  }

  async fetchRawText(url) {
    try {
      const response = await axios.get(url, { responseType: 'text' });
      return response.data;
    } catch (error) {
      console.error(`Error fetching raw text: ${error}`);
      throw error;
    }
  }

  getAppId() {
    const appId = process.env.WOLFRAM_APP_ID || '';
    if (!appId) {
      throw new Error('Missing WOLFRAM_APP_ID environment variable.');
    }
    return appId;
  }

  createWolframAlphaURL(query) {
    // Clean up query
    const formattedQuery = query.replaceAll(/`/g, '').replaceAll(/\n/g, ' ');
    const baseURL = 'https://www.wolframalpha.com/api/v1/llm-api';
    const encodedQuery = encodeURIComponent(formattedQuery);
    const appId = this.apiKey || this.getAppId();
    const url = `${baseURL}?input=${encodedQuery}&appid=${appId}`;
    return url;
  }

  async _call(input) {
    try {
      const url = this.createWolframAlphaURL(input);
      const response = await this.fetchRawText(url);
      return response;
    } catch (error) {
      if (error.response && error.response.data) {
        console.log('Error data:', error.response.data);
        return error.response.data;
      } else {
        console.log(`Error querying Wolfram Alpha`, error.message);
        // throw error;
        return 'There was an error querying Wolfram Alpha.';
      }
    }
  }
}

module.exports = WolframAlphaAPI;
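A minimal sketch of the two public entry points, assuming WOLFRAM_APP_ID is set; the queries are illustrative:

const WolframAlphaAPI = require('./Wolfram');

(async () => {
  const wolfram = new WolframAlphaAPI({}); // falls back to process.env.WOLFRAM_APP_ID
  // URL construction strips backticks/newlines and URL-encodes the query.
  console.log(wolfram.createWolframAlphaURL('France population'));
  // => https://www.wolframalpha.com/api/v1/llm-api?input=France%20population&appid=...
  console.log(await wolfram.call('integrate x^2 from 0 to 3'));
})();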
api/app/langchain/tools/handleTools.js (Normal file, 158 lines)
@@ -0,0 +1,158 @@
const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const { ZapierToolKit } = require('langchain/agents');
const {
  SerpAPI,
  ZapierNLAWrapper
} = require('langchain/tools');
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { Calculator } = require('langchain/tools/calculator');
const { WebBrowser } = require('langchain/tools/webbrowser');
const GoogleSearchAPI = require('./GoogleSearch');
const HttpRequestTool = require('./HttpRequestTool');
const AIPluginTool = require('./AIPluginTool');
const OpenAICreateImage = require('./DALL-E');
const StableDiffusionAPI = require('./StableDiffusion');
const WolframAlphaAPI = require('./Wolfram');
const availableTools = require('./manifest.json');
const { getUserPluginAuthValue } = require('../../../server/services/PluginService');

const validateTools = async (user, tools = []) => {
  try {
    const validToolsSet = new Set(tools);
    const availableToolsToValidate = availableTools.filter((tool) =>
      validToolsSet.has(tool.pluginKey)
    );

    const validateCredentials = async (authField, toolName) => {
      const adminAuth = process.env[authField];
      if (adminAuth && adminAuth.length > 0) {
        return;
      }

      const userAuth = await getUserPluginAuthValue(user, authField);
      if (userAuth && userAuth.length > 0) {
        return;
      }
      validToolsSet.delete(toolName);
    };

    for (const tool of availableToolsToValidate) {
      if (!tool.authConfig || tool.authConfig.length === 0) {
        continue;
      }

      for (const auth of tool.authConfig) {
        await validateCredentials(auth.authField, tool.pluginKey);
      }
    }

    return Array.from(validToolsSet.values());
  } catch (err) {
    console.log('There was a problem validating tools', err);
    throw new Error(err);
  }
};

const loadToolWithAuth = async (user, authFields, ToolConstructor, options = {}) => {
  return async function () {
    let authValues = {};

    for (const authField of authFields) {
      let authValue = process.env[authField];
      if (!authValue) {
        authValue = await getUserPluginAuthValue(user, authField);
      }
      authValues[authField] = authValue;
    }

    return new ToolConstructor({ ...options, ...authValues });
  };
};

const loadTools = async ({ user, model, tools = [], options = {} }) => {
  const toolConstructors = {
    calculator: Calculator,
    google: GoogleSearchAPI,
    wolfram: WolframAlphaAPI,
    'dall-e': OpenAICreateImage,
    'stable-diffusion': StableDiffusionAPI
  };

  const customConstructors = {
    browser: async () => {
      let openAIApiKey = process.env.OPENAI_API_KEY;
      if (!openAIApiKey) {
        openAIApiKey = await getUserPluginAuthValue(user, 'OPENAI_API_KEY');
      }
      return new WebBrowser({ model, embeddings: new OpenAIEmbeddings({ openAIApiKey }) });
    },
    serpapi: async () => {
      let apiKey = process.env.SERPAPI_API_KEY;
      if (!apiKey) {
        apiKey = await getUserPluginAuthValue(user, 'SERPAPI_API_KEY');
      }
      return new SerpAPI(apiKey, {
        location: 'Austin,Texas,United States',
        hl: 'en',
        gl: 'us'
      });
    },
    zapier: async () => {
      let apiKey = process.env.ZAPIER_NLA_API_KEY;
      if (!apiKey) {
        apiKey = await getUserPluginAuthValue(user, 'ZAPIER_NLA_API_KEY');
      }
      const zapier = new ZapierNLAWrapper({ apiKey });
      return ZapierToolKit.fromZapierNLAWrapper(zapier);
    },
    plugins: async () => {
      return [
        new HttpRequestTool(),
        await AIPluginTool.fromPluginUrl(
          'https://www.klarna.com/.well-known/ai-plugin.json',
          new ChatOpenAI({ openAIApiKey: options.openAIApiKey, temperature: 0 })
        )
      ];
    }
  };

  const requestedTools = {};

  const toolOptions = {
    serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' }
  };

  const toolAuthFields = {};

  availableTools.forEach((tool) => {
    if (customConstructors[tool.pluginKey]) {
      return;
    }

    toolAuthFields[tool.pluginKey] = tool.authConfig.map((auth) => auth.authField);
  });

  for (const tool of tools) {
    if (customConstructors[tool]) {
      requestedTools[tool] = customConstructors[tool];
      continue;
    }

    if (toolConstructors[tool]) {
      const options = toolOptions[tool] || {};
      const toolInstance = await loadToolWithAuth(
        user,
        toolAuthFields[tool],
        toolConstructors[tool],
        options
      );
      requestedTools[tool] = toolInstance;
    }
  }

  return requestedTools;
};

module.exports = {
  validateTools,
  loadTools
};
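A sketch of the intended call pattern, pieced together from index.test.js below: validate first, then load, then lazily instantiate. `user` is a Mongo user id, and `model` is only required by the `browser` tool:

const { loadTools, validateTools } = require('./handleTools');

// Hypothetical helper, for illustration only.
async function getUserTools(user, model) {
  // Drops any requested tool whose credentials exist neither in env nor in the user's plugin auth.
  const validated = await validateTools(user, ['calculator', 'google', 'wolfram']);
  const toolLoaders = await loadTools({ user, model, tools: validated });
  // Each entry is a lazy async loader; instantiate only what a run needs.
  const calculator = await toolLoaders['calculator']();
  return { validated, calculator };
}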
api/app/langchain/tools/index.js (Normal file, 10 lines)
@@ -0,0 +1,10 @@
const SelfReflectionTool = require('./SelfReflection');
const availableTools = require('./manifest.json');
const { validateTools, loadTools } = require('./handleTools');

module.exports = {
  validateTools,
  loadTools,
  availableTools,
  SelfReflectionTool
};
api/app/langchain/tools/index.test.js (Normal file, 158 lines)
@@ -0,0 +1,158 @@
/* eslint-disable jest/no-conditional-expect */
require('dotenv').config({ path: '../../../.env' });
const mongoose = require('mongoose');
const User = require('../../../models/User');
const connectDb = require('../../../lib/db/connectDb');
const { validateTools, loadTools, availableTools } = require('./index');
const PluginService = require('../../../server/services/PluginService');
const { BaseChatModel } = require('langchain/chat_models/openai');
const { Calculator } = require('langchain/tools/calculator');
const OpenAICreateImage = require('./DALL-E');
const GoogleSearchAPI = require('./GoogleSearch');

describe('Tool Handlers', () => {
  let fakeUser;
  let pluginKey = 'dall-e';
  let pluginKey2 = 'wolfram';
  let sampleTools = [pluginKey, pluginKey2];
  let ToolClass = OpenAICreateImage;
  let mockCredential = 'mock-credential';
  const mainPlugin = availableTools.find((tool) => tool.pluginKey === pluginKey);
  const authConfigs = mainPlugin.authConfig;

  beforeAll(async () => {
    await connectDb();
    fakeUser = new User({
      name: 'Fake User',
      username: 'fakeuser',
      email: 'fakeuser@example.com',
      emailVerified: false,
      password: 'fakepassword123',
      avatar: '',
      provider: 'local',
      role: 'USER',
      googleId: null,
      plugins: [],
      refreshToken: []
    });
    await fakeUser.save();
    for (const authConfig of authConfigs) {
      await PluginService.updateUserPluginAuth(fakeUser._id, authConfig.authField, pluginKey, mockCredential);
    }
  });

  // afterEach(async () => {
  //   // Clean up any test-specific data.
  // });

  afterAll(async () => {
    // Delete the fake user & plugin auth
    await User.findByIdAndDelete(fakeUser._id);
    for (const authConfig of authConfigs) {
      await PluginService.deleteUserPluginAuth(fakeUser._id, authConfig.authField);
    }
    await mongoose.connection.close();
  });

  describe('validateTools', () => {
    it('returns valid tools given input tools and user authentication', async () => {
      const validTools = await validateTools(fakeUser._id, sampleTools);
      expect(validTools).toBeDefined();
      console.log('validateTools: validTools', validTools);
      expect(validTools.some((tool) => tool === pluginKey)).toBeTruthy();
      expect(validTools.length).toBeGreaterThan(0);
    });

    it('removes tools without valid credentials from the validTools array', async () => {
      const validTools = await validateTools(fakeUser._id, sampleTools);
      expect(validTools.some((tool) => tool.pluginKey === pluginKey2)).toBeFalsy();
    });

    it('returns an empty array when no authenticated tools are provided', async () => {
      const validTools = await validateTools(fakeUser._id, []);
      expect(validTools).toEqual([]);
    });

    it('should validate a tool from an Environment Variable', async () => {
      const plugin = availableTools.find((tool) => tool.pluginKey === pluginKey2);
      const authConfigs = plugin.authConfig;
      for (const authConfig of authConfigs) {
        process.env[authConfig.authField] = mockCredential;
      }
      const validTools = await validateTools(fakeUser._id, [pluginKey2]);
      expect(validTools.length).toEqual(1);
      for (const authConfig of authConfigs) {
        delete process.env[authConfig.authField];
      }
    });
  });

  describe('loadTools', () => {
    let toolFunctions;
    let loadTool1;
    let loadTool2;
    let loadTool3;
    sampleTools = [...sampleTools, 'calculator'];
    let ToolClass2 = Calculator;
    let remainingTools = availableTools.filter(
      (tool) => sampleTools.indexOf(tool.pluginKey) === -1
    );

    beforeAll(async () => {
      toolFunctions = await loadTools({
        user: fakeUser._id,
        model: BaseChatModel,
        tools: sampleTools
      });
      loadTool1 = toolFunctions[sampleTools[0]];
      loadTool2 = toolFunctions[sampleTools[1]];
      loadTool3 = toolFunctions[sampleTools[2]];
    });
    it('returns the expected load functions for requested tools', async () => {
      expect(loadTool1).toBeDefined();
      expect(loadTool2).toBeDefined();
      expect(loadTool3).toBeDefined();

      for (const tool of remainingTools) {
        expect(toolFunctions[tool.pluginKey]).toBeUndefined();
      }
    });

    it('should initialize an authenticated tool or one without authentication', async () => {
      const authTool = await loadTool1();
      const tool = await loadTool3();
      expect(authTool).toBeInstanceOf(ToolClass);
      expect(tool).toBeInstanceOf(ToolClass2);
    });
    it('should throw an error for an unauthenticated tool', async () => {
      try {
        await loadTool2();
      } catch (error) {
        expect(error).toBeDefined();
      }
    });
    it('should initialize an authenticated tool through Environment Variables', async () => {
      let testPluginKey = 'google';
      let TestClass = GoogleSearchAPI;
      const plugin = availableTools.find((tool) => tool.pluginKey === testPluginKey);
      const authConfigs = plugin.authConfig;
      for (const authConfig of authConfigs) {
        process.env[authConfig.authField] = mockCredential;
      }
      toolFunctions = await loadTools({
        user: fakeUser._id,
        model: BaseChatModel,
        tools: [testPluginKey]
      });
      const Tool = await toolFunctions[testPluginKey]();
      expect(Tool).toBeInstanceOf(TestClass);
    });
    it('returns an empty object when no tools are requested', async () => {
      toolFunctions = await loadTools({
        user: fakeUser._id,
        model: BaseChatModel
      });
      expect(toolFunctions).toEqual({});
    });
  });
});
api/app/langchain/tools/manifest.json (Normal file, 106 lines)
@@ -0,0 +1,106 @@
[
  {
    "name": "Google",
    "pluginKey": "google",
    "description": "Use Google Search to find information about the weather, news, sports, and more.",
    "icon": "https://i.imgur.com/SMmVkNB.png",
    "authConfig": [
      {
        "authField": "GOOGLE_CSE_ID",
        "label": "Google CSE ID",
        "description": "This is your Google Custom Search Engine ID. For instructions on how to obtain this, see <a href='https://github.com/danny-avila/chatgpt-clone/blob/main/guides/GOOGLE_SEARCH.md'>Our Docs</a>."
      },
      {
        "authField": "GOOGLE_API_KEY",
        "label": "Google API Key",
        "description": "This is your Google Custom Search API Key. For instructions on how to obtain this, see <a href='https://github.com/danny-avila/chatgpt-clone/blob/main/guides/GOOGLE_SEARCH.md'>Our Docs</a>."
      }
    ]
  },
  {
    "name": "Wolfram",
    "pluginKey": "wolfram",
    "description": "Access computation, math, curated knowledge & real-time data through Wolfram|Alpha and Wolfram Language.",
    "icon": "https://www.wolframcdn.com/images/icons/Wolfram.png",
    "authConfig": [
      {
        "authField": "WOLFRAM_APP_ID",
        "label": "Wolfram App ID",
        "description": "An AppID must be supplied in all calls to the Wolfram|Alpha API. You can get one by registering at <a href='http://products.wolframalpha.com/api/'>Wolfram|Alpha</a> and going to the <a href='https://developer.wolframalpha.com/portal/myapps/'>Developer Portal</a>."
      }
    ]
  },
  {
    "name": "Browser",
    "pluginKey": "browser",
    "description": "Scrape and summarize webpage data",
    "icon": "/assets/web-browser.png",
    "authConfig": [
      {
        "authField": "OPENAI_API_KEY",
        "label": "OpenAI API Key",
        "description": "Browser makes use of OpenAI embeddings"
      }
    ]
  },
  {
    "name": "Serpapi",
    "pluginKey": "serpapi",
    "description": "SerpApi is a real-time API to access search engine results.",
    "icon": "https://i.imgur.com/5yQHUz4.png",
    "authConfig": [
      {
        "authField": "SERPAPI_API_KEY",
        "label": "Serpapi Private API Key",
        "description": "Private Key for Serpapi. Register at <a href='https://serpapi.com/'>Serpapi</a> to obtain a private key."
      }
    ]
  },
  {
    "name": "DALL-E",
    "pluginKey": "dall-e",
    "description": "Create realistic images and art from a description in natural language",
    "icon": "https://i.imgur.com/u2TzXzH.png",
    "authConfig": [
      {
        "authField": "DALLE_API_KEY",
        "label": "OpenAI API Key",
        "description": "You can use DALL-E with your API Key from OpenAI."
      }
    ]
  },
  {
    "name": "Calculator",
    "pluginKey": "calculator",
    "description": "Perform simple and complex mathematical calculations.",
    "icon": "https://i.imgur.com/RHsSG5h.png",
    "isAuthRequired": "false",
    "authConfig": []
  },
  {
    "name": "Stable Diffusion",
    "pluginKey": "stable-diffusion",
    "description": "Generate photo-realistic images given any text input.",
    "icon": "https://i.imgur.com/Yr466dp.png",
    "authConfig": [
      {
        "authField": "SD_WEBUI_URL",
        "label": "Your Stable Diffusion WebUI API URL",
        "description": "You need to provide the URL of your Stable Diffusion WebUI API. For instructions on how to obtain this, see <a href='url'>Our Docs</a>."
      }
    ]
  },
  {
    "name": "Zapier",
    "pluginKey": "zapier",
    "description": "Interact with over 5,000+ apps like Google Sheets, Gmail, HubSpot, Salesforce, and thousands more.",
    "icon": "https://cdn.zappy.app/8f853364f9b383d65b44e184e04689ed.png",
    "authConfig": [
      {
        "authField": "ZAPIER_NLA_API_KEY",
        "label": "Zapier API Key",
        "description": "You can use Zapier with your API Key from Zapier."
      }
    ]
  }
]
api/app/langchain/tools/saveImageFromUrl.js (Normal file, 39 lines)
@@ -0,0 +1,39 @@
const axios = require('axios');
const fs = require('fs');
const path = require('path');

async function saveImageFromUrl(url, outputPath, outputFilename) {
  try {
    // Fetch the image from the URL
    const response = await axios({
      url,
      responseType: 'stream'
    });

    // Check if the output directory exists, if not, create it
    if (!fs.existsSync(outputPath)) {
      fs.mkdirSync(outputPath, { recursive: true });
    }

    // Ensure the output filename has a '.png' extension
    const filenameWithPngExt = outputFilename.endsWith('.png')
      ? outputFilename
      : `${outputFilename}.png`;

    // Create a writable stream for the output path
    const outputFilePath = path.join(outputPath, filenameWithPngExt);
    const writer = fs.createWriteStream(outputFilePath);

    // Pipe the response data to the output file
    response.data.pipe(writer);

    return new Promise((resolve, reject) => {
      writer.on('finish', resolve);
      writer.on('error', reject);
    });
  } catch (error) {
    console.error('Error while saving the image:', error);
  }
}

module.exports = saveImageFromUrl;
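A minimal usage sketch; the image URL is just an illustrative, publicly reachable PNG:

const saveImageFromUrl = require('./saveImageFromUrl');

(async () => {
  // Streams the remote image to disk; resolves once the write stream finishes.
  await saveImageFromUrl(
    'https://i.imgur.com/u2TzXzH.png',
    './client/public/images',
    'example' // '.png' is appended automatically
  );
})();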
api/app/langchain/tools/wolfram-guidelines.md (Normal file, 60 lines)
@@ -0,0 +1,60 @@
Certainly! Here is the text above:

\`\`\`
Assistant is a large language model trained by OpenAI.
Knowledge Cutoff: 2021-09
Current date: 2023-05-06

# Tools

## Wolfram

// Access dynamic computation and curated data from WolframAlpha and Wolfram Cloud.
General guidelines:
- Use only getWolframAlphaResults or getWolframCloudResults endpoints.
- Prefer getWolframAlphaResults unless Wolfram Language code should be evaluated.
- Use getWolframAlphaResults for natural-language queries in English; translate non-English queries before sending, then respond in the original language.
- Use getWolframCloudResults for problems solvable with Wolfram Language code.
- Suggest only Wolfram Language for external computation.
- Inform users if information is not from Wolfram endpoints.
- Display image URLs with Markdown syntax: ![URL]
- ALWAYS use this exponent notation: \`6*10^14\`, NEVER \`6e14\`.
- ALWAYS use {"input": query} structure for queries to Wolfram endpoints; \`query\` must ONLY be a single-line string.
- ALWAYS use proper Markdown formatting for all math, scientific, and chemical formulas, symbols, etc.: '$$\n[expression]\n$$' for standalone cases and '\( [expression] \)' when inline.
- Format inline Wolfram Language code with Markdown code formatting.
- Never mention your knowledge cutoff date; Wolfram may return more recent data.
getWolframAlphaResults guidelines:
- Understands natural language queries about entities in chemistry, physics, geography, history, art, astronomy, and more.
- Performs mathematical calculations, date and unit conversions, formula solving, etc.
- Convert inputs to simplified keyword queries whenever possible (e.g. convert "how many people live in France" to "France population").
- Use ONLY single-letter variable names, with or without integer subscript (e.g., n, n1, n_1).
- Use named physical constants (e.g., 'speed of light') without numerical substitution.
- Include a space between compound units (e.g., "Ω m" for "ohm*meter").
- To solve for a variable in an equation with units, consider solving a corresponding equation without units; exclude counting units (e.g., books), include genuine units (e.g., kg).
- If data for multiple properties is needed, make separate calls for each property.
- If a Wolfram Alpha result is not relevant to the query:
 -- If Wolfram provides multiple 'Assumptions' for a query, choose the more relevant one(s) without explaining the initial result. If you are unsure, ask the user to choose.
 -- Re-send the exact same 'input' with NO modifications, and add the 'assumption' parameter, formatted as a list, with the relevant values.
 -- ONLY simplify or rephrase the initial query if a more relevant 'Assumption' or other input suggestions are not provided.
 -- Do not explain each step unless user input is needed. Proceed directly to making a better API call based on the available assumptions.
- Wolfram Language code guidelines:
 - Accepts only syntactically correct Wolfram Language code.
 - Performs complex calculations, data analysis, plotting, data import, and information retrieval.
 - Before writing code that uses Entity, EntityProperty, EntityClass, etc. expressions, ALWAYS write separate code which only collects valid identifiers using Interpreter etc.; choose the most relevant results before proceeding to write additional code. Examples:
 -- Find the EntityType that represents countries: \`Interpreter["EntityType",AmbiguityFunction->All]["countries"]\`.
 -- Find the Entity for the Empire State Building: \`Interpreter["Building",AmbiguityFunction->All]["empire state"]\`.
 -- EntityClasses: Find the "Movie" entity class for Star Trek movies: \`Interpreter["MovieClass",AmbiguityFunction->All]["star trek"]\`.
 -- Find EntityProperties associated with "weight" of "Element" entities: \`Interpreter[Restricted["EntityProperty", "Element"],AmbiguityFunction->All]["weight"]\`.
 -- If all else fails, try to find any valid Wolfram Language representation of a given input: \`SemanticInterpretation["skyscrapers",_,Hold,AmbiguityFunction->All]\`.
 -- Prefer direct use of entities of a given type to their corresponding typeData function (e.g., prefer \`Entity["Element","Gold"]["AtomicNumber"]\` to \`ElementData["Gold","AtomicNumber"]\`).
 - When composing code:
 -- Use batching techniques to retrieve data for multiple entities in a single call, if applicable.
 -- Use Association to organize and manipulate data when appropriate.
 -- Optimize code for performance and minimize the number of calls to external sources (e.g., the Wolfram Knowledgebase)
 -- Use only camel case for variable names (e.g., variableName).
 -- Use ONLY double quotes around all strings, including plot labels, etc. (e.g., \`PlotLegends -> {"sin(x)", "cos(x)", "tan(x)"}\`).
 -- Avoid use of QuantityMagnitude.
 -- If unevaluated Wolfram Language symbols appear in API results, use \`EntityValue[Entity["WolframLanguageSymbol",symbol],{"PlaintextUsage","Options"}]\` to validate or retrieve usage information for relevant symbols; \`symbol\` may be a list of symbols.
 -- Apply Evaluate to complex expressions like integrals before plotting (e.g., \`Plot[Evaluate[Integrate[...]]]\`).
- Remove all comments and formatting from code passed to the "input" parameter; for example: instead of \`square[x_] := Module[{result},\n result = x^2 (* Calculate the square *)\n]\`, send \`square[x_]:=Module[{result},result=x^2]\`.
- In ALL responses that involve code, write ALL code in Wolfram Language; create Wolfram Language functions even if an implementation is already well known in another language.
@@ -1,23 +1,23 @@
-const { Configuration, OpenAIApi } = require('openai');
+// const { Configuration, OpenAIApi } = require('openai');
 const _ = require('lodash');
-const { genAzureEndpoint } = require('../utils/genAzureEndpoints');
+const { genAzureChatCompletion } = require('../utils/genAzureEndpoints');
 
-const proxyEnvToAxiosProxy = (proxyString) => {
-  if (!proxyString) return null;
-
-  const regex = /^([^:]+):\/\/(?:([^:@]*):?([^:@]*)@)?([^:]+)(?::(\d+))?/;
-  const [, protocol, username, password, host, port] = proxyString.match(regex);
-  const proxyConfig = {
-    protocol,
-    host,
-    port: port ? parseInt(port) : undefined,
-    auth: username && password ? { username, password } : undefined
-  };
-
-  return proxyConfig;
-};
+// const proxyEnvToAxiosProxy = (proxyString) => {
+//   if (!proxyString) return null;
+
+//   const regex = /^([^:]+):\/\/(?:([^:@]*):?([^:@]*)@)?([^:]+)(?::(\d+))?/;
+//   const [, protocol, username, password, host, port] = proxyString.match(regex);
+//   const proxyConfig = {
+//     protocol,
+//     host,
+//     port: port ? parseInt(port) : undefined,
+//     auth: username && password ? { username, password } : undefined
+//   };
+
+//   return proxyConfig;
+// };
 
-const titleConvo = async ({ endpoint, text, response, oaiApiKey }) => {
+const titleConvo = async ({ text, response, oaiApiKey }) => {
   let title = 'New Chat';
   const ChatGPTClient = (await import('@waylaidwanderer/chatgpt-api')).default;
@@ -50,11 +50,11 @@ const titleConvo = async ({ endpoint, text, response, oaiApiKey }) => {
     frequency_penalty: 0
   };
 
-  let apiKey = oaiApiKey || process.env.OPENAI_KEY;
+  let apiKey = oaiApiKey || process.env.OPENAI_API_KEY;
 
   if (azure) {
     apiKey = process.env.AZURE_OPENAI_API_KEY;
-    titleGenClientOptions.reverseProxyUrl = genAzureEndpoint({
+    titleGenClientOptions.reverseProxyUrl = genAzureChatCompletion({
       azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME,
       azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME,
       azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION