diff --git a/client/src/components/Input/OpenAIOptions/OptionHover.jsx b/client/src/components/Input/OpenAIOptions/OptionHover.jsx
index 8f36468ee9..3fabcf9fa1 100644
--- a/client/src/components/Input/OpenAIOptions/OptionHover.jsx
+++ b/client/src/components/Input/OpenAIOptions/OptionHover.jsx
@@ -1,38 +1,16 @@
 import React from 'react';
-import {
-  // HoverCard,
-  // HoverCardTrigger,
-  HoverCardPortal,
-  HoverCardContent
-} from '~/components/ui/HoverCard.tsx';
-
-// import { cn } from '~/utils/';
+import { HoverCardPortal, HoverCardContent } from '~/components/ui/HoverCard.tsx';
 
 const types = {
-  temp: {
-    description:
-      'Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.'
-  },
-  max: {
-    description:
-      'The max tokens to generate. The total length of input tokens and generated tokens is limited by the model\'s context length.'
-  },
-  'top-p': {
-    description:
-      'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We recommend altering this or temperature but not both.'
-  },
-  freq: {
-    description:
-      'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.'
-  },
-  pres: {
-    description:
-      'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.'
-  },
+  temp: 'Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.',
+  max: "The max tokens to generate. The total length of input tokens and generated tokens is limited by the model's context length.",
+  'top-p':
+    'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We recommend altering this or temperature but not both.',
+  freq: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+  pres: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
 };
 
 function OptionHover({ type, side }) {
-  const options = {};
   if (type === 'pres') {
@@ -41,9 +19,13 @@ function OptionHover({ type, side }) {
   return (
     <HoverCardPortal>
-      <HoverCardContent side={side} className="w-80 " {...options}>
+      <HoverCardContent
+        side={side}
+        className="w-80 "
+        // {...options}
+      >
         <div className="space-y-2">
-          <p className="text-sm text-gray-600 dark:text-gray-300">{types[type].description}</p>
+          <p className="text-sm text-gray-600 dark:text-gray-300">{types[type]}</p>
         </div>
       </HoverCardContent>
     </HoverCardPortal>
   );
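
Note: the functional change in this diff is that `types` now maps each key directly to its tooltip string, so the hover body renders `types[type]` instead of `types[type].description`. As a rough sketch only (the `getHoverText` helper below is hypothetical and not part of the PR), the lookup changes like this:

// Hypothetical sketch of the shape change; not code from the PR.
// Before this diff: const text = types[type].description;
// After this diff: the map value itself is the string.
const getHoverText = (type) => types[type] ?? '';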