import React from 'react';
import { Plugin, GPTIcon, AnthropicIcon, AzureMinimalIcon } from '~/components/svg';
import { useAuthContext } from '~/hooks';
import { cn } from '~/utils';
import { IconProps } from '~/common';

const Icon: React.FC<IconProps> = (props) => {
|
|
|
|
|
const { size = 30, isCreatedByUser, button, model = true, endpoint, error, jailbreak } = props;
|
|
|
|
|
|
|
|
|
|
const { user } = useAuthContext();
|
|
|
|
|
|
|
|
|
|
if (isCreatedByUser) {
|
|
|
|
|
const username = user?.name || 'User';
|
|
|
|
|
|
|
|
|
|
return (
|
|
|
|
|
<div
|
|
|
|
|
title={username}
|
|
|
|
|
style={{
|
|
|
|
|
width: size,
|
|
|
|
|
height: size,
|
|
|
|
|
}}
|
2023-09-22 16:16:57 -04:00
|
|
|
className={`relative flex items-center justify-center ${props.className ?? ''}`}
|
2023-09-18 21:21:39 +02:00
|
|
|
>
|
|
|
|
|
<img
|
|
|
|
|
className="rounded-sm"
|
|
|
|
|
src={
|
|
|
|
|
user?.avatar ||
|
|
|
|
|
`https://api.dicebear.com/6.x/initials/svg?seed=${username}&fontFamily=Verdana&fontSize=36`
|
|
|
|
|
}
|
|
|
|
|
alt="avatar"
|
|
|
|
|
/>
|
|
|
|
|
</div>
|
|
|
|
|
);
|
|
|
|
|
} else {
|
|
|
|
|
const endpointIcons = {
|
|
|
|
|
azureOpenAI: {
|
2023-10-03 16:28:19 +02:00
|
|
|
icon: <AzureMinimalIcon size={size * 0.55} />,
|
2023-09-18 21:21:39 +02:00
|
|
|
bg: 'linear-gradient(0.375turn, #61bde2, #4389d0)',
|
|
|
|
|
name: 'ChatGPT',
|
|
|
|
|
},
|
|
|
|
|
openAI: {
|
2023-10-03 16:28:19 +02:00
|
|
|
icon: <GPTIcon size={size * 0.55} />,
|
2023-09-18 21:21:39 +02:00
|
|
|
bg:
|
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was point to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers
* fix: forgot to spread formatMessages also took opportunity to pluralize filename
* refactor: pass memory to tools, namely openapi specs. not used and may never be used by new method but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: test fail
* fix: only pass instructions to payload when defined or not empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00
|
|
|
typeof model === 'string' && model.toLowerCase().includes('gpt-4')
|
2023-09-18 21:21:39 +02:00
|
|
|
? '#AB68FF'
|
|
|
|
|
: '#19C37D',
|
|
|
|
|
name: 'ChatGPT',
|
|
|
|
|
},
|
|
|
|
|
gptPlugins: {
|
|
|
|
|
icon: <Plugin size={size * 0.7} />,
|
|
|
|
|
bg: `rgba(69, 89, 164, ${button ? 0.75 : 1})`,
|
|
|
|
|
name: 'Plugins',
|
|
|
|
|
},
|
|
|
|
|
google: { icon: <img src="/assets/google-palm.svg" alt="Palm Icon" />, name: 'PaLM2' },
|
2023-10-03 16:28:19 +02:00
|
|
|
anthropic: { icon: <AnthropicIcon size={size * 0.55} />, bg: '#d09a74', name: 'Claude' },
|
2023-09-18 21:21:39 +02:00
|
|
|
bingAI: {
|
|
|
|
|
icon: jailbreak ? (
|
|
|
|
|
<img src="/assets/bingai-jb.png" alt="Bing Icon" />
|
|
|
|
|
) : (
|
|
|
|
|
<img src="/assets/bingai.png" alt="Sydney Icon" />
|
|
|
|
|
),
|
|
|
|
|
name: jailbreak ? 'Sydney' : 'BingAI',
|
|
|
|
|
},
|
|
|
|
|
chatGPTBrowser: {
|
2023-10-03 16:28:19 +02:00
|
|
|
icon: <GPTIcon size={size * 0.55} />,
|
2023-09-18 21:21:39 +02:00
|
|
|
bg:
|
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was point to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers
* fix: forgot to spread formatMessages also took opportunity to pluralize filename
* refactor: pass memory to tools, namely openapi specs. not used and may never be used by new method but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: test fail
* fix: only pass instructions to payload when defined or not empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00
|
|
|
typeof model === 'string' && model.toLowerCase().includes('gpt-4')
|
2023-09-18 21:21:39 +02:00
|
|
|
? '#AB68FF'
|
|
|
|
|
: `rgba(0, 163, 255, ${button ? 0.75 : 1})`,
|
|
|
|
|
name: 'ChatGPT',
|
|
|
|
|
},
|
|
|
|
|
null: { icon: <GPTIcon size={size * 0.7} />, bg: 'grey', name: 'N/A' },
|
|
|
|
|
default: { icon: <GPTIcon size={size * 0.7} />, bg: 'grey', name: 'UNKNOWN' },
|
|
|
|
|
};
|
|
|
|
|
|
2023-10-03 16:28:19 +02:00
|
|
|
const { icon, bg, name } = endpointIcons[endpoint ?? 'default'];
|
2023-09-18 21:21:39 +02:00
|
|
|
|
|
|
|
|
return (
|
|
|
|
|
<div
|
|
|
|
|
title={name}
|
|
|
|
|
style={{
|
|
|
|
|
background: bg || 'transparent',
|
|
|
|
|
width: size,
|
|
|
|
|
height: size,
|
|
|
|
|
}}
|
|
|
|
|
className={cn(
|
|
|
|
|
'relative flex items-center justify-center rounded-sm text-white ',
|
|
|
|
|
props.className || '',
|
|
|
|
|
)}
|
|
|
|
|
>
|
|
|
|
|
{icon}
|
|
|
|
|
{error && (
|
|
|
|
|
<span className="absolute right-0 top-[20px] -mr-2 flex h-4 w-4 items-center justify-center rounded-full border border-white bg-red-500 text-[10px] text-white">
|
|
|
|
|
!
|
|
|
|
|
</span>
|
|
|
|
|
)}
|
|
|
|
|
</div>
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
};

export default Icon;