mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-01-27 04:36:12 +01:00
* wip: first pass, dropdown for selecting sequential agents * refactor: Improve agent selection logic and enhance performance in SequentialAgents component * wip: seq. agents working ideas * wip: sequential agents style change * refactor: move agent form options/submission outside of AgentConfig * refactor: prevent repeating code * refactor: simplify current agent display in SequentialAgents component * feat: persist form value handling in AgentSelect component for agent_ids * feat: first pass, sequential agnets agent update * feat: enhance message display with agent updates and empty text handling * chore: update Icon component to use EModelEndpoint for agent endpoints * feat: update content type checks in BaseClient to use constants for better readability * feat: adjust max context tokens calculation to use 90% of the model's max tokens * feat: first pass, agent run message pruning * chore: increase max listeners for abort controller to prevent memory leaks * feat: enhance runAgent function to include current index count map for improved token tracking * chore: update @librechat/agents dependency to version 2.2.5 * feat: update icons and style of SequentialAgents component for improved UI consistency * feat: add AdvancedButton and AdvancedPanel components for enhanced agent settings navigation, update styling for agent form * chore: adjust minimum height of AdvancedPanel component for better layout consistency * chore: update @librechat/agents dependency to version 2.2.6 * feat: enhance message formatting by incorporating tool set into agent message processing, in order to allow better mix/matching of agents (as tool calls for tools not found in set will be stringified) * refactor: reorder components in AgentConfig for improved readability and maintainability * refactor: enhance layout of AgentUpdate component for improved visual structure * feat: add DeepSeek provider to Bedrock settings and schemas * feat: enhance link styling in mobile.css for better 
visibility and accessibility * fix: update banner model import in update banner script; export Banner model * refactor: `duplicateAgentHandler` to include tool_resources only for OCR context files * feat: add 'qwen-vl' to visionModels for enhanced model support * fix: change image format from JPEG to PNG in DALLE3 response * feat: reorganize Advanced components and add localizations * refactor: simplify JSX structure in AgentChain component to defer container styling to parent * feat: add FormInput component for reusable input handling * feat: make agent recursion limit configurable from builder * feat: add support for agent capabilities chain in AdvancedPanel and update data-provider version * feat: add maxRecursionLimit configuration for agents and update related documentation * fix: update CONFIG_VERSION to 1.2.3 in data provider configuration * feat: replace recursion limit input with MaxAgentSteps component and enhance input handling * feat: enhance AgentChain component with hover card for additional information and update related labels * fix: pass request and response objects to `createActionTool` when using assistant actions to prevent auth error * feat: update AgentChain component layout to include agent count display * feat: increase default max listeners and implement capability check function for agent chain * fix: update link styles in mobile.css for better visibility in dark mode * chore: temp. remove agents package while bumping shared packages * chore: update @langchain/google-genai package to version 0.1.11 * chore: update @langchain/google-vertexai package to version 0.2.2 * chore: add @librechat/agents package at version 2.2.8 * feat: add deepseek.r1 model with token rate and context values for bedrock
174 lines
5.2 KiB
TypeScript
import { memo, Suspense, useMemo } from 'react';
|
|
import { useRecoilValue } from 'recoil';
|
|
import type { TMessage } from 'librechat-data-provider';
|
|
import type { TMessageContentProps, TDisplayProps } from '~/common';
|
|
import Error from '~/components/Messages/Content/Error';
|
|
import Thinking from '~/components/Artifacts/Thinking';
|
|
import { DelayedRender } from '~/components/ui';
|
|
import { useChatContext } from '~/Providers';
|
|
import MarkdownLite from './MarkdownLite';
|
|
import EditMessage from './EditMessage';
|
|
import { useLocalize } from '~/hooks';
|
|
import Container from './Container';
|
|
import Markdown from './Markdown';
|
|
import { cn } from '~/utils';
|
|
import store from '~/store';
|
|
|
|
export const ErrorMessage = ({
|
|
text,
|
|
message,
|
|
className = '',
|
|
}: Pick<TDisplayProps, 'text' | 'className'> & {
|
|
message?: TMessage;
|
|
}) => {
|
|
const localize = useLocalize();
|
|
if (text === 'Error connecting to server, try refreshing the page.') {
|
|
console.log('error message', message);
|
|
return (
|
|
<Suspense
|
|
fallback={
|
|
<div className="text-message mb-[0.625rem] flex min-h-[20px] flex-col items-start gap-3 overflow-visible">
|
|
<div className="markdown prose dark:prose-invert light w-full break-words dark:text-gray-100">
|
|
<div className="absolute">
|
|
<p className="submitting relative">
|
|
<span className="result-thinking" />
|
|
</p>
|
|
</div>
|
|
</div>
|
|
</div>
|
|
}
|
|
>
|
|
<DelayedRender delay={5500}>
|
|
<Container message={message}>
|
|
<div
|
|
className={cn(
|
|
'rounded-md border border-red-500 bg-red-500/10 px-3 py-2 text-sm text-gray-600 dark:text-gray-200',
|
|
className,
|
|
)}
|
|
>
|
|
{localize('com_ui_error_connection')}
|
|
</div>
|
|
</Container>
|
|
</DelayedRender>
|
|
</Suspense>
|
|
);
|
|
}
|
|
return (
|
|
<Container message={message}>
|
|
<div
|
|
role="alert"
|
|
aria-live="assertive"
|
|
className={cn(
|
|
'rounded-xl border border-red-500/20 bg-red-500/5 px-3 py-2 text-sm text-gray-600 dark:text-gray-200',
|
|
className,
|
|
)}
|
|
>
|
|
<Error text={text} />
|
|
</div>
|
|
</Container>
|
|
);
|
|
};
|
|
|
|
const DisplayMessage = ({ text, isCreatedByUser, message, showCursor }: TDisplayProps) => {
|
|
const { isSubmitting, latestMessage } = useChatContext();
|
|
const enableUserMsgMarkdown = useRecoilValue(store.enableUserMsgMarkdown);
|
|
const showCursorState = useMemo(
|
|
() => showCursor === true && isSubmitting,
|
|
[showCursor, isSubmitting],
|
|
);
|
|
const isLatestMessage = useMemo(
|
|
() => message.messageId === latestMessage?.messageId,
|
|
[message.messageId, latestMessage?.messageId],
|
|
);
|
|
|
|
let content: React.ReactElement;
|
|
if (!isCreatedByUser) {
|
|
content = <Markdown content={text} isLatestMessage={isLatestMessage} />;
|
|
} else if (enableUserMsgMarkdown) {
|
|
content = <MarkdownLite content={text} />;
|
|
} else {
|
|
content = <>{text}</>;
|
|
}
|
|
|
|
return (
|
|
<Container message={message}>
|
|
<div
|
|
className={cn(
|
|
isSubmitting ? 'submitting' : '',
|
|
showCursorState && !!text.length ? 'result-streaming' : '',
|
|
'markdown prose message-content dark:prose-invert light w-full break-words',
|
|
isCreatedByUser && !enableUserMsgMarkdown && 'whitespace-pre-wrap',
|
|
isCreatedByUser ? 'dark:text-gray-20' : 'dark:text-gray-100',
|
|
)}
|
|
>
|
|
{content}
|
|
</div>
|
|
</Container>
|
|
);
|
|
};
|
|
|
|
// Unfinished Message Component
|
|
export const UnfinishedMessage = ({ message }: { message: TMessage }) => (
|
|
<ErrorMessage
|
|
message={message}
|
|
text="The response is incomplete; it's either still processing, was cancelled, or censored. Refresh or try a different prompt."
|
|
/>
|
|
);
|
|
|
|
const MessageContent = ({
|
|
text,
|
|
edit,
|
|
error,
|
|
unfinished,
|
|
isSubmitting,
|
|
isLast,
|
|
...props
|
|
}: TMessageContentProps) => {
|
|
const { message } = props;
|
|
const { messageId } = message;
|
|
|
|
const { thinkingContent, regularContent } = useMemo(() => {
|
|
const thinkingMatch = text.match(/:::thinking([\s\S]*?):::/);
|
|
return {
|
|
thinkingContent: thinkingMatch ? thinkingMatch[1].trim() : '',
|
|
regularContent: thinkingMatch ? text.replace(/:::thinking[\s\S]*?:::/, '').trim() : text,
|
|
};
|
|
}, [text]);
|
|
|
|
const showRegularCursor = useMemo(() => isLast && isSubmitting, [isLast, isSubmitting]);
|
|
|
|
const unfinishedMessage = useMemo(
|
|
() =>
|
|
!isSubmitting && unfinished ? (
|
|
<Suspense>
|
|
<DelayedRender delay={250}>
|
|
<UnfinishedMessage message={message} />
|
|
</DelayedRender>
|
|
</Suspense>
|
|
) : null,
|
|
[isSubmitting, unfinished, message],
|
|
);
|
|
|
|
if (error) {
|
|
return <ErrorMessage message={props.message} text={text} />;
|
|
} else if (edit) {
|
|
return <EditMessage text={text} isSubmitting={isSubmitting} {...props} />;
|
|
}
|
|
|
|
return (
|
|
<>
|
|
{thinkingContent.length > 0 && (
|
|
<Thinking key={`thinking-${messageId}`}>{thinkingContent}</Thinking>
|
|
)}
|
|
<DisplayMessage
|
|
key={`display-${messageId}`}
|
|
showCursor={showRegularCursor}
|
|
text={regularContent}
|
|
{...props}
|
|
/>
|
|
{unfinishedMessage}
|
|
</>
|
|
);
|
|
};
|
|
|
|
// Memoized so the component re-renders only when its props actually change.
export default memo(MessageContent);
|