// llama-gpt/components/Chat/Chat.tsx

import {
Conversation,
ErrorMessage,
KeyValuePair,
Message,
OpenAIModel,
} from '@/types';
import { throttle } from '@/utils';
import { IconClearAll, IconSettings } from '@tabler/icons-react';
import { useTranslation } from 'next-i18next';
import { FC, memo, MutableRefObject, useEffect, useRef, useState } from 'react';
import { ChatInput } from './ChatInput';
import { ChatLoader } from './ChatLoader';
import { ChatMessage } from './ChatMessage';
import { ErrorMessageDiv } from './ErrorMessageDiv';
import { ModelSelect } from './ModelSelect';
import { SystemPrompt } from './SystemPrompt';
/** Props for the {@link Chat} component. */
interface Props {
/** The conversation currently being displayed and edited. */
conversation: Conversation;
/** Models available for selection in the model picker. */
models: OpenAIModel[];
/** Client-side OpenAI API key; may be empty when the server supplies one. */
apiKey: string;
/** True when the server already has an API key configured (hides the key prompt). */
serverSideApiKeyIsSet: boolean;
/** True while a response is being streamed back from the API. */
messageIsStreaming: boolean;
/** Error from loading the model list, or null when models loaded fine. */
modelError: ErrorMessage | null;
/** True when the last send attempt failed. */
messageError: boolean;
/** True while a send request is in flight (shows the loader). */
loading: boolean;
/** Sends a message; deleteCount removes that many trailing messages first (used for regenerate). */
onSend: (message: Message, deleteCount?: number) => void;
/** Persists a single key/value change (e.g. model, prompt, messages) onto the conversation. */
onUpdateConversation: (
conversation: Conversation,
data: KeyValuePair,
) => void;
/** Invoked when the user edits an existing message at the given index. */
onEditMessage: (message: Message, messageIndex: number) => void;
/** Mutable flag set to true to abort an in-progress streaming response. */
stopConversationRef: MutableRefObject<boolean>;
}
/**
 * Main chat panel: renders the API-key prompt, model errors, the message
 * list with settings/clear controls, and the input box. Auto-scrolls to the
 * newest message while the user is near the bottom of the conversation.
 */
export const Chat: FC<Props> = memo(
  ({
    conversation,
    models,
    apiKey,
    serverSideApiKeyIsSet,
    messageIsStreaming,
    modelError,
    messageError,
    loading,
    onSend,
    onUpdateConversation,
    onEditMessage,
    stopConversationRef,
  }) => {
    const { t } = useTranslation('chat');
    // Last user message, kept so "regenerate" can resend it.
    const [currentMessage, setCurrentMessage] = useState<Message>();
    // Auto-scroll is enabled only while the bottom sentinel is visible,
    // so manual scrolling up pauses the follow-the-stream behavior.
    const [autoScrollEnabled, setAutoScrollEnabled] = useState(true);
    const [showSettings, setShowSettings] = useState<boolean>(false);
    const messagesEndRef = useRef<HTMLDivElement>(null);
    const chatContainerRef = useRef<HTMLDivElement>(null);
    const textareaRef = useRef<HTMLTextAreaElement>(null);
    const handleSettings = () => {
      setShowSettings(!showSettings);
    };
    const onClearAll = () => {
      if (confirm(t<string>('Are you sure you want to clear all messages?'))) {
        onUpdateConversation(conversation, { key: 'messages', value: [] });
      }
    };
    const scrollDown = () => {
      if (autoScrollEnabled) {
        messagesEndRef.current?.scrollIntoView(true);
      }
    };
    // NOTE(review): recreated on every render, so the effect below re-runs
    // each render; consider useCallback/useMemo — left as-is to preserve
    // the existing scroll timing behavior.
    const throttledScrollDown = throttle(scrollDown, 250);
    useEffect(() => {
      throttledScrollDown();
      // length - 2 picks the second-to-last message — presumably the most
      // recent *user* message (the last one being the assistant reply);
      // TODO confirm against the message ordering used by onSend.
      setCurrentMessage(
        conversation.messages[conversation.messages.length - 2],
      );
    }, [conversation.messages, throttledScrollDown]);
    useEffect(() => {
      // Observe the bottom sentinel: when at least half of it is visible we
      // keep auto-scrolling and focus the input textarea.
      const observer = new IntersectionObserver(
        ([entry]) => {
          setAutoScrollEnabled(entry.isIntersecting);
          if (entry.isIntersecting) {
            textareaRef.current?.focus();
          }
        },
        {
          root: null,
          threshold: 0.5,
        },
      );
      const messagesEndElement = messagesEndRef.current;
      if (messagesEndElement) {
        observer.observe(messagesEndElement);
      }
      return () => {
        if (messagesEndElement) {
          observer.unobserve(messagesEndElement);
        }
      };
    }, [messagesEndRef]);
    return (
      <div className="overflow-none relative flex-1 bg-white dark:bg-[#343541]">
        {!(apiKey || serverSideApiKeyIsSet) ? (
          // No API key available anywhere: show instructions instead of the chat.
          <div className="mx-auto flex h-full w-[300px] flex-col justify-center space-y-6 sm:w-[500px]">
            <div className="text-center text-2xl font-semibold text-gray-800 dark:text-gray-100">
              {t('OpenAI API Key Required')}
            </div>
            <div className="text-center text-gray-500 dark:text-gray-400">
              {t(
                'Please set your OpenAI API key in the bottom left of the sidebar.',
              )}
            </div>
            <div className="text-center text-gray-500 dark:text-gray-400">
              {t("If you don't have an OpenAI API key, you can get one here: ")}
              <a
                href="https://platform.openai.com/account/api-keys"
                target="_blank"
                rel="noreferrer"
                className="text-blue-500 hover:underline"
              >
                openai.com
              </a>
            </div>
          </div>
        ) : modelError ? (
          <ErrorMessageDiv error={modelError} />
        ) : (
          <>
            <div
              className="max-h-full overflow-x-hidden"
              ref={chatContainerRef}
            >
              {conversation.messages.length === 0 ? (
                // Empty conversation: show the welcome screen with model &
                // system-prompt pickers (once models have loaded).
                <>
                  <div className="mx-auto flex w-[350px] flex-col space-y-10 pt-12 sm:w-[600px]">
                    <div className="text-center text-3xl font-semibold text-gray-800 dark:text-gray-100">
                      {models.length === 0 ? t('Loading...') : 'Chatbot UI'}
                    </div>
                    {models.length > 0 && (
                      <div className="flex h-full flex-col space-y-4 rounded border border-neutral-200 p-4 dark:border-neutral-600">
                        <ModelSelect
                          model={conversation.model}
                          models={models}
                          onModelChange={(model) =>
                            onUpdateConversation(conversation, {
                              key: 'model',
                              value: model,
                            })
                          }
                        />
                        <SystemPrompt
                          conversation={conversation}
                          onChangePrompt={(prompt) =>
                            onUpdateConversation(conversation, {
                              key: 'prompt',
                              value: prompt,
                            })
                          }
                        />
                      </div>
                    )}
                  </div>
                </>
              ) : (
                <>
                  {/* Sticky-style header: current model + settings/clear icons. */}
                  <div className="flex justify-center border border-b-neutral-300 bg-neutral-100 py-2 text-sm text-neutral-500 dark:border-none dark:bg-[#444654] dark:text-neutral-200">
                    {t('Model')}: {conversation.model.name}
                    <IconSettings
                      className="ml-2 cursor-pointer hover:opacity-50"
                      onClick={handleSettings}
                      size={18}
                    />
                    <IconClearAll
                      className="ml-2 cursor-pointer hover:opacity-50"
                      onClick={onClearAll}
                      size={18}
                    />
                  </div>
                  {showSettings && (
                    <div className="mx-auto flex w-[200px] flex-col space-y-10 pt-8 sm:w-[300px]">
                      <div className="flex h-full flex-col space-y-4 rounded border border-neutral-500 p-2">
                        <ModelSelect
                          model={conversation.model}
                          models={models}
                          onModelChange={(model) =>
                            onUpdateConversation(conversation, {
                              key: 'model',
                              value: model,
                            })
                          }
                        />
                      </div>
                    </div>
                  )}
                  {conversation.messages.map((message, index) => (
                    <ChatMessage
                      key={index}
                      message={message}
                      messageIndex={index}
                      onEditMessage={onEditMessage}
                    />
                  ))}
                  {loading && <ChatLoader />}
                  {/* Bottom sentinel observed by the IntersectionObserver;
                      sized to leave room above the floating input. */}
                  <div
                    className="h-[162px] bg-white dark:bg-[#343541]"
                    ref={messagesEndRef}
                  />
                </>
              )}
            </div>
            <ChatInput
              stopConversationRef={stopConversationRef}
              textareaRef={textareaRef}
              messageIsStreaming={messageIsStreaming}
              // BUGFIX: was `length > 0`, which inverted the flag — the
              // conversation is empty exactly when there are NO messages.
              conversationIsEmpty={conversation.messages.length === 0}
              model={conversation.model}
              onSend={(message) => {
                setCurrentMessage(message);
                onSend(message);
              }}
              onRegenerate={() => {
                // Resend the last user message, dropping the last two
                // messages (user + failed/stale assistant reply).
                if (currentMessage) {
                  onSend(currentMessage, 2);
                }
              }}
            />
          </>
        )}
      </div>
    );
  },
);
Chat.displayName = 'Chat';