feat: add DEFAULT_MODEL environment variable (#280)

* feat: add DEFAULT_MODEL environment variable

* set the model maxLength setting in the models definition

* set the model tokenLimit setting in the models definition
Author: Thomas LÉVEIL, 2023-03-29 05:10:47 +02:00 (committed by GitHub)
parent 3f82710cdd
commit 00c6c72270
9 changed files with 94 additions and 38 deletions

.env.local.example

@@ -1 +1,2 @@
 OPENAI_API_KEY=YOUR_KEY
+DEFAULT_MODEL=gpt-3.5-turbo

README.md

@@ -103,6 +103,18 @@ npm run dev
 You should be able to start chatting.

+## Configuration
+
+When deploying the application, the following environment variables can be set:
+
+| Environment Variable | Default value   | Description                                              |
+| -------------------- | --------------- | -------------------------------------------------------- |
+| OPENAI_API_KEY       |                 | The default API key used for authentication with OpenAI  |
+| DEFAULT_MODEL        | `gpt-3.5-turbo` | The default model to use on new conversations             |
+
+If you do not provide an OpenAI API key with `OPENAI_API_KEY`, users will have to provide their own key.
+If you don't have an OpenAI API key, you can get one [here](https://platform.openai.com/account/api-keys).
+
 ## Contact

 If you have any questions, feel free to reach out to me on [Twitter](https://twitter.com/mckaywrigley).
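
The `DEFAULT_MODEL` entry in this table is backed by server-side validation; the full change is in the pages/index.tsx diff below. As a minimal standalone sketch of that resolution, where resolveDefaultModelId is a hypothetical helper name (the commit inlines this logic in getServerSideProps):

enum OpenAIModelID {
  GPT_3_5 = 'gpt-3.5-turbo',
  GPT_4 = 'gpt-4',
}

const fallbackModelID = OpenAIModelID.GPT_3_5;

function resolveDefaultModelId(envValue: string | undefined): OpenAIModelID {
  // Accept the env value only if it names a supported model ID; otherwise fall back.
  if (
    envValue &&
    Object.values(OpenAIModelID).includes(envValue as OpenAIModelID)
  ) {
    return envValue as OpenAIModelID;
  }
  return fallbackModelID;
}

// resolveDefaultModelId('gpt-4')       -> 'gpt-4'
// resolveDefaultModelId(undefined)     -> 'gpt-3.5-turbo'
// resolveDefaultModelId('not-a-model') -> 'gpt-3.5-turbo'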

components/Chat/Chat.tsx

@@ -2,7 +2,7 @@ import { Conversation, Message } from '@/types/chat';
 import { IconArrowDown } from '@tabler/icons-react';
 import { KeyValuePair } from '@/types/data';
 import { ErrorMessage } from '@/types/error';
-import { OpenAIModel } from '@/types/openai';
+import { OpenAIModel, OpenAIModelID } from '@/types/openai';
 import { Prompt } from '@/types/prompt';
 import { throttle } from '@/utils';
 import { IconClearAll, IconKey, IconSettings } from '@tabler/icons-react';
@@ -29,6 +29,7 @@ interface Props {
   models: OpenAIModel[];
   apiKey: string;
   serverSideApiKeyIsSet: boolean;
+  defaultModelId: OpenAIModelID;
   messageIsStreaming: boolean;
   modelError: ErrorMessage | null;
   loading: boolean;
@@ -48,6 +49,7 @@ export const Chat: FC<Props> = memo(
     models,
     apiKey,
     serverSideApiKeyIsSet,
+    defaultModelId,
     messageIsStreaming,
     modelError,
     loading,
@@ -206,6 +208,7 @@ export const Chat: FC<Props> = memo(
               <ModelSelect
                 model={conversation.model}
                 models={models}
+                defaultModelId={defaultModelId}
                 onModelChange={(model) =>
                   onUpdateConversation(conversation, {
                     key: 'model',
@@ -240,7 +243,8 @@ export const Chat: FC<Props> = memo(
                 </button>
                 <button
                   className="ml-2 cursor-pointer hover:opacity-50"
-                  onClick={onClearAll}>
+                  onClick={onClearAll}
+                >
                   <IconClearAll size={18} />
                 </button>
               </div>
@@ -250,6 +254,7 @@ export const Chat: FC<Props> = memo(
               <ModelSelect
                 model={conversation.model}
                 models={models}
+                defaultModelId={defaultModelId}
                 onModelChange={(model) =>
                   onUpdateConversation(conversation, {
                     key: 'model',
@@ -306,7 +311,7 @@ export const Chat: FC<Props> = memo(
             className="flex h-7 w-7 items-center justify-center rounded-full bg-white shadow-md hover:shadow-lg focus:outline-none focus:ring-2 focus:ring-blue-500 dark:bg-[#515152d7]"
             onClick={handleScrollDown}
           >
-            <IconArrowDown size={18}/>
+            <IconArrowDown size={18} />
           </button>
         </div>
       )}

components/Chat/ChatInput.tsx

@@ -1,5 +1,5 @@
 import { Message } from '@/types/chat';
-import { OpenAIModel, OpenAIModelID } from '@/types/openai';
+import { OpenAIModel } from '@/types/openai';
 import { Prompt } from '@/types/prompt';
 import { IconPlayerStop, IconRepeat, IconSend } from '@tabler/icons-react';
 import { useTranslation } from 'next-i18next';
@@ -56,7 +56,7 @@ export const ChatInput: FC<Props> = ({
   const handleChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
     const value = e.target.value;
-    const maxLength = model.id === OpenAIModelID.GPT_3_5 ? 12000 : 24000;
+    const maxLength = model.maxLength;

     if (value.length > maxLength) {
       alert(
@@ -109,7 +109,10 @@ export const ChatInput: FC<Props> = ({
       const selectedPrompt = filteredPrompts[activePromptIndex];
       if (selectedPrompt) {
         setContent((prevContent) => {
-          const newContent = prevContent?.replace(/\/\w*$/, selectedPrompt.content);
+          const newContent = prevContent?.replace(
+            /\/\w*$/,
+            selectedPrompt.content,
+          );
           return newContent;
         });
         handlePromptSelect(selectedPrompt);
@@ -211,7 +214,8 @@ export const ChatInput: FC<Props> = ({
     if (textareaRef && textareaRef.current) {
       textareaRef.current.style.height = 'inherit';
       textareaRef.current.style.height = `${textareaRef.current?.scrollHeight}px`;
-      textareaRef.current.style.overflow = `${textareaRef?.current?.scrollHeight > 400 ? 'auto' : 'hidden'
+      textareaRef.current.style.overflow = `${
+        textareaRef?.current?.scrollHeight > 400 ? 'auto' : 'hidden'
       }`;
     }
   }, [content]);
@@ -257,12 +261,13 @@ export const ChatInput: FC<Props> = ({
       <div className="relative mx-2 flex w-full flex-grow flex-col rounded-md border border-black/10 bg-white shadow-[0_0_10px_rgba(0,0,0,0.10)] dark:border-gray-900/50 dark:bg-[#40414F] dark:text-white dark:shadow-[0_0_15px_rgba(0,0,0,0.10)] sm:mx-4">
         <textarea
           ref={textareaRef}
-          className="m-0 w-full resize-none border-0 bg-transparent p-0 pr-8 pl-2 text-black dark:bg-transparent dark:text-white py-2 md:py-3 md:pl-4"
+          className="m-0 w-full resize-none border-0 bg-transparent p-0 py-2 pr-8 pl-2 text-black dark:bg-transparent dark:text-white md:py-3 md:pl-4"
           style={{
             resize: 'none',
             bottom: `${textareaRef?.current?.scrollHeight}px`,
             maxHeight: '400px',
-            overflow: `${textareaRef.current && textareaRef.current.scrollHeight > 400
+            overflow: `${
+              textareaRef.current && textareaRef.current.scrollHeight > 400
               ? 'auto'
               : 'hidden'
             }`,
@@ -278,7 +283,7 @@ export const ChatInput: FC<Props> = ({
           onKeyDown={handleKeyDown}
         />
         <button
-          className="absolute right-2 top-2 rounded-sm p-1 text-neutral-800 hover:bg-neutral-200 hover:text-neutral-900 dark:bg-opacity-50 dark:text-neutral-100 dark:hover:text-neutral-200 opacity-60"
+          className="absolute right-2 top-2 rounded-sm p-1 text-neutral-800 opacity-60 hover:bg-neutral-200 hover:text-neutral-900 dark:bg-opacity-50 dark:text-neutral-100 dark:hover:text-neutral-200"
          onClick={handleSend}
        >
          <IconSend size={18} />

components/Chat/ModelSelect.tsx

@@ -1,15 +1,22 @@
-import { OpenAIModel } from '@/types/openai';
+import { OpenAIModel, OpenAIModelID } from '@/types/openai';
 import { useTranslation } from 'next-i18next';
 import { FC } from 'react';

 interface Props {
   model: OpenAIModel;
   models: OpenAIModel[];
+  defaultModelId: OpenAIModelID;
   onModelChange: (model: OpenAIModel) => void;
 }

-export const ModelSelect: FC<Props> = ({ model, models, onModelChange }) => {
+export const ModelSelect: FC<Props> = ({
+  model,
+  models,
+  defaultModelId,
+  onModelChange,
+}) => {
   const { t } = useTranslation('chat');

   return (
     <div className="flex flex-col">
       <label className="mb-2 text-left text-neutral-700 dark:text-neutral-400">
@@ -19,7 +26,7 @@ export const ModelSelect: FC<Props> = ({ model, models, onModelChange }) => {
       <select
         className="w-full bg-transparent p-2"
         placeholder={t('Select a model') || ''}
-        value={model.id}
+        value={model?.id || defaultModelId}
         onChange={(e) => {
           onModelChange(
             models.find(
@@ -34,7 +41,9 @@ export const ModelSelect: FC<Props> = ({ model, models, onModelChange }) => {
             value={model.id}
             className="dark:bg-[#343541] dark:text-white"
           >
-            {model.name}
+            {model.id === defaultModelId
+              ? `Default (${model.name})`
+              : model.name}
           </option>
         ))}
       </select>

components/Chat/SystemPrompt.tsx

@@ -43,8 +43,7 @@ export const SystemPrompt: FC<Props> = ({
   const handleChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
     const value = e.target.value;
-    const maxLength =
-      conversation.model.id === OpenAIModelID.GPT_3_5 ? 12000 : 24000;
+    const maxLength = conversation.model.maxLength;

     if (value.length > maxLength) {
       alert(

pages/api/chat.ts

@@ -1,5 +1,4 @@
 import { ChatBody, Message } from '@/types/chat';
-import { OpenAIModelID } from '@/types/openai';
 import { DEFAULT_SYSTEM_PROMPT } from '@/utils/app/const';
 import { OpenAIStream } from '@/utils/server';
 import tiktokenModel from '@dqbd/tiktoken/encoders/cl100k_base.json';
@@ -22,8 +21,6 @@ const handler = async (req: Request): Promise<Response> => {
       tiktokenModel.pat_str,
     );

-    const tokenLimit = model.id === OpenAIModelID.GPT_4 ? 6000 : 3000;
-
     let promptToSend = prompt;
     if (!promptToSend) {
       promptToSend = DEFAULT_SYSTEM_PROMPT;
@@ -38,7 +35,7 @@ const handler = async (req: Request): Promise<Response> => {
       const message = messages[i];
       const tokens = encoding.encode(message.content);

-      if (tokenCount + tokens.length > tokenLimit) {
+      if (tokenCount + tokens.length > model.tokenLimit) {
        break;
      }
      tokenCount += tokens.length;
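
Taken together, the handler now derives its token budget from the model definition instead of branching on the model ID. A simplified sketch of the truncation loop, assuming the handler walks the history from newest to oldest, with a hypothetical countTokens callback standing in for the tiktoken encoder (encoding.encode(...).length above):

interface Message {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

// Keep the newest messages that fit within the model's token budget.
function trimToTokenLimit(
  messages: Message[],
  tokenLimit: number,
  countTokens: (text: string) => number,
): Message[] {
  let tokenCount = 0;
  const kept: Message[] = [];
  for (let i = messages.length - 1; i >= 0; i--) {
    const tokens = countTokens(messages[i].content);
    if (tokenCount + tokens > tokenLimit) break; // same cutoff as the diff above
    tokenCount += tokens;
    kept.unshift(messages[i]); // preserve chronological order
  }
  return kept;
}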

pages/index.tsx

@@ -7,7 +7,12 @@ import { KeyValuePair } from '@/types/data';
 import { ErrorMessage } from '@/types/error';
 import { LatestExportFormat, SupportedExportFormats } from '@/types/export';
 import { Folder, FolderType } from '@/types/folder';
-import { OpenAIModel, OpenAIModelID, OpenAIModels } from '@/types/openai';
+import {
+  fallbackModelID,
+  OpenAIModel,
+  OpenAIModelID,
+  OpenAIModels,
+} from '@/types/openai';
 import { Prompt } from '@/types/prompt';
 import {
   cleanConversationHistory,
@@ -32,9 +37,13 @@ import { v4 as uuidv4 } from 'uuid';
 interface HomeProps {
   serverSideApiKeyIsSet: boolean;
+  defaultModelId: OpenAIModelID;
 }

-const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
+const Home: React.FC<HomeProps> = ({
+  serverSideApiKeyIsSet,
+  defaultModelId,
+}) => {
   const { t } = useTranslation('chat');

   // STATE ----------------------------------------------
@@ -371,7 +380,7 @@ const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
       id: uuidv4(),
       name: `${t('New Conversation')}`,
       messages: [],
-      model: OpenAIModels[OpenAIModelID.GPT_3_5],
+      model: lastConversation?.model || OpenAIModels[defaultModelId],
       prompt: DEFAULT_SYSTEM_PROMPT,
       folderId: null,
     };
@@ -404,7 +413,7 @@ const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
       id: uuidv4(),
       name: 'New conversation',
       messages: [],
-      model: OpenAIModels[OpenAIModelID.GPT_3_5],
+      model: OpenAIModels[defaultModelId],
       prompt: DEFAULT_SYSTEM_PROMPT,
       folderId: null,
     });
@@ -438,7 +447,7 @@ const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
       id: uuidv4(),
       name: 'New conversation',
       messages: [],
-      model: OpenAIModels[OpenAIModelID.GPT_3_5],
+      model: OpenAIModels[defaultModelId],
       prompt: DEFAULT_SYSTEM_PROMPT,
       folderId: null,
     });
@@ -486,7 +495,7 @@ const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
       name: `Prompt ${prompts.length + 1}`,
       description: '',
       content: '',
-      model: OpenAIModels[OpenAIModelID.GPT_3_5],
+      model: OpenAIModels[defaultModelId],
       folderId: null,
     };
@@ -601,7 +610,7 @@ const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
       id: uuidv4(),
       name: 'New conversation',
       messages: [],
-      model: OpenAIModels[OpenAIModelID.GPT_3_5],
+      model: OpenAIModels[defaultModelId],
       prompt: DEFAULT_SYSTEM_PROMPT,
       folderId: null,
     });
@@ -663,7 +672,7 @@ const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
             </button>
             <div
               onClick={handleToggleChatbar}
-              className="absolute top-0 left-0 z-10 w-full h-full bg-black opacity-70 sm:hidden"
+              className="absolute top-0 left-0 z-10 h-full w-full bg-black opacity-70 sm:hidden"
             ></div>
           </div>
         ) : (
@@ -681,6 +690,7 @@ const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
               messageIsStreaming={messageIsStreaming}
               apiKey={apiKey}
               serverSideApiKeyIsSet={serverSideApiKeyIsSet}
+              defaultModelId={defaultModelId}
               modelError={modelError}
               models={models}
               loading={loading}
@@ -713,7 +723,7 @@ const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
             </button>
             <div
               onClick={handleTogglePromptbar}
-              className="absolute top-0 left-0 z-10 w-full h-full bg-black opacity-70 sm:hidden"
+              className="absolute top-0 left-0 z-10 h-full w-full bg-black opacity-70 sm:hidden"
             ></div>
           </div>
         ) : (
@@ -733,15 +743,24 @@ const Home: React.FC<HomeProps> = ({ serverSideApiKeyIsSet }) => {
 export default Home;

 export const getServerSideProps: GetServerSideProps = async ({ locale }) => {
+  const defaultModelId =
+    (process.env.DEFAULT_MODEL &&
+      Object.values(OpenAIModelID).includes(
+        process.env.DEFAULT_MODEL as OpenAIModelID,
+      ) &&
+      process.env.DEFAULT_MODEL) ||
+    fallbackModelID;
+
   return {
     props: {
       serverSideApiKeyIsSet: !!process.env.OPENAI_API_KEY,
+      defaultModelId,
       ...(await serverSideTranslations(locale ?? 'en', [
         'common',
         'chat',
         'sidebar',
         'markdown',
-        'promptbar'
+        'promptbar',
       ])),
     },
   };

types/openai.ts

@@ -1,6 +1,8 @@
 export interface OpenAIModel {
   id: string;
   name: string;
+  maxLength: number; // maximum length of a message
+  tokenLimit: number;
 }

 export enum OpenAIModelID {
@@ -8,13 +10,20 @@ export enum OpenAIModelID {
   GPT_4 = 'gpt-4',
 }

+// in case the `DEFAULT_MODEL` environment variable is not set or set to an unsupported model
+export const fallbackModelID = OpenAIModelID.GPT_3_5;
+
 export const OpenAIModels: Record<OpenAIModelID, OpenAIModel> = {
   [OpenAIModelID.GPT_3_5]: {
     id: OpenAIModelID.GPT_3_5,
-    name: 'Default (GPT-3.5)',
+    name: 'GPT-3.5',
+    maxLength: 12000,
+    tokenLimit: 3000,
   },
   [OpenAIModelID.GPT_4]: {
     id: OpenAIModelID.GPT_4,
     name: 'GPT-4',
+    maxLength: 24000,
+    tokenLimit: 6000,
   },
 };
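
With both limits carried on the model definition, call sites no longer branch on model IDs: maxLength caps input characters in ChatInput and SystemPrompt, while tokenLimit bounds the history the API handler sends. A minimal illustration, assuming the exports above are imported from this file:

import { OpenAIModelID, OpenAIModels } from '@/types/openai';

const model = OpenAIModels[OpenAIModelID.GPT_4];

// Character-level guard, as in ChatInput/SystemPrompt after this commit.
const draft = 'x'.repeat(25000);
if (draft.length > model.maxLength) {
  alert(`Message limit is ${model.maxLength} characters`);
}

// Token budget consulted by the chat API handler.
console.log(model.tokenLimit); // 6000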