change output limit

Mckay Wrigley 2023-04-02 06:59:47 -06:00
parent f9ddf07085
commit 1dc4f86df5
2 changed files with 4 additions and 4 deletions


@@ -2,7 +2,7 @@ import { ChatBody, Message } from '@/types/chat';
 import { DEFAULT_SYSTEM_PROMPT } from '@/utils/app/const';
 import { OpenAIError, OpenAIStream } from '@/utils/server';
 import tiktokenModel from '@dqbd/tiktoken/encoders/cl100k_base.json';
-import { init, Tiktoken } from '@dqbd/tiktoken/lite/init';
+import { Tiktoken, init } from '@dqbd/tiktoken/lite/init';
 // @ts-expect-error
 import wasm from '../../node_modules/@dqbd/tiktoken/lite/tiktoken_bg.wasm?module';
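
The only change above is the import order; the surrounding lines, though, show the tiktoken setup this route depends on: the lite/WASM build must be initialized explicitly before anything can be encoded. A minimal sketch of the usual wiring, following @dqbd/tiktoken's documented lite API rather than this file's exact code:

// Initialize the WASM build once, then construct an encoder from the
// bundled cl100k_base definition. Names mirror the imports above.
await init((imports) => WebAssembly.instantiate(wasm, imports));

const encoding = new Tiktoken(
  tiktokenModel.bpe_ranks,
  tiktokenModel.special_tokens,
  tiktokenModel.pat_str,
);

const tokens = encoding.encode('hello world');
encoding.free(); // the WASM-backed encoder must be freed explicitly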
@@ -35,7 +35,7 @@ const handler = async (req: Request): Promise<Response> => {
       const message = messages[i];
       const tokens = encoding.encode(message.content);
-      if (tokenCount + tokens.length + 1000 > model.tokenLimit) {
+      if (tokenCount + tokens.length + 2000 > model.tokenLimit) {
         break;
       }
       tokenCount += tokens.length;
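
This hunk is the prompt-pruning loop in the chat handler: it walks the history from newest to oldest and stops once the running prompt size, plus the headroom reserved for the model's reply (raised here from 1,000 to 2,000 tokens), would overflow the model's context window. A self-contained sketch of that pattern, with a hypothetical Message type and encode function standing in for the repo's own:

// Sketch of the pruning logic above; Message, encode, and tokenLimit are
// illustrative stand-ins, not the repo's actual types.
interface Message {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

const RESERVED_FOR_COMPLETION = 2000; // was 1000 before this commit

function pruneToFit(
  messages: Message[],
  encode: (text: string) => Uint32Array, // e.g. a tiktoken encoding
  tokenLimit: number, // the model's context window
): Message[] {
  let tokenCount = 0;
  const kept: Message[] = [];
  // Walk backwards so the most recent messages survive pruning.
  for (let i = messages.length - 1; i >= 0; i--) {
    const tokens = encode(messages[i].content);
    if (tokenCount + tokens.length + RESERVED_FOR_COMPLETION > tokenLimit) {
      break; // everything older than this message is dropped
    }
    tokenCount += tokens.length;
    kept.unshift(messages[i]);
  }
  return kept;
}

Reserving the same number of tokens that the request later spends as max_tokens (see the second file) keeps prompt plus completion within tokenLimit.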


@@ -33,7 +33,7 @@ export const OpenAIStream = async (
       Authorization: `Bearer ${key ? key : process.env.OPENAI_API_KEY}`,
       ...(process.env.OPENAI_ORGANIZATION && {
         'OpenAI-Organization': process.env.OPENAI_ORGANIZATION,
-      })
+      }),
     },
     method: 'POST',
     body: JSON.stringify({
@@ -45,7 +45,7 @@ export const OpenAIStream = async (
       },
       ...messages,
     ],
-    max_tokens: 1000,
+    max_tokens: 2000,
     temperature: 1,
     stream: true,
   }),
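
The raised max_tokens matches the 2,000-token reserve in the handler, so the prompt and the completion together stay inside the model's context window. For reference, a minimal sketch of the streaming request OpenAIStream assembles; the endpoint and field names follow the public OpenAI chat completions API, and the model and messages are illustrative:

// Illustrative streaming chat completion request; not copied from the repo.
const res = await fetch('https://api.openai.com/v1/chat/completions', {
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
  },
  method: 'POST',
  body: JSON.stringify({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Hello' },
    ],
    max_tokens: 2000, // caps the completion; matches the handler's reserve
    temperature: 1,
    stream: true,
  }),
});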