llama-gpt/utils/index.ts

import { Message, OpenAIModel } from "@/types";
import { createParser, ParsedEvent, ReconnectInterval } from "eventsource-parser";

export const OpenAIStream = async (model: OpenAIModel, key: string, messages: Message[]) => {
  const encoder = new TextEncoder();
  const decoder = new TextDecoder();

  const res = await fetch("https://api.openai.com/v1/chat/completions", {
    headers: {
      "Content-Type": "application/json",
      // Prefer the user-supplied key; fall back to the server's environment variable.
      Authorization: `Bearer ${key ? key : process.env.OPENAI_API_KEY}`
    },
    method: "POST",
    body: JSON.stringify({
      model,
      messages: [
        {
          role: "system",
          content: `You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown format.`
        },
        ...messages
      ],
      max_tokens: 1000,
      temperature: 0.0,
      stream: true
    })
  });

  if (res.status !== 200) {
    throw new Error(`OpenAI API returned an error: ${res.status} ${res.statusText}`);
  }

  // Wrap the server-sent event stream in a ReadableStream that emits only the
  // incremental completion text, so the caller can pipe it straight to the client.
  const stream = new ReadableStream({
    async start(controller) {
      const onParse = (event: ParsedEvent | ReconnectInterval) => {
        if (event.type === "event") {
          const data = event.data;

          // OpenAI signals the end of the stream with a literal "[DONE]" message.
          if (data === "[DONE]") {
            controller.close();
            return;
          }

          try {
            const json = JSON.parse(data);
            const text = json.choices[0].delta.content;
            const queue = encoder.encode(text);
            controller.enqueue(queue);
          } catch (e) {
            controller.error(e);
          }
        }
      };

      // Feed each raw chunk of the response body to the SSE parser,
      // which calls onParse once per complete event.
      const parser = createParser(onParse);

      for await (const chunk of res.body as any) {
        parser.feed(decoder.decode(chunk));
      }
    }
  });

  return stream;
};
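
A minimal sketch of how this helper might be consumed from a Next.js edge API route. The route path, the request body shape, and the `@/utils` import path are assumptions for illustration and are not part of the original file.

// pages/api/chat.ts — hypothetical usage sketch (names and paths are assumptions)
import { Message, OpenAIModel } from "@/types";
import { OpenAIStream } from "@/utils";

export const config = { runtime: "edge" };

const handler = async (req: Request): Promise<Response> => {
  try {
    // Assumed request body shape: the client sends the model, an optional key, and the chat history.
    const { model, key, messages } = (await req.json()) as {
      model: OpenAIModel;
      key: string;
      messages: Message[];
    };

    const stream = await OpenAIStream(model, key, messages);

    // Returning the ReadableStream directly streams tokens to the client as they arrive.
    return new Response(stream);
  } catch (error) {
    console.error(error);
    return new Response("Error", { status: 500 });
  }
};

export default handler;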