import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, ChatCompletionMessageParam as Message } from "npm:openai/resources/chat/completions";
async function getOpenAI() {
  // If you don't have a key, use our std library version.
  if (Deno.env.get("OPENAI_API_KEY") === undefined) {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    return new OpenAI();
  } else {
    const { OpenAI } = await import("npm:openai");
    return new OpenAI();
  }
}
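
// Note: the client choice above is driven by the OPENAI_API_KEY environment variable.
// A hypothetical local invocation (file name and key are illustrative, not part of this module):
//   OPENAI_API_KEY=sk-... deno run --allow-env --allow-net chat.ts
// Without the key set, the esm.town std client is used instead.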
/**
 * Initiates a chat conversation with OpenAI's chat completion API and returns the full
 * completion together with the content of the first choice.
 * This function can handle both single string inputs and arrays of message objects.
 * It supports various GPT models, allowing for flexibility in choosing the model based on the application's needs.
 * See the usage sketch after this function.
 *
 * @param {string | Message[]} input - The input message(s) to send to GPT. A plain string is wrapped as a single user message.
 * @param {object} [options] - Additional options for the completion request (e.g. `model`, `max_tokens`); defaults are `gpt-3.5-turbo` with `max_tokens: 30`.
 * @returns {Promise<ChatCompletion & { content: string }>} A promise that resolves to the full completion, with a `content` shortcut for the first choice's message content.
 */
export async function chat(
  input: string | Message[],
  options?: Omit<ChatCompletionCreateParamsNonStreaming, "messages">,
): Promise<ChatCompletion & { content: string }> {
  const openai = await getOpenAI();
  // Wrap a bare string as a single user message.
  const messages: Message[] = Array.isArray(input) ? input : [{ role: "user", content: input }];
  const createParams: ChatCompletionCreateParamsNonStreaming = {
    max_tokens: 30,
    model: "gpt-3.5-turbo",
    ...(options ?? {}),
    messages,
  };
  const completion = await openai.chat.completions.create(createParams);
  // `message.content` is typed as nullable, so fall back to an empty string.
  return { ...completion, content: completion.choices[0].message.content ?? "" };
}
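
// Usage sketch (assumptions: run where top-level await is available, e.g. a Deno script or
// Val Town val importing this module; the prompts and the "gpt-4o-mini" model id are illustrative).
//
// const reply = await chat("Say hello in one word.", { max_tokens: 10 });
// console.log(reply.content); // first choice's message content
// console.log(reply.usage);   // the rest of the ChatCompletion fields are still available
//
// const scoped = await chat(
//   [
//     { role: "system", content: "You are a terse assistant." },
//     { role: "user", content: "Name one prime number." },
//   ],
//   { model: "gpt-4o-mini" }, // assumption: any chat-capable model id can be passed here
// );
// console.log(scoped.content);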