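// This val relies on Val Town's std/openai wrapper, which handles the OpenAI
// credentials on the platform side, so no API key is configured in this file.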
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();

/**
 * Handles the incoming request and returns a response.
 * @param {Request} req - The incoming request object.
 * @returns {Promise<Response>} - The response object.
 */
export default async function(req: Request): Promise<Response> {
  // Handle POST requests: the primary path for the API
  if (req.method === "POST") {
    // Guard against malformed JSON, which would otherwise throw outside the
    // try/catch below and surface as an unhandled error
    let prompt: string | undefined;
    try {
      ({ prompt } = await req.json());
    } catch {
      return new Response(JSON.stringify({ message: "Request body must be valid JSON" }), {
        status: 400,
        statusText: "Bad Request",
        headers: { "Access-Control-Allow-Origin": "*", "Content-Type": "application/json" },
      });
    }
    if (!prompt) {
      return new Response(JSON.stringify({ message: "Please provide a prompt field in your JSON body" }), {
        status: 400,
        statusText: "Bad Request",
        headers: { "Access-Control-Allow-Origin": "*", "Content-Type": "application/json" },
      });
    }
    try {
      const llmResponse = await getLlmResponse(prompt);
      return new Response(JSON.stringify({ llmResponse }), {
        headers: {
          "Access-Control-Allow-Origin": "*",
          "Access-Control-Allow-Methods": "POST",
          "Content-Type": "application/json",
        },
      });
    } catch (error) {
      // Narrow the unknown catch value before reading .message
      const message = error instanceof Error ? error.message : String(error);
      return new Response(JSON.stringify({ message }), {
        status: 500,
        statusText: "Internal Server Error",
        headers: { "Access-Control-Allow-Origin": "*", "Content-Type": "application/json" },
      });
    }
  }
  // Handle OPTIONS requests (CORS preflight), so browsers can call this
  // endpoint cross-origin
  if (req.method === "OPTIONS") {
    return new Response("", {
      headers: {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "POST",
        "Access-Control-Allow-Headers": "Content-Type",
      },
    });
  }
// Handle unsupported methods
return new Response(JSON.stringify({ message: "Please use POST method" }), {
status: 405,
statusText: "Method Not Allowed",
});
}

/**
 * Gets the response from the OpenAI language model.
 * @param {string} prompt - The prompt for the language model.
 * @returns {Promise<string>} - The response from the language model.
 */
async function getLlmResponse(prompt: string): Promise<string> {
  const completion = await openai.chat.completions.create({
    messages: [
      { role: "user", content: prompt },
    ],
    model: "gpt-3.5-turbo",
    max_tokens: 50,
  });
  // content can be null, so fall back to an empty string to match the
  // declared return type
  return completion.choices[0].message.content ?? "";
}
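
// Example client call (a sketch; the URL below is a hypothetical placeholder
// for wherever this val is deployed):
//
//   const res = await fetch("https://example-val.web.val.run", {
//     method: "POST",
//     headers: { "Content-Type": "application/json" },
//     body: JSON.stringify({ prompt: "Write a haiku about the sea." }),
//   });
//   const { llmResponse } = await res.json();
//   console.log(llmResponse);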