import { DataRequest } from "https://esm.town/v/jxnblk/ReactStream";
const LENGTH = 128;
const MAX_TOKENS = 512;
export default async function indirectionAPI(req: DataRequest, res: Response, next): Promise<Response> {
  if (req.method !== "POST") return next();
  const url = new URL(req.url);
  switch (url.pathname) {
    case "/encode": {
      const b1 = await req.text();
      const hash = await encode(b1);
      // console.log({ hash });
      return Response.json({ ok: true, body: hash });
    }
    case "/decode": {
      const b2 = await req.text();
      // console.log(b2);
      const str = await decode(b2);
      // console.log(str);
      return Response.json({ ok: true, body: str });
    }
  }
  // console.log("JSON", req.headers);
  return await getClue(req);
}
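
// Example client calls for the /encode and /decode routes (a sketch, not part of
// the original val): both routes take a plain-text POST body and respond with JSON
// of the shape { ok, body }. The function name and base URL are placeholders.
export async function exampleEncodeDecode(baseUrl: string, word: string) {
  // Encode a word into a hash.
  const encodeRes = await fetch(`${baseUrl}/encode`, { method: "POST", body: word });
  const { body: hash } = await encodeRes.json();
  // Decode the hash back into the original word.
  const decodeRes = await fetch(`${baseUrl}/decode`, { method: "POST", body: hash });
  const { body: decoded } = await decodeRes.json();
  return { hash, decoded };
}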
export async function getClue(req: Request): Promise<Response> {
  const body = await req.json();
  const { word, type } = body;
  if (!word) return Response.json({ error: "Requires word" });
  if (word.length < 3) return Response.json({ error: "too short" });
  if (word.length > 20) return Response.json({ error: "too long" });
  let clueType = ClueType.Riddle;
  console.log(word, clueType);
  const tabooPrompt = createTabooPrompt(word);
  console.log(tabooPrompt);
  const tabooResponse = await fetchOpenAIText(tabooPrompt);
  const taboo = getTabooWords(tabooResponse);
  const cluePrompt = createCluePrompt(word, taboo, clueType);
  return fetchOpenAIStream(cluePrompt);
}
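
// Hypothetical illustration of the taboo step: the idea above is to first ask the
// model for words related to the target, then forbid them in the clue prompt.
// createTabooPrompt and getTabooWords are defined elsewhere in this val and may
// differ; the parser below is a placeholder sketch, not the val's implementation.
function exampleParseTabooWords(response: string): string[] {
  // Split a response like "piano, keys, music" (or one word per line) into a
  // cleaned, lowercase word list.
  return response
    .split(/[,\n]/)
    .map((w) => w.replace(/^[\s\d.\-*]+|[\s.]+$/g, "").toLowerCase())
    .filter(Boolean);
}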
async function fetchOpenAIText(content: string): Promise<string> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();
  console.log(content);
  const completion = await openai.chat.completions.create({
    stream: false,
    messages: [
      { role: "user", content },
    ],
    // model: "gpt-4o-mini",
    model: "gpt-3.5-turbo",
    max_tokens: MAX_TOKENS,
  });
  console.log(completion?.usage?.total_tokens);
  // message.content can be null, so fall back to an empty string
  return completion.choices[0].message.content ?? "";
}
export async function fetchOpenAIStream(content: string): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();
  console.log(content);
  const completion = await openai.chat.completions.create({
    stream: true,
    messages: [
      { role: "user", content },
    ],
    model: "gpt-3.5-turbo",
    max_tokens: MAX_TOKENS,
  });
  console.log(completion);
  const encoder = new TextEncoder();
  const stream = new ReadableStream({
    async start(controller) {
      for await (const chunk of completion) {
        console.log(chunk.usage?.total_tokens);
        // the final chunk can arrive with an empty delta, so guard against undefined content
        const s = encoder.encode(chunk.choices[0]?.delta?.content ?? "");
        controller.enqueue(s);
      }
      controller.close();
    },
  });
  return new Response(stream, {
    headers: { "Content-Type": "text/event-stream" },
  });
}
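
// Example of reading the streamed clue on the client (a sketch, not part of the
// original val): the default POST route expects JSON with a `word` field and
// streams the clue back as raw text chunks. The function name and base URL are
// placeholders.
export async function exampleReadClue(baseUrl: string, word: string): Promise<string> {
  const res = await fetch(baseUrl, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ word }),
  });
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let clue = "";
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    clue += decoder.decode(value, { stream: true });
  }
  return clue;
}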
export async function decode(hash: string): Promise<string> {
  const { decode } = await import("https://esm.town/v/jxnblk/comp");
  // assumption: comp's decode maps the hash back to the original string
  return decode(hash);
}