Avatar

yawnxyz

i make ui for ai
15 public vals
Joined March 31, 2023

Press to talk, and get a translation!

The app is set up so you can easily have a conversation between two people. The app will translate between the two selected languages, in each voice, as the speakers talk.

Add your OpenAI API Key, and make sure to open in a separate window for Mic to work.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import { Hono } from "npm:hono@3";
import { html } from "npm:hono@3/html";
import { cors } from 'npm:hono/cors';
import { OpenAI } from "npm:openai";
const app = new Hono();
// BUG FIX: the OpenAI client constructor takes an options object, not a bare
// key string — passing the key positionally left the client unconfigured.
const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY_VOICE") });
/**
 * Thin wrapper around OpenAI's audio transcription endpoint.
 */
class TranscriptionService {
  /**
   * Transcribe an audio file with whisper-1.
   * @param audioFile audio payload forwarded to the Whisper API
   * @returns the transcription as plain text (response_format: "text")
   * @throws rethrows any OpenAI API error after logging it
   */
  async transcribeAudio(audioFile) {
    try {
      // Return the API result directly; "text" format yields a plain string.
      return await openai.audio.transcriptions.create({
        file: audioFile,
        model: "whisper-1",
        response_format: "text",
      });
    } catch (error) {
      console.error('OpenAI API error:', error);
      throw error;
    }
  }
}
// Shared singleton so importers reuse a single service instance.
export const transcriptionService = new TranscriptionService();

// Public demo endpoint: accept GET/POST from any origin.
app.use('*', cors({ origin: '*', allowMethods: ['GET', 'POST'], allowHeaders: ['Content-Type'] }));
app.get("/", async (c) => {
const languages = [
"Afrikaans", "Arabic", "Armenian", "Azerbaijani", "Belarusian", "Bosnian", "Bulgarian", "Catalan", "Chinese", "Croatian", "Czech", "Danish", "Dutch", "English", "Estonian", "Finnish", "French", "Galician", "German", "Greek", "Hebrew", "Hindi", "Hungari
];
const voices = [
"alloy",
"echo",
"fable",
"onyx",
"nova",
"shimmer"
];
const resHtml = html`
<!DOCTYPE html>
<html>
<head>
<title>Translator</title>
<script defer src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"></script>
<script src="https://cdn.tailwindcss.com"></script>
<style>
html {
user-select: none;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
}
@media (max-width: 640px) {
.container {
max-width: 100%;
padding: 1rem;
}
button {
font-size: 1.5rem;
padding: 1rem 2rem;
}
select {
font-size: 1.2rem;
padding: 0.8rem;
}
audio {
width: 100%;
}
}
</style>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
</head>
<body>
<div class="container mx-auto py-8 px-8" x-data="recordingData">
<h1 class="text-4xl font-bold mb-4">Translator</h1>
<div class="mb-4">
<h2 class="font-bold mb-2">Languages</h2>
<div class="grid grid-cols-2 gap-4" x-data="{
languages: [
'Afrikaans', 'Arabic', 'Armenian', 'Azerbaijani', 'Belarusian', 'Bosnian', 'Bulgarian', 'Catalan', 'Chinese', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'Estonian', 'Finnish', 'French', 'Galician', 'German', 'Greek', 'Hebrew', 'Hind
]
}">
<div>
<select id="language1" name="language1" class="border border-gray-300 rounded px-4 py-2 mb-4 w-full">
<template x-for="lang in languages">
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
/** @jsx jsx */
import { Hono } from "npm:hono@3";
import { jsx } from "npm:hono@3/jsx";
import { cors } from 'npm:hono/cors';
import { OpenAI } from "npm:openai";
const app = new Hono();
// Reads OPENAI_API_KEY from the environment by default.
const openai = new OpenAI();

// Public demo endpoint: accept GET/POST from any origin.
app.use('*', cors({ origin: '*', allowMethods: ['GET', 'POST'], allowHeaders: ['Content-Type'] }));
// Server-side rendering: a plain GET form posts to /prompt, which redirects
// back here with the model output in the `response` query parameter.
app.get("/", async (c) => {
// Page is built with hono/jsx; Alpine and Tailwind load from CDNs.
const html = (
<html>
<head>
<title>OpenAI Prompt Example</title>
<script defer src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"></script>
<script src="https://cdn.tailwindcss.com"></script>
</head>
<body>
<div class="container mx-auto py-8">
<h1 class="text-4xl font-bold mb-4">OpenAI Prompt Example</h1>
<form action="/prompt" method="GET">
<label for="prompt" class="block mb-2 font-bold">Prompt:</label>
<input type="text" id="prompt" name="prompt" value="Say hello in a creative way" class="border border-gray-300 rounded px-4 py-2 mb-4 w-full" />
<button type="submit" class="bg-blue-500 hover:bg-blue-600 text-white font-bold py-2 px-4 rounded">
Submit
</button>
</form>
<div class="mt-4">
<h2 class="text-xl font-bold mb-2">Response:</h2>
<div id="output" class="border border-gray-300 rounded p-4">
{c.req.query('response') || ''}
</div>
</div>
</div>
</body>
</html>
);
return c.html(html);
});
// GET /prompt?prompt=... — forwards the prompt to gpt-4 and redirects back
// to "/" with the generated text (or an error notice) in `response`.
app.get('/prompt', async (c) => {
  const prompt = c.req.query('prompt');
  // Guard: without a prompt the chat.completions call would fail server-side.
  if (!prompt) {
    return c.redirect('/?response=' + encodeURIComponent('No prompt provided.'));
  }
  try {
    const response = await openai.chat.completions.create({
      model: "gpt-4",
      messages: [{ role: "user", content: prompt }],
      max_tokens: 100,
    });
    // Only one completion is requested (default n=1), so choices[0] is it.
    const generatedResponse = response.choices[0].message.content;
    return c.redirect(`/?response=${encodeURIComponent(generatedResponse)}`);
  } catch (error) {
    console.error('OpenAI API error:', error);
    return c.redirect('/?response=Error%20occurred.');
  }
});
export default app.fetch;

An http and class wrapper for Vercel's AI SDK

Usage:

  • Groq: https://yawnxyz-ai.web.val.run/generate?prompt="tell me a beer joke"&provider=groq&model=llama3-8b-8192
  • Perplexity: https://yawnxyz-ai.web.val.run/generate?prompt="what's the latest phage directory capsid & tail article about?"&provider=perplexity
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import { Hono } from "npm:hono@3";
import { cors } from "npm:hono/cors";
import { openai, createOpenAI } from "npm:@ai-sdk/openai";
import { anthropic } from "npm:@ai-sdk/anthropic";
import { generateText, streamText, generateObject } from "npm:ai";
import { z } from "npm:zod";
const app = new Hono();
// Public API: allow any origin for GET/POST.
app.use('*', cors({
origin: '*',
allowMethods: ['GET', 'POST'],
allowHeaders: ['Content-Type'],
}));
// NOTE(review): assigning `.apiKey` on the imported provider functions likely
// has no effect — the @ai-sdk providers read OPENAI_API_KEY / ANTHROPIC_API_KEY
// from the environment themselves, or need createOpenAI/createAnthropic with an
// explicit apiKey. Confirm against the AI SDK provider docs.
openai.apiKey = Deno.env.get("OPENAI_API_KEY");
anthropic.apiKey = Deno.env.get("ANTHROPIC_API_KEY");
// Groq and Perplexity expose OpenAI-compatible APIs, so the OpenAI provider is
// reused with a custom baseURL and key.
const groq = createOpenAI({
baseURL: 'https://api.groq.com/openai/v1',
apiKey: Deno.env.get("GROQ_API_KEY"),
});
const perplexity = createOpenAI({
apiKey: Deno.env.get("PERPLEXITY_API_KEY") ?? '',
baseURL: 'https://api.perplexity.ai/',
});
class ModelProvider {
/**
 * Dispatch a generation request to the named provider.
 * @throws Error when provider is not openai | anthropic | groq | perplexity.
 */
async generateResponse(c, provider, model, prompt, maxTokens, streaming, schema, messages, tools) {
  switch (provider) {
    case 'openai':
      return this.generateOpenAIResponse(c, model, prompt, maxTokens, streaming, schema, messages, tools);
    case 'anthropic':
      return this.generateAnthropicResponse(c, model, prompt, maxTokens, streaming, schema, messages, tools);
    case 'groq':
      return this.generateGroqResponse(c, model, prompt, maxTokens, streaming, schema, messages, tools);
    case 'perplexity':
      return this.generatePerplexityResponse(c, model, prompt, maxTokens, streaming, schema, messages, tools);
    default:
      throw new Error('Invalid provider');
  }
}
/**
 * Handle an OpenAI request in one of three modes:
 *  - schema:    structured output via generateObject (responds with JSON object)
 *  - streaming: server-sent events, one `{ token }` payload per delta
 *  - default:   single generateText call → { text, finishReason, usage }
 *
 * BUG FIX: `tools` was referenced in the body but missing from the parameter
 * list even though generateResponse passes it, so any request with tools
 * raised a ReferenceError.
 */
async generateOpenAIResponse(c, model, prompt, maxTokens, streaming, schema, messages, tools) {
  const options = {
    model: openai(model || 'gpt-3.5-turbo'),
    max_tokens: maxTokens || 100,
  };
  // prompt and messages are mutually exclusive inputs.
  if (prompt && messages && messages.length > 0) {
    throw new Error('prompt and messages cannot be defined at the same time');
  } else if (prompt) {
    options.prompt = prompt;
  } else if (messages && messages.length > 0) {
    options.messages = messages;
  }
  if (tools) {
    options.tools = tools;
  }
  if (schema) {
    // NOTE(review): z.object() expects a shape of zod validators, while
    // JSON.parse yields plain values — confirm the expected schema format.
    const { object } = await generateObject({
      ...options,
      schema: z.object(JSON.parse(schema)),
    });
    return c.json(object);
  } else if (streaming) {
    // Re-emit the model's token stream as server-sent events.
    const encoder = new TextEncoder();
    const stream = new ReadableStream({
      async start(controller) {
        const { textStream } = await streamText(options);
        for await (const delta of textStream) {
          controller.enqueue(encoder.encode(`data: ${JSON.stringify({ token: delta })}\n\n`));
        }
        controller.enqueue(encoder.encode(`data: [DONE]\n\n`));
        controller.close();
      },
    });
    return c.body(stream, 200, {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive',
    });
  } else {
    const { text, finishReason, usage } = await generateText(options);
    return c.json({ text, finishReason, usage });
  }
}
async generateAnthropicResponse(c, model, prompt, maxTokens, streaming, schema, messages) {
if (schema) {
const { object } = await generateObject({
model: anthropic(model || 'claude-3-haiku-20240307'),
prompt: prompt,
1
2
3
4
5
6
7
8
9
10
11
12
13
14
import { OpenAI } from "npm:openai";
// Create a secret named OPENAI_API_KEY at https://www.val.town/settings/environment-variables
// The client picks that key up from the environment automatically.
const openai = new OpenAI();
// One-shot demo: ask gpt-4 for a short creative greeting and print it.
const completion = await openai.chat.completions.create({
  model: "gpt-4",
  max_tokens: 30,
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
});
console.log(completion.choices[0].message.content);

Example of using Hono to stream OpenAI's streamed chat responses

Unfortunately this doesn't work on val.town — use Deno Deploy instead

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import { Hono } from "npm:hono";
import { cors } from 'npm:hono/cors'
import { stream, streamText, streamSSE } from "npm:hono/streaming";
import { OpenAI } from "npm:openai";
const app = new Hono();
// Reads OPENAI_API_KEY from the environment by default.
const openai = new OpenAI();

// Public demo endpoint: accept GET from any origin.
app.use('*', cors({ origin: '*', allowMethods: ['GET'], allowHeaders: ['Content-Type'] }))
// Set SSE response headers for the /chat endpoints.
// BUG FIX: the path was the single string '/chat, /chat/*', which Hono treats
// as one literal route that never matches; register the two patterns separately.
const sseHeaders = async (c, next) => {
  c.header('Content-Type', 'text/event-stream');
  c.header('Cache-Control', 'no-cache');
  c.header('Connection', 'keep-alive');
  await next();
};
app.use('/chat', sseHeaders);
app.use('/chat/*', sseHeaders);
// const SOURCE_URL = ""; // leave blank for deno deploy / native
// const SOURCE_URL = "https://yawnxyz-openAIHonoChatStreamSample.web.val.run"; // valtown as generator - no SSE
const SOURCE_URL = "https://funny-crow-81.deno.dev"; // deno deploy as generator
// Demo page served to browsers: reads a prompt, opens an EventSource against
// SOURCE_URL/chat, and appends streamed tokens into #output until the server
// sends the [DONE] sentinel.
const htmlContent = `
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<title>OpenAI Streaming Example</title>
<style>
body {
font-family: Arial, sans-serif;
max-width: 600px;
margin: 0 auto;
padding: 20px;
}
#output {
white-space: pre-wrap;
border: 1px solid #ccc;
padding: 10px;
min-height: 100px;
}
</style>
</head>
<body>
<h1>OpenAI Streaming Example</h1>
<label for="prompt">Prompt:</label>
<input type="text" id="prompt" value="tell me a joke" />
<button onclick="getResponse()">Submit</button>
<div id="output"></div>
<script>
const outputDiv = document.getElementById('output');
function getResponse() {
const prompt = document.getElementById('prompt').value;
const url = "${SOURCE_URL}/chat?p=" + encodeURIComponent(prompt);
outputDiv.textContent = 'Loading...';
const eventSource = new EventSource(url);
let isFirstChunk = true;
eventSource.onmessage = (event) => {
if (event.data === "[DONE]") {
eventSource.close();
} else {
const data = JSON.parse(event.data);
if (data.error) {
outputDiv.textContent = "Error: " + data.error;
} else {
if (isFirstChunk) {
outputDiv.textContent = data.token;
isFirstChunk = false;
} else {
outputDiv.textContent += data.token;
}
}
}
};
eventSource.onerror = (error) => {
console.error('EventSource error:', error);
outputDiv.textContent = 'Error occurred.';
};
eventSource.addEventListener('close', () => {
console.log('Connection closed.');
});
}
</script>
</body>
</html>
`;
const htmlContent2 = `
<!DOCTYPE html>
<html>

Val.town somehow doesn't run multi-line code. It works with "3+3" but not with more complex code.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
/** @jsx jsx */
import { Hono } from "https://deno.land/x/hono@v3.1.3/mod.ts";
import { jsx } from "https://deno.land/x/hono@v3.1.3/middleware.ts";
import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON";
const app = new Hono();
// POST /eval — forward the request's { code } payload to the Val Town eval
// API, authenticated with the `valtown` env token, and return the JSON result.
app.post("/eval", async (c) => {
  const payload = await c.req.json();
  // SECURITY FIX: never log the raw API token; only record whether it is set.
  console.log('evaling; valtown token configured:', Boolean(Deno.env.get('valtown')));
  const response = await fetchJSON('https://api.val.town/v1/eval', {
    method: 'POST',
    body: JSON.stringify(payload),
    headers: {
      'Authorization': `Bearer ${Deno.env.get('valtown')}`,
      'Content-Type': 'application/json'
    },
  });
  return c.json(response);
});
// Server-side rendering
app.get("/", async (c) => {
const html = (
<html>
<head>
<title>Val Town Eval Sandbox</title>
<script defer src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"></script>
<style>{`
.loader {
border: 4px solid #f3f3f3;
border-top: 4px solid #3498db;
border-radius: 50%;
width: 30px;
height: 30px;
animation: spin 1s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
`}</style>
</head>
<body x-data="{
code: '',
result: '',
isEvaluating: false,
elapsedTime: 0,
async evaluate() {
this.isEvaluating = true;
this.elapsedTime = 0;
this.startTimer();
try {
const payload = { code: this.code };
console.log('Sending payload:', payload);
const response = await fetch('/eval', {
method: 'POST',
body: JSON.stringify(payload),
headers: {
'Content-Type': 'application/json'
}
});
const data = await response.json();
console.log('Received response:', data);
this.result = JSON.stringify(data, null, 2);
} catch (error) {
console.error('Error:', error);
this.result = JSON.stringify(error, null, 2);
} finally {
this.isEvaluating = false;
this.stopTimer();
}
},
startTimer() {
this.timer = setInterval(() => {
this.elapsedTime++;
}, 1000);
},
stopTimer() {
clearInterval(this.timer);
}
}">
<h1>Val Town Eval Sandbox</h1>
<div>
<textarea
x-model="code"
rows="8"
cols="50"
placeholder="Enter code to evaluate (e.g. 5+3)"
></textarea>
</div>
<div>
<pre x-text="JSON.stringify({ code }, null, 2)"></pre>
</div>
<div>
<button
x-on:click="evaluate()"
x-bind:disabled="isEvaluating"
>

Basic demo of getting reactive Alpine.js working on a hono/jsx "backend"

  • For server <> frontend interaction, do form POST submissions (or potentially POST json to the public val address?)

https://alpinejs.dev/start-here#building-a-counter

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
/** @jsx jsx */
import { Hono } from "npm:hono@3";
import { jsx } from "npm:hono@3/jsx";
const app = new Hono();
// Server-side rendering: the JSX shell is static; Alpine (loaded from CDN)
// provides the client-side reactivity for the message text and the counter.
app.get("/", async (c) => {
const html = (
<html>
<head>
<title >Hono/JSX Example</title>
<script defer src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"></script>
<script src="https://cdn.tailwindcss.com"></script>
</head>
<body>
<div class="py-2 px-4">
<h1 class="text-4xl font-bold mt-2 mb-4">Welcome to the Hono/JSX/Alpine 🏔️🏔️ Example!</h1>
<h2 class="text-xl font-bold mb-4" x-data="{ message: 'I ❤️ Alpine' }" x-text="message"></h2>
<div x-data="{ count: 0 }">
<button class="border-2 border-slate-500 rounded-md px-1 py-1" x-on:click="count++">Increment</button>
<span class="pl-4" x-text="count"></span>
</div>
</div>
</body>
</html>
);
return c.html(html);
});
export default app.fetch;

This doesn't seem to work on Val Town, though it should work in principle.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
/** @jsxImportSource npm:hono@3/jsx */
import { fetchText } from "https://esm.town/v/stevekrouse/fetchText";
import { chat } from "https://esm.town/v/stevekrouse/openai";
import cronstrue from "npm:cronstrue";
import { Hono } from "npm:hono@3";
// Few-shot examples for the code-writing prompt: each pairs a user request
// with a known-good val implementation. Two of the implementations are
// fetched over the network at module load time.
const examples = [
{
user: "website that shows the current time",
content: `/** @jsxImportSource npm:react */
export default function() {
return <h1>{new Date().toLocaleTimeString()}</h1>;
}`,
},
{
user: `A collaborative poem builder.
It stores each line of the poem in sqlite.
It has a textbox that lets anyone input a new line to the poem.`,
content: await fetchText("https://esm.town/v/stevekrouse/poembuilder3?v=4"),
},
{
user: "an app that uses chatgpt to convert natural language to cron syntax",
content: await fetchText("https://esm.town/v/stevekrouse/cron2"),
},
];
const app = new Hono();
// Hono's fetch handler is this val's HTTP entry point.
export default app.fetch;
// GET / — renders the Val Writer form. When a ?description= query is present
// the description is compiled to code via the model; otherwise the first
// example is shown as a placeholder.
app.get("/", async (c) => {
const example = examples[0] // examples[Math.floor(Math.random() * examples.length)];
const description = c.req.query("description") || example.user;
// Only invoke the model when the user actually submitted a description.
let code = c.req.query("description") ? await compile(description) : example.content;
return c.html(
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<script src="https://cdn.tailwindcss.com" />
<title>Val Writer</title>
</head>
<body class="flex p-6 flex-col space-y-4 max-w-2xl mx-auto">
<div>
<h1 class="text-3xl">Val Writer</h1>
<p>Compile your prompt into code</p>
</div>
<form class="flex space-x-2" hx-disabled-elt="button">
<textarea
name="description"
required
class="w-full border-2 rounded-lg p-2"
rows={5}
>
{description}
</textarea>
<button class="bg-purple-500 hover:bg-purple-700 text-white font-bold py-2 px-4 rounded disabled:hidden">
Write
</button>
</form>
<div>
<pre>
{code}
</pre>
</div>
</body>
</html>,
);
});
/**
 * Convert a natural-language description into Deno TypeScript source via chat.
 * Builds a system prompt plus few-shot user/assistant pairs drawn from
 * `examples`, then asks the model for code.
 * @param description the user's plain-English request
 * @returns generated source code as a string
 */
export async function compile(description: string) {
  const messages = [
    {
      role: "system",
      content: `You are an expert fullstack developer.
You convert user requests to code.
You write Deno TypeScript.
Reply ONLY with valid Typescript.
Export the fetch function to run the server.
Only use web standard fetch. Export the fetch function to start the server.
Add extensive comments`,
    },
    // BUG FIX: the mapped array previously began with a stray comma ([, {...}]),
    // creating an accidental hole that only worked because .flat() drops holes.
    ...examples.flatMap((example) => [{
      role: "user",
      content: example.user,
    }, {
      role: "assistant",
      content: example.content,
    }]),
    { role: "user", content: description },
  ];
  const { content } = await chat(messages, {
    model: "gpt-4-turbo-2024-04-09",
    max_tokens: 4000,
  });
  return content;
}

markdown.download

Handy microservice/library to convert various data sources into markdown. Intended to make it easier to consume the web in ereaders

https://jsr.io/@tarasglek/markdown-download

Features

  • Apply readability
  • Further convert article into markdown to simplify it
  • Allow webpages to be viewable as markdown via curl
  • Serve markdown converted to html to browsers
  • Extract youtube subtitles

Source

https://github.com/tarasglek/markdown-download

https://www.val.town/v/taras/markdown_download

License: MIT

Usage: https://markdown.download/ + URL

Dev: https://val.markdown.download/ + URL

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import { isProbablyReaderable, Readability } from "npm:@mozilla/readability@^0.5.0";
import { DOMParser } from "npm:linkedom@0.16.10";
import { marked } from "npm:marked@12.0.1";
import { getSubtitles } from "npm:youtube-captions-scraper@^2.0.1";
const isCloudflareWorker = typeof Request !== "undefined" && typeof Response !== "undefined";
// init async loading of modules
const AgentMarkdownImport = isCloudflareWorker ? import("npm:agentmarkdown@6.0.0") : null;
const TurndownService = isCloudflareWorker ? null : await import("npm:turndown@^7.1.3");
// Convert an HTML string to markdown, picking the converter by runtime.
// NOTE(review): despite the name, this converts HTML -> markdown, not the
// reverse — consider renaming. Also, TurndownService here is an already-awaited
// module namespace (see its top-level import), so `new (await TurndownService)()`
// constructs the namespace directly; verify the npm interop makes that
// constructible (it may need `.default`).
async function markdown2html(html: string): Promise<string> {
if (AgentMarkdownImport) {
// TurndownService doesn't work on cf
// Dynamically import AgentMarkdown when running in Cloudflare Worker
const { AgentMarkdown } = await AgentMarkdownImport;
return await AgentMarkdown.produce(html);
} else {
// Dynamically import TurndownService otherwise
return new (await TurndownService)().turndown(html);
}
}
/**
 * Extract the 11-character YouTube video id from a URL, covering
 * youtube.com/watch?v=, /v/, /embed/ and youtu.be/ forms.
 * @returns the video id, or null when the URL is not a recognized YouTube link
 */
function getYoutubeVideoID(url: URL): string | null {
  const pattern = /(?:youtube\.com\/(?:[^/]+\/.+\/|(?:v|e(?:mbed)?)\/|.*[?&]v=)|youtu\.be\/)([^"&?/\s]{11})/i;
  const match = pattern.exec(url.href);
  return match === null ? null : match[1];
}
/**
 * Build a 200 response with permissive CORS headers so the service can be
 * called from any origin.
 * @param message response body
 * @param contentType Content-Type header (defaults to markdown)
 */
function response(message: string, contentType = "text/markdown"): Response {
  return new Response(message, {
    status: 200,
    headers: {
      "Access-Control-Allow-Origin": "*",
      "Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS",
      "Access-Control-Allow-Headers": "Content-Type, Authorization",
      "Access-Control-Max-Age": "86400",
      "Content-Type": contentType,
    },
  });
}
/**
 * Build a JSON error response. Note the HTTP status is still 200 (set by
 * `response`); the error code lives in the JSON body.
 */
function err(msg: string): Response {
  const body = JSON.stringify({ error: { message: msg, code: 400 } });
  return response(body, "application/json");
}
/**
 * Parse a URL leniently: bare hostnames (no scheme) are retried with an
 * https:// prefix.
 */
function fudgeURL(url: string) {
  try {
    return new URL(url);
  } catch (e) {
    // console.log("Url parsing failed", e.stack);
    return new URL(`https://${url}`);
  }
}
/**
 * Derive the target URL from the incoming request. The URL may be embedded in
 * the path itself (e.g. /https://example.com), supplied via the ?url= form
 * parameter, or absent — in which case a landing-page response is returned
 * instead of a URL.
 */
function processInput(req: Request) {
  const result = {
    url: undefined as undefined | URL,
    response: undefined as undefined | Response,
  };
  const requestUrl = new URL(req.url);
  let target = requestUrl.pathname.substring(1) + requestUrl.search;
  if (!target.startsWith("http")) {
    const fromForm = requestUrl.searchParams.get("url");
    if (fromForm) {
      target = fromForm;
    } else if (target.length < 2) {
      // No URL supplied: serve the HTML form instead.
      result.response = response(
        generate_ui(
          "URL to convert to markdown:",
          "https://www.val.town/v/taras/markdown_download",
          "markdown.download",
        ),
        "text/html",
      );
      return result;
    }
  }
  result.url = fudgeURL(target);
  return result;
}
export default async function(req: Request): Promise<Response> {
const action = processInput(req);
const url = action.url;
if (!url) {
return action.response!;
}
const youtubeVideoID = getYoutubeVideoID(url);
if (youtubeVideoID) {
const arr = (await getSubtitles({
videoID: youtubeVideoID,
})) as { text: string }[];
const description = "## Generated Transcription\n\n"
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import Anthropic from "npm:@anthropic-ai/sdk";
const anthropic = new Anthropic({
// apiKey: 'my_api_key', // defaults to process.env["ANTHROPIC_API_KEY"]
});
// Define a mapping for model shortcuts
// Short aliases for the Claude 3 model identifiers accepted by `prompt`.
const modelMap = {
opus: "claude-3-opus-20240229",
sonnet: "claude-3-sonnet-20240229",
haiku: "claude-3-haiku-20240307",
};
/**
 * Send a single user prompt to Claude.
 * @param text user message appended after any prior conversation turns
 * @param mode "text" returns only the reply text; any other value returns the
 *             full API response with the resolved model id attached
 * @param model alias from modelMap ("opus" | "sonnet" | "haiku") or a full id
 * @param max_tokens completion token limit
 * @param messages prior conversation turns. BUG FIX: the user turn used to be
 *                 push()ed into the caller's array (a surprising side effect);
 *                 a copy is used now, so the caller's array is left untouched.
 */
export async function prompt(
  text,
  { mode = "text", model = "opus", max_tokens = 1024, messages = [] } = {},
) {
  const modelId = modelMap[model] || model;
  console.log('modelId: ', modelId);
  // Copy instead of mutating the caller-supplied array.
  const allMessages = [...messages, { role: "user", content: text }];
  const res = await anthropic.messages.create({
    model: modelId,
    max_tokens,
    messages: allMessages,
  });
  if (mode === "text") return res.content?.[0].text;
  // return {...res.content?.[0].text, model: modelId};
  return { ...res, model: modelId };
}