Search

Results include substring matches and semantically similar vals. Learn more
brianleroux avatar
key_value_api
@brianleroux
An interactive, runnable TypeScript val by brianleroux
HTTP
let res = await sqlite.execute(`select key, value from kv`);
return c.json(res);
app.post("/", async function(c) {
let res = await sqlite.execute({
sql: `insert into kv(key, value) values (:key, :value)`,
dantaeyoung avatar
weatherGPT
@dantaeyoung
If you fork this, you'll need to set OPENAI_API_KEY in your Val Town Secrets.
Cron
If you fork this, you'll need to set `OPENAI_API_KEY` in your [Val Town Secrets](https://www.val.town/settings/secrets).
import { email } from "https://esm.town/v/std/email?v=11";
import { OpenAI } from "npm:openai";
let location = "brooklyn ny";
).then(r => r.json());
const openai = new OpenAI();
let chatCompletion = await openai.chat.completions.create({
messages: [{
console.log(text);
export async function weatherGPT() {
await email({ subject: "Weather Today", text });
dhvanil avatar
val_hrjcREMr1T
@dhvanil
An interactive, runnable TypeScript val by dhvanil
HTTP
export async function val_hrjcREMr1T(req) {
try {
// Execute the code directly and capture its result
std avatar
API_URL
@std
Val Town API URL When Val Town code is run on Val Town servers we use a local URL so we can save time by skipping a roundtrip to the public internet. However, if you want to run your vals that use our API, i.e. std library vals, locally, you'll want to use our public API's URL, https://api.val.town. We recommend importing and using std/API_URL whenever you use our API so that you are always using the most efficient route. Example Usage import { API_URL } from "https://esm.town/v/std/API_URL"; const response = await fetch(`${API_URL}/v1/me`, { headers: { Authorization: `Bearer ${Deno.env.get("valtown")}`, Accept: "application/json", }, }); const data = await response.json(); console.log(data)
Script
function envOrUndefined(key: string): string | undefined {
// try/catch prevents crashes if the script doesn't have env access
try {
lionad avatar
myApi
@lionad
An interactive, runnable TypeScript val by lionad
Script
export function myApi(name) {
console.email("hi " + name);
dhvanil avatar
val_P5Jos9W9mM
@dhvanil
An interactive, runnable TypeScript val by dhvanil
HTTP
export async function val_P5Jos9W9mM(req) {
try {
const body = await req.text();
// Create a function from the provided code and execute it
const userFunction = async () => {
const findPrimes = (n) => {
// Execute and capture the result
const result = await userFunction();
// Handle different types of results
ascarden avatar
falDemoApp
@ascarden
@jsxImportSource https://esm.sh/react
HTTP
import { falProxyRequest } from "https://esm.town/v/stevekrouse/falProxyRequest";
function App() {
const [prompt, setPrompt] = useState("");
</div>
function client() {
createRoot(document.getElementById("root")).render(<App />);
if (typeof document !== "undefined") { client(); }
export default async function server(req: Request): Promise<Response> {
const url = new URL(req.url);
dhvanil avatar
val_PJEj4CadkM
@dhvanil
An interactive, runnable TypeScript val by dhvanil
HTTP
export async function val_PJEj4CadkM(req) {
try {
// Execute the code directly and capture its result
shivammunday avatar
competentCoffeeTyrannosaurus
@shivammunday
@jsxImportSource https://esm.sh/react@18.2.0
HTTP
import { createRoot } from "https://esm.sh/react-dom@18.2.0/client";
function WebsiteChatbot() {
const [messages, setMessages] = useState([
const [isLoading, setIsLoading] = useState(false);
async function handleSubmit(e: React.FormEvent) {
e.preventDefault();
</div>
function client() {
const chatbotRoot = document.createElement('div');
if (typeof document !== 'undefined') { client(); }
export default async function server(request: Request) {
if (request.method === 'POST' && new URL(request.url).pathname === '/chat') {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const body = await request.json();
Be conversational, helpful, and website-specific in your responses.`;
const completion = await openai.chat.completions.create({
messages: [
stevekrouse avatar
dailyDadJoke
@stevekrouse
Daily Dad Joke How do you make a programmer laugh every morning? A dad joke cron job! Setup Fork this val Click Create fork 🤣🤣🤣🤣 API This val uses the icanhazdadjoke API. You can find more docs here, such as how to filter by type.
Cron
import { email } from "https://esm.town/v/std/email";
import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON";
export async function dailyDadJoke() {
let { setup, punchline } = await fetchJSON("https://official-joke-api.appspot.com/random_joke");
return email({
andreterron avatar
genval
@andreterron
Generate a Val Uses the OpenAI API to generate code for a val based on the description given by the user. TODO: [ ] Improve code detection on GPT response [ ] Give more context on val town exclusive features like console.email or @references [ ] Enable the AI to search val town to find other vals to use
HTTP
# [Generate a Val](https://andreterron-genval.express.val.run)
Uses the OpenAI API to generate code for a val based on the description given by the user.
TODO:
const labelClass = "w-full text-sm font-bold uppercase text-emerald-800 [&>span]:pl-0.5 flex flex-col gap-2";
function parseCookies(cookie: string) {
const out: Record<string, string> = {};
const code = await generateValCode(
process.env.VT_OPENAI_KEY,
value.description,
<span>Description</span>
=${inputClass} id="description" name="description" type="text" placeholder="Function to return a random number" autocomplete=
</label>
websrai avatar
tidyRedWhale
@websrai
@jsxImportSource https://esm.sh/react
HTTP
import { createRoot } from "https://esm.sh/react-dom/client";
function AssistantChat() {
const [messages, setMessages] = useState<{role: string, content: string}[]>([
</div>
function client() {
createRoot(document.getElementById("root")).render(<AssistantChat />);
if (typeof document !== "undefined") { client(); }
export default async function server(request: Request): Promise<Response> {
if (request.method === 'POST' && new URL(request.url).pathname === '/chat') {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
try {
const { messages } = await request.json();
const completion = await openai.chat.completions.create({
model: "gpt-4o-mini",
yawnxyz avatar
stringInferExample
@yawnxyz
Example of inferring params from a prompt: https://x.com/yawnxyz/status/1812922642510586039 — putting results in does wonders; use an "extraction guide" to direct results.
Script
import { modelProvider } from "https://esm.town/v/yawnxyz/ai";
// selects from xml selectors like <json></json> gets better results
export function selectFromString(str, selector = "json", returnAll = false) {
const regex = new RegExp(`<${selector}>([\\s\\S]*?)</${selector}>`, 'gm');
const matches = [...str.matchAll(regex)].map(match => match[1]);
dialnco avatar
myApi
@dialnco
An interactive, runnable TypeScript val by dialnco
HTTP
export function myApi(name) {
return "hi " + name;
webup avatar
pipeSampleLLMBind
@webup
An interactive, runnable TypeScript val by webup
Script
type: "chat",
provider: "openai",
const model = await mb();
const tracer = await tb();
const functionSchema = [
name: "joke",
const chain = prompt.pipe(model.bind({
functions: functionSchema,
function_call: { name: "joke" },
return await chain.invoke({ subject: "bears" }, { callbacks: [tracer] });