Search

Results include substring matches and semantically similar vals.
CodeGeneratorApp
@mrshorts
@jsxImportSource https://esm.sh/react@18.2.0
HTTP
import { createRoot } from "https://esm.sh/react-dom@18.2.0/client";
function App() {
const [prompt, setPrompt] = useState("");
textDecoration: 'none',
function client() {
createRoot(document.getElementById("root")).render(<App />);
if (typeof document !== "undefined") { client(); }
export default async function server(request: Request): Promise<Response> {
if (request.method === "POST") {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const { prompt, language } = await request.json();
cpp: "Generate a clean, concise C++ code snippet for: "
const completion = await openai.chat.completions.create({
messages: [
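The fragments above follow the common Val Town pattern: a React client that renders only when `document` exists, plus a `server` handler that lazily imports `std/openai`. A minimal sketch of how the server half fits together (the prompt map and the response shape are assumptions, not this val's actual code):

export default async function server(request: Request): Promise<Response> {
  if (request.method !== "POST") {
    return new Response("Method not allowed", { status: 405 });
  }
  // Lazy import keeps the OpenAI client out of the client-side bundle.
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();
  const { prompt, language } = await request.json();
  // Hypothetical per-language prompt prefixes, modeled on the `cpp:` fragment above.
  const prefixes: Record<string, string> = {
    cpp: "Generate a clean, concise C++ code snippet for: ",
  };
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: (prefixes[language] ?? "") + prompt }],
  });
  return Response.json({ code: completion.choices[0].message.content });
}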
myApi
@feynman
An interactive, runnable TypeScript val by feynman
Script
export async function myApi(name) {
const got = await import("got");
const res = await got.post("http://34.87.31.113:3000/infer", {
interview_practice
@spinningideas
@jsxImportSource https://esm.sh/react@18.2.0
HTTP
timestamp: number;
function App() {
const [intervieweeResponse, setIntervieweeResponse] = useState("");
</div>
function client() {
createRoot(document.getElementById("root")).render(<App />);
if (typeof document !== "undefined") { client(); }
export default async function server(request: Request): Promise<Response> {
if (request.method === "POST" && new URL(request.url).pathname === "/ask") {
const { intervieweeResponse, interviewPosition } = await request.json();
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
oauth
@pomdtr
An interactive, runnable TypeScript val by pomdtr
Script
type Handler = (req: Request) => Response | Promise<Response>;
export function oauth(params: any, next: (req: Request, accessToken: string) => Response | Promise<Response>): Handler {
return (req: Request) => {
// TODO: go through oauth flow to extract access token
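Given the `Handler` type and the `oauth(params, next)` signature shown, usage would presumably look like the sketch below (hypothetical; the token-extraction flow is still a TODO in the val itself):

import { oauth } from "https://esm.town/v/pomdtr/oauth";

// Hypothetical usage: wrap a handler so it receives the access token once
// the OAuth flow is actually implemented.
export default oauth({ /* provider params */ }, (req, accessToken) => {
  return Response.json({ ok: true, tokenPreview: accessToken.slice(0, 4) });
});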
interview_practice
@knulp222
@jsxImportSource https://esm.sh/react
HTTP
timestamp: number;
function App() {
const [intervieweeResponse, setIntervieweeResponse] = useState("");
</div>
function client() {
createRoot(document.getElementById("root")).render(<App />);
if (typeof document !== "undefined") { client(); }
export default async function server(request: Request): Promise<Response> {
if (request.method === "POST" && new URL(request.url).pathname === "/ask") {
const { intervieweeResponse, interviewPosition } = await request.json();
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
petunia_chat
@robertmccallnz
An interactive, runnable TypeScript val by robertmccallnz
HTTP
import { html } from "https://esm.sh/hono/html";
import { OpenAI } from "https://esm.town/v/std/openai";
const app = new Hono();
const errorDisplay = document.getElementById('error-display');
function logError(message) {
console.error(message);
errorDisplay.textContent = message;
async function sendMessage() {
const message = userInput.value.trim();
const refreshButton = document.getElementById('refresh-market');
function createMarketStatHTML(emoji, name, trend, value) {
const div = document.createElement('div');
return div;
async function fetchMarketInsights() {
try {
app.get("/market-data", async (c) => {
const openai = new OpenAI();
try {
const completion = await openai.chat.completions.create({
model: "gpt-4o-mini",
const { message } = await c.req.json();
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
model: "gpt-4o-mini",
VERSION
@rattrayalex
An interactive, runnable TypeScript val by rattrayalex
Script
import OpenAI from "https://esm.sh/openai";
import { VERSION } from "https://esm.sh/openai/version";
console.log(VERSION);
export { VERSION };
hello_cli
@jxnblk
Example CLI hosted on Val Town.
Install with Deno on your computer (name it whatever you want): deno install -n hello https://esm.town/v/jxnblk/hello_cli
Run: hello
Uninstall: deno uninstall hello
Read the deno install docs.
Script
// deno install -n hello https://esm.town/v/jxnblk/hello_cli
export default function main() {
const name = Deno.args[0] || "";
console.log(`hello ${name}`);
}
getAocData
@nbbaier
Helper function to get Advent of Code data. AOC_TOKEN is a session token.
Script
## Helper function to get Advent of Code data
`AOC_TOKEN` is a session token
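No code is shown in the match, but a helper like this usually fetches the puzzle input with the session cookie. A sketch of that shape (the day/year parameters are assumptions; only the AOC_TOKEN session token is stated in the description):

// Hypothetical shape of an Advent of Code input fetcher.
export async function getAocData(day: number, year: number): Promise<string> {
  const res = await fetch(`https://adventofcode.com/${year}/day/${day}/input`, {
    headers: { Cookie: `session=${Deno.env.get("AOC_TOKEN")}` },
  });
  if (!res.ok) throw new Error(`AoC request failed: ${res.status}`);
  return res.text();
}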
unpaywall
@yawnxyz
// Unpaywall DOI access function
HTTP
const YOUR_EMAIL = "jan@phage.directory";
// Unpaywall DOI access function
async function unpaywallDOI(doi) {
const url = `https://api.unpaywall.org/v2/${doi}?email=${YOUR_EMAIL}`;
throw error;
// Unpaywall search function
async function unpaywallSearch(query, additionalParams = {}) {
const url = new URL('https://api.unpaywall.org/v2/search');
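The matched lines show the two real Unpaywall endpoints (GET /v2/{doi} and GET /v2/search, both keyed by an email parameter). A sketch of how the helpers likely complete (error handling and return shapes are assumptions):

const YOUR_EMAIL = "jan@phage.directory";

// Look up open-access info for a single DOI.
async function unpaywallDOI(doi: string) {
  const res = await fetch(`https://api.unpaywall.org/v2/${doi}?email=${YOUR_EMAIL}`);
  if (!res.ok) throw new Error(`Unpaywall lookup failed: ${res.status}`);
  return res.json();
}

// Full-text search over Unpaywall records.
async function unpaywallSearch(query: string, additionalParams: Record<string, string> = {}) {
  const url = new URL("https://api.unpaywall.org/v2/search");
  url.searchParams.set("query", query);
  url.searchParams.set("email", YOUR_EMAIL);
  for (const [k, v] of Object.entries(additionalParams)) url.searchParams.set(k, v);
  const res = await fetch(url);
  if (!res.ok) throw new Error(`Unpaywall search failed: ${res.status}`);
  return res.json();
}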
chatGPTPlugin
@stevekrouse
ChatGPT Plugin for Val Town: run code on Val Town from ChatGPT.
Usage: I haven't been able to get it to do very useful things yet. It certainly can evaluate simple JS code. It would be awesome if it knew how to use other APIs and make fetch calls to them, but it has been failing at that.
Limitations: This plugin currently only has unauthenticated access to POST /v1/eval, which basically means that all it can do is evaluate JavaScript or TypeScript. In theory it could refer to any existing vals in Val Town, but it wouldn't know about those unless you told it.
Future directions: Once we have more robust APIs to search for existing vals, this plugin could be WAY more valuable! In theory GPT-4 could first search for vals that do a certain task and, if it finds one, write code based on that val. In practice, however, that might require too many steps for poor GPT; we might need some sort of agent or langchain thing if we wanted that sort of behavior. Adding authentication could also enable it to make requests using your secrets and private vals and to create new vals for you. However, I am dubious that this would actually be practically useful.
Installation: Select GPT-4 (requires ChatGPT Plus). Click "No plugins enabled". Click "Install an unverified plugin" or "Develop your own plugin" (I'm not sure of the difference). Paste in this val's Express endpoint: https://stevekrouse-chatGPTPlugin.express.val.run. Click through the prompts until it's installed.
Express (deprecated)
![](https://i.imgur.com/lLUAcVc.png)
d make `fetch` calls to them, but it has been [failing at that](https://chat.openai.com/share/428183eb-8e6d-4008-b295-f3b0ef2
## Limitations
import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON";
import { openaiOpenAPI } from "https://esm.town/v/stevekrouse/openaiOpenAPI";
// https://stevekrouse-chatgptplugin.express.val.run/.well-known/ai-plugin.json
export async function chatGPTPlugin(req: express.Request, res: express.Response) {
if (req.path === "/.well-known/ai-plugin.json") {
// only POST /v1/eval for now
res.send(openaiOpenAPI);
else if (req.path === "/v1/eval") {
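For reference, calling the plugin's one unauthenticated route would look roughly like this (the { code } request body and the response shape are assumptions, not taken from this val's code):

// Hypothetical client call against the plugin's eval route.
const res = await fetch("https://stevekrouse-chatGPTPlugin.express.val.run/v1/eval", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ code: "1 + 1" }),
});
console.log(await res.json()); // expected to contain the evaluated result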
OpenRouterChatCompletion_Test
@rozek
a simple smoke test for val "OpenRouterChatCompletion"
HTTP
/**** a simple smoke test for val "OpenRouterChatCompletion" ****/
export default async function (Request:Request):Promise<Response> {
const OpenAIRequest = {
messages: [
headers:{ 'Content-Type':'application/json' },
body: JSON.stringify(OpenAIRequest)
const Completion = await CompletionResponse.json()
valle_tmp_3173618096166977554668362851031
@janpaul123
@jsxImportSource https://esm.sh/react
HTTP
import OpenAI from "npm:openai";
unless strictly necessary, for example use APIs that don't require a key, prefer internal function
functions where possible. Unless specified, don't add error handling,
The val should create a "export default async function main" which is the main function that gets
function write(text) {
function openTab(tab) {
const callback = function (mutationsList, observer) {
const openai = new OpenAI();
const stream = await openai.chat.completions.create({
window.addToken = function(str) {
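The `const stream = await openai.chat.completions.create({` fragment implies a streaming completion. A minimal sketch of that call (model, prompt, and what is done with each delta are assumptions; an OPENAI_API_KEY env var is assumed for npm:openai):

import OpenAI from "npm:openai";

const openai = new OpenAI();
const stream = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "Write a haiku about code generation." }],
  stream: true,
});
for await (const chunk of stream) {
  const delta = chunk.choices[0]?.delta?.content ?? "";
  if (delta) console.log(delta); // the val streams each delta into the page instead
}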
kindness
@nate
An interactive, runnable TypeScript val by nate
Script
export let kindness = async () => {
return await gpt3({
openAiKey: process.env.OPENAI_API_KEY,
prompt:
"Speaking as universal consciousness, say something short, true, uplifting, loving, and kind.",
OpenRouterChatCompletion
@rozek
free, but rate-limited access to the OpenRouter API
HTTP
import { floatingQuotaTracker } from 'https://esm.town/v/rozek/floatingQuotaTracker'
/**** free, but rate-limited access to the OpenRouter API ****/
export default async function (Request:Request):Promise<Response> {
if (Request.method === 'OPTIONS') {
return new Response(null, {
} catch (Signal:any) {
return new Response(JSON.stringify({ error:Signal.message }), { status:500 });
return await OpenAIChatCompletion(Request)
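Based on the OpenRouterChatCompletion_Test fragments above, the endpoint accepts a standard OpenAI-style chat completion request. A hypothetical caller (the val.run URL and model id are assumptions):

const CompletionResponse = await fetch("https://rozek-openrouterchatcompletion.web.val.run", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "meta-llama/llama-3.1-8b-instruct:free", // assumed free-tier OpenRouter model
    messages: [{ role: "user", content: "Say hello in one short sentence." }],
  }),
});
const Completion = await CompletionResponse.json();
console.log(Completion.choices?.[0]?.message?.content);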