Search

Results include substring matches and semantically similar vals.
browserlessScrapeExample
@vtdocs
An interactive, runnable TypeScript val by vtdocs
Script
method: "POST",
body: JSON.stringify({
"url": "https://en.wikipedia.org/wiki/OpenAI",
"elements": [{
// The second <p> element on the page
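The excerpt only shows fragments of the request body. A minimal sketch of what the full call might look like, assuming Browserless's /scrape endpoint and a BROWSERLESS_API_KEY environment secret (neither appears in the excerpt):

```ts
// Sketch only: the endpoint URL, token parameter, and response shape are
// assumptions based on Browserless's documented /scrape API.
const token = Deno.env.get("BROWSERLESS_API_KEY");

const res = await fetch(`https://chrome.browserless.io/scrape?token=${token}`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    url: "https://en.wikipedia.org/wiki/OpenAI",
    elements: [{
      // The second <p> element on the page
      selector: "p:nth-of-type(2)",
    }],
  }),
});

const data = await res.json();
console.log(data.data?.[0]?.results?.[0]?.text);
```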
GROKPROMPT
@AppleLamps
@jsxImportSource https://esm.sh/react@18.2.0
HTTP
* It uses the OpenAI API to generate detailed and concise prompts.
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
console.log("Sending request to OpenAI:", JSON.stringify(analysisMessages, null, 2));
const completion = await openai.chat.completions.create({
console.log("Received response from OpenAI:", JSON.stringify(completion, null, 2));
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
console.log("Sending clarification request to OpenAI:", JSON.stringify(analysisMessages, null, 2));
const completion = await openai.chat.completions.create({
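Only the boundaries of the two completion calls appear in the excerpt. A minimal sketch of the call pattern it suggests, using Val Town's std/openai wrapper; the message contents and model name are assumptions:

```ts
// Sketch of the request/response pattern shown above; messages and model are assumptions.
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();

const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [
    { role: "system", content: "You write detailed, concise prompts." },
    { role: "user", content: "Draft a prompt for summarizing a research paper." },
  ],
});
console.log("Received response from OpenAI:", completion.choices[0].message.content);
```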
pirateTalk
@dctalbot
A Slack bot which rephrases messages in a channel in the voice of a Pirate. Originally built for National Talk Like a Pirate Day, September 19, 2024. https://dctalbot.nyc/blog/pirate-talk/
HTTP
import { OpenAI } from "https://esm.town/v/std/openai";
import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON?v=41";
const openai = new OpenAI();
const ok = new Response(undefined, { status: 200 });
async function ahoy(prompt: string): Promise<string> {
const completion = await openai.chat.completions.create({
messages: [
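The excerpt cuts off inside ahoy(). A sketch of how the function might be completed with the std/openai wrapper; the system prompt and model are assumptions:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

// Sketch of a completed ahoy(); the system prompt and model are assumptions.
async function ahoy(prompt: string): Promise<string> {
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [
      { role: "system", content: "Rephrase the user's message in the voice of a pirate." },
      { role: "user", content: prompt },
    ],
  });
  return completion.choices[0].message.content ?? prompt;
}
```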
nighthawks
@yawnxyz
This is nighthawks, an experimental NPC character generator that remembers details about conversations. Import this into other workflows for now; a UI is coming soon!
Script
const characterModel = new ModelProvider({
id: `${characterName}`,
// for web search / tool use, use openai
provider: 'openai',
model: 'gpt-4o',
// provider: 'groq',
databaseRunner
@nicosuave
An interactive, runnable TypeScript val by nicosuave
HTTP
const fileResponses = await Promise.all(chunks.map((chunk, index) => createGzipFile(chunk, index, gzip, introspect)));
console.log(`Created ${fileResponses.length} ${gzip ? "gzip" : ""} files`);
return new Response(JSON.stringify({ openaiFileResponse: fileResponses }), {
status: 200,
headers: { "Content-Type": "application/json" },
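The createGzipFile helper is not shown in the excerpt. A hypothetical sketch of what gzipping one chunk could look like using the standard CompressionStream API (the real helper may differ):

```ts
// Hypothetical helper: compress a string chunk with the built-in CompressionStream.
async function gzipChunk(chunk: string): Promise<Uint8Array> {
  const compressed = new Blob([chunk]).stream().pipeThrough(new CompressionStream("gzip"));
  return new Uint8Array(await new Response(compressed).arrayBuffer());
}
```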
randomMovieQuiz
@movienerd
@jsxImportSource https://esm.sh/react
HTTP
if (request.url.endsWith("/new-game")) {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const correctMovie = movies[Math.floor(Math.random() * movies.length)];
`Give a rhyming vague description of the movie "${correctMovie}" without mentioning any character names, actor names, o
const completion = await openai.chat.completions.create({
messages: [{ role: "user", content: prompt }],
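A sketch of the /new-game branch suggested by the excerpt; the movie list is a placeholder, the prompt is paraphrased from the truncated line above, and the model and response shape are assumptions:

```ts
// Sketch of the /new-game branch; list, prompt wording, and model are assumptions.
export default async function (request: Request): Promise<Response> {
  if (request.url.endsWith("/new-game")) {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();
    const movies = ["The Matrix", "Jaws", "Casablanca"]; // placeholder list
    const correctMovie = movies[Math.floor(Math.random() * movies.length)];
    const prompt =
      `Give a rhyming, vague description of the movie "${correctMovie}" without mentioning any character names or actor names.`;
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [{ role: "user", content: prompt }],
    });
    return Response.json({
      description: completion.choices[0].message.content,
      answer: correctMovie,
    });
  }
  return new Response("Not found", { status: 404 });
}
```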
plantIdentifierApp
@mahtabtattur
@jsxImportSource https://esm.sh/react
HTTP
import { createRoot } from "https://esm.sh/react-dom/client";
import OpenAI from "https://esm.sh/openai";
// Plant Database
const base64Image = reader.result.split(',')[1];
const openai = new OpenAI({
apiKey: 'sk-proj-6oC8e5yxx4_Wl4GjnVGlzF2cYFJ-XAO7R56FAbvYwPo50OvYI-a6KbjFpvZHNDm0fA05zHACOFT3BlbkFJ6fEKZntz3oXlBkkz
try {
const response = await openai.chat.completions.create({
model: "gpt-4o",
randomTextGenerator
@aleaf
@jsxImportSource https://esm.sh/react
HTTP
if (url.searchParams.has("paragraphs")) {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
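A sketch of the ?paragraphs=N branch the excerpt hints at; the prompt wording, model, and response handling are assumptions:

```ts
// Sketch of the paragraph-generation branch; prompt and model are assumptions.
export default async function (req: Request): Promise<Response> {
  const url = new URL(req.url);
  if (url.searchParams.has("paragraphs")) {
    const count = Number(url.searchParams.get("paragraphs")) || 1;
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [
        { role: "user", content: `Write ${count} short paragraphs of random placeholder text.` },
      ],
    });
    return new Response(completion.choices[0].message.content, {
      headers: { "Content-Type": "text/plain" },
    });
  }
  return new Response("Add ?paragraphs=N to the URL", { status: 400 });
}
```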
hackerNewsAuthors
@guidoism
An interactive, runnable TypeScript val by guidoism
Script
"worrydream", // Bret Victor (dynamicland)
"stevewoz", // Steve Wozniak (apple)
"sama", // Sam Altman (openai, yc)
"geoff", // Geoff Ralston (yc)
"dang", // Dan G (hn)
insightfulSalmonRabbit
@ubixsnow
@jsxImportSource https://esm.sh/react
HTTP
const webpageText = await webpageResponse.text();
// Use OpenAI as Claude proxy
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
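A sketch of the pattern shown in the excerpt: fetch a page, then pass its text to a chat completion. The target URL, prompt, and model are assumptions:

```ts
// Sketch only: fetch a page and summarize its text via std/openai.
const webpageResponse = await fetch("https://example.com");
const webpageText = await webpageResponse.text();

const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [
    { role: "system", content: "Summarize the key points of the page text you are given." },
    { role: "user", content: webpageText.slice(0, 8000) },
  ],
});
console.log(completion.choices[0].message.content);
```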
codecommenter
@JamesAndrew
Code Commenter

Code Commenter is a web application that automatically adds concise and useful comments to your code using OpenAI's language model. It supports JavaScript, TypeScript, and C# languages.

Features
- Two-panel interface: Input code on the left, view commented code on the right
- Language selection: Choose between JavaScript, TypeScript, and C#
- Syntax highlighting: Commented code is displayed with proper syntax highlighting
- Loading indicator: A spinner animation shows when the AI is processing your code
- Responsive design: The app adjusts to fit various screen sizes

How It Works
1. The user pastes their code into the left panel
2. The user selects the appropriate language from the dropdown menu
3. When the "Add Comments" button is clicked, the code is sent to the server
4. The server uses OpenAI's API to generate comments for the code
5. The commented code is returned and displayed in the right panel with syntax highlighting

Technology Stack
- Frontend: React (user interface), Prism.js (syntax highlighting)
- Backend: Deno (runtime environment), Val Town (hosting platform), OpenAI API (code comments)

Usage
1. Open the application in your web browser
2. Paste your code into the left panel
3. Select the appropriate language from the dropdown menu
4. Click the "Add Comments" button
5. Wait for the AI to process your code (a spinner will indicate that it's working)
6. View the commented code in the right panel

Limitations
- The maximum length of code that can be processed is limited by the OpenAI API's token limit
- The quality of comments may vary depending on the complexity of the code and the AI model's capabilities
- An internet connection is required to use the application

Privacy Considerations
Please note that the code you submit is sent to OpenAI's servers for processing. Do not submit sensitive or proprietary code that you don't want to be processed by a third-party service.

Future Improvements
- Add support for more programming languages
- Implement user authentication for personalized experiences
- Add the ability to save and share commented code snippets
- Improve error handling and user feedback

Feedback and Contributions
This project is hosted on Val Town. If you have any suggestions, bug reports, or want to contribute to the project, please contact the Val Town team or the val's creator.
HTTP
# Code Commenter
Code Commenter is a web application that automatically adds concise and useful comments to your code using OpenAI's language model. It supports JavaScript, TypeScript, and C# languages.
## Features
3. When the "Add Comments" button is clicked, the code is sent to the server
4. The server uses OpenAI's API to generate comments for the code
5. The commented code is returned and displayed in the right panel with syntax highlighting
- Val Town: Hosting platform
- OpenAI API: For generating code comments
## Usage
## Limitations
- The maximum length of code that can be processed is limited by the OpenAI API's token limit
- The quality of comments may vary depending on the complexity of the code and the AI model's capabilities
## Privacy Considerations
Please note that the code you submit is sent to OpenAI's servers for processing. Do not submit sensitive or proprietary code that you don't want to be processed by a third-party service.
## Future Improvements
if (request.method === "POST" && new URL(request.url).pathname === "/comment") {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const { code, language } = await request.json();
const completion = await openai.chat.completions.create({
messages: [
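A sketch of the /comment endpoint described in the README; the system prompt, model, and response field name are assumptions, and error handling is omitted:

```ts
// Sketch of the /comment handler; prompt, model, and response shape are assumptions.
export default async function (request: Request): Promise<Response> {
  if (request.method === "POST" && new URL(request.url).pathname === "/comment") {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();
    const { code, language } = await request.json();
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [
        {
          role: "system",
          content: `Add concise, useful comments to the following ${language} code. Return only the commented code.`,
        },
        { role: "user", content: code },
      ],
    });
    return Response.json({ commentedCode: completion.choices[0].message.content });
  }
  return new Response("Not found", { status: 404 });
}
```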
infiniteSVGGraph
@maxm
Infinite SVG Graph
A connected graph of AI-generated SVG images. Ask it to make any kind of SVG. Add your contribution to the graph. Make it POP!
HTTP
import { extractValInfo } from "https://esm.town/v/pomdtr/extractValInfo?v=29";
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
import { ResultSet, sqlite } from "https://esm.town/v/std/sqlite?v=6";
app.post("/remix/:id", async (c) => {
const openai = new OpenAI();
const { prompt } = await c.req.json();
if (svg === undefined) return c.text("Not found", 404);
const stream = await openai.chat.completions.create({
messages: [
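A sketch of the streaming remix handler suggested by the excerpt; the prompt framing, model, SVG lookup, and response handling are assumptions (the real val reads SVGs from sqlite):

```ts
import { Hono } from "npm:hono";
import { OpenAI } from "https://esm.town/v/std/openai?v=4";

const app = new Hono();

// Hypothetical stand-in for the sqlite lookup in the real val.
async function loadSvgById(id: string): Promise<string | undefined> {
  return `<svg xmlns="http://www.w3.org/2000/svg"><circle cx="12" cy="12" r="8"/></svg>`;
}

// Sketch of the remix handler's streaming completion call.
app.post("/remix/:id", async (c) => {
  const openai = new OpenAI();
  const { prompt } = await c.req.json();
  const svg = await loadSvgById(c.req.param("id"));
  if (svg === undefined) return c.text("Not found", 404);

  const stream = await openai.chat.completions.create({
    model: "gpt-4o",
    stream: true,
    messages: [
      { role: "system", content: "You edit SVG documents. Return only SVG markup." },
      { role: "user", content: `Remix this SVG according to: ${prompt}\n\n${svg}` },
    ],
  });

  let out = "";
  for await (const chunk of stream) {
    out += chunk.choices[0]?.delta?.content ?? "";
  }
  return c.text(out);
});

export default app.fetch;
```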
valle_tmp_25273384802368385202566002443081
@janpaul123
@jsxImportSource https://esm.sh/react
HTTP
import _ from "npm:lodash@4";
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
const contextWindow: any = await valleGetValsContextWindow(model);
const openai = new OpenAI();
const stream = await openai.chat.completions.create({
model,
valle_tmp_9571549127942918010434925760972602
@janpaul123
@jsxImportSource https://esm.sh/react
HTTP
import _ from "npm:lodash@4";
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
const contextWindow: any = await valleGetValsContextWindow(model);
const openai = new OpenAI();
const stream = await openai.chat.completions.create({
model,
valle_tmp_1369396001916440808441248753827946
@janpaul123
@jsxImportSource https://esm.sh/react
HTTP
import _ from "npm:lodash@4";
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
const contextWindow: any = await valleGetValsContextWindow(model);
const openai = new OpenAI();
const stream = await openai.chat.completions.create({
model,