Search

Results include substring matches and semantically similar vals.
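The listing itself doesn't show how semantic matching is done; as a purely illustrative sketch (not Val Town's actual implementation), similarity between a search query and a val description could be scored with OpenAI embeddings plus cosine similarity. The embed and cosine helpers below are hypothetical names and assume an OPENAI_API_KEY in the environment:

import OpenAI from "npm:openai";

const openai = new OpenAI(); // assumes OPENAI_API_KEY is set

// Embed a piece of text with OpenAI's embeddings endpoint.
async function embed(text: string): Promise<number[]> {
  const res = await openai.embeddings.create({
    model: "text-embedding-3-small",
    input: text,
  });
  return res.data[0].embedding;
}

// Cosine similarity between two vectors: higher means more semantically similar.
function cosine(a: number[], b: number[]): number {
  let dot = 0, na = 0, nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (Math.sqrt(na) * Math.sqrt(nb));
}

const [query, candidate] = await Promise.all([
  embed("openai"),
  embed("Summarize a webpage with GPT-4o"),
]);
console.log(cosine(query, candidate)); // higher scores rank as more similar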
crossmintSolanaTxGenerator
@alfongj
@jsxImportSource https://esm.sh/react
HTTP
export default async function server(request: Request): Promise<Response> {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const url = new URL(request.url);
try {
const completion = await openai.chat.completions.create({
model: "gpt-4o",
valle_tmp_202239440460780356420276977660784
@janpaul123
@jsxImportSource https://esm.sh/react
HTTP
import _ from "npm:lodash@4";
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
const contextWindow: any = await valleGetValsContextWindow(model);
const openai = new OpenAI();
const stream = await openai.chat.completions.create({
model,
VALLErun
@janpaul123
The actual code for VALL-E: https://www.val.town/v/janpaul123/VALLE
HTTP
import { sleep } from "https://esm.town/v/stevekrouse/sleep?v=1";
import { anthropic } from "npm:@ai-sdk/anthropic";
import { openai } from "npm:@ai-sdk/openai";
import ValTown from "npm:@valtown/sdk";
import { StreamingTextResponse, streamText } from "npm:ai";
let vercelModel;
if (model.includes("gpt")) {
vercelModel = openai(model);
} else {
vercelModel = anthropic(model);
valwriter
@yawnxyz
Doesn't seem to work on Val Town; it should work in principle
HTTP
/** @jsxImportSource npm:hono@3/jsx */
import { fetchText } from "https://esm.town/v/stevekrouse/fetchText";
import { chat } from "https://esm.town/v/stevekrouse/openai";
import cronstrue from "npm:cronstrue";
import { Hono } from "npm:hono@3";
valle_tmp_1232975987134350426195802232196885
@janpaul123
@jsxImportSource https://esm.sh/react
HTTP
import _ from "npm:lodash@4";
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
const contextWindow: any = await valleGetValsContextWindow(model);
const openai = new OpenAI();
const stream = await openai.chat.completions.create({
model,
grammarCheckApp
@mrshorts
@jsxImportSource https://esm.sh/react@18.2.0
HTTP
if (request.method === 'POST') {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
try {
const { text } = await request.json();
const completion = await openai.chat.completions.create({
messages: [
ListenTo
@zzz
// https://api.val.town/v1/express/zzz.ListenTo?val=stevekrouse.whatIsValTown
Script
// https://api.val.town/v1/express/zzz.ListenTo?val=stevekrouse.whatIsValTown
export async function ListenTo(req, res) {
const { val = "zzz.demoOpenAIGPT4Summary" } = req.query;
const url = `https://api.val.town/v1/run/${val.replace("@", "")}`
const resp = await fetch(url);
InventionDetailstoJSONConverter
@willthereader
@jsxImportSource https://esm.sh/react
HTTP
try {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const { blob } = await import("https://esm.town/v/std/blob");
const openai = new OpenAI();
const body = await request.json();
try {
const completion = await openai.chat.completions.create({
messages: [
LampChat
@AppleLamps
@jsxImportSource https://esm.sh/react@18.2.0
HTTP
if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
try {
messages.push({ role: "user", content: prompt });
const completion = await openai.chat.completions.create({
messages: messages,
} catch (error) {
console.error("OpenAI API error:", error);
return new Response(
email_channel
@campsite
Email a Campsite channel. This val creates an email address that posts forwarded emails to a Campsite channel. It uses GPT-4 to extract a readable version of the forwarded email from the raw body. If you don't want to use GPT-4, omit the OPENAI_API_KEY and the raw body will be included in the post. Other providers are available via Vercel's AI SDK. For help with creating integrations, check out the Campsite API docs. You'll need to create an integration and get an API key.
Email
This val creates an email address that posts forwarded emails to a [Campsite](https://campsite.com) channel.
forwarded email from the raw body. If you don't want to use GPT-4, omit the `OPENAI_API_KEY` and the raw body will be include
For help with creating integrations, check out the [Campsite API docs](https://app.campsite.com/campsite/p/notes/campsite-api
import { email } from "https://esm.town/v/std/email";
import { createOpenAI } from "npm:@ai-sdk/openai";
import { generateObject } from "npm:ai";
// https://sdk.vercel.ai/docs/introduction#model-providers
const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
const campsite = new Campsite({ apiKey: CAMPSITE_API_KEY });
let subject = payload.subject || "Forwarded email (no subject)";
if (OPENAI_API_KEY) {
const openai = createOpenAI({ apiKey: OPENAI_API_KEY });
// Extract name and email from the forwarded message
const { object: { name, email, subject, message } } = await generateObject({
model: openai("gpt-4-turbo"),
schema: z.object({
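A minimal sketch of the GPT-4 extraction step this val describes, assuming the Vercel AI SDK's generateObject call shown in the excerpt above; the rawBody parameter, the prompt text, and the extractForwardedEmail helper are hypothetical, not part of the original val:

import { createOpenAI } from "npm:@ai-sdk/openai";
import { generateObject } from "npm:ai";
import { z } from "npm:zod";

const openai = createOpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

// Turn the raw forwarded-email body into a structured, readable message.
export async function extractForwardedEmail(rawBody: string) {
  const { object } = await generateObject({
    model: openai("gpt-4-turbo"),
    schema: z.object({
      name: z.string().describe("Sender of the original email"),
      email: z.string().describe("Sender's email address"),
      subject: z.string().describe("Original subject line"),
      message: z.string().describe("Readable plain-text body of the forwarded email"),
    }),
    prompt: `Extract the original sender and message from this forwarded email:\n\n${rawBody}`,
  });
  return object; // { name, email, subject, message }
}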
webpage_summarizer
@spinningideas
@jsxImportSource https://esm.sh/react@18.2.0
HTTP
const webpageText = await webpageResponse.text();
// Use OpenAI as Claude proxy
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
townGen
@all
townGen [[https://www.val.town/v/all/promptGen]] [[https://www.val.town/v/stevekrouse/valwriter]] Note: It looks like the OpenAI enhancement was dropped at some point when adding all the gizmos.
HTTP
- [[https://www.val.town/v/stevekrouse/valwriter]]
- _Note_: It looks like the OpenAI enhancement was dropped at some point when adding all the gizmos.
templateModalTest
@iamseeley
An interactive, runnable TypeScript val by iamseeley
HTTP
<path stroke-linecap="round" stroke-linejoin="round" d="M20.25 8.511c.884.284 1.5 1.128 1.5 2.097v4.286c0 1.1
</svg>
<div class="font-semibold group-hover:text-black">OpenAI chat completion</div>
</button>
<button class="group bg-gray-50 border-gray-300 flex items-center gap-2 overflow-hidden rounded border p-3 text-l
LampBlogs
@AppleLamps
@jsxImportSource https://esm.sh/react@18.2.0
HTTP
&& new URL(request.url).pathname === "/generate-blog"
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
try {
if (topic === "RANDOM") {
const randomTopicCompletion = await openai.chat.completions.create({
messages: [
topicPrompt = `Write a brief blog post about ${topic}. Keep it concise and engaging, around 200-300 words.`;
const completion = await openai.chat.completions.create({
messages: [
extremePlumCaribou
@ejfox
@jsxImportSource https://esm.sh/react
HTTP
headers: { 'Content-Type': 'application/json' }
// Generate new emoji using OpenAI
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [{ role: "user", content: `Generate a single emoji that best represents "${name}". Respond with only the em