gpt4FunctionCallingExample
@stevekrouse
// TODO pull out function call and initial message
Script
import { OpenAI } from "npm:openai";
const openai = new OpenAI();
const functionExpression = await openai.chat.completions.create({
"functions": [
console.log(functionExpression);
// TODO pull out function call and initial message
let args = functionExpression.choices[0].message.function_call.arguments;
let functionCallResult = { "temperature": "22", "unit": "celsius", "description": "Sunny" };
const result = await openai.chat.completions.create({
"function_call": { "name": "get_current_weather", "arguments": "{ \"location\": \"Boston, MA\"}" },

ai
@yawnxyz
An HTTP and class wrapper for Vercel's AI SDK.
Usage:
Groq: https://yawnxyz-ai.web.val.run/generate?prompt="tell me a beer joke"&provider=groq&model=llama3-8b-8192
Perplexity: https://yawnxyz-ai.web.val.run/generate?prompt="what's the latest phage directory capsid & tail article about?"&provider=perplexity
Mistral: https://yawnxyz-ai.web.val.run/generate?prompt="tell me a joke?"&provider=mistral&model="mistral-small-latest"
async function calculateEmbeddings(text) {
const url = `https://yawnxyz-ai.web.val.run/generate?embed=true&value=${encodeURIComponent(text)}`;
try {
const response = await fetch(url);
const data = await response.json();
return data;
} catch (error) {
console.error('Error calculating embeddings:', error);
return null;
}
}
HTTP
async function calculateEmbeddings(text) {
import { createOpenAI } from "npm:@ai-sdk/openai";
const openai = createOpenAI({
// apiKey = Deno.env.get("OPENAI_API_KEY");
apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET")
const groq = createOpenAI({
baseURL: 'https://api.groq.com/openai/v1',
const perplexity = createOpenAI({
this.defaultProvider = options.provider || 'openai';
case 'openai':
result = await this.generateOpenAIResponse({ model, prompt, maxTokens, temperature, streaming, schema, system, messages, tools, ...additionalSettings });
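A hedged usage sketch for the generate endpoint described above; the response shape isn't shown in the preview, so this just prints the raw body:

// Call the val's /generate endpoint with the query params from the usage notes.
const url = new URL("https://yawnxyz-ai.web.val.run/generate");
url.searchParams.set("prompt", "tell me a beer joke");
url.searchParams.set("provider", "groq");
url.searchParams.set("model", "llama3-8b-8192");

const response = await fetch(url);
console.log(await response.text());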
grayWildfowl
@jdan
An interactive, runnable TypeScript val by jdan
Script
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
const openai = new OpenAI();
async function runConversation() {
const inputWord = "almond latte";
`.replaceAll(/\s+/g, "");
const response = await openai.chat.completions.create({
messages: [
// for (let i = 0; i < message.tool_calls.length; i++) {
// console.log("[CALLING]", message.tool_calls[i].function);
// const tool = toolbox[message.tool_calls[i].function.name];
// if (tool) {
// const result = await tool.call(JSON.parse(message.tool_calls[i].function.arguments));
// console.log("[RESULT]", truncate(result));
ai
@goode_bye
An HTTP and class wrapper for Vercel's AI SDK.
Usage:
Groq: https://yawnxyz-ai.web.val.run/generate?prompt="tell me a beer joke"&provider=groq&model=llama3-8b-8192
Perplexity: https://yawnxyz-ai.web.val.run/generate?prompt="what's the latest phage directory capsid & tail article about?"&provider=perplexity
Mistral: https://yawnxyz-ai.web.val.run/generate?prompt="tell me a joke?"&provider=mistral&model="mistral-small-latest"
async function calculateEmbeddings(text) {
const url = `https://yawnxyz-ai.web.val.run/generate?embed=true&value=${encodeURIComponent(text)}`;
try {
const response = await fetch(url);
const data = await response.json();
return data;
} catch (error) {
console.error('Error calculating embeddings:', error);
return null;
}
}
HTTP
async function calculateEmbeddings(text) {
import { createOpenAI } from "npm:@ai-sdk/openai";
const openai = createOpenAI({
// apiKey = Deno.env.get("OPENAI_API_KEY");
apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET"),
const groq = createOpenAI({
baseURL: "https://api.groq.com/openai/v1",
const perplexity = createOpenAI({
this.defaultProvider = options.provider || "openai";
case "openai":
result = await this.generateOpenAIResponse({
weatherBot
@jdan
An interactive, runnable TypeScript val by jdan
Script
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
const openai = new OpenAI();
openAiTool: {
type: "function",
function: {
openAiTool: {
type: "function",
function: {
openAiTool: {
type: "function",
weatherBot
@charmaine
An interactive, runnable TypeScript val by charmaine
Script
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
const openai = new OpenAI();
openAiTool: {
type: "function",
function: {
openAiTool: {
type: "function",
function: {
openAiTool: {
type: "function",

openaiOpenAPI
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
Script
export let openaiOpenAPI = `
openapi: 3.0.0
description: |
The Val Town API provides services to evaluate JavaScript and TypeScript expressions and run vals as APIs, either as functions or Express handlers.
Learn more at [https://docs.val.town](https://docs.val.town)
The JavaScript or TypeScript expression to be evaluated.
This should be a single expression, like a single function
call, assignment operation, or calculation. If you need
to execute multiple expressions, wrap them in a function.
schema:
summary: Simple addition
functionCall:
value: "@stevekrouse.addOne(@stevekrouse.example1)"
summary: Calling a user's val function
handle:
description: |
This val is a function that returns its arguments. It is useful for testing how the API handles the arguments passed to it.
View the val at [https://val.town/v/stevekrouse.id](https://val.town/v/stevekrouse.id)
summary: Simple addition result
functionCall:
value: 42
summary: Calling a function result
security:
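The spec excerpt describes an expression-evaluation endpoint whose "Simple addition" example returns 42. A hypothetical call matching that description (the exact path and body shape are assumptions drawn from docs.val.town, not this excerpt):

// Hypothetical call against the eval endpoint the spec describes; the
// path and body shape are assumptions, not taken from this excerpt.
const response = await fetch("https://api.val.town/v1/eval", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    // A single expression; wrap multiple expressions in a function.
    code: "1 + 41",
  }),
});
console.log(await response.json()); // the "Simple addition result" example returns 42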

cerebrasTemplate
@stevekrouse
Cerebras Inference template
This val shows you how you can deploy an app using Cerebras Inference on Val Town in seconds.
What is Cerebras?
Cerebras is an American chip manufacturer that produces large wafer chips that deliver mind-blowing LLM inference speeds. As of this writing on Jan 17, 2025, Cerebras Inference provides Llama 3.1 8b, 3.1 70b, and 3.3 70b at a jaw-dropping 2k tokens per second – that's 50x faster than what the frontier labs produce. Llama 3.3 70b at 2k tokens per second is particularly noteworthy because it is a GPT-4-class model. This level of intelligence at that level of speed will unlock whole new classes of applications.
Quick start
Set up Cerebras:
1. Sign up for Cerebras
2. Get a Cerebras API Key
3. Save it in a Val Town environment variable called CEREBRAS_API_KEY
Once Cerebras is set up in your Val Town account, there are two ways to get started:
1. Fork this app and customize it (or ask Townie AI to customize it)
2. Start a new chat with Townie AI and copy & paste the following instructions:
Use Cerebras for AI on the backend like so:
const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
  apiKey: Deno.env.get("CEREBRAS_API_KEY"),
  baseURL: "https://api.cerebras.ai/v1",
});
const response = await client.chat.completions.create({
  model: "llama-3.3-70b",
  messages: [],
});
const generatedText = response.choices[0].message.content;
For example, the val in this template was created by asking Townie AI to "Make a chatgpt clone", hitting shift-enter twice, pasting in the Cerebras instructions above, and hitting enter. Townie built this app on its first try, in about 20 seconds.
Sample apps:
- Cerebras Searcher - a Perplexity clone that uses the SerpAPI to do RAG and summaries with Cerebras (requires a SerpAPI key)
- Cerebras Coder - an app that generates websites in a second with Cerebras
- Cerebras Debater - an app that truly shows Cerebras's speed: it's Cerebras talking to Cerebras in a debate
HTTP
Use Cerebras for AI on the backend like so:
const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
apiKey: Deno.env.get("CEREBRAS_API_KEY"),
import React, { useState } from "https://esm.sh/react@18.2.0";
function App() {
const [messages, setMessages] = useState([]);
</div>
function client() {
createRoot(document.getElementById("root")).render(<App />);
client();
export default async function server(request: Request): Promise<Response> {
if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
const { messages } = await request.json();
const { OpenAI } = await import("https://esm.sh/openai");
// Cerebras Inference
// Keep these comments so we remember not to change this
const client = new OpenAI({
apiKey: Deno.env.get("CEREBRAS_API_KEY"),
} catch (error) {
console.error("Error calling Cerebras API:", error);
if (error.status === 429) {
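Pieced together from the fragments above, the server half of the template plausibly looks like the following; the JSON response shape and error bodies are assumptions:

export default async function server(request: Request): Promise<Response> {
  if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
    const { messages } = await request.json();
    const { OpenAI } = await import("https://esm.sh/openai");
    // Cerebras Inference
    // Keep these comments so we remember not to change this
    const client = new OpenAI({
      apiKey: Deno.env.get("CEREBRAS_API_KEY"),
      baseURL: "https://api.cerebras.ai/v1",
    });
    try {
      const response = await client.chat.completions.create({
        model: "llama-3.3-70b",
        messages,
      });
      // Assumed response shape; the preview elides what the client expects.
      return Response.json({ content: response.choices[0].message.content });
    } catch (error: any) {
      console.error("Error calling Cerebras API:", error);
      if (error.status === 429) {
        return new Response("Rate limited, try again shortly", { status: 429 });
      }
      return new Response("Error calling Cerebras API", { status: 500 });
    }
  }
  // The preview elides the HTML-serving branch for the React client.
  return new Response("Not found", { status: 404 });
}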
promptScorer
@toowired
An interactive, runnable TypeScript val by toowired
Script
import { OpenAI } from "https://esm.town/v/std/openai";
export async function promptScorer({
original,
evolved,
}: { original: string; evolved: string }) {
const openai = new OpenAI();
const scoringPrompt =
try {
const completion = await openai.chat.completions.create({
model: "gpt-4o",
} catch (error) {
console.error("Error calling OpenAI API:", error);
return { score: 5, error: "Failed to get score from OpenAI" };
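Filling in the elided middle of the preview (the scoring rubric and response parsing are assumptions), the val plausibly reads:

import { OpenAI } from "https://esm.town/v/std/openai";

export async function promptScorer({ original, evolved }: { original: string; evolved: string }) {
  const openai = new OpenAI();
  // Stand-in rubric: the actual scoring prompt is elided in the preview.
  const scoringPrompt = `Rate how much the evolved prompt improves on the original, 1-10.
Original: ${original}
Evolved: ${evolved}
Reply with only the number.`;
  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-4o",
      messages: [{ role: "user", content: scoringPrompt }],
    });
    const score = Number(completion.choices[0].message.content?.trim());
    return { score };
  } catch (error) {
    console.error("Error calling OpenAI API:", error);
    return { score: 5, error: "Failed to get score from OpenAI" };
  }
}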

byob
@vawogbemi
BYOB - Build Your Own Bot
You can chat with LLMs over email; the email thread functions as memory. The biggest thing is that you can instantly create a chat-like interface with LLMs. Pair that with backend data and functions and you've got something really powerful.
Take it further
Custom domains
Use Cloudflare Email Workers or a similar service to create a custom email domain and route any incoming emails to this val. Use any email API set up with that domain to send emails, e.g. SendGrid, Resend, Postmark.
Toolings
LLMs can use tools, meaning you can make this an agent and a whole lot more useful.
Email
# BYOB - Build Your Own Bot
You can chat with LLMs over email; the email thread functions as memory. The biggest thing is that you can instantly create a chat-like interface with LLMs. Pair that with backend data and functions and you've got something really powerful.
## Take it further
### Toolings
* LLMs can use [tools](https://platform.openai.com/docs/guides/function-calling), meaning you can make this an agent and a whole lot more useful.
import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
import { z } from "https://esm.sh/zod";
import { email } from "https://esm.town/v/std/email";
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function(e: Email) {
const client = new OpenAI();
const Messages = z.object({
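A minimal sketch of the email loop the description and imports imply; the Messages schema fields, model choice, and reply subject are assumptions:

import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
import { z } from "https://esm.sh/zod";
import { email } from "https://esm.town/v/std/email";
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function (e: Email) {
  const client = new OpenAI();
  // Assumed schema: the preview only shows that a Messages object exists.
  const Messages = z.object({ reply: z.string() });

  const completion = await client.beta.chat.completions.parse({
    model: "gpt-4o-mini",
    messages: [
      { role: "system", content: "You are a helpful email assistant." },
      // The thread-as-memory trick: the incoming email carries prior turns.
      { role: "user", content: e.text ?? "" },
    ],
    response_format: zodResponseFormat(Messages, "messages"),
  });

  // std/email replies to the account owner; a reply subject keeps threading.
  await email({
    subject: `Re: ${e.subject}`,
    text: completion.choices[0].message.parsed?.reply ?? "",
  });
}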
generateBackendSchema
@toowired
// HTML content for testing
HTTP
import { OpenAI } from "https://esm.town/v/std/openai";
interface SchemaResponse {
dependencies: string[];
export default async function (req: Request): Promise<Response> {
// Set CORS headers
return new Response("Invalid input: description must be a non-empty string", { status: 400, headers });
const openai = new OpenAI();
let completion;
try {
completion = await openai.chat.completions.create({
messages: [
} catch (error) {
console.error("Error calling OpenAI API:", error);
return new Response("Error generating schema. Please try again later.", { status: 500, headers });
} catch (error) {
console.error("Error parsing OpenAI response:", error);
return new Response("Error processing the generated schema. Please try again.", { status: 500, headers });
<script>
async function generateSchema() {
const description = document.getElementById('description').value;
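The preview's script opens a generateSchema function; a hedged sketch of how the client side likely completes it (the endpoint path and the #output element are assumptions):

<script>
  // Hedged completion of the client half; the endpoint path and the
  // #output element are assumptions, not taken from the preview.
  async function generateSchema() {
    const description = document.getElementById('description').value;
    const res = await fetch('/', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ description }),
    });
    const output = document.getElementById('output');
    if (!res.ok) {
      output.textContent = await res.text();
      return;
    }
    output.textContent = JSON.stringify(await res.json(), null, 2);
  }
</script>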

egoBooster
@stevekrouse
* This ego booster app takes a selfie, sends it to GPT-4o-mini for analysis,
* and streams funny, specific compliments about the user's appearance.
* We use the WebRTC API for camera access, the OpenAI API for image analysis,
* and server-sent events for real-time streaming of compliments.
HTTP
* We use the WebRTC API for camera access, the OpenAI API for image analysis,
function App() {
function client() {
console.log("Document is defined, calling client()");
console.log("Document is undefined, not calling client()");
async function server(request: Request): Promise<Response> {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
console.log("Sending request to OpenAI");
const completion = await openai.chat.completions.create({
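The description mentions streaming compliments over server-sent events. A hedged sketch of that streaming half, assuming the std client supports streaming and vision inputs like the upstream SDK:

// Stream an OpenAI vision completion out as server-sent events; model,
// prompt, and streaming support in the std client are assumptions.
async function streamCompliments(imageDataUrl: string): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();
  const stream = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    stream: true,
    messages: [{
      role: "user",
      content: [
        { type: "text", text: "Give funny, specific compliments about this selfie." },
        { type: "image_url", image_url: { url: imageDataUrl } },
      ],
    }],
  });
  const encoder = new TextEncoder();
  const body = new ReadableStream({
    async start(controller) {
      for await (const chunk of stream) {
        const delta = chunk.choices[0]?.delta?.content ?? "";
        controller.enqueue(encoder.encode(`data: ${JSON.stringify(delta)}\n\n`));
      }
      controller.close();
    },
  });
  return new Response(body, { headers: { "Content-Type": "text/event-stream" } });
}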
cerebrasTemplate
@cory16457
Cerebras Inference template
This val shows you how you can deploy an app using Cerebras Inference on Val Town in seconds.
What is Cerebras?
Cerebras is an American chip manufacturer that produces large wafer chips that deliver mind-blowing LLM inference speeds. As of this writing on Jan 17, 2025, Cerebras Inference provides Llama 3.1 8b, 3.1 70b, and 3.3 70b at a jaw-dropping 2k tokens per second – that's 50x faster than what the frontier labs produce. Llama 3.3 70b at 2k tokens per second is particularly noteworthy because it is a GPT-4-class model. This level of intelligence at that level of speed will unlock whole new classes of applications.
Quick start
Set up Cerebras:
1. Sign up for Cerebras
2. Get a Cerebras API Key
3. Save it in a Val Town environment variable called CEREBRAS_API_KEY
Once Cerebras is set up in your Val Town account, there are two ways to get started:
1. Fork this app and customize it (or ask Townie AI to customize it)
2. Start a new chat with Townie AI and copy & paste the following instructions:
Use Cerebras for AI on the backend like so:
const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
  apiKey: Deno.env.get("CEREBRAS_API_KEY"),
  baseURL: "https://api.cerebras.ai/v1",
});
const response = await client.chat.completions.create({
  model: "llama-3.3-70b",
  messages: [],
});
const generatedText = response.choices[0].message.content;
For example, the val in this template was created by asking Townie AI to "Make a chatgpt clone", hitting shift-enter twice, pasting in the Cerebras instructions above, and hitting enter. Townie built this app on its first try, in about 20 seconds.
Sample apps:
- Cerebras Searcher - a Perplexity clone that uses the SerpAPI to do RAG and summaries with Cerebras (requires a SerpAPI key)
- Cerebras Coder - an app that generates websites in a second with Cerebras
- Cerebras Debater - an app that truly shows Cerebras's speed: it's Cerebras talking to Cerebras in a debate
HTTP
Use Cerebras for AI on the backend like so:
const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
apiKey: Deno.env.get("CEREBRAS_API_KEY"),
import React, { useState } from "https://esm.sh/react@18.2.0";
function App() {
const [messages, setMessages] = useState([]);
</div>
function client() {
createRoot(document.getElementById("root")).render(<App />);
client();
export default async function server(request: Request): Promise<Response> {
if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
const { messages } = await request.json();
const { OpenAI } = await import("https://esm.sh/openai");
// Cerebras Inference
// Keep these comments so we remember not to change this
const client = new OpenAI({
apiKey: Deno.env.get("csk-n552ehwetvcnykdcwc3wyffn33kw8yfcwddj6vw9cdvyxkpr"),
} catch (error) {
console.error("Error calling Cerebras API:", error);
if (error.status === 429) {

preciseScarletHerring
@stevekrouse
Cerebras Inference template
This val shows you how you can deploy an app using Cerebras Inference on Val Town in seconds.
What is Cerebras?
Cerebras is an American chip manufacturer that produces large wafer chips that deliver mind-blowing LLM inference speeds. As of this writing on Jan 17, 2025, Cerebras Inference provides Llama 3.1 8b, 3.1 70b, and 3.3 70b at a jaw-dropping 2k tokens per second – that's 50x faster than what the frontier labs produce. Llama 3.3 70b at 2k tokens per second is particularly noteworthy because it is a GPT-4-class model. This level of intelligence at that level of speed will unlock whole new classes of applications.
Quick start
Set up Cerebras:
1. Sign up for Cerebras
2. Get a Cerebras API Key
3. Save it in a Val Town environment variable called CEREBRAS_API_KEY
Once Cerebras is set up in your Val Town account, there are two ways to get started:
1. Fork this app and customize it (or ask Townie AI to customize it)
2. Start a new chat with Townie AI and copy & paste the following instructions:
Use Cerebras for AI on the backend like so:
const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
  apiKey: "YOUR_CEREBRAS_API_KEY",
  baseURL: "https://api.cerebras.ai/v1",
});
const response = await client.chat.completions.create({
  model: "llama-3.3-70b",
  messages: [],
});
const generatedText = response.choices[0].message.content;
For example, the val in this template was created by asking Townie AI to "Make a chatgpt clone", hitting shift-enter twice, pasting in the Cerebras instructions above, and hitting enter. Townie built this app on its first try, in about 20 seconds.
Sample apps:
- Cerebras Searcher - a Perplexity clone that uses the SerpAPI to do RAG and summaries with Cerebras (requires a SerpAPI key)
- Cerebras Coder - an app that generates websites in a second with Cerebras
- Cerebras Debater - an app that truly shows Cerebras's speed: it's Cerebras talking to Cerebras in a debate
HTTP
Use Cerebras for AI on the backend like so:
const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
apiKey: "YOUR_CEREBRAS_API_KEY",
import React, { useState } from "https://esm.sh/react@18.2.0";
function App() {
const [messages, setMessages] = useState([]);
</div>
function client() {
createRoot(document.getElementById("root")).render(<App />);
client();
export default async function server(request: Request): Promise<Response> {
if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
const { messages } = await request.json();
const { OpenAI } = await import("https://esm.sh/openai");
// Cerebras Inference
// Keep these comments so we remember not to change this
const client = new OpenAI({
apiKey: Deno.env.get("CEREBRAS_API_KEY"),
} catch (error) {
console.error("Error calling Cerebras API:", error);
if (error.status === 429) {

masterfulPeachHookworm
@stevekrouse
Cerebras Inference template
This val shows you how you can deploy an app using Cerebras Inference on Val Town in seconds.
What is Cerebras?
Cerebras is an American chip manufacturer that produces large wafer chips that deliver mind-blowing LLM inference speeds. As of this writing on Jan 17, 2025, Cerebras Inference provides Llama 3.1 8b, 3.1 70b, and 3.3 70b at a jaw-dropping 2k tokens per second – that's 50x faster than what the frontier labs produce. Llama 3.3 70b at 2k tokens per second is particularly noteworthy because it is a GPT-4-class model. This level of intelligence at that level of speed will unlock whole new classes of applications.
Quick start
There are two ways to get started:
1. Fork this app and customize it (or ask Townie AI to customize it)
2. Start a new chat with Townie AI and copy & paste the following instructions:
Use Cerebras for AI on the backend like so:
const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
  apiKey: "YOUR_CEREBRAS_API_KEY",
  baseURL: "https://api.cerebras.ai/v1",
});
const response = await client.chat.completions.create({
  model: "llama-3.3-70b",
  messages: [],
});
const generatedText = response.choices[0].message.content;
For example, the val in this template was created by asking Townie AI to "Make a chatgpt clone", hitting shift-enter twice, pasting in the Cerebras instructions above, and hitting enter. Townie built this app on its first try, in about 20 seconds.
Sample apps:
- Cerebras Searcher - a Perplexity clone that uses the SerpAPI to do RAG and summaries with Cerebras (requires a SerpAPI key)
- Cerebras Coder - an app that generates websites in a second with Cerebras
- Cerebras Debater - an app that truly shows Cerebras's speed: it's Cerebras talking to Cerebras in a debate
HTTP
Use Cerebras for AI on the backend like so:
const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
apiKey: "YOUR_CEREBRAS_API_KEY",
import React, { useState } from "https://esm.sh/react@18.2.0";
function App() {
const [messages, setMessages] = useState([]);
</div>
function client() {
createRoot(document.getElementById("root")).render(<App />);
client();
export default async function server(request: Request): Promise<Response> {
if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
const { messages, sassy } = await request.json();
const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
apiKey: Deno.env.get("CEREBRAS_API_KEY"),
} catch (error) {
console.error("Error calling Cerebras API:", error);
if (error.status === 429) {