Search
openaiDefiner
@willthereader
An interactive, runnable TypeScript val by willthereader
HTTP
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
export default async function(req: Request): Promise<Response> {
log.info("Prepared messages for OpenAI:", JSON.stringify(messages));
const openai = new OpenAI();
const stream = await openai.chat.completions.create({
log.info("OpenAI stream created successfully");
log.error("Error in OpenAI request:", error);
export async function parseRequest(req: Request): Promise<{ selection: string; followUp: string; context: string[] }> {
export function prepareMessages(selection: string, followUp: string, context: string[]): Array<Object> {
val_f4POmlAgdz
@dhvanil
An interactive, runnable TypeScript val by dhvanil
HTTP
export async function val_f4POmlAgdz(req) {
try {
// Execute the code directly and capture its result
getShowcaseVals
@stevekrouse
Get vals that were created in the last month, augmented with all sorts of AI and screenshots, and shoved in a sqlite database for @stevekrouse/showcase.
Script
import { OpenAI } from "https://esm.sh/openai";
import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
import { z } from "https://esm.sh/zod";
}, { concurrency: 3 });
const openai = new OpenAI();
const ValDescriptions = z.object({
description: z.string(),
async function getDescriptions(val) {
const completion = await openai.beta.chat.completions.parse({
model: "gpt-4o-mini",
}, { concurrency: 3 });
async function getScreenshot(url: string) {
const resp = await fetch(
browserlessPuppeteerExample
@nbbaier
An interactive, runnable TypeScript val by nbbaier
Script
browserWSEndpoint: `wss://chrome.browserless.io?token=${process.env.browserlessKey}`,
const page = await browser.newPage();
await page.goto("https://en.wikipedia.org/wiki/OpenAI");
const intro = await page.evaluate(
`document.querySelector('p:nth-of-type(2)').innerText`,
deno_server
@pomdtr
An interactive, runnable TypeScript val by pomdtr
Script
export function denoServer(handler: (req: Request) => Response | Promise<Response>, thisArg?: any) {
if (thisArg) {
handler = handler.bind(thisArg);
myApi
@oevl
An interactive, runnable TypeScript val by oevl
Script
export function myApi(name) {
return "hi " + name;
valTownChatGPT2
@janpaul123
https://x.com/JanPaul123/status/1811801305066651997 Fork it and authenticate with your Val Town API token as the password.
HTTP
import { Hono } from "npm:hono@3";
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
return new Response(renderToString(jsx), { headers: { "Content-Type": "text/html" } });
function renderForm(
valName: string,
const textEncoder = new TextEncoder();
function write(text) {
writer.write(textEncoder.encode(text));
const scrollingElement = document.body;
const callback = function (mutationsList, observer) {
window.scrollTo({ left: 0, top: document.body.scrollHeight, behavior: "instant" });
content:
`Act as an expert software engineer. Always use best practices. Your response should start with \`\`\`ts and end with \`\`\`. Start your response with a comment explaining how your approach will work, what libraries or API calls you will use, and any tradeoffs you're making. Then write the code in a consise way, the simplest way to achieve the goal, though you can add some inline comments to explain your reasoning (not for every line, but for major groups of lines). Don't use any environment variables unless strictly necessary, for example use APIs that don't require a key, prefer internal function imports (using esm.town), and prefer putting API keys as inline variables. Use built-in Deno functions where possible. The val should create a "export default async function main() {" which is the main function that gets executed, without any arguments. Don't return a Response object, just return a plain Javascript object, array, or string. Unless specified, don't add error handling, make sure that errors bubble up to the caller.`,
role: "user",
</div>,
const openai = new OpenAI();
const stream = await openai.chat.completions.create({
model,
OpenRouterChatCompletionLead
@rozek
how long until "OpenAIChatCompletion" becomes available again?
HTTP
import { InvocationTracker } from 'https://esm.town/v/rozek/InvocationTracker'
import { floatingQuotaTracker } from 'https://esm.town/v/rozek/floatingQuotaTracker'
/**** how long until "OpenAIChatCompletion" becomes available again? ****/
export default async function (Request:Request):Promise<Response> {
if (Request.method === 'OPTIONS') {
return new Response(null, {
entireGoldLark
@nlnhsr
An interactive, runnable TypeScript val by nlnhsr
HTTP
$error: "Run completed without signaling end or error.",
export const service = (boardUrl: string, options?: ServiceOptions) => {
return async function(req: Request): Promise<Response> {
const url = new URL(req.url);
const path = url.pathname;
smallweb_openapi_guide
@all
An interactive, runnable TypeScript val by all
HTTP
export default async function (req: Request): Promise<Response> {
<p>This schema defines several components that can be used to integrate OpenAI's services into a logging and configuration system. Here's how each component relates to potential OpenAI use cases:</p>
<li><strong>App</strong>: Represents an OpenAI-powered application with a name and URL.</li>
<li><strong>Config</strong>: Defines configuration options for OpenAI API integration and application settings.</li>
<li><strong>ConsoleLog</strong>: Captures console output from OpenAI model interactions and application processes.</li>
<li><strong>CronLog</strong>: Logs scheduled tasks related to OpenAI operations, such as model fine-tuning or dataset updates.</li>
<li><strong>HttpLog</strong>: Records HTTP requests made to and from the OpenAI API.</li>
<button class="collapsible-button">Key Components and OpenAI Use Cases</button>
Use Case: Store OpenAI API keys, model preferences, and application settings.
Example: Track rate limits, response times, and payload sizes for OpenAI API calls.
val_BfYl3Zckp6
@dhvanil
An interactive, runnable TypeScript val by dhvanil
HTTP
export async function val_BfYl3Zckp6(req) {
try {
// Execute the code directly and capture its result
const result = await (async () => {
function probeComputationalLimits(depth = 0, maxDepth = 1000) {
if (depth >= maxDepth) {
return {
OpenRouter_Test
@rozek
This is a simple test of OpenRouter and their new offering of DeepSeek-R1 671B for free. The actual wrapper for the request is found in val OpenRouterChatCompletion, with a simple smoke test in val OpenRouterChatCompletion_Test. Please mind that the OpenRouter server often takes a long time to respond - and even then, it often does not respond with an actual chat completion but with an error message. Current overall usage limits for this instance of the app are: up to 20 requests/minute and up to 200 requests/day.
HTTP
const TrackingTable = 'OpenRouter_Test'
const Granularity = 15*60*1000
export default async function (Request:Request):Promise<Response> {
if (Request.method !== 'GET') {
return new Response('Method Not Allowed', { status:405 })
loop
@cyrilos
An interactive, runnable TypeScript val by cyrilos
HTTP
import axios from "npm:axios";
const TARGET = 1000 * 10;
async function loop() {
var url = "https://cyrilos-loop.web.val.run";
await axios({
url: url,
/* bet session */
async function playDice() {
const PLATFORM = "polpick.io";
const cookies = await blob.getJSON("platforms");
Chatprincipal
@arthrod
@jsxImportSource https://esm.sh/react
HTTP
transition: background-color 0.5s ease;
function AnimatedHeadline() {
const [animationStarted, setAnimationStarted] = useState(false);
transition: width 2.5s linear;
function SubheadlineAnimation() {
const [animationStarted, setAnimationStarted] = useState(false);
opacity: 0.8;
function App() {
const [isNavOpen, setIsNavOpen] = React.useState(false);
</Footer>
function client() {
createRoot(document.getElementById("root")).render(<App />);
if (typeof document !== "undefined") { client(); }
export default async function server(request: Request): Promise<Response> {
return new Response(`
telegramWebhookEchoMessage
@dcm31
// v15 was the last stable version
HTTP
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
async function handleMessage(message) {
async function processUserInputWithAI(userMessage: string, tasks: Task[], state: UserState): Promise<{ response: string, updatedTasks: Task[], updatedState: UserState, suggestedActions: string[] }> {
You can use the following functions:
const completion = await openai.chat.completions.create({
function addTask(tasks: Task[], title: string) {
function markTaskAsDone(tasks: Task[], taskId: string) {
function breakDownTask(tasks: Task[], taskId: string, subtasks: string[]) {
function findTask(tasks: Task[], taskId: string): Task | null {