gpt4o_images
@jdan
An interactive, runnable TypeScript val by jdan
Script
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
"messages": [
gpt4
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
Script
import { OpenAI } from "npm:openai";
const openai = new OpenAI();
export const gpt4 = async (content: string, max_tokens: number = 50) => {
  let chatCompletion = await openai.chat.completions.create({
    messages: [{ role: "user", content }],
    model: "gpt-4",
    max_tokens,
  });
  return chatCompletion.choices[0].message.content; // likely return value; elided in the preview
};
adeptSalmonOx
@Aizen
An interactive, runnable TypeScript val by Aizen
Script
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
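This val, gpt4o_images above, and several vals below share the same truncated std/openai boilerplate, cut off inside the create() call. A minimal complete version of the pattern looks roughly like this (the prompt and model are placeholders, not any particular val's actual values):

import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in five words." }, // placeholder prompt
  ],
  model: "gpt-4o-mini", // placeholder; each val may use a different model
  max_tokens: 50,
});
console.log(completion.choices[0].message.content);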
robustCopperCardinal
@sky_porie_fire443
An interactive, runnable TypeScript val by sky_porie_fire443
Script
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
gpt4vDemo
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
Script
import process from "node:process";
import OpenAI from "npm:openai";
const openai = new OpenAI({ apiKey: process.env.openai });
async function main() {
const response = await openai.chat.completions.create({
model: "gpt-4-vision-preview",
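The preview stops at the model line. A gpt-4-vision-preview request of this shape typically sends a content array mixing text and image_url parts; a hedged sketch (the prompt and image URL are placeholders, and the original val may build them differently):

import process from "node:process";
import OpenAI from "npm:openai";

const openai = new OpenAI({ apiKey: process.env.openai });

async function main() {
  const response = await openai.chat.completions.create({
    model: "gpt-4-vision-preview",
    messages: [{
      role: "user",
      content: [
        { type: "text", text: "What is in this image?" }, // placeholder prompt
        { type: "image_url", image_url: { url: "https://example.com/photo.jpg" } }, // placeholder URL
      ],
    }],
    max_tokens: 300,
  });
  console.log(response.choices[0].message.content);
}

await main();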
OpenAIStreaming
@mjweaver01
An interactive, runnable TypeScript val by mjweaver01
HTTP
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function(req: Request): Promise<Response> {
const openai = new OpenAI();
const stream = await openai.chat.completions.create({
stream: true,
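The preview shows the stream being created but not how it is returned. One way to finish an HTTP val like this is to forward each streamed delta through a ReadableStream; a sketch assuming the standard async-iterator streaming interface (model and prompt are placeholders):

import { OpenAI } from "https://esm.town/v/std/openai";

export default async function (req: Request): Promise<Response> {
  const openai = new OpenAI();
  const stream = await openai.chat.completions.create({
    model: "gpt-4o-mini", // placeholder model
    messages: [{ role: "user", content: "Write a haiku about rivers." }], // placeholder prompt
    stream: true,
  });

  // Forward each streamed token to the client as plain text.
  const encoder = new TextEncoder();
  const body = new ReadableStream({
    async start(controller) {
      for await (const chunk of stream) {
        controller.enqueue(encoder.encode(chunk.choices[0]?.delta?.content ?? ""));
      }
      controller.close();
    },
  });
  return new Response(body, { headers: { "Content-Type": "text/plain; charset=utf-8" } });
}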
ai
@kakiagp
An interactive, runnable TypeScript val by kakiagp
HTTP
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function(req: Request): Promise<Response> {
  // elided: certain requests (likely CORS preflight) return early with status: 204
  const openai = new OpenAI();
  try {
    // elided: the JSON request body is parsed; the preview also shows a default of model: "gpt-4-turbo"
    const stream = await openai.chat.completions.create(body);
    if (!body.stream) {
chat_openai
@cosmo
An interactive, runnable TypeScript val by cosmo
Script
const { default: OpenAI } = await import("npm:openai");
export async function chat(apiKey, messages) {
  const openai = new OpenAI({ apiKey });
  return openai.chat.completions.create({
    messages,
    model: "gpt-3.5-turbo", // the model is elided in the preview; placeholder
  });
}
whiteSnail
@ylno
An interactive, runnable TypeScript val by ylno
Script
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
gpt4_playground
@scio
An interactive, runnable TypeScript val by scio
Script
export const gpt4_playground = (async (query) => {
  const { OpenAI } = await import("https://deno.land/x/openai/mod.ts");
  const openAI = new OpenAI(process.env.OPENAI_KEY);
  const chatCompletion = openAI.createChatCompletion({
    model: "gpt-4",
    messages: [{ role: "user", content: query }], // elided in the preview; the query is presumably passed as the user message
  });
  return await chatCompletion;
})("Please explain how OpenAI GPT-4 is better than GPT-3");
openai_structured_output_demo
@stevekrouse
OpenAI Structured Output Demo: ensure responses follow a JSON Schema for structured outputs. The demo uses zod to describe and parse the response to JSON. Learn more in the OpenAI Structured Outputs docs: https://platform.openai.com/docs/guides/structured-outputs/introduction
Script
import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
import { z } from "https://esm.sh/zod";
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const CalendarEvent = z.object({
participants: z.array(z.string()),
const completion = await openai.beta.chat.completions.parse({
model: "gpt-4o-mini",
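The preview truncates the schema and the parse call. A complete version of this pattern, mirroring the OpenAI structured-outputs helper usage (the extra schema fields and messages are illustrative; only participants is visible in the preview):

import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
import { z } from "https://esm.sh/zod";
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

const CalendarEvent = z.object({
  name: z.string(), // illustrative field
  date: z.string(), // illustrative field
  participants: z.array(z.string()),
});

const completion = await openai.beta.chat.completions.parse({
  model: "gpt-4o-mini",
  messages: [
    { role: "system", content: "Extract the event information." },
    { role: "user", content: "Alice and Bob are going to a science fair on Friday." },
  ],
  response_format: zodResponseFormat(CalendarEvent, "event"),
});

console.log(completion.choices[0].message.parsed);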
gettingOpenAiStreamingtoWork
@yawnxyz
Blatantly copied code from thesephist's webgen: https://www.val.town/v/thesephist/webgen. Couldn't get streaming to work in Val Town myself!!
HTTP
import OpenAI from "npm:openai";
const openai = new OpenAI();
export default async (req) => {
// Generate the AI response
const stream = await openai.chat.completions.create({
model: "gpt-4o",
openaiFineTuneData
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
Script
import { delay } from "https://esm.town/v/stevekrouse/delay";
import { openaiFineTune } from "https://esm.town/v/stevekrouse/openaiFineTune";
import { openaiUploadFile } from "https://esm.town/v/stevekrouse/openaiUploadFile?v=15";
export async function openaiFineTuneData({ key, data, model }: {
  key: string;
  model?: string;
  // (the data field's type is elided in this preview)
}) {
  // upload the training data, then start the fine-tune and poll it with delay()
  let upload = await openaiUploadFile({
    key,
    data,
    // ...
  });
  let fineTune = await openaiFineTune({
    key,
    // ...
  });
  await delay(1000);
  fineTune = await openaiFineTune({
    key,
    // ...
  });
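openaiUploadFile and openaiFineTune are stevekrouse's own helper vals, so their exact signatures aren't visible in this preview. The same upload-then-poll flow written directly against the official npm:openai client would look roughly like this (the file name, base model, and polling interval are assumptions):

import OpenAI from "npm:openai";

const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

// Upload training data, start a fine-tune job, then poll until it finishes.
const file = await openai.files.create({
  file: new File([Deno.readFileSync("train.jsonl")], "train.jsonl"), // assumed local training file
  purpose: "fine-tune",
});
let job = await openai.fineTuning.jobs.create({
  training_file: file.id,
  model: "gpt-3.5-turbo", // assumed base model
});
while (!["succeeded", "failed", "cancelled"].includes(job.status)) {
  await new Promise((resolve) => setTimeout(resolve, 1000));
  job = await openai.fineTuning.jobs.retrieve(job.id);
}
console.log(job.status, job.fine_tuned_model);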
beigeEarthworm
@canglangdahai
An interactive, runnable TypeScript val by canglangdahai
HTTP
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function(req: Request): Promise<Response> {
  // elided: some requests return early with status: 204 (likely CORS preflight)
  const openai = new OpenAI();
  try {
    // elided: the parsed request body; the preview shows model: "gpt-4-turbo" as a default
    const stream = await openai.chat.completions.create(body);
    if (!body.stream) {
openaiStreamingDemo
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
HTTP
import OpenAI from "npm:openai";
const openai = new OpenAI();
export default async (req) => {
const stream = await openai.chat.completions.create({
model: "gpt-3.5-turbo",