Note: When changing a SQLite table's schema, change the table's name (e.g., add `_2` or `_3`) to create a fresh table.

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini", // any chat model available to the val works here
});

console.log(completion.choices[0].message.content);
```

The remaining code matches are truncated excerpts of larger vals. One is a multi-agent app (React front end, `std/openai`, `std/blob`, `nanoid`) whose `runAgent` helper dispatches per agent type ("NARRATOR", "AUDITOR", "FACT_CHECKER"), using `gpt-4o-mini` for high-volume chunking and `gpt-4o` for dense synthesis, truncating the shared ledger passed to individual agents to avoid context exhaustion, and aggregating the `AgentResult[]` output of the agent personas in a POST `/synthesize` handler. Another is an HTTP val that reads an `axis` field from the request body (defaulting to `"fun"`), returns a canned `demoResponse(axis)` when `OPENAI_API_KEY` is not set (a safe demo fallback so the UI can be tested without OpenAI connected), otherwise calls `https://api.openai.com/v1/chat/completions` directly, and falls back to the demo response again if the API call fails.
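That fallback pattern is worth keeping on hand, so here is a minimal end-to-end sketch. The `demoResponse` helper, the `axis` field, and the missing-key / failed-request fallbacks come from the excerpt; the `ApiResponse` shape, the prompt, and the model choice are illustrative assumptions.

```ts
// Sketch of the "demo fallback when OPENAI_API_KEY is unset" pattern (shape assumed).
interface ApiResponse {
  axis: string;
  text: string;
  demo: boolean;
}

function demoResponse(axis: string): ApiResponse {
  return { axis, text: `(demo) something ${axis} would go here`, demo: true };
}

export default async function (req: Request): Promise<Response> {
  const body = await req.json().catch(() => ({}));
  const axis = (body.axis || "fun").trim() || "fun";

  const apiKey = Deno.env.get("OPENAI_API_KEY");
  if (!apiKey) {
    // No key configured yet: return canned data so the UI can still be exercised.
    console.warn("OPENAI_API_KEY missing – returning demo response");
    return Response.json(demoResponse(axis));
  }

  const openaiRes = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      "Authorization": `Bearer ${apiKey}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      model: "gpt-4o-mini",
      messages: [{ role: "user", content: `Say something ${axis}.` }],
    }),
  });

  if (!openaiRes.ok) {
    // API failure: log and degrade to the demo response rather than erroring out.
    console.error("OpenAI error:", await openaiRes.text());
    return Response.json(demoResponse(axis));
  }

  const data = await openaiRes.json();
  const raw = data.choices?.[0]?.message?.content?.trim() ?? "";
  return Response.json({ axis, text: raw, demo: false } satisfies ApiResponse);
}
```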
""; "dependencies": { "dotenv": "^16.4.5", "openai": "^4.63.0", "zod": "^3.23.8" }, } }, "node_modules/openai": { "version": "4.104.0", "resolved": "https://registry.npmjs.org/openai/-/openai-4.104.0.tgz", "integrity": "sha512-p99EFNsA/yX6UhVO93f5kJsDRLAg+CTA2RBqdHK4RtK8u5IJw32Hyb2dTGKbnnFmnuoBv5r7Z2CURI9sGZpSuA==", "license": "Apache-2.0", }, "bin": { "openai": "bin/cli" }, "peerDependencies": { } }, "node_modules/openai/node_modules/@types/node": { "version": "18.19.130", "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz", } }, "node_modules/openai/node_modules/undici-types": { "version": "5.26.5", "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", .default("development"), // OpenAI OPENAI_API_KEY: z.string().min(1, "OPENAI_API_KEY is required"), OPENAI_ORG_ID: z .string() .optional() .transform((v) => (v ? v : undefined)), // A cost-effective model with websearch capability enabled via tool usage at runtime OPENAI_MODEL: z.string().default("gpt-5-nano"), // Feature flag to enable web search where supported OPENAI_ENABLE_WEB_SEARCH: z.coerce.boolean().default(true), // Steel.dev isTest: env.NODE_ENV === "test", // OpenAI openai: { apiKey: env.OPENAI_API_KEY, orgId: env.OPENAI_ORG_ID, model: env.OPENAI_MODEL, enableWebSearch: env.OPENAI_ENABLE_WEB_SEARCH, }, } // 3) Use OpenAI to synthesize an answer with inline citations const synthesis = await synthesizeWithCitations({ query, answer: synthesis.answer, citations: synthesis.sources, model: config.openai.model, meta: { tookMs }, };import OpenAI from "openai";import { config } from "./config";/** * Centralized OpenAI and Steel.dev clients and helpers. * * Responsibilities: * - searchTopRelevantUrls: Use an economical OpenAI model to produce top-K relevant URLs (JSON) * - scrapeUrlsToMarkdown: Use Steel.dev scrape API to get Markdown for each URL * - synthesizeWithCitations: Use OpenAI to synthesize an answer from scrapes with inline citations */// ---------- OpenAI Client ----------export const openai = new OpenAI({ apiKey: config.openai.apiKey, organization: config.openai.orgId,});}// ---------- Search (OpenAI) ----------export interface UrlSearchResult { queries: string[]; urls: string[]; // Raw OpenAI generation and Brave responses for debugging _raw?: unknown;}/** * Generate 3 specific, high-signal search queries with OpenAI, * run them against Brave Search (1s staggered), * and rank URLs by a combination of frequency and position across results. topKPerQuery = config.search.topK,): Promise<MultiQuerySearchResult> { // 1) Ask OpenAI to produce exactly 3 queries as strict JSON. const prompt = [ "You are a search strategist.", ].join("\n"); const completion = await openai.chat.completions.create({ model: config.openai.model, messages: [ { role: "system", content: "You produce JSON only. No prose." }, queries, urls: ranked.map((url) => url.url), _raw: { openai: completion, perQueryUrls }, };}}// ---------- Synthesis (OpenAI) ----------export interface SynthesisInput { ); const completion = await openai.chat.completions.create({ model: config.openai.model, messages: [ { role: "system", content: system },reconsumeralization
Another excerpt pairs the std OpenAI client with the hosted SQLite client:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";

/**
 * Practical Implementation of Collective Content Intelligence
 * Bridging advanced AI with collaborative content creation
 */
// ... (the export that follows is cut off in the excerpt)
```
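Since this excerpt imports the sqlite client, the schema note at the top is easy to show concretely. A minimal sketch, assuming a hypothetical `content_items` table whose columns changed and that the client accepts libsql-style `{ sql, args }` statements:

```ts
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";

// Hypothetical table: the original `content_items` schema changed, so per the
// note above we create a fresh `content_items_2` table instead of altering it.
await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS content_items_2 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    title TEXT NOT NULL,
    body TEXT NOT NULL,
    created_at TEXT DEFAULT CURRENT_TIMESTAMP
  )
`);

// All reads and writes now reference the new table name.
await sqlite.execute({
  sql: "INSERT INTO content_items_2 (title, body) VALUES (?, ?)",
  args: ["hello", "first row in the fresh table"],
});
```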
A final excerpt shows the usual CORS preflight handling at the top of an HTTP val:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function (req: Request): Promise<Response> {
  // Answer CORS preflight requests before doing any real work.
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: { "Access-Control-Allow-Origin": "*" },
    });
  }
  // ... the rest of the handler is cut off in the excerpt
}
```