Search

Results include substring matches and semantically similar vals.
gpt3Unsafe
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
Script
export const gpt3Unsafe = runVal("patrickjm.gpt3", {
  prompt: "Write a haiku about being cool:",
  openAiKey: process.env.openai,
});
getVectorStoreBuilder
@webup
An interactive, runnable TypeScript val by webup
Script
import process from "node:process";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export async function getVectorStoreBuilder(docs: object[], spec: {
  type: "memory" | "baas";
  provider?: "pinecone" | "milvus";
} = { type: "memory" }, embed: "openai" | "huggingface" = "openai") {
  const { cond, matches } = await import("npm:lodash-es");
  const builder = await getModelBuilder({
callPrivateAPI
@galligan
An interactive, runnable TypeScript val by galligan
Script
// Note: the surrounding if-wrapper is assumed; the snippet shows only the matched lines
if (!response.ok) {
  throw new Error(`Request failed with status code ${response.status}`);
} else if (endpoint === "@galligan.simpleTest") {
  return "Success calling @galligan.simpleTest. To test a specific endpoint, please set `handle` and `val` as arguments";
}
// If the request was successful and endpoint is not simpleTest, return the response
return response;
embeddingsSearchExample
@yawnxyz
This is an example of in-memory search, using a combination of lunr, OpenAI embeddings, and cosine similarity
Script
import { openai } from "npm:@ai-sdk/openai";
import { embed, embedMany } from "npm:ai";

// Function to get a single embedding
async function getEmbedding(text) {
  const { embedding } = await embed({
    model: openai.embedding("text-embedding-3-small"),
    value: text,
  });
  return embedding;
}

// Function to get embeddings for multiple texts
async function getEmbeddings(texts) {
  const { embeddings } = await embedMany({
    model: openai.embedding("text-embedding-3-small"),
    values: texts,
  });
  return embeddings;
}
async function prepareDocumentsWithEmbeddings() {
function cosineSimilarity(a, b) {
function findNearestNeighbors(embedding, k = 1) {
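The rest of the snippet is truncated to bare signatures; here is a minimal sketch of how the cosine-similarity search typically completes them (the documents store with precomputed embedding fields is an assumption, not the val's actual code):

// Hypothetical in-memory store: [{ text, embedding }, ...]
const documents = [];

function cosineSimilarity(a, b) {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

function findNearestNeighbors(embedding, k = 1) {
  return documents
    .map((doc) => ({ ...doc, score: cosineSimilarity(embedding, doc.embedding) }))
    .sort((a, b) => b.score - a.score)
    .slice(0, k);
}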
generateEmbeddings
@thomasatflexos
An interactive, runnable TypeScript val by thomasatflexos
Script
message: "The URL parameter is required for this endpoint",
const { OpenAIEmbeddings } = await import("npm:langchain/embeddings");
const { createClient } = await import(
splittedDocs,
new OpenAIEmbeddings({
openAIApiKey: process.env.OPEN_API_KEY,
client,
chat
@willthereader
OpenAI ChatGPT helper function. This val uses your OpenAI token if you have one, and the @std/openai if not, so it provides limited OpenAI usage for free.
Script
# OpenAI ChatGPT helper function

This val uses your OpenAI token if you have one, and the @std/openai if not, so it provides limited OpenAI usage for free.

import { chat } from "https://esm.town/v/stevekrouse/openai";
const { content } = await chat("Hello, GPT!");
console.log(content);

import { chat } from "https://esm.town/v/stevekrouse/openai";
const { content } = await chat(
  [
    { role: "system", content: "You are Alan Kay" },
    { role: "user", content: "What is the real computer revolution?" },
  ],
  { max_tokens: 50, model: "gpt-4o" },
);
console.log(content);

import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, Message } from "npm:@types/openai";

async function getOpenAI() {
  // Fall back to the free @std/openai client when no personal API key is set
  if (Deno.env.get("OPENAI_API_KEY") === undefined) {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    return new OpenAI();
  }
  const { OpenAI } = await import("npm:openai");
  return new OpenAI();
}

/**
 * Initiates a chat conversation with OpenAI's GPT model and retrieves the content of the first response.
 * This function can handle both single string inputs and arrays of message objects.
 */
export async function chat(
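  // A plausible completion of the truncated signature above, assumed from the
  // README rather than copied from the val (the default model is a guess):
  prompt: string | Message[],
  options?: Omit<ChatCompletionCreateParamsNonStreaming, "messages">,
): Promise<ChatCompletion & { content: string }> {
  const openai = await getOpenAI();
  const messages = typeof prompt === "string"
    ? [{ role: "user" as const, content: prompt }]
    : prompt;
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    ...options,
    messages,
  });
  return { ...completion, content: completion.choices[0]?.message?.content ?? "" };
}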
valleBlogV0
@janpaul123
Fork this val to your own profile. Create a Val Town API token, open the browser preview of this val, and use the API token as the password to log in.
HTTP
import { verifyToken } from "https://esm.town/v/pomdtr/verifyToken?v=1";
import { openai } from "npm:@ai-sdk/openai";
import ValTown from "npm:@valtown/sdk";
import _ from "npm:lodash@4";
async function main(req: Request): Promise<Response> {
const { readable, writable } = new TransformStream();
// Scroll to the bottom of the page when the page changes.
(new MutationObserver(function (mutationsList, observer) {
window.scrollTo({ left: 0, top: document.body.scrollHeight, behavior: "instant" });
const stream = await streamText({
model: openai("gpt-4o", {
baseURL: "https://std-openaiproxy.web.val.run/v1",
apiKey: Deno.env.get("valtown"),
There should be no comments like "more content here", it should be complete and directly runnable.
The val should have an "export default async function main". The val should return a valid HTML website.
Prefer using Tailwind. Put frontend functions in a <script> tag, using dangerouslySetInnerHTML. Don't use Hono.
`.replace("\n", " "),
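For reference, a minimal sketch (not part of the val) of an output that satisfies the prompt's constraints: a default-exported main, a complete HTML page, Tailwind via CDN, and no Hono:

export default async function main(req: Request): Promise<Response> {
  return new Response(
    `<!DOCTYPE html>
<html>
  <head><script src="https://cdn.tailwindcss.com"></script></head>
  <body class="p-8 font-sans">
    <h1 class="text-2xl font-bold">Hello from a generated val</h1>
    <script>
      // Frontend functions live in the page itself
      function greet() { console.log("hi"); }
      greet();
    </script>
  </body>
</html>`,
    { headers: { "content-type": "text/html" } },
  );
}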
val_PNwC0nefnJ
@dhvanil
An interactive, runnable TypeScript val by dhvanil
HTTP
export async function val_PNwC0nefnJ(req) {
try {
// Execute the code directly and capture its result
multipleKeysAndMemoryConversationChainExample
@stevekrouse
// Forked from @jacoblee93.multipleKeysAndMemoryConversationChainExample
Script
export const multipleKeysAndMemoryConversationChainExample = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { BufferMemory } = await import("https://esm.sh/langchain/memory");
  const { ConversationChain } = await import("https://esm.sh/langchain/chains");
  const llm = new ChatOpenAI({
    modelName: "gpt-3.5-turbo",
    openAIApiKey: process.env.openai,
    temperature: 0,
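  // A plausible continuation (assumed, not the val's actual tail): close the
  // model config and wire it into a ConversationChain with BufferMemory.
  });
  const memory = new BufferMemory();
  const chain = new ConversationChain({ llm, memory });
  const { response } = await chain.call({ input: "Hello! My name is Jim." });
  console.log(response);
})();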
claude
@yawnxyz
// Define a mapping for model shortcuts
Script
sonnet: "claude-3-sonnet-20240229",
haiku: "claude-3-haiku-20240307",
export async function prompt(
text,
{ mode = "text", model = "opus", max_tokens = 1024, messages = [] } = {},
books
@laidlaw
@jsxImportSource npm:hono@3/jsx
HTTP
import { Hono } from "npm:hono@3";
import { OpenAI } from "npm:openai";
const openai = new OpenAI();
function esmTown(url) {
return fetch(url, {
</div>,
export async function getBooks(file: File) {
const dataURL = await fileToDataURL(file);
try {
const response = await openai.chat.completions.create({
messages: [
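The helper fileToDataURL is called above but not shown in the snippet; a minimal sketch of such a helper (the name comes from the call site, the body is an assumption):

// Hypothetical implementation: encode an uploaded File as a base64 data URL,
// the format OpenAI's vision-capable chat models accept for image inputs.
async function fileToDataURL(file: File): Promise<string> {
  const bytes = new Uint8Array(await file.arrayBuffer());
  let binary = "";
  for (const b of bytes) binary += String.fromCharCode(b);
  return `data:${file.type};base64,${btoa(binary)}`;
}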
librarySecretEx
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
HTTP
export let librarySecretEx = gpt3({
  prompt: "what is the meaning of life?",
  openAiKey: process.env.openai,
});
browserlessPuppeteerExample
@vtdocs
An interactive, runnable TypeScript val by vtdocs
Script
// Connect to a remote browserless.io Chrome instance (the import and
// connect call are assumed; the snippet shows only the matched lines)
import puppeteer from "npm:puppeteer-core";
const browser = await puppeteer.connect({
  browserWSEndpoint: `wss://chrome.browserless.io?token=${process.env.browserlessKey}`,
});
const page = await browser.newPage();
await page.goto("https://en.wikipedia.org/wiki/OpenAI");
const intro = await page.evaluate(
  `document.querySelector('p:nth-of-type(2)').innerText`,
);
myApi
@feynman
An interactive, runnable TypeScript val by feynman
Script
export async function myApi(name) {
const got = await import("got");
const res = await got.post("http://34.87.31.113:3000/infer", {
CodeGeneratorApp
@mrshorts
@jsxImportSource https://esm.sh/react@18.2.0
HTTP
import { createRoot } from "https://esm.sh/react-dom@18.2.0/client";
function App() {
const [prompt, setPrompt] = useState("");
textDecoration: 'none',
function client() {
createRoot(document.getElementById("root")).render(<App />);
if (typeof document !== "undefined") { client(); }
export default async function server(request: Request): Promise<Response> {
if (request.method === "POST") {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const { prompt, language } = await request.json();
cpp: "Generate a clean, concise C++ code snippet for: "
const completion = await openai.chat.completions.create({
messages: [