compareEmbeddings
@janpaul123
An interactive, runnable TypeScript val by janpaul123
Script
import _ from "npm:lodash";
import OpenAI from "npm:openai";

const comparisons = [
  ["chat server integration", "discord bot"],
];

const openai = new OpenAI();
const cache = {};

// Embed a string, caching results so repeated strings only hit the API once.
async function getEmbedding(str) {
  cache[str] = cache[str] || (await openai.embeddings.create({
    model: "text-embedding-3-large",
    input: str,
  })).data[0].embedding;
  return cache[str];
}
getVectorStoreBuilder
@webup
An interactive, runnable TypeScript val by webup
Script
type: "memory" | "baas";
provider?: "pinecone" | "milvus";
} = { type: "memory" }, embed: "openai" | "huggingface" = "openai") {
const { cond, matches } = await import("npm:lodash-es");
const builder = await getModelBuilder({
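A hedged usage sketch, assuming the helper is exported from the author's val and accepts LangChain documents:

import { getVectorStoreBuilder } from "https://esm.town/v/webup/getVectorStoreBuilder";
import { Document } from "npm:langchain/document";

const docs = [new Document({ pageContent: "Val Town is a social site for writing and deploying TypeScript." })];
// In-memory store with OpenAI embeddings; "baas" with a provider would target Pinecone or Milvus.
const store = await getVectorStoreBuilder(docs, { type: "memory" }, "openai");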
audioManager
@yawnxyz
Usage:
import { ai } from "https://esm.town/v/yawnxyz/ai";
import { AudioManager } from "https://esm.town/v/yawnxyz/audioManager";

let audio = new AudioManager();
let joke = await ai("tell me a joke in Chinese!");
console.log("text", joke);
let result = await audio.textToSpeechUpload(joke, { key: "random-joke.mp3" });
console.log("result:", result);
Script
import { OpenAI } from "https://esm.town/v/yawnxyz/OpenAI";
import { fetch } from "https://esm.town/v/std/fetch";

// Skeleton reconstructed from the truncated excerpt; method names are assumed,
// and defaultOptions is defined elsewhere in the val.
export class AudioManager {
  constructor(apiKey = null, uploadFunction = null, downloadFunction = null) {
    this.openai = new OpenAI(apiKey);
    this.uploadFunction = uploadFunction || this.blobUpload;
  }
  async transcribe(options = {}) {
    const mergedOptions = { ...defaultOptions, ...options };
    const transcription = await this.openai.audio.transcriptions.create(mergedOptions);
    return transcription;
  }
  async translate(options = {}) {
    const mergedOptions = { ...defaultOptions, ...options };
    const translation = await this.openai.audio.translations.create(mergedOptions);
    return translation;
  }
  // returns an openai speech object
  async textToSpeech(text, options = {}) {
    const mergedOptions = { ...defaultOptions, ...options };
    const speech = await this.openai.audio.speech.create(mergedOptions);
    const arrayBuffer = await speech.arrayBuffer();
    return arrayBuffer;
  }
  // The excerpt repeats the speech lines in a second method; per the usage
  // example above this is textToSpeechUpload, which uploads the buffer.
  async textToSpeechUpload(text, options = {}) {
    const mergedOptions = { ...defaultOptions, ...options };
    const speech = await this.openai.audio.speech.create(mergedOptions);
    const arrayBuffer = await speech.arrayBuffer();
    return this.uploadFunction(arrayBuffer, options);
  }
}
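A short sketch of the non-uploading path, assuming a Deno runtime (Val Town runs Deno) and that speech options pass through to OpenAI:

const audio = new AudioManager();
const buf = await audio.textToSpeech("Hello from Val Town!", { voice: "alloy" }); // voice option assumed
await Deno.writeFile("hello.mp3", new Uint8Array(buf)); // or hand the buffer to blob storage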
fineTuningJob1
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
Script
import process from "node:process";
import { openaiFineTuneData } from "https://esm.town/v/stevekrouse/openaiFineTuneData";
// This import is an assumption; the excerpt uses fineTuneExMispellings without showing its source.
import { fineTuneExMispellings } from "https://esm.town/v/stevekrouse/fineTuneExMispellings";

export let fineTuningJob1 = openaiFineTuneData({
  key: process.env.openai,
  data: fineTuneExMispellings,
});
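The contents of fineTuneExMispellings are not shown; as a rough, illustrative sketch, fine-tune datasets of this era were lists of prompt/completion pairs (newer OpenAI fine-tuning uses chat-message transcripts instead):

// Illustrative records only; the real dataset is not in the excerpt.
const exampleData = [
  { prompt: "teh quick brown fox", completion: "the quick brown fox" },
  { prompt: "recieve the package", completion: "receive the package" },
];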
librarySecretEx
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
HTTP
import process from "node:process";
import { gpt3 } from "https://esm.town/v/patrickjm/gpt3"; // assumed source for the gpt3 helper

export let librarySecretEx = gpt3({
  prompt: "what is the meaning of life?",
  openAiKey: process.env.openai,
});
weatherGPT
@liaolile
If you fork this, you'll need to set `OPENAI_API_KEY` in your [Val Town Secrets](https://www.val.town/settings/secrets).
Cron
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "npm:openai";

let location = "shenzhen";
// The fetch target is elided in this excerpt; wttr.in's JSON endpoint is what
// the upstream weatherGPT val queries.
const weather = await fetch(
  `https://wttr.in/${location}?format=j1`,
).then(r => r.json());

const openai = new OpenAI();
let chatCompletion = await openai.chat.completions.create({
  messages: [{
langchainEx
@jacoblee93
// Forked from @stevekrouse.langchainEx
Script
export const langchainEx = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { PromptTemplate } = await import("https://esm.sh/langchain/prompts");
  const { LLMChain } = await import("https://esm.sh/langchain/chains");
  const model = new ChatOpenAI({
    temperature: 0.9,
    openAIApiKey: process.env.OPENAI_API_KEY,
    verbose: true,
  });
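The excerpt stops before the imported PromptTemplate and LLMChain are used; the classic LangChain continuation would look roughly like this before the IIFE closes (prompt wording is illustrative):

  const template = "What is a good name for a company that makes {product}?";
  const prompt = new PromptTemplate({ template, inputVariables: ["product"] });
  const chain = new LLMChain({ llm: model, prompt });
  const res = await chain.call({ product: "colorful socks" });
  console.log(res);
  return res;
})();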
conversationalRetrievalQAChainStreamingExample
@jacoblee93
An interactive, runnable TypeScript val by jacoblee93
Script
const { ChatOpenAI } = await import(
  "https://esm.sh/langchain/chat_models/openai"
);
const { OpenAIEmbeddings } = await import(
  "https://esm.sh/langchain/embeddings/openai"
);
const streamingModel = new ChatOpenAI({
  openAIApiKey: process.env.OPENAI_API_KEY,
  streaming: true, // assumed: the val's name implies this model streams
});
const nonStreamingModel = new ChatOpenAI({
  openAIApiKey: process.env.OPENAI_API_KEY,
});
const embeddings = new OpenAIEmbeddings({
  openAIApiKey: process.env.OPENAI_API_KEY,
});
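How these pieces typically fit together in LangChain's conversational retrieval pattern, assuming an in-memory vector store (the original's documents and streaming callbacks are elided):

const { MemoryVectorStore } = await import("https://esm.sh/langchain/vectorstores/memory");
const { ConversationalRetrievalQAChain } = await import("https://esm.sh/langchain/chains");

const vectorStore = await MemoryVectorStore.fromTexts(
  ["Val Town lets you run TypeScript in the cloud."], // sample text, not from the original
  [{ id: 1 }],
  embeddings,
);
// The streaming model writes the final answer; the non-streaming model condenses
// chat history into a standalone question.
const chain = ConversationalRetrievalQAChain.fromLLM(
  streamingModel,
  vectorStore.asRetriever(),
  { questionGeneratorChainOptions: { llm: nonStreamingModel } },
);
const res = await chain.call({ question: "What does Val Town do?", chat_history: "" });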
TokenizerDemo
@zzz
// Demo of tokenizer to mimic behavior of https://platform.openai.com/tokenizer
Script
import { Tokenizer } from "https://esm.town/v/zzz/Tokenizer";

// Demo of tokenizer to mimic behavior of https://platform.openai.com/tokenizer
// Tokenizer uses "gpt-3.5-turbo" model by default, but this demo uses davinci to match the playground
export const TokenizerDemo = (async () => {
  // Body elided in the excerpt; presumably it instantiates Tokenizer with the
  // "davinci" model and encodes sample text to show token counts.
})();
googlesearch
@lolocoo
An interactive, runnable TypeScript val by lolocoo
Express (deprecated)
// Fragment from the search parameters elsewhere in the val:
// dateRestrict: "m1",  (past month; the original "m[1]" mistyped the docs' placeholder syntax)
const getSearch = async (data) => {
  const response = await fetch("https://api.openai.com/v1/completions", {
    method: "POST", // the completions endpoint rejects GET, and GET cannot carry a body
    headers: { "Content-Type": "application/json", Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }, // auth header assumed
    body: JSON.stringify(data),
  });
  return response.json();
};
relationshipgenerator
@ejfox
// This val receives text input, sends it to OpenAI to generate relationships,
HTTP
// This val receives text input, sends it to OpenAI to generate relationships,
// and returns a newline-delimited list of relationships.
// It uses the OpenAI API to generate the relationships.
// Tradeoff: This approach relies on an external API, which may have rate limits or costs.
// curl -X POST -H "Content-Type: text/plain" -d "Your text here" https://your-val-url.web.val.run
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();

export default async function main(req: Request): Promise<Response> {
  const text = await req.text();
  const completion = await openai.chat.completions.create({
    model: "gpt-3.5-turbo",
    // Prompt wording is an assumption; the original is truncated here.
    messages: [{ role: "user", content: `List the relationships in this text, one per line:\n\n${text}` }],
  });
  return new Response(completion.choices[0].message.content ?? "");
}
main
@henrique
An interactive, runnable TypeScript val by henrique
HTTP
import process from "node:process";

export async function main(prompt: string) {
  const { useCursive } = await import("npm:cursive");
  const cursive = useCursive({ openAI: { apiKey: process.env.OPENAI_API_KEY } });
  const res = await cursive.ask({
    prompt,
  });
  return res.answer; // cursive's ask resolves to an object carrying the answer text
}
openAiFreeQuotaExceeded
@patrickjm
An interactive, runnable TypeScript val by patrickjm
HTTP
import { openAiFreeUsage } from "https://esm.town/v/patrickjm/openAiFreeUsage";

export let openAiFreeQuotaExceeded = () => openAiFreeUsage.exceeded;
modelSampleChatGenerate
@webup
An interactive, runnable TypeScript val by webup
Script
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder"; // assumed source of the helper

const builder = await getModelBuilder({
  type: "chat",
  provider: "openai",
});
const model = await builder();
const { SystemMessage, HumanMessage } = await import("npm:langchain/schema");
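The imported message classes are presumably used to invoke the chat model; with that generation of LangChain the call looks roughly like:

// Hypothetical invocation; the actual messages are elided in the excerpt.
const response = await model.call([
  new SystemMessage("You are a helpful assistant."),
  new HumanMessage("Say hello in French."),
]);
console.log(response.content);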
VALLE
@oijoijcoiejoijce
VALL-E LLM code generation for vals! Make apps with a frontend, backend, and database. It's a bit of work to get this running, but it's worth it. Fork this val to your own profile. Make a folder for the temporary vals that get generated, take the ID from the URL, and put it in tempValsParentFolderId. If you want to use OpenAI models, you need to set the OPENAI_API_KEY env var. If you want to use Anthropic models, you need to set the ANTHROPIC_API_KEY env var. Create a Val Town API token, open the browser preview of this val, and use the API token as the password to log in.
HTTP
* Fork this val to your own profile.
* Make a folder for the temporary vals that get generated, take the ID from the URL, and put it in `tempValsParentFolderId`.
* If you want to use OpenAI models you need to set the `OPENAI_API_KEY` [env var](https://www.val.town/settings/environment-variables).
* If you want to use Anthropic models you need to set the `ANTHROPIC_API_KEY` [env var](https://www.val.town/settings/environment-variables).
* Create a Val Town API token, open the browser preview of this val, and use the API token as the password to log in.