openaiFineTuneData
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
Script
import { delay } from "https://esm.town/v/stevekrouse/delay";
import { openaiFineTune } from "https://esm.town/v/stevekrouse/openaiFineTune";
import { openaiUploadFile } from "https://esm.town/v/stevekrouse/openaiUploadFile?v=15";

export async function openaiFineTuneData({ key, data, model }: {
  key: string;
  data: any; // exact type elided in this preview
  model?: string;
}) {
  // Upload the training data to OpenAI
  let upload = await openaiUploadFile({
    key,
    data,
  });
  // Start a fine-tune job (remaining arguments elided in this preview)
  let fineTune = await openaiFineTune({
    key,
  });
  // Check on the job again after a short delay
  await delay(1000);
  fineTune = await openaiFineTune({
    key,
  });
  return fineTune;
}
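A minimal usage sketch; the chat-format shape of data and the returned job object are assumptions for illustration, not taken from the val:

// Hypothetical usage of openaiFineTuneData (data shape is an assumption)
import { openaiFineTuneData } from "https://esm.town/v/stevekrouse/openaiFineTuneData";

const job = await openaiFineTuneData({
  key: Deno.env.get("OPENAI_API_KEY")!,
  data: [
    { messages: [{ role: "user", content: "hi" }, { role: "assistant", content: "hello!" }] },
  ],
  model: "gpt-3.5-turbo",
});
console.log(job);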
semanticSearch
@yawnxyz
In-memory semantic search; load it up with Val Town KV. This is a "dumb" version of vector search, for prototyping RAG responses and UIs, with both regular search (via Lunr) and vector search (via OpenAI embeddings plus cosine similarity).
Usage:
import { semanticSearch } from "https://esm.town/v/yawnxyz/semanticSearch";
const documents = [
  { id: 1, content: 'cats dogs' },
  { id: 2, content: 'elephants giraffes lions tigers' },
  { id: 3, content: 'edam camembert cheddar' }
];

async function runExample() {
  // Add documents to the semantic search instance
  await semanticSearch.addDocuments(documents);
  const results = await semanticSearch.search('animals', 0, 3);
  console.log('Top 3 search results for "animals":');
  console.log(results);
}

runExample();
Script
import { embed, embedMany } from "npm:ai";
import { openai } from "npm:@ai-sdk/openai";
import lunr from "https://cdn.skypack.dev/lunr";

// (CORS setup with allowHeaders: ['Content-Type'] elided in this preview)
openai.apiKey = Deno.env.get("OPENAI_API_KEY");

// Method and parameter names below are reconstructed from a truncated preview
class SemanticSearch {
  buildIndex(documents, fields = 'content') {
    const fieldList = fields.split(',').map(field => field.trim());
    this.idx = lunr(function () {
      this.ref('id');
      fieldList.forEach(field => this.field(field));
      documents.forEach(doc => this.add(doc));
    });
  }
  async embedText(text, modelName) {
    const { embedding } = await embed({
      model: openai.embedding(modelName),
      value: text,
    });
    return embedding;
  }
  async embedTexts(texts, modelName) {
    const { embeddings } = await embedMany({
      model: openai.embedding(modelName),
      values: texts,
    });
    return embeddings;
  }
}
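The vector-search half ranks documents by cosine similarity between embeddings. A minimal sketch of that scoring step (the function and the toy vectors are illustrative, not the val's own code):

// Cosine similarity: dot(a, b) / (|a| * |b|); higher means more similar
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

// Toy vectors stand in for OpenAI embeddings here
const queryEmbedding = [0.1, 0.9, 0.2];
const docEmbeddings = [[0.1, 0.8, 0.3], [0.9, 0.1, 0.0]];
const ranked = docEmbeddings
  .map((emb, i) => ({ doc: i, score: cosineSimilarity(queryEmbedding, emb) }))
  .sort((a, b) => b.score - a.score);
console.log(ranked);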
apricotTurkey
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
Script
import { OpenAI } from "https://esm.town/v/std/openai?v=2";

const openai = new OpenAI();

// Tool-calling example; the user message and schema details are assumptions for a runnable preview
const functionExpression = await openai.chat.completions.create({
  messages: [{ role: "user", content: "What's the weather in Brooklyn?" }],
  tools: [{
    type: "function",
    function: {
      name: "weather",
      parameters: {
        type: "object",
        properties: { location: { type: "string" } },
        required: ["location"],
      },
    },
  }],
  model: "gpt-4",
});

console.log(functionExpression.choices[0].message.tool_calls);
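Each returned tool call carries its arguments as a JSON string, so a follow-up step typically parses them; a short sketch continuing from the weather example above:

const call = functionExpression.choices[0].message.tool_calls?.[0];
if (call) {
  // arguments arrive as a JSON string, e.g. '{"location":"Brooklyn"}'
  const args = JSON.parse(call.function.arguments);
  console.log(call.function.name, args.location);
}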
openaistreaminghtml
@stevekrouse
An interactive, runnable TypeScript val by stevekrouse
HTTP
import OpenAI from "npm:openai";
const openai = new OpenAI();

export default async (req) => {
  // prompt is an assumed example; the preview truncates here
  const stream = await openai.chat.completions.create({
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: "Tell me a story" }],
    stream: true,
  });
  // ... stream the chunks back as HTML
};
whiteSnail
@ylno
An interactive, runnable TypeScript val by ylno
Script
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  // prompt and model are assumed; the preview truncates here
  messages: [{ role: "user", content: "Hello!" }],
  model: "gpt-4o-mini",
});
console.log(completion.choices[0].message.content);
conversationalQAChainEx
@jacoblee93
An interactive, runnable TypeScript val by jacoblee93
Script
const { ChatOpenAI } = await import(
  "https://esm.sh/langchain/chat_models/openai"
);
const { OpenAIEmbeddings } = await import(
  "https://esm.sh/langchain/embeddings/openai"
);
// modelName values are assumptions; the preview truncates each constructor
const gpt35 = new ChatOpenAI({
  openAIApiKey: process.env.OPENAI_API_KEY,
  modelName: "gpt-3.5-turbo",
});
const gpt4 = new ChatOpenAI({
  openAIApiKey: process.env.OPENAI_API_KEY,
  modelName: "gpt-4",
});
const embeddings = new OpenAIEmbeddings({
  openAIApiKey: process.env.OPENAI_API_KEY,
});
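Given the val's name, these pieces are presumably wired into LangChain's conversational retrieval chain. A hedged sketch using the legacy LangChain JS API (the indexed text and question are made up):

const { ConversationalRetrievalQAChain } = await import("https://esm.sh/langchain/chains");
const { MemoryVectorStore } = await import("https://esm.sh/langchain/vectorstores/memory");

// Index a text in memory, then answer questions over it
const store = await MemoryVectorStore.fromTexts(
  ["Val Town lets you run TypeScript in the cloud."],
  [{ id: 1 }],
  embeddings,
);
const chain = ConversationalRetrievalQAChain.fromLLM(gpt35, store.asRetriever());
const res = await chain.call({ question: "What is Val Town?", chat_history: "" });
console.log(res.text);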
translateToEnglishWithOpenAI
@ynonp
An interactive, runnable TypeScript val by ynonp
Script
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function translateToEnglishWithOpenAI(text: string) {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    // prompt wording and model are assumed; the preview truncates here
    messages: [{ role: "user", content: `Translate to English: ${text}` }],
    model: "gpt-4o-mini",
  });
  return completion.choices[0].message.content;
}
main
@henrique
An interactive, runnable TypeScript val by henrique
HTTP
import process from "node:process";

export async function main(prompt: string) {
  const { useCursive } = await import("npm:cursive");
  const cursive = useCursive({ openAI: { apiKey: process.env.OPENAI_API_KEY } });
  const res = await cursive.ask({
    prompt,
  });
  return res.answer; // assumed result field; the preview truncates here
}
spotlessMagentaSilverfish
@Pushpam
An interactive, runnable TypeScript val by Pushpam
Script
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  // prompt and model are assumed; the preview truncates here
  messages: [{ role: "user", content: "Hello!" }],
  model: "gpt-4o-mini",
});
console.log(completion.choices[0].message.content);
githubcollabgen
@ejfox
GitHub Collaboration Suggester

This tool analyzes the recent GitHub activity of two users and suggests potential collaboration opportunities.

Features
- Fetches the last 3 months of GitHub activity for two users
- Summarizes activity including event counts, repositories, commits, issues, and pull requests
- Uses AI to generate collaboration suggestions based on the activity summaries

Usage
To use it, make a GET request with two GitHub usernames as query parameters:
https://ejfox-githubcollabgen.web.val.run?user1=<username1>&user2=<username2>

Curl
Compare two specific users:
curl "https://ejfox-githubcollabgen.web.val.run?user1=ejfox&user2=stevekrouse"

Response
The API returns a plain text response with AI-generated collaboration suggestions, including:
- Potential collaborative projects
- Technologies to explore or learn
- Ways to complement each other's skills
- Opportunities for knowledge sharing or mentoring
- Possible open-source contributions
HTTP
import { OpenAI } from "https://esm.town/v/std/openai";
const OPENAI_API_KEY = "your_openai_api_key"; // Replace with your actual OpenAI API key

export default async function main(req: Request): Promise<Response> {
  try {
    const url = new URL(req.url);
    const user1 = url.searchParams.get("user1") || "ejfox";
    const user2 = url.searchParams.get("user2") || "stevekrouse";
    // Fetch a user's public GitHub events from the last ~3 months
    async function fetchUserActivity(username: string) {
      const threeMonthsAgo = new Date(Date.now() - 90 * 24 * 60 * 60 * 1000).toISOString();
      const res = await fetch(`https://api.github.com/users/${username}/events?per_page=100`);
      const data = (await res.json()).filter((e: any) => e.created_at >= threeMonthsAgo);
      return data;
    }
    // Condense raw events into a text summary for the prompt
    function summarizeActivity(data: any[]): string {
      const summary = [];
      // ... tally event counts, repositories, commits, issues, and pull requests
      return summary.join("\n");
    }
    const user1Summary = summarizeActivity(await fetchUserActivity(user1));
    const user2Summary = summarizeActivity(await fetchUserActivity(user2));
    const openai = new OpenAI(OPENAI_API_KEY);
    const completion = await openai.chat.completions.create({
      model: "gpt-3.5-turbo",
      messages: [/* prompt built from user1Summary and user2Summary */],
    });
    return new Response(completion.choices[0].message.content);
  } catch (error) {
    console.error("Error in main function:", error);
    return new Response(`Error: ${error.message}. Please check the logs for more details.`, { status: 500 });
  }
}
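Calling the endpoint from code mirrors the curl example above:

// Fetch collaboration suggestions for two GitHub users
const res = await fetch(
  "https://ejfox-githubcollabgen.web.val.run?user1=ejfox&user2=stevekrouse",
);
console.log(await res.text()); // plain-text AI suggestions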
complete
@webup
An interactive, runnable TypeScript val by webup
Script
export const complete = async (prompt: string | object, options = {}) => {
  // Initialize OpenAI API stub (legacy v3 SDK with Configuration/OpenAIApi)
  const { Configuration, OpenAIApi } = await import("https://esm.sh/openai");
  const configuration = new Configuration({
    apiKey: process.env.OPENAI,
  });
  const openai = new OpenAIApi(configuration);
  // Request text completion
  const completion = await openai.createCompletion({
    model: "text-davinci-003",
    prompt,
    ...options,
  });
  return completion.data.choices[0].text;
};
MILLENCHAT
@LucasMillen
// "name": "AI Chat Assistant",
HTTP
// "description": "A chat assistant using OpenAI's API",
// Fallback reply used when the OpenAI call fails (body elided in this preview)
function generateFallbackResponse(userMessage: string): string { /* ... */ }

async function callOpenAI(userMessage: string): Promise<string> {
  const apiKey = Deno.env.get("OPENAI_API_KEY");
  if (!apiKey) {
    throw new Error("OpenAI API key is not configured. Please set the OPENAI_API_KEY environment variable.");
  }
  const response = await fetch('https://api.openai.com/v1/chat/completions', { /* request elided in this preview */ });
  if (!response.ok) {
    throw new Error(`OpenAI API error: ${response.status} - ${await response.text()}`);
  }
  // failures are logged with: console.error("OpenAI API Call Error:", error);
  return (await response.json()).choices[0].message.content;
}

function App() {
  const [openAIError, setOpenAIError] = useState<string | null>(null);
}
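The direct REST call that callOpenAI wraps looks roughly like this (model and prompt are placeholders):

const response = await fetch("https://api.openai.com/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${Deno.env.get("OPENAI_API_KEY")}`,
  },
  body: JSON.stringify({
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: "Hello!" }],
  }),
});
const data = await response.json();
console.log(data.choices[0].message.content);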
BlogChatbotServer
@weaverwhale
// This approach will create a chatbot using OpenAI's SDK with access to a blog's content stored in a JSON.
HTTP
// This approach will create a chatbot using OpenAI's SDK with access to a blog's content stored in a JSON.
// We'll use Hono for routing, OpenAI for the chatbot, and stream the responses to the client.
import { OpenAI } from "https://esm.town/v/std/openai";

// Function to search blog content
function searchBlogContent(query: string) {
  // ... filter the blog's JSON posts for matches (elided in this preview)
}

// Client-side helpers rendered into the page
function addMessage(message, isUser = false) { /* ... */ }
async function sendMessage() { /* ... */ }

const openai = new OpenAI();
const completion = await openai.chat.completions.create({ /* ... */ });

export default async function server(req: Request): Promise<Response> {
  // ... Hono routing and response streaming (elided in this preview)
}
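A naive version of searchBlogContent might just substring-match against a posts array; a sketch in which the BlogPost shape and sample data are assumptions:

// Hypothetical shape for the blog's JSON content
type BlogPost = { title: string; content: string; url: string };
const blogPosts: BlogPost[] = [
  { title: "Hello Val Town", content: "Vals are small TypeScript programs...", url: "/hello" },
];

// Return posts whose title or body mentions the query
function searchBlogContent(query: string): BlogPost[] {
  const q = query.toLowerCase();
  return blogPosts.filter(
    (p) => p.title.toLowerCase().includes(q) || p.content.toLowerCase().includes(q),
  );
}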
fetchAndStoreOpenAiUsage2
@nbbaier
An interactive, runnable TypeScript val by nbbaier
Cron
import { cronEvalLogger as logger } from "https://esm.town/v/nbbaier/cronLogger";
import { fetchOpenAiUsageData } from "https://esm.town/v/nbbaier/fetchOpenAiUsageData";
import { updateBlobUsageDB } from "https://esm.town/v/nbbaier/updateBlobUsageDB";
import { DateTime } from "npm:luxon";

const fetchAndStoreOpenAiUsage = async (interval: Interval) => {
  const timeZone = "America/Chicago";
  const today = DateTime.now().setZone(timeZone).toISODate(); // assumed derivation; the preview truncates here
  try {
    const { data, whisper_api_data, dalle_api_data } = await fetchOpenAiUsageData(today);
    const day_total = await createDayTotal(data, whisper_api_data, dalle_api_data);
    // ... persist results via updateBlobUsageDB (createDayTotal and the call's arguments are outside this excerpt)
  } catch (error) {
    console.error(error);
  }
};

export default logger(fetchAndStoreOpenAiUsage);
OpenAIChatCompletion
@rozek
Use Val Town's OpenAI access wherever you need to enter AI provider credentials.

Perhaps you are going to build a public AI application with Val Town. You may want to use the OpenAI access that comes with Val Town yourself, but for the entirety of your users the rate limits of that access will be too low. Thus, you may want them to bring their own AI provider with their own server URL and access key. But wait: how do you then use your own application while still using Val Town's OpenAI access?

This is where this val comes in (assuming that you already have a Val Town account, either a free or a paid one):
- fork this val
- use the fork's HTTP endpoint URL (in the form "https://XXX-openaichatcompletion.web.val.run") as the AI server URL
- define an environment variable called "OpenAIChatCompletion" with any kind of content (but without any blanks or control characters, e.g., a UUID) and use that as your personal access key

Now you can ask everybody to provide their AI credentials and still use the OpenAI access provided by Val Town for your personal tests.

Nota bene: if the environment variable "OpenAIChatCompletion" has not been defined, access to your fork's endpoint is free for everybody!

In addition to the described authorization, this val also provides resource "throttling" (using val floatingQuotaTracker in sqlite tables "OpenAIChatCompletion_Info" and "OpenAIChatCompletion_Log") and calculates some access statistics (using val InvocationTracker in sqlite tables "OpenAIChatCompletion_Usage_Info" and "OpenAIChatCompletion_Usage_Log").
HTTP
import { OpenAI } from 'https://esm.town/v/std/openai'

// don't forget to define Env.Var "OpenAIChatCompletion" for authorization!

/**** rate-limited access to the OpenAI API ****/
export default async function (Request:Request):Promise<Response> {
  const AccessToken = process.env.OpenAIChatCompletion
  const TrackingTable = 'OpenAIChatCompletion_Usage'
  const ResourceTable = 'OpenAIChatCompletion'
  // ... authorization, throttling, and usage tracking elided in this preview
  // streaming requests:
  const CompletionStream = await new OpenAI().chat.completions.create({ /* ..., stream: true */ })
  // non-streaming requests:
  const Completion = await new OpenAI().chat.completions.create({ /* ... */ })
  // ... the wrapper ultimately delegates with: return await OpenAIChatCompletion(Request)
}
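To point a client at such a fork, one option is to configure the official OpenAI SDK with the fork's URL as baseURL and your chosen token as the key. A sketch under the assumption that the fork accepts OpenAI-style chat-completion requests at that base URL (the URL and key are placeholders):

import OpenAI from "npm:openai";

// baseURL points at the forked val's endpoint; apiKey is the value you
// stored in the "OpenAIChatCompletion" environment variable
const client = new OpenAI({
  baseURL: "https://XXX-openaichatcompletion.web.val.run",
  apiKey: "your-personal-access-key",
});

const completion = await client.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "ping" }],
});
console.log(completion.choices[0].message.content);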