/**
* This ego booster app takes a selfie, sends it to GPT-4o-mini for analysis,
* and streams funny, specific compliments about the user's appearance.
* We use the WebRTC API for camera access, the OpenAI API for image analysis,
* and server-sent events for real-time streaming of compliments.
*/
/** @jsxImportSource https://esm.sh/react */
import React, { useState, useEffect, useRef } from "https://esm.sh/react";
import { createRoot } from "https://esm.sh/react-dom/client";
import { marked } from "https://esm.sh/marked";
function App() {
  const [compliments, setCompliments] = useState<string>("");
  const [isLoading, setIsLoading] = useState(false);
  const [isCameraOn, setIsCameraOn] = useState(true);
  const [error, setError] = useState<string | null>(null);
  const videoRef = useRef<HTMLVideoElement>(null);
  const canvasRef = useRef<HTMLCanvasElement>(null);

  useEffect(() => {
    console.log("App mounted, starting camera");
    startCamera();
    return () => {
      console.log("App unmounting, stopping camera");
      stopCamera();
    };
  }, []);

  const startCamera = async () => {
    console.log("Attempting to start camera");
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ video: { facingMode: "user" } });
      if (videoRef.current) {
        videoRef.current.srcObject = stream;
        console.log("Camera started successfully");
      } else {
        console.log("Video ref is null, camera not started");
        setError("Camera initialization failed. Please refresh the page.");
      }
    } catch (err) {
      console.error("Error accessing camera:", err);
      setError("Failed to access camera. Please ensure you've granted camera permissions.");
    }
  };

  const stopCamera = () => {
    console.log("Stopping camera");
    if (videoRef.current && videoRef.current.srcObject) {
      const tracks = (videoRef.current.srcObject as MediaStream).getTracks();
      tracks.forEach(track => track.stop());
      console.log("Camera stopped");
    } else {
      console.log("No camera to stop");
    }
  };

  const takeSelfie = async () => {
    console.log("Taking selfie");
    if (!videoRef.current) {
      console.error("Video ref is null");
      setError("Camera not initialized. Please refresh the page.");
      return;
    }
    if (!canvasRef.current) {
      console.error("Canvas ref is null");
      setError("Canvas not initialized. Please refresh the page.");
      return;
    }
    setIsLoading(true);
    setIsCameraOn(false);
    setError(null);
    const context = canvasRef.current.getContext('2d');
    if (context) {
      context.drawImage(videoRef.current, 0, 0, canvasRef.current.width, canvasRef.current.height);
      console.log("Selfie captured on canvas");
      try {
        const blob = await new Promise<Blob | null>((resolve) => canvasRef.current!.toBlob(resolve, 'image/jpeg'));
        if (blob) {
          console.log("Blob created, size:", blob.size);
          const formData = new FormData();
          formData.append('image', blob, 'selfie.jpg');
          console.log("Sending request to /analyze");
          const response = await fetch('/analyze', {
            method: 'POST',
            body: formData
          });
          console.log("Response received:", response.status);
          if (response.ok) {
            const reader = response.body?.getReader();
            if (reader) {
              console.log("Starting to read stream");
              while (true) {
                const { done, value } = await reader.read();
                if (done) {
                  console.log("Stream reading complete");
                  break;
                }
stevekrouse-egobooster.web.val.run
August 21, 2024
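The listing above is cut off inside the stream-reading loop, and the /analyze endpoint it calls is not shown at all. Two hedged sketches follow; neither is the original val's code.

First, a minimal sketch of how the client side could finish consuming the response, assuming /analyze streams plain text chunks. The helper name readComplimentStream and the exact continuation are assumptions; the marked import in the listing suggests the accumulated text is later rendered as markdown in the component's JSX, though that part of the file is not visible here.

// Hypothetical sketch, not the original val's code: decode streamed chunks
// and hand each piece of text to the caller (e.g. to append to `compliments`).
async function readComplimentStream(
  reader: ReadableStreamDefaultReader<Uint8Array>,
  onText: (text: string) => void,
) {
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    onText(decoder.decode(value, { stream: true }));
  }
}

// Possible usage inside takeSelfie, after obtaining `reader`:
//   await readComplimentStream(reader, text => setCompliments(prev => prev + text));
//   setIsLoading(false);

Second, a plausible server-side sketch, assuming a standard Request-to-Response HTTP handler, the openai client from esm.sh, and an OPENAI_API_KEY environment variable. The prompt text, helper names, and plain-text streaming format are assumptions: the header comment mentions server-sent events, but this sketch streams raw text because the client shown reads the response body directly.

// Hypothetical server sketch, not the original val's code.
import OpenAI from "https://esm.sh/openai";

const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

export default async function handler(req: Request): Promise<Response> {
  const url = new URL(req.url);
  if (req.method === "POST" && url.pathname === "/analyze") {
    // Pull the selfie out of the multipart form body sent by the client.
    const form = await req.formData();
    const image = form.get("image") as File;
    const bytes = new Uint8Array(await image.arrayBuffer());

    // Base64-encode in chunks to avoid call-stack limits on large images.
    let binary = "";
    for (let i = 0; i < bytes.length; i += 0x8000) {
      binary += String.fromCharCode(...bytes.subarray(i, i + 0x8000));
    }
    const dataUrl = `data:image/jpeg;base64,${btoa(binary)}`;

    // Ask GPT-4o-mini for compliments and stream the tokens back as they arrive.
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      stream: true,
      messages: [{
        role: "user",
        content: [
          { type: "text", text: "Give funny, specific compliments about this person's appearance." },
          { type: "image_url", image_url: { url: dataUrl } },
        ],
      }],
    });

    const body = new ReadableStream({
      async start(controller) {
        const encoder = new TextEncoder();
        for await (const chunk of completion) {
          const token = chunk.choices[0]?.delta?.content ?? "";
          if (token) controller.enqueue(encoder.encode(token));
        }
        controller.close();
      },
    });
    return new Response(body, { headers: { "Content-Type": "text/plain; charset=utf-8" } });
  }

  // The real val presumably also serves the React page above for GET requests.
  return new Response("Not found", { status: 404 });
}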