// This app creates a 10-second typing test that records microphone audio and visualizes the recording.
// It uses getUserMedia and MediaRecorder to capture audio, the Web Audio API to decode it, and a Canvas to draw the recorded waveform, alongside a typing interface.
// TailwindCSS is used for styling.
/** @jsxImportSource https://esm.sh/react */
import React, { useState, useEffect, useRef } from "https://esm.sh/react";
import { createRoot } from "https://esm.sh/react-dom/client";
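
// Main component: owns the typing-test state, the 10-second recording window,
// and the canvas that displays the captured audio.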
function App() {
  const [input, setInput] = useState("");
  const [currentIndex, setCurrentIndex] = useState(0);
  const [timeLeft, setTimeLeft] = useState(10);
  const [isRecording, setIsRecording] = useState(false);
  const [audioData, setAudioData] = useState(null);
  const audioContextRef = useRef(null);
  const mediaRecorderRef = useRef(null);
  const canvasRef = useRef(null);
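  // The snippet the user is asked to type during the 10-second test.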
  const text = `
function quickSort(arr) {
  if (arr.length <= 1) {
    return arr;
  }
  const pivot = arr[Math.floor(arr.length / 2)];
  const left = arr.filter(x => x < pivot);
  const middle = arr.filter(x => x === pivot);
  const right = arr.filter(x => x > pivot);
  return [...quickSort(left), ...middle, ...quickSort(right)];
}
// Example usage:
const unsortedArray = [64, 34, 25, 12, 22, 11, 90];
console.log(quickSort(unsortedArray));
`.trim();
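  // Tick the countdown once per second while a recording is in progress.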
  useEffect(() => {
    if (isRecording && timeLeft > 0) {
      const timer = setInterval(() => setTimeLeft(t => t - 1), 1000);
      return () => clearInterval(timer);
    }
  }, [isRecording, timeLeft]);
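  // Request microphone access, route the stream into a MediaRecorder, and run a
  // single 10-second recording that stops itself.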
  const startRecording = async () => {
    setCurrentIndex(0);
    setInput("");
    try {
      console.log("Starting recording...");
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      console.log("Got audio stream");
      audioContextRef.current = new (window.AudioContext || window.webkitAudioContext)();
      const source = audioContextRef.current.createMediaStreamSource(stream);
      const analyzer = audioContextRef.current.createAnalyser();
      source.connect(analyzer);
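      // Note: the AnalyserNode is created but never read here; the visualization
      // below is drawn from the decoded recording rather than from live analysis.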
      mediaRecorderRef.current = new MediaRecorder(stream);
      const chunks = [];
      mediaRecorderRef.current.ondataavailable = e => chunks.push(e.data);
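      // When the recorder stops, bundle the chunks into a blob and hand the raw
      // bytes to state so the drawing effect below can decode them.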
      mediaRecorderRef.current.onstop = async () => {
        console.log("Recording stopped");
        const blob = new Blob(chunks, { type: 'audio/ogg; codecs=opus' });
        const arrayBuffer = await blob.arrayBuffer();
        setAudioData(arrayBuffer);
        // Release the microphone now that the recording has been captured.
        stream.getTracks().forEach(track => track.stop());
      };
      mediaRecorderRef.current.start();
      console.log("MediaRecorder started");
      setIsRecording(true);
      setTimeLeft(10);
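      // Stop the recorder automatically after 10 seconds so it matches the countdown.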
      setTimeout(() => {
        mediaRecorderRef.current.stop();
        setIsRecording(false);
      }, 10000);
    } catch (error) {
      console.error("Error starting recording:", error);
    }
  };
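  // Once a recording is available, decode it and draw the captured signal onto the canvas.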
  useEffect(() => {
    if (audioData && canvasRef.current) {
      const audioContext = new (window.AudioContext || window.webkitAudioContext)();
      const canvas = canvasRef.current;
      const ctx = canvas.getContext('2d');
      console.log("Drawing waveform...");
      audioContext.decodeAudioData(audioData, buffer => {
        const data = buffer.getChannelData(0);
        const step = Math.ceil(data.length / canvas.width);
        const amp = canvas.height / 2;
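        // Clear the canvas, then trace the samples: each horizontal pixel covers
        // `step` samples, and `amp` maps the -1..1 sample range to half the canvas height.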
        ctx.fillStyle = 'rgb(200, 200, 200)';
        ctx.fillRect(0, 0, canvas.width, canvas.height);
        ctx.lineWidth = 1;
        ctx.strokeStyle = 'rgb(0, 0, 0)';
        ctx.beginPath();
        for (let i = 0; i < canvas.width; i++) {