'use client';

import * as React from 'react';

export interface RecordingState {
  isRecording: boolean;
  isPaused: boolean;
  duration: number;
  inputLevel: number;
}

export interface UseRecordingReturn {
  state: RecordingState;
  startRecording: () => Promise<void>;
  stopRecording: () => Promise<AudioBuffer | null>;
  pauseRecording: () => void;
  resumeRecording: () => void;
  getInputDevices: () => Promise<MediaDeviceInfo[]>;
  selectInputDevice: (deviceId: string) => Promise<void>;
  requestPermission: () => Promise<boolean>;
}

export function useRecording(): UseRecordingReturn {
  const [state, setState] = React.useState<RecordingState>({
    isRecording: false,
    isPaused: false,
    duration: 0,
    inputLevel: 0,
  });

  const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
  const audioContextRef = React.useRef<AudioContext | null>(null);
  const analyserRef = React.useRef<AnalyserNode | null>(null);
  const streamRef = React.useRef<MediaStream | null>(null);
  const chunksRef = React.useRef<Blob[]>([]);
  const startTimeRef = React.useRef(0);
  const pausedAtRef = React.useRef(0);
  const animationFrameRef = React.useRef(0);
  const selectedDeviceIdRef = React.useRef('');
  const isMonitoringRef = React.useRef(false);

  // Request microphone permission
  const requestPermission = React.useCallback(async (): Promise<boolean> => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      // Release the probe stream immediately; we only needed the permission grant
      stream.getTracks().forEach((track) => track.stop());
      return true;
    } catch (error) {
      console.error('Microphone permission denied:', error);
      return false;
    }
  }, []);

  // Get available audio input devices (labels are only populated after permission is granted)
  const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
    try {
      const devices = await navigator.mediaDevices.enumerateDevices();
      return devices.filter((device) => device.kind === 'audioinput');
    } catch (error) {
      console.error('Failed to enumerate devices:', error);
      return [];
    }
  }, []);

  // Select input device; takes effect on the next startRecording call
  const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
    selectedDeviceIdRef.current = deviceId;
  }, []);

  // Monitor input level
  const monitorInputLevel = React.useCallback(() => {
    if (!analyserRef.current) return;

    const analyser = analyserRef.current;
    const dataArray = new Uint8Array(analyser.frequencyBinCount);

    const updateLevel = () => {
      if (!isMonitoringRef.current) return;

      analyser.getByteTimeDomainData(dataArray);

      // Calculate peak level (more responsive than RMS for visual meters);
      // time-domain bytes are centered at 128, so normalize to 0..1
      let peak = 0;
      for (let i = 0; i < dataArray.length; i++) {
        const normalized = Math.abs((dataArray[i] - 128) / 128);
        if (normalized > peak) {
          peak = normalized;
        }
      }

      setState((prev) => ({ ...prev, inputLevel: peak }));
      animationFrameRef.current = requestAnimationFrame(updateLevel);
    };

    updateLevel();
  }, []);

  // Start recording
  const startRecording = React.useCallback(async (): Promise<void> => {
    try {
      // Get user media with the selected device, or the default input if none was chosen
      const constraints: MediaStreamConstraints = {
        audio: selectedDeviceIdRef.current
          ? { deviceId: { exact: selectedDeviceIdRef.current } }
          : true,
      };
      const stream = await navigator.mediaDevices.getUserMedia(constraints);
      streamRef.current = stream;

      // Create audio context and analyser for level monitoring
      const audioContext = new AudioContext();
      audioContextRef.current = audioContext;
      const source = audioContext.createMediaStreamSource(stream);
      const analyser = audioContext.createAnalyser();
      analyser.fftSize = 256;
      analyser.smoothingTimeConstant = 0.3;
      source.connect(analyser);
      analyserRef.current = analyser;

      // Create MediaRecorder
      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;
      chunksRef.current = [];

      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data);
        }
      };

      // Start recording
      mediaRecorder.start();
      startTimeRef.current = Date.now();

      setState({
        isRecording: true,
        isPaused: false,
        duration: 0,
        inputLevel: 0,
      });

      // Start monitoring input level
      isMonitoringRef.current = true;
      monitorInputLevel();
    } catch (error) {
      console.error('Failed to start recording:', error);
      throw error;
    }
  }, [monitorInputLevel]);

  // Stop recording and return the decoded AudioBuffer (or null on failure)
  const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
    return new Promise<AudioBuffer | null>((resolve) => {
      if (!mediaRecorderRef.current || !streamRef.current) {
        resolve(null);
        return;
      }

      const mediaRecorder = mediaRecorderRef.current;

      mediaRecorder.onstop = async () => {
        // Stop all tracks and tear down monitoring regardless of how decoding goes
        streamRef.current?.getTracks().forEach((track) => track.stop());
        isMonitoringRef.current = false;
        if (animationFrameRef.current) {
          cancelAnimationFrame(animationFrameRef.current);
        }
        if (audioContextRef.current) {
          await audioContextRef.current.close();
          audioContextRef.current = null; // prevent a double close() on unmount
        }
        setState({
          isRecording: false,
          isPaused: false,
          duration: 0,
          inputLevel: 0,
        });

        // Create blob from recorded chunks, using the recorder's actual container
        // type (e.g. Safari records audio/mp4, not audio/webm)
        const blob = new Blob(chunksRef.current, {
          type: mediaRecorder.mimeType || 'audio/webm',
        });

        // Convert blob to AudioBuffer
        try {
          const arrayBuffer = await blob.arrayBuffer();
          const decodeContext = new AudioContext();
          const audioBuffer = await decodeContext.decodeAudioData(arrayBuffer);
          await decodeContext.close(); // release the throwaway decoding context
          resolve(audioBuffer);
        } catch (error) {
          console.error('Failed to decode recorded audio:', error);
          resolve(null);
        }
      };

      mediaRecorder.stop();
    });
  }, []);

  // Pause recording
  const pauseRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
      mediaRecorderRef.current.pause();
      pausedAtRef.current = Date.now();
      setState((prev) => ({ ...prev, isPaused: true }));
      isMonitoringRef.current = false;
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    }
  }, [state.isRecording, state.isPaused]);

  // Resume recording
  const resumeRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
      mediaRecorderRef.current.resume();
      // Shift the start time forward so time spent paused is excluded from duration
      startTimeRef.current += Date.now() - pausedAtRef.current;
      setState((prev) => ({ ...prev, isPaused: false }));
      isMonitoringRef.current = true;
      monitorInputLevel();
    }
  }, [state.isRecording, state.isPaused, monitorInputLevel]);

  // Update duration while actively recording
  React.useEffect(() => {
    if (!state.isRecording || state.isPaused) return;

    const interval = setInterval(() => {
      const elapsed = (Date.now() - startTimeRef.current) / 1000;
      setState((prev) => ({ ...prev, duration: elapsed }));
    }, 100);

    return () => clearInterval(interval);
  }, [state.isRecording, state.isPaused]);

  // Cleanup on unmount
  React.useEffect(() => {
    return () => {
      isMonitoringRef.current = false;
      if (streamRef.current) {
        streamRef.current.getTracks().forEach((track) => track.stop());
      }
      if (audioContextRef.current) {
        audioContextRef.current.close();
      }
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    };
  }, []);

  return {
    state,
    startRecording,
    stopRecording,
    pauseRecording,
    resumeRecording,
    getInputDevices,
    selectInputDevice,
    requestPermission,
  };
}
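
/*
 * Usage sketch (illustrative only): `RecorderButton` is a hypothetical consumer
 * of this hook, not part of the module. It shows the expected call order:
 * request permission, start, then stop to obtain the decoded AudioBuffer.
 *
 *   function RecorderButton() {
 *     const { state, startRecording, stopRecording, requestPermission } = useRecording();
 *
 *     const handleClick = async () => {
 *       if (!state.isRecording) {
 *         if (await requestPermission()) {
 *           await startRecording();
 *         }
 *       } else {
 *         const buffer = await stopRecording();
 *         if (buffer) {
 *           console.log(`Recorded ${buffer.duration.toFixed(1)}s of audio`);
 *         }
 *       }
 *     };
 *
 *     return (
 *       <button onClick={handleClick}>
 *         {state.isRecording ? `Stop (${state.duration.toFixed(1)}s)` : 'Record'}
 *       </button>
 *     );
 *   }
 */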