'use client';

import * as React from 'react';

export interface RecordingState {
  isRecording: boolean;
  isPaused: boolean;
  duration: number;
  inputLevel: number;
}

export interface RecordingSettings {
  inputGain: number; // 0.0 to 2.0 (1.0 = unity)
  recordMono: boolean; // true = mono, false = stereo
  sampleRate: number; // target sample rate (44100, 48000, etc.)
}

export interface UseRecordingReturn {
  state: RecordingState;
  settings: RecordingSettings;
  startRecording: () => Promise<void>;
  stopRecording: () => Promise<AudioBuffer | null>;
  pauseRecording: () => void;
  resumeRecording: () => void;
  getInputDevices: () => Promise<MediaDeviceInfo[]>;
  selectInputDevice: (deviceId: string) => Promise<void>;
  requestPermission: () => Promise<boolean>;
  setInputGain: (gain: number) => void;
  setRecordMono: (mono: boolean) => void;
  setSampleRate: (sampleRate: number) => void;
}

export function useRecording(): UseRecordingReturn {
  const [state, setState] = React.useState<RecordingState>({
    isRecording: false,
    isPaused: false,
    duration: 0,
    inputLevel: 0,
  });

  const [settings, setSettings] = React.useState<RecordingSettings>({
    inputGain: 1.0,
    recordMono: false,
    sampleRate: 48000,
  });

  const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
  const audioContextRef = React.useRef<AudioContext | null>(null);
  const analyserRef = React.useRef<AnalyserNode | null>(null);
  const gainNodeRef = React.useRef<GainNode | null>(null);
  const streamRef = React.useRef<MediaStream | null>(null);
  const chunksRef = React.useRef<Blob[]>([]);
  const startTimeRef = React.useRef(0);
  const animationFrameRef = React.useRef(0);
  const selectedDeviceIdRef = React.useRef('');
  const isMonitoringRef = React.useRef(false);

  // Request microphone permission by opening (and immediately closing) a stream
  const requestPermission = React.useCallback(async (): Promise<boolean> => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      stream.getTracks().forEach((track) => track.stop());
      return true;
    } catch (error) {
      console.error('Microphone permission denied:', error);
      return false;
    }
  }, []);

  // Get available input devices
  const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
    try {
      const devices = await navigator.mediaDevices.enumerateDevices();
      return devices.filter((device) => device.kind === 'audioinput');
    } catch (error) {
      console.error('Failed to enumerate devices:', error);
      return [];
    }
  }, []);

  // Select input device (takes effect on the next startRecording call)
  const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
    selectedDeviceIdRef.current = deviceId;
  }, []);

  // Convert linear amplitude to a dB scale normalized to the 0-1 range
  const linearToDbScale = React.useCallback((linear: number): number => {
    if (linear === 0) return 0;
    // Convert to dB (20 * log10(linear))
    const db = 20 * Math.log10(linear);
    // Normalize the -60dB..0dB range to 0..1:
    // -60dB or lower = 0%, 0dB = 100%
    const minDb = -60;
    const maxDb = 0;
    const normalized = (db - minDb) / (maxDb - minDb);
    // Clamp to the 0-1 range
    return Math.max(0, Math.min(1, normalized));
  }, []);

  // Monitor input level via requestAnimationFrame
  const monitorInputLevel = React.useCallback(() => {
    if (!analyserRef.current) return;

    const analyser = analyserRef.current;
    const dataArray = new Float32Array(analyser.fftSize);

    const updateLevel = () => {
      if (!isMonitoringRef.current) return;

      analyser.getFloatTimeDomainData(dataArray);

      // Calculate peak level using float data (-1 to +1 range)
      let peak = 0;
      for (let i = 0; i < dataArray.length; i++) {
        const abs = Math.abs(dataArray[i]);
        if (abs > peak) {
          peak = abs;
        }
      }

      // Convert linear peak to logarithmic dB scale
      const dbLevel = linearToDbScale(peak);

      setState((prev) => ({ ...prev, inputLevel: dbLevel }));

      animationFrameRef.current = requestAnimationFrame(updateLevel);
    };

    updateLevel();
  }, [linearToDbScale]);
  // Start recording
  const startRecording = React.useCallback(async (): Promise<void> => {
    try {
      // Get user media with the selected device, if any
      const constraints: MediaStreamConstraints = {
        audio: selectedDeviceIdRef.current
          ? { deviceId: { exact: selectedDeviceIdRef.current } }
          : true,
      };
      const stream = await navigator.mediaDevices.getUserMedia(constraints);
      streamRef.current = stream;

      // Create audio context with the target sample rate
      const audioContext = new AudioContext({ sampleRate: settings.sampleRate });
      audioContextRef.current = audioContext;

      const source = audioContext.createMediaStreamSource(stream);

      // Create gain node for input gain control
      const gainNode = audioContext.createGain();
      gainNode.gain.value = settings.inputGain;
      gainNodeRef.current = gainNode;

      // Create analyser for level monitoring
      const analyser = audioContext.createAnalyser();
      analyser.fftSize = 256;
      analyser.smoothingTimeConstant = 0.3;

      // Connect: source -> gain -> analyser
      source.connect(gainNode);
      gainNode.connect(analyser);
      analyserRef.current = analyser;

      // Create MediaRecorder
      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;
      chunksRef.current = [];

      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data);
        }
      };

      // Start recording
      mediaRecorder.start();
      startTimeRef.current = Date.now();

      setState({
        isRecording: true,
        isPaused: false,
        duration: 0,
        inputLevel: 0,
      });

      // Start monitoring input level
      isMonitoringRef.current = true;
      monitorInputLevel();
    } catch (error) {
      console.error('Failed to start recording:', error);
      throw error;
    }
  }, [monitorInputLevel, settings.sampleRate, settings.inputGain]);

  // Stop recording and return the result as an AudioBuffer
  const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
    return new Promise((resolve) => {
      if (!mediaRecorderRef.current || !streamRef.current) {
        resolve(null);
        return;
      }

      const mediaRecorder = mediaRecorderRef.current;

      mediaRecorder.onstop = async () => {
        // Stop all tracks
        streamRef.current?.getTracks().forEach((track) => track.stop());

        // Create blob from recorded chunks
        const blob = new Blob(chunksRef.current, { type: 'audio/webm' });

        // Convert blob to AudioBuffer
        try {
          const arrayBuffer = await blob.arrayBuffer();
          const audioContext = new AudioContext();
          let audioBuffer = await audioContext.decodeAudioData(arrayBuffer);

          // Convert to mono if requested
          if (settings.recordMono && audioBuffer.numberOfChannels > 1) {
            const monoBuffer = audioContext.createBuffer(
              1,
              audioBuffer.length,
              audioBuffer.sampleRate
            );
            const monoData = monoBuffer.getChannelData(0);

            // Mix all channels to mono by averaging
            for (let i = 0; i < audioBuffer.length; i++) {
              let sum = 0;
              for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
                sum += audioBuffer.getChannelData(channel)[i];
              }
              monoData[i] = sum / audioBuffer.numberOfChannels;
            }

            audioBuffer = monoBuffer;
          }

          // Clean up, including the temporary decode context
          isMonitoringRef.current = false;
          await audioContext.close();
          if (audioContextRef.current) {
            await audioContextRef.current.close();
            // Null out so the unmount cleanup doesn't close it a second time
            audioContextRef.current = null;
          }
          if (animationFrameRef.current) {
            cancelAnimationFrame(animationFrameRef.current);
          }

          setState({
            isRecording: false,
            isPaused: false,
            duration: 0,
            inputLevel: 0,
          });

          resolve(audioBuffer);
        } catch (error) {
          console.error('Failed to decode recorded audio:', error);
          resolve(null);
        }
      };

      mediaRecorder.stop();
    });
  }, [settings.recordMono]);

  // Pause recording
  const pauseRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
      mediaRecorderRef.current.pause();
      setState((prev) => ({ ...prev, isPaused: true }));
      isMonitoringRef.current = false;
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    }
  }, [state.isRecording, state.isPaused]);

  // Resume recording
  const resumeRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
      mediaRecorderRef.current.resume();
      setState((prev) => ({ ...prev, isPaused: false }));
      isMonitoringRef.current = true;
      monitorInputLevel();
    }
  }, [state.isRecording, state.isPaused, monitorInputLevel]);

  // Update duration while recording (and not paused)
  React.useEffect(() => {
    if (!state.isRecording || state.isPaused) return;

    const interval = setInterval(() => {
      const elapsed = (Date.now() - startTimeRef.current) / 1000;
      setState((prev) => ({ ...prev, duration: elapsed }));
    }, 100);

    return () => clearInterval(interval);
  }, [state.isRecording, state.isPaused]);

  // Cleanup on unmount
  React.useEffect(() => {
    return () => {
      isMonitoringRef.current = false;
      if (streamRef.current) {
        streamRef.current.getTracks().forEach((track) => track.stop());
      }
      if (audioContextRef.current) {
        audioContextRef.current.close();
      }
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    };
  }, []);

  // Settings setters
  const setInputGain = React.useCallback((gain: number) => {
    setSettings((prev) => ({ ...prev, inputGain: Math.max(0, Math.min(2, gain)) }));
    // Update the live gain node if currently recording
    if (gainNodeRef.current) {
      gainNodeRef.current.gain.value = Math.max(0, Math.min(2, gain));
    }
  }, []);

  const setRecordMono = React.useCallback((mono: boolean) => {
    setSettings((prev) => ({ ...prev, recordMono: mono }));
  }, []);

  const setSampleRate = React.useCallback((sampleRate: number) => {
    setSettings((prev) => ({ ...prev, sampleRate }));
  }, []);

  return {
    state,
    settings,
    startRecording,
    stopRecording,
    pauseRecording,
    resumeRecording,
    getInputDevices,
    selectInputDevice,
    requestPermission,
    setInputGain,
    setRecordMono,
    setSampleRate,
  };
}
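
/*
 * Example usage (a minimal sketch — the `RecorderButton` component below is
 * illustrative only and not part of this module):
 *
 * function RecorderButton() {
 *   const { state, startRecording, stopRecording } = useRecording();
 *
 *   const handleClick = async () => {
 *     if (state.isRecording) {
 *       // Resolves with the decoded AudioBuffer, or null on failure
 *       const buffer = await stopRecording();
 *       console.log('Recorded', buffer?.duration ?? 0, 'seconds');
 *     } else {
 *       await startRecording();
 *     }
 *   };
 *
 *   return (
 *     <button onClick={handleClick}>
 *       {state.isRecording ? `Stop (${state.duration.toFixed(1)}s)` : 'Record'}
 *     </button>
 *   );
 * }
 */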