diff --git a/components/recording/InputLevelMeter.tsx b/components/recording/InputLevelMeter.tsx
new file mode 100644
index 0000000..9bc80f2
--- /dev/null
+++ b/components/recording/InputLevelMeter.tsx
@@ -0,0 +1,85 @@
+'use client';
+
+import * as React from 'react';
+import { cn } from '@/lib/utils/cn';
+
+export interface InputLevelMeterProps {
+  level: number; // 0.0 to 1.0
+  orientation?: 'horizontal' | 'vertical';
+  className?: string;
+}
+
+export function InputLevelMeter({
+  level,
+  orientation = 'horizontal',
+  className,
+}: InputLevelMeterProps) {
+  // Clamp level between 0 and 1
+  const clampedLevel = Math.max(0, Math.min(1, level));
+
+  // Calculate color based on level: green, yellow above 0.7, red above 0.9
+  const getColor = (value: number): string => {
+    if (value > 0.9) return 'bg-red-500';
+    if (value > 0.7) return 'bg-yellow-500';
+    return 'bg-green-500';
+  };
+
+  const isHorizontal = orientation === 'horizontal';
+
+  return (
+    <div
+      className={cn(
+        'relative overflow-hidden rounded bg-neutral-800',
+        isHorizontal ? 'h-2 w-full' : 'h-full w-2',
+        className
+      )}
+    >
+      {/* Level bar */}
+      <div
+        className={cn('absolute bottom-0 left-0', getColor(clampedLevel))}
+        style={
+          isHorizontal
+            ? { width: `${clampedLevel * 100}%`, height: '100%' }
+            : { width: '100%', height: `${clampedLevel * 100}%` }
+        }
+      />
+
+      {/* Clip indicator (at 90%) */}
+      {clampedLevel > 0.9 && (
+        <div
+          className={cn(
+            'absolute bg-red-600',
+            isHorizontal ? 'right-0 top-0 h-full w-1' : 'left-0 top-0 h-1 w-full'
+          )}
+        />
+      )}
+
+      {/* Tick marks */}
+      <div className="absolute inset-0">
+        {[0.25, 0.5, 0.75].map((tick) => (
+          <div
+            key={tick}
+            className="absolute bg-black/30"
+            style={
+              isHorizontal
+                ? { left: `${tick * 100}%`, top: 0, bottom: 0, width: 1 }
+                : { bottom: `${tick * 100}%`, left: 0, right: 0, height: 1 }
+            }
+          />
+        ))}
+      </div>
+    </div>
+  );
+}
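For context, a minimal sketch of the meter used on its own. `MeterDemo` and its wrapper layout classes are hypothetical; only the `level`/`orientation`/`className` contract is taken from the component above.

```tsx
import * as React from 'react';
import { InputLevelMeter } from '@/components/recording/InputLevelMeter';

// Hypothetical demo component; the meter expects a linear 0.0-1.0 level.
export function MeterDemo({ level }: { level: number }) {
  return (
    <div className="flex items-end gap-4">
      {/* Horizontal: fills left to right, yellow above 0.7, red above 0.9 */}
      <div className="w-40">
        <InputLevelMeter level={level} />
      </div>
      {/* Vertical: fills bottom to top */}
      <div className="h-24">
        <InputLevelMeter level={level} orientation="vertical" />
      </div>
    </div>
  );
}
```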
diff --git a/components/tracks/Track.tsx b/components/tracks/Track.tsx
index 4f556b8..1ad77e3 100644
--- a/components/tracks/Track.tsx
+++ b/components/tracks/Track.tsx
@@ -1,7 +1,7 @@
 'use client';

 import * as React from 'react';
-import { Volume2, VolumeX, Headphones, Trash2, ChevronDown, ChevronRight, CircleArrowOutUpRight, Upload, Plus } from 'lucide-react';
+import { Volume2, VolumeX, Headphones, Trash2, ChevronDown, ChevronRight, CircleArrowOutUpRight, Upload, Plus, Mic } from 'lucide-react';
 import type { Track as TrackType } from '@/types/track';
 import { Button } from '@/components/ui/Button';
 import { Slider } from '@/components/ui/Slider';
@@ -9,6 +9,7 @@ import { cn } from '@/lib/utils/cn';
 import { EffectBrowser } from '@/components/effects/EffectBrowser';
 import { EffectDevice } from '@/components/effects/EffectDevice';
 import { createEffect, type EffectType } from '@/lib/audio/effects/chain';
+import { InputLevelMeter } from '@/components/recording/InputLevelMeter';

 export interface TrackProps {
   track: TrackType;
@@ -31,6 +32,9 @@ export interface TrackProps {
   onUpdateEffect?: (effectId: string, parameters: any) => void;
   onAddEffect?: (effectType: EffectType) => void;
   onSelectionChange?: (selection: { start: number; end: number } | null) => void;
+  onToggleRecordEnable?: () => void;
+  isRecording?: boolean;
+  recordingLevel?: number;
 }

 export function Track({
@@ -54,6 +58,9 @@ export function Track({
   onUpdateEffect,
   onAddEffect,
   onSelectionChange,
+  onToggleRecordEnable,
+  isRecording = false,
+  recordingLevel = 0,
 }: TrackProps) {
   const canvasRef = React.useRef<HTMLCanvasElement>(null);
   const containerRef = React.useRef<HTMLDivElement>(null);
@@ -439,6 +446,22 @@ export function Track({
         )}

+        {/* Record Enable Button */}
+        {onToggleRecordEnable && (
+          <Button variant="ghost" size="icon" onClick={onToggleRecordEnable}>
+            <Mic className={cn('h-4 w-4', track.recordEnabled && 'text-red-500')} />
+          </Button>
+        )}
+
         {/* Solo Button */}

+        {/* Input Level Meter (shown when recording or record-enabled) */}
+        {(track.recordEnabled || isRecording) && (
+          <div className="flex items-center gap-2">
+            <Mic className="h-3 w-3 text-red-500" />
+            <div className="flex-1">
+              <InputLevelMeter level={recordingLevel} />
+            </div>
+            <span className="text-xs tabular-nums text-neutral-400">
+              {Math.round(recordingLevel * 100)}%
+            </span>
+          </div>
+        )}
       </div>
     )}
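The diff adds the new `Track` props but does not show a caller. A possible wiring sketch follows, using the `useRecording` hook introduced below; `RecordableTrackList`, `armedTrackId`, and the assumption that `TrackType` carries an `id` are all hypothetical, and a real parent would also persist `recordEnabled` on the track model rather than keeping arm state purely local.

```tsx
import * as React from 'react';
import { Track } from '@/components/tracks/Track';
import { useRecording } from '@/lib/hooks/useRecording';
import type { Track as TrackType } from '@/types/track';

// Hypothetical parent: arms at most one track and feeds it the live input level.
export function RecordableTrackList({ tracks }: { tracks: TrackType[] }) {
  const { state } = useRecording();
  const [armedTrackId, setArmedTrackId] = React.useState<string | null>(null);

  return (
    <div>
      {tracks.map((track) => (
        <Track
          key={track.id}
          track={track}
          onToggleRecordEnable={() =>
            setArmedTrackId((id) => (id === track.id ? null : track.id))
          }
          isRecording={state.isRecording && armedTrackId === track.id}
          recordingLevel={armedTrackId === track.id ? state.inputLevel : 0}
        />
      ))}
    </div>
  );
}
```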
diff --git a/lib/hooks/useRecording.ts b/lib/hooks/useRecording.ts
new file mode 100644
index 0000000..a5115e1
--- /dev/null
+++ b/lib/hooks/useRecording.ts
@@ -0,0 +1,258 @@
+'use client';
+
+import * as React from 'react';
+
+export interface RecordingState {
+  isRecording: boolean;
+  isPaused: boolean;
+  duration: number;
+  inputLevel: number;
+}
+
+export interface UseRecordingReturn {
+  state: RecordingState;
+  startRecording: () => Promise<void>;
+  stopRecording: () => Promise<AudioBuffer | null>;
+  pauseRecording: () => void;
+  resumeRecording: () => void;
+  getInputDevices: () => Promise<MediaDeviceInfo[]>;
+  selectInputDevice: (deviceId: string) => Promise<void>;
+  requestPermission: () => Promise<boolean>;
+}
+
+export function useRecording(): UseRecordingReturn {
+  const [state, setState] = React.useState<RecordingState>({
+    isRecording: false,
+    isPaused: false,
+    duration: 0,
+    inputLevel: 0,
+  });
+
+  const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
+  const audioContextRef = React.useRef<AudioContext | null>(null);
+  const analyserRef = React.useRef<AnalyserNode | null>(null);
+  const streamRef = React.useRef<MediaStream | null>(null);
+  const chunksRef = React.useRef<Blob[]>([]);
+  const startTimeRef = React.useRef(0);
+  const pausedAtRef = React.useRef(0);
+  const animationFrameRef = React.useRef(0);
+  const selectedDeviceIdRef = React.useRef('');
+  // Ref, not state, so the requestAnimationFrame loop never reads a stale value.
+  const isMonitoringRef = React.useRef(false);
+
+  // Request microphone permission
+  const requestPermission = React.useCallback(async (): Promise<boolean> => {
+    try {
+      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+      stream.getTracks().forEach((track) => track.stop());
+      return true;
+    } catch (error) {
+      console.error('Microphone permission denied:', error);
+      return false;
+    }
+  }, []);
+
+  // Get available input devices
+  const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
+    try {
+      const devices = await navigator.mediaDevices.enumerateDevices();
+      return devices.filter((device) => device.kind === 'audioinput');
+    } catch (error) {
+      console.error('Failed to enumerate devices:', error);
+      return [];
+    }
+  }, []);
+
+  // Select input device (takes effect on the next startRecording call)
+  const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
+    selectedDeviceIdRef.current = deviceId;
+  }, []);
+
+  // Monitor input level via the analyser node
+  const monitorInputLevel = React.useCallback(() => {
+    if (!analyserRef.current) return;
+
+    const analyser = analyserRef.current;
+    const dataArray = new Uint8Array(analyser.frequencyBinCount);
+
+    const updateLevel = () => {
+      if (!isMonitoringRef.current) return;
+      analyser.getByteTimeDomainData(dataArray);
+
+      // Calculate RMS level; samples are unsigned bytes centred on 128
+      let sum = 0;
+      for (let i = 0; i < dataArray.length; i++) {
+        const normalized = (dataArray[i] - 128) / 128;
+        sum += normalized * normalized;
+      }
+      const rms = Math.sqrt(sum / dataArray.length);
+
+      setState((prev) => ({ ...prev, inputLevel: rms }));
+      animationFrameRef.current = requestAnimationFrame(updateLevel);
+    };
+
+    isMonitoringRef.current = true;
+    updateLevel();
+  }, []);
+
+  // Start recording
+  const startRecording = React.useCallback(async (): Promise<void> => {
+    try {
+      // Get user media with selected device
+      const constraints: MediaStreamConstraints = {
+        audio: selectedDeviceIdRef.current
+          ? { deviceId: { exact: selectedDeviceIdRef.current } }
+          : true,
+      };
+
+      const stream = await navigator.mediaDevices.getUserMedia(constraints);
+      streamRef.current = stream;
+
+      // Create audio context and analyser for level monitoring
+      const audioContext = new AudioContext();
+      audioContextRef.current = audioContext;
+
+      const source = audioContext.createMediaStreamSource(stream);
+      const analyser = audioContext.createAnalyser();
+      analyser.fftSize = 256;
+      analyser.smoothingTimeConstant = 0.3;
+
+      source.connect(analyser);
+      analyserRef.current = analyser;
+
+      // Create MediaRecorder
+      const mediaRecorder = new MediaRecorder(stream);
+      mediaRecorderRef.current = mediaRecorder;
+      chunksRef.current = [];
+
+      mediaRecorder.ondataavailable = (event) => {
+        if (event.data.size > 0) {
+          chunksRef.current.push(event.data);
+        }
+      };
+
+      // Start recording
+      mediaRecorder.start();
+      startTimeRef.current = Date.now();
+
+      setState({
+        isRecording: true,
+        isPaused: false,
+        duration: 0,
+        inputLevel: 0,
+      });
+
+      // Start monitoring input level
+      monitorInputLevel();
+    } catch (error) {
+      console.error('Failed to start recording:', error);
+      throw error;
+    }
+  }, [monitorInputLevel]);
+
+  // Stop recording and return the take as an AudioBuffer
+  const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
+    return new Promise<AudioBuffer | null>((resolve) => {
+      if (!mediaRecorderRef.current || !streamRef.current) {
+        resolve(null);
+        return;
+      }
+
+      const mediaRecorder = mediaRecorderRef.current;
+
+      mediaRecorder.onstop = async () => {
+        // Stop level monitoring and release the input stream
+        isMonitoringRef.current = false;
+        if (animationFrameRef.current) {
+          cancelAnimationFrame(animationFrameRef.current);
+        }
+        streamRef.current?.getTracks().forEach((track) => track.stop());
+        if (audioContextRef.current) {
+          await audioContextRef.current.close();
+          audioContextRef.current = null;
+        }
+
+        setState({
+          isRecording: false,
+          isPaused: false,
+          duration: 0,
+          inputLevel: 0,
+        });
+
+        // Create blob from recorded chunks
+        const blob = new Blob(chunksRef.current, { type: 'audio/webm' });
+
+        // Convert blob to AudioBuffer, closing the throwaway decode context
+        try {
+          const arrayBuffer = await blob.arrayBuffer();
+          const decodeContext = new AudioContext();
+          const audioBuffer = await decodeContext.decodeAudioData(arrayBuffer);
+          await decodeContext.close();
+          resolve(audioBuffer);
+        } catch (error) {
+          console.error('Failed to decode recorded audio:', error);
+          resolve(null);
+        }
+      };
+
+      mediaRecorder.stop();
+    });
+  }, []);
+
+  // Pause recording
+  const pauseRecording = React.useCallback(() => {
+    if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
+      mediaRecorderRef.current.pause();
+      pausedAtRef.current = Date.now();
+      setState((prev) => ({ ...prev, isPaused: true }));
+
+      // Stop the level loop while paused
+      isMonitoringRef.current = false;
+      if (animationFrameRef.current) {
+        cancelAnimationFrame(animationFrameRef.current);
+      }
+    }
+  }, [state.isRecording, state.isPaused]);
+
+  // Resume recording
+  const resumeRecording = React.useCallback(() => {
+    if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
+      mediaRecorderRef.current.resume();
+      // Shift the start time so time spent paused is not counted in duration
+      startTimeRef.current += Date.now() - pausedAtRef.current;
+      setState((prev) => ({ ...prev, isPaused: false }));
+      monitorInputLevel();
+    }
+  }, [state.isRecording, state.isPaused, monitorInputLevel]);
+
+  // Update duration while recording
+  React.useEffect(() => {
+    if (!state.isRecording || state.isPaused) return;
+
+    const interval = setInterval(() => {
+      const elapsed = (Date.now() - startTimeRef.current) / 1000;
+      setState((prev) => ({ ...prev, duration: elapsed }));
+    }, 100);
+
+    return () => clearInterval(interval);
+  }, [state.isRecording, state.isPaused]);
+
+  // Cleanup on unmount
+  React.useEffect(() => {
+    return () => {
+      isMonitoringRef.current = false;
+      if (streamRef.current) {
+        streamRef.current.getTracks().forEach((track) => track.stop());
+      }
+      if (audioContextRef.current) {
+        audioContextRef.current.close();
+      }
+      if (animationFrameRef.current) {
+        cancelAnimationFrame(animationFrameRef.current);
+      }
+    };
+  }, []);
+
+  return {
+    state,
+    startRecording,
+    stopRecording,
+    pauseRecording,
+    resumeRecording,
+    getInputDevices,
+    selectInputDevice,
+    requestPermission,
+  };
+}
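Finally, a minimal transport sketch tying the hook together. `RecordButton` and its `onClip` callback are hypothetical; the hook API (`state`, `startRecording`, `stopRecording`) is as defined in `useRecording.ts` above.

```tsx
import * as React from 'react';
import { useRecording } from '@/lib/hooks/useRecording';

// Hypothetical transport control; onClip receives the decoded take.
export function RecordButton({ onClip }: { onClip: (buffer: AudioBuffer) => void }) {
  const { state, startRecording, stopRecording } = useRecording();

  const handleClick = async () => {
    if (state.isRecording) {
      const buffer = await stopRecording();
      if (buffer) onClip(buffer); // e.g. append the take to the armed track
    } else {
      await startRecording(); // getUserMedia inside triggers the permission prompt
    }
  };

  return (
    <button onClick={handleClick}>
      {state.isRecording ? `Stop (${state.duration.toFixed(1)}s)` : 'Record'}
    </button>
  );
}
```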