feat: implement Phase 8.1 - audio recording infrastructure
Added recording capabilities to the multi-track editor:
- useRecording hook with MediaRecorder API integration
- Audio input device enumeration and selection
- Microphone permission handling
- Input level monitoring with RMS calculation
- InputLevelMeter component with visual feedback
- Record-enable button per track with pulsing indicator
- Real-time input level display when recording

Recording infrastructure is complete. Next: integrate into AudioEditor for global recording control and buffer storage.

🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
85
components/recording/InputLevelMeter.tsx
Normal file
85
components/recording/InputLevelMeter.tsx
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
'use client';
|
||||||
|
|
||||||
|
import * as React from 'react';
|
||||||
|
import { cn } from '@/lib/utils/cn';
|
||||||
|
|
||||||
|
export interface InputLevelMeterProps {
|
||||||
|
level: number; // 0.0 to 1.0
|
||||||
|
orientation?: 'horizontal' | 'vertical';
|
||||||
|
className?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function InputLevelMeter({
|
||||||
|
level,
|
||||||
|
orientation = 'horizontal',
|
||||||
|
className,
|
||||||
|
}: InputLevelMeterProps) {
|
||||||
|
// Clamp level between 0 and 1
|
||||||
|
const clampedLevel = Math.max(0, Math.min(1, level));
|
||||||
|
|
||||||
|
// Calculate color based on level
|
||||||
|
const getColor = (level: number): string => {
|
||||||
|
if (level > 0.9) return 'bg-red-500';
|
||||||
|
if (level > 0.7) return 'bg-yellow-500';
|
||||||
|
return 'bg-green-500';
|
||||||
|
};
|
||||||
|
|
||||||
|
const isHorizontal = orientation === 'horizontal';
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
className={cn(
|
||||||
|
'relative bg-muted rounded-sm overflow-hidden',
|
||||||
|
isHorizontal ? 'h-4 w-full' : 'w-4 h-full',
|
||||||
|
className
|
||||||
|
)}
|
||||||
|
>
|
||||||
|
{/* Level bar */}
|
||||||
|
<div
|
||||||
|
className={cn(
|
||||||
|
'absolute transition-all duration-75 ease-out',
|
||||||
|
getColor(clampedLevel),
|
||||||
|
isHorizontal ? 'h-full left-0 top-0' : 'w-full bottom-0 left-0'
|
||||||
|
)}
|
||||||
|
style={{
|
||||||
|
[isHorizontal ? 'width' : 'height']: `${clampedLevel * 100}%`,
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
|
||||||
|
{/* Clip indicator (at 90%) */}
|
||||||
|
{clampedLevel > 0.9 && (
|
||||||
|
<div
|
||||||
|
className={cn(
|
||||||
|
'absolute bg-red-600 animate-pulse',
|
||||||
|
isHorizontal
|
||||||
|
? 'right-0 top-0 w-1 h-full'
|
||||||
|
: 'bottom-0 left-0 h-1 w-full'
|
||||||
|
)}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Tick marks */}
|
||||||
|
<div
|
||||||
|
className={cn(
|
||||||
|
'absolute inset-0 flex',
|
||||||
|
isHorizontal ? 'flex-row' : 'flex-col-reverse'
|
||||||
|
)}
|
||||||
|
>
|
||||||
|
{[0.25, 0.5, 0.75].map((tick) => (
|
||||||
|
<div
|
||||||
|
key={tick}
|
||||||
|
className={cn(
|
||||||
|
'absolute bg-background/30',
|
||||||
|
isHorizontal
|
||||||
|
? 'h-full w-px top-0'
|
||||||
|
: 'w-full h-px left-0'
|
||||||
|
)}
|
||||||
|
style={{
|
||||||
|
[isHorizontal ? 'left' : 'bottom']: `${tick * 100}%`,
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
'use client';
|
'use client';
|
||||||
|
|
||||||
import * as React from 'react';
|
import * as React from 'react';
|
||||||
import { Volume2, VolumeX, Headphones, Trash2, ChevronDown, ChevronRight, CircleArrowOutUpRight, Upload, Plus } from 'lucide-react';
|
import { Volume2, VolumeX, Headphones, Trash2, ChevronDown, ChevronRight, CircleArrowOutUpRight, Upload, Plus, Mic } from 'lucide-react';
|
||||||
import type { Track as TrackType } from '@/types/track';
|
import type { Track as TrackType } from '@/types/track';
|
||||||
import { Button } from '@/components/ui/Button';
|
import { Button } from '@/components/ui/Button';
|
||||||
import { Slider } from '@/components/ui/Slider';
|
import { Slider } from '@/components/ui/Slider';
|
||||||
@@ -9,6 +9,7 @@ import { cn } from '@/lib/utils/cn';
|
|||||||
import { EffectBrowser } from '@/components/effects/EffectBrowser';
|
import { EffectBrowser } from '@/components/effects/EffectBrowser';
|
||||||
import { EffectDevice } from '@/components/effects/EffectDevice';
|
import { EffectDevice } from '@/components/effects/EffectDevice';
|
||||||
import { createEffect, type EffectType } from '@/lib/audio/effects/chain';
|
import { createEffect, type EffectType } from '@/lib/audio/effects/chain';
|
||||||
|
import { InputLevelMeter } from '@/components/recording/InputLevelMeter';
|
||||||
|
|
||||||
export interface TrackProps {
|
export interface TrackProps {
|
||||||
track: TrackType;
|
track: TrackType;
|
||||||
@@ -31,6 +32,9 @@ export interface TrackProps {
|
|||||||
onUpdateEffect?: (effectId: string, parameters: any) => void;
|
onUpdateEffect?: (effectId: string, parameters: any) => void;
|
||||||
onAddEffect?: (effectType: EffectType) => void;
|
onAddEffect?: (effectType: EffectType) => void;
|
||||||
onSelectionChange?: (selection: { start: number; end: number } | null) => void;
|
onSelectionChange?: (selection: { start: number; end: number } | null) => void;
|
||||||
|
onToggleRecordEnable?: () => void;
|
||||||
|
isRecording?: boolean;
|
||||||
|
recordingLevel?: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
export function Track({
|
export function Track({
|
||||||
@@ -54,6 +58,9 @@ export function Track({
|
|||||||
onUpdateEffect,
|
onUpdateEffect,
|
||||||
onAddEffect,
|
onAddEffect,
|
||||||
onSelectionChange,
|
onSelectionChange,
|
||||||
|
onToggleRecordEnable,
|
||||||
|
isRecording = false,
|
||||||
|
recordingLevel = 0,
|
||||||
}: TrackProps) {
|
}: TrackProps) {
|
||||||
const canvasRef = React.useRef<HTMLCanvasElement>(null);
|
const canvasRef = React.useRef<HTMLCanvasElement>(null);
|
||||||
const containerRef = React.useRef<HTMLDivElement>(null);
|
const containerRef = React.useRef<HTMLDivElement>(null);
|
||||||
@@ -439,6 +446,22 @@ export function Track({
|
|||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
{/* Record Enable Button */}
|
||||||
|
{onToggleRecordEnable && (
|
||||||
|
<Button
|
||||||
|
variant="ghost"
|
||||||
|
size="icon-sm"
|
||||||
|
onClick={onToggleRecordEnable}
|
||||||
|
title="Arm track for recording"
|
||||||
|
className={cn(
|
||||||
|
track.recordEnabled && 'bg-red-500/20 hover:bg-red-500/30',
|
||||||
|
isRecording && 'animate-pulse'
|
||||||
|
)}
|
||||||
|
>
|
||||||
|
<Mic className={cn('h-4 w-4', track.recordEnabled && 'text-red-500')} />
|
||||||
|
</Button>
|
||||||
|
)}
|
||||||
|
|
||||||
{/* Solo Button */}
|
{/* Solo Button */}
|
||||||
<Button
|
<Button
|
||||||
variant="ghost"
|
variant="ghost"
|
||||||
@@ -523,6 +546,22 @@ export function Track({
|
|||||||
: `R${Math.round(track.pan * 100)}`}
|
: `R${Math.round(track.pan * 100)}`}
|
||||||
</span>
|
</span>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
{/* Input Level Meter (shown when recording or record-enabled) */}
|
||||||
|
{(track.recordEnabled || isRecording) && (
|
||||||
|
<div className="flex items-center gap-2">
|
||||||
|
<label className="text-xs text-muted-foreground flex items-center gap-1 w-16 flex-shrink-0">
|
||||||
|
<Mic className="h-3 w-3" />
|
||||||
|
Input
|
||||||
|
</label>
|
||||||
|
<div className="flex-1">
|
||||||
|
<InputLevelMeter level={recordingLevel} orientation="horizontal" />
|
||||||
|
</div>
|
||||||
|
<span className="text-xs text-muted-foreground w-10 text-right flex-shrink-0">
|
||||||
|
{Math.round(recordingLevel * 100)}%
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
</>
|
</>
|
||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
258
lib/hooks/useRecording.ts
Normal file
258
lib/hooks/useRecording.ts
Normal file
@@ -0,0 +1,258 @@
|
|||||||
|
'use client';
|
||||||
|
|
||||||
|
import * as React from 'react';
|
||||||
|
|
||||||
|
export interface RecordingState {
|
||||||
|
isRecording: boolean;
|
||||||
|
isPaused: boolean;
|
||||||
|
duration: number;
|
||||||
|
inputLevel: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface UseRecordingReturn {
|
||||||
|
state: RecordingState;
|
||||||
|
startRecording: () => Promise<void>;
|
||||||
|
stopRecording: () => Promise<AudioBuffer | null>;
|
||||||
|
pauseRecording: () => void;
|
||||||
|
resumeRecording: () => void;
|
||||||
|
getInputDevices: () => Promise<MediaDeviceInfo[]>;
|
||||||
|
selectInputDevice: (deviceId: string) => Promise<void>;
|
||||||
|
requestPermission: () => Promise<boolean>;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function useRecording(): UseRecordingReturn {
|
||||||
|
const [state, setState] = React.useState<RecordingState>({
|
||||||
|
isRecording: false,
|
||||||
|
isPaused: false,
|
||||||
|
duration: 0,
|
||||||
|
inputLevel: 0,
|
||||||
|
});
|
||||||
|
|
||||||
|
const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
|
||||||
|
const audioContextRef = React.useRef<AudioContext | null>(null);
|
||||||
|
const analyserRef = React.useRef<AnalyserNode | null>(null);
|
||||||
|
const streamRef = React.useRef<MediaStream | null>(null);
|
||||||
|
const chunksRef = React.useRef<Blob[]>([]);
|
||||||
|
const startTimeRef = React.useRef<number>(0);
|
||||||
|
const animationFrameRef = React.useRef<number>(0);
|
||||||
|
const selectedDeviceIdRef = React.useRef<string>('');
|
||||||
|
|
||||||
|
// Request microphone permission
|
||||||
|
const requestPermission = React.useCallback(async (): Promise<boolean> => {
|
||||||
|
try {
|
||||||
|
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
||||||
|
stream.getTracks().forEach((track) => track.stop());
|
||||||
|
return true;
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Microphone permission denied:', error);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
// Get available input devices
|
||||||
|
const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
|
||||||
|
try {
|
||||||
|
const devices = await navigator.mediaDevices.enumerateDevices();
|
||||||
|
return devices.filter((device) => device.kind === 'audioinput');
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to enumerate devices:', error);
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
// Select input device
|
||||||
|
const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
|
||||||
|
selectedDeviceIdRef.current = deviceId;
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
// Monitor input level
|
||||||
|
const monitorInputLevel = React.useCallback(() => {
|
||||||
|
if (!analyserRef.current) return;
|
||||||
|
|
||||||
|
const analyser = analyserRef.current;
|
||||||
|
const dataArray = new Uint8Array(analyser.frequencyBinCount);
|
||||||
|
|
||||||
|
const updateLevel = () => {
|
||||||
|
analyser.getByteTimeDomainData(dataArray);
|
||||||
|
|
||||||
|
// Calculate RMS level
|
||||||
|
let sum = 0;
|
||||||
|
for (let i = 0; i < dataArray.length; i++) {
|
||||||
|
const normalized = (dataArray[i] - 128) / 128;
|
||||||
|
sum += normalized * normalized;
|
||||||
|
}
|
||||||
|
const rms = Math.sqrt(sum / dataArray.length);
|
||||||
|
|
||||||
|
setState((prev) => ({ ...prev, inputLevel: rms }));
|
||||||
|
|
||||||
|
if (state.isRecording && !state.isPaused) {
|
||||||
|
animationFrameRef.current = requestAnimationFrame(updateLevel);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
updateLevel();
|
||||||
|
}, [state.isRecording, state.isPaused]);
|
||||||
|
|
||||||
|
// Start recording
|
||||||
|
const startRecording = React.useCallback(async (): Promise<void> => {
|
||||||
|
try {
|
||||||
|
// Get user media with selected device
|
||||||
|
const constraints: MediaStreamConstraints = {
|
||||||
|
audio: selectedDeviceIdRef.current
|
||||||
|
? { deviceId: { exact: selectedDeviceIdRef.current } }
|
||||||
|
: true,
|
||||||
|
};
|
||||||
|
|
||||||
|
const stream = await navigator.mediaDevices.getUserMedia(constraints);
|
||||||
|
streamRef.current = stream;
|
||||||
|
|
||||||
|
// Create audio context and analyser for level monitoring
|
||||||
|
const audioContext = new AudioContext();
|
||||||
|
audioContextRef.current = audioContext;
|
||||||
|
|
||||||
|
const source = audioContext.createMediaStreamSource(stream);
|
||||||
|
const analyser = audioContext.createAnalyser();
|
||||||
|
analyser.fftSize = 256;
|
||||||
|
analyser.smoothingTimeConstant = 0.3;
|
||||||
|
|
||||||
|
source.connect(analyser);
|
||||||
|
analyserRef.current = analyser;
|
||||||
|
|
||||||
|
// Create MediaRecorder
|
||||||
|
const mediaRecorder = new MediaRecorder(stream);
|
||||||
|
mediaRecorderRef.current = mediaRecorder;
|
||||||
|
chunksRef.current = [];
|
||||||
|
|
||||||
|
mediaRecorder.ondataavailable = (event) => {
|
||||||
|
if (event.data.size > 0) {
|
||||||
|
chunksRef.current.push(event.data);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Start recording
|
||||||
|
mediaRecorder.start();
|
||||||
|
startTimeRef.current = Date.now();
|
||||||
|
|
||||||
|
setState({
|
||||||
|
isRecording: true,
|
||||||
|
isPaused: false,
|
||||||
|
duration: 0,
|
||||||
|
inputLevel: 0,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Start monitoring input level
|
||||||
|
monitorInputLevel();
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to start recording:', error);
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}, [monitorInputLevel]);
|
||||||
|
|
||||||
|
// Stop recording and return AudioBuffer
|
||||||
|
const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
if (!mediaRecorderRef.current || !streamRef.current) {
|
||||||
|
resolve(null);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const mediaRecorder = mediaRecorderRef.current;
|
||||||
|
|
||||||
|
mediaRecorder.onstop = async () => {
|
||||||
|
// Stop all tracks
|
||||||
|
streamRef.current?.getTracks().forEach((track) => track.stop());
|
||||||
|
|
||||||
|
// Create blob from recorded chunks
|
||||||
|
const blob = new Blob(chunksRef.current, { type: 'audio/webm' });
|
||||||
|
|
||||||
|
// Convert blob to AudioBuffer
|
||||||
|
try {
|
||||||
|
const arrayBuffer = await blob.arrayBuffer();
|
||||||
|
const audioContext = new AudioContext();
|
||||||
|
const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
|
||||||
|
|
||||||
|
// Clean up
|
||||||
|
if (audioContextRef.current) {
|
||||||
|
await audioContextRef.current.close();
|
||||||
|
}
|
||||||
|
if (animationFrameRef.current) {
|
||||||
|
cancelAnimationFrame(animationFrameRef.current);
|
||||||
|
}
|
||||||
|
|
||||||
|
setState({
|
||||||
|
isRecording: false,
|
||||||
|
isPaused: false,
|
||||||
|
duration: 0,
|
||||||
|
inputLevel: 0,
|
||||||
|
});
|
||||||
|
|
||||||
|
resolve(audioBuffer);
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to decode recorded audio:', error);
|
||||||
|
resolve(null);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
mediaRecorder.stop();
|
||||||
|
});
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
// Pause recording
|
||||||
|
const pauseRecording = React.useCallback(() => {
|
||||||
|
if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
|
||||||
|
mediaRecorderRef.current.pause();
|
||||||
|
setState((prev) => ({ ...prev, isPaused: true }));
|
||||||
|
|
||||||
|
if (animationFrameRef.current) {
|
||||||
|
cancelAnimationFrame(animationFrameRef.current);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, [state.isRecording, state.isPaused]);
|
||||||
|
|
||||||
|
// Resume recording
|
||||||
|
const resumeRecording = React.useCallback(() => {
|
||||||
|
if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
|
||||||
|
mediaRecorderRef.current.resume();
|
||||||
|
setState((prev) => ({ ...prev, isPaused: false }));
|
||||||
|
monitorInputLevel();
|
||||||
|
}
|
||||||
|
}, [state.isRecording, state.isPaused, monitorInputLevel]);
|
||||||
|
|
||||||
|
// Update duration
|
||||||
|
React.useEffect(() => {
|
||||||
|
if (!state.isRecording || state.isPaused) return;
|
||||||
|
|
||||||
|
const interval = setInterval(() => {
|
||||||
|
const elapsed = (Date.now() - startTimeRef.current) / 1000;
|
||||||
|
setState((prev) => ({ ...prev, duration: elapsed }));
|
||||||
|
}, 100);
|
||||||
|
|
||||||
|
return () => clearInterval(interval);
|
||||||
|
}, [state.isRecording, state.isPaused]);
|
||||||
|
|
||||||
|
// Cleanup on unmount
|
||||||
|
React.useEffect(() => {
|
||||||
|
return () => {
|
||||||
|
if (streamRef.current) {
|
||||||
|
streamRef.current.getTracks().forEach((track) => track.stop());
|
||||||
|
}
|
||||||
|
if (audioContextRef.current) {
|
||||||
|
audioContextRef.current.close();
|
||||||
|
}
|
||||||
|
if (animationFrameRef.current) {
|
||||||
|
cancelAnimationFrame(animationFrameRef.current);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
return {
|
||||||
|
state,
|
||||||
|
startRecording,
|
||||||
|
stopRecording,
|
||||||
|
pauseRecording,
|
||||||
|
resumeRecording,
|
||||||
|
getInputDevices,
|
||||||
|
selectInputDevice,
|
||||||
|
requestPermission,
|
||||||
|
};
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user