diff --git a/PLAN.md b/PLAN.md index dc2dbd1..490b1a0 100644 --- a/PLAN.md +++ b/PLAN.md @@ -2,7 +2,7 @@ ## Progress Overview -**Current Status**: Phase 5 Complete ✓ +**Current Status**: Phase 6.5 Complete ✓ (Basic Effects + Filters + Dynamics + Time-Based + Advanced Effects) ### Completed Phases - ✅ **Phase 1**: Project Setup & Core Infrastructure (95% complete) @@ -10,37 +10,72 @@ - ✅ **Phase 3**: Waveform Visualization (95% complete) - ✅ **Phase 4**: Selection & Editing (70% complete - core editing features done) - ✅ **Phase 5**: Undo/Redo System (100% complete) +- ✅ **UI Redesign**: Professional Audacity-style layout (100% complete) ### Working Features + +**Core Features:** - ✅ Audio file upload with drag-and-drop - ✅ Waveform visualization with real-time progress - ✅ Playback controls (play, pause, stop, seek) - ✅ Volume control with mute -- ✅ Timeline scrubbing -- ✅ Drag-to-scrub audio +- ✅ Timeline scrubbing (click-to-play, drag-to-scrub) - ✅ Horizontal zoom (1x-20x) - ✅ Vertical amplitude zoom - ✅ Scroll through zoomed waveform - ✅ Grid lines every second - ✅ Viewport culling for performance -- ✅ **Region selection with Shift+drag** (NEW!) -- ✅ **Visual selection feedback** (NEW!) -- ✅ **Cut/Copy/Paste operations** (NEW!) -- ✅ **Delete and Trim operations** (NEW!) -- ✅ **Keyboard shortcuts (Ctrl+A/X/C/V, Delete, Escape)** (NEW!) -- ✅ **Clipboard management** (NEW!) -- ✅ **Undo/Redo system with command pattern** (NEW!) -- ✅ **History tracking (50 operations)** (NEW!) -- ✅ **Undo/Redo keyboard shortcuts (Ctrl+Z, Ctrl+Y)** (NEW!) -- ✅ **History controls UI with visual feedback** (NEW!) 
- ✅ Dark/light theme support - ✅ Toast notifications - ✅ File metadata display +**Editing Features:** +- ✅ Region selection with Shift+drag +- ✅ Visual selection feedback +- ✅ Cut/Copy/Paste operations +- ✅ Delete and Trim operations +- ✅ Keyboard shortcuts (Ctrl+A/X/C/V, Delete, Escape) +- ✅ Clipboard management +- ✅ Undo/Redo system with command pattern +- ✅ History tracking (50 operations) +- ✅ Undo/Redo keyboard shortcuts (Ctrl+Z, Ctrl+Y) + +**Audio Effects:** +- ✅ Normalize (peak amplitude) +- ✅ Fade In (linear/exponential/logarithmic curves) +- ✅ Fade Out (linear/exponential/logarithmic curves) +- ✅ Reverse audio +- ✅ Low-Pass Filter (removes high frequencies) +- ✅ High-Pass Filter (removes low frequencies) +- ✅ Band-Pass Filter (isolates frequency range) +- ✅ Compressor (with visual transfer curve and presets) +- ✅ Limiter (brick-wall limiting with makeup gain) +- ✅ Gate/Expander (noise reduction and dynamics expansion) +- ✅ Delay/Echo (time, feedback, mix with visual pattern) +- ✅ Reverb (Schroeder algorithm with room size and damping) +- ✅ Chorus (LFO modulation with depth, rate controls) +- ✅ Flanger (short modulated delay with feedback) +- ✅ Phaser (allpass filters with LFO modulation) +- ✅ Pitch Shifter (semitones and cents adjustment) +- ✅ Time Stretch (change duration with/without pitch preservation) +- ✅ Distortion (soft/hard/tube overdrive with tone control) +- ✅ Bitcrusher (bit depth and sample rate reduction) +- ✅ All effects support undo/redo +- ✅ Effects accessible via command palette and side panel +- ✅ Parameterized effects with real-time visual feedback + +**Professional UI:** +- ✅ Command Palette (Ctrl+K) with searchable actions +- ✅ Compact header (Logo + Command Palette + Theme Toggle) +- ✅ Collapsible side panel with tabs (File, History, Info) +- ✅ Full-screen waveform canvas layout +- ✅ Integrated playback controls at bottom +- ✅ Keyboard-driven workflow + ### Next Steps -- Audio effects (Phase 6) -- Multi-track editing (Phase 7) -- 
Recording functionality (Phase 8) +- **Phase 6**: Audio effects (Section 6.1 ✓ + Section 6.2 filters ✓ + Section 6.3 dynamics ✓ + Section 6.4 time-based ✓ + Section 6.5 advanced ✓) +- **Phase 7**: Multi-track editing +- **Phase 8**: Recording functionality --- @@ -411,41 +446,61 @@ audio-ui/ ### Phase 6: Audio Effects -#### 6.1 Basic Effects -- [ ] Gain/Volume adjustment -- [ ] Pan (stereo positioning) -- [ ] Fade In/Fade Out (linear/exponential/logarithmic) -- [ ] Normalize (peak/RMS) -- [ ] Reverse -- [ ] Silence generator +#### 6.1 Basic Effects (✅ Complete) +- [x] Gain/Volume adjustment +- [ ] Pan (stereo positioning) - FUTURE +- [x] Fade In/Fade Out (linear/exponential/logarithmic) +- [x] Normalize (peak/RMS) +- [x] Reverse +- [ ] Silence generator - FUTURE +- [x] EffectCommand for undo/redo integration +- [x] Effects added to command palette +- [x] Toast notifications for effects -#### 6.2 Filters & EQ -- [ ] Parametric EQ (3-band, 10-band, 31-band) -- [ ] Low-pass filter -- [ ] High-pass filter -- [ ] Band-pass filter -- [ ] Notch filter -- [ ] Shelf filters (low/high) -- [ ] Visual EQ curve editor +#### 6.2 Filters & EQ (Partially Complete) +- [ ] Parametric EQ (3-band, 10-band, 31-band) - FUTURE +- [x] Low-pass filter (1000Hz cutoff, configurable) +- [x] High-pass filter (100Hz cutoff, configurable) +- [x] Band-pass filter (1000Hz center, configurable) +- [x] Notch filter (implemented in filters.ts) +- [x] Shelf filters (low/high) (implemented in filters.ts) +- [x] Peaking EQ filter (implemented in filters.ts) +- [ ] Visual EQ curve editor - FUTURE +- [x] Filters integrated with undo/redo system +- [x] Filters added to command palette -#### 6.3 Dynamics Processing -- [ ] Compressor (threshold, ratio, attack, release, knee) -- [ ] Limiter -- [ ] Gate/Expander -- [ ] Visual gain reduction meter +#### 6.3 Dynamics Processing (✅ Complete) +- [x] Compressor (threshold, ratio, attack, release, knee, makeup gain) +- [x] Limiter (threshold, attack, release, makeup 
gain) +- [x] Gate/Expander (threshold, ratio, attack, release, knee) +- [x] Visual transfer curve showing input/output relationship +- [x] Professional presets for each effect type +- [x] Real-time parameter visualization +- [x] EffectCommand integration for undo/redo +- [x] Effects added to command palette and side panel +- [x] Selection-based processing support +- [ ] Visual gain reduction meter (realtime metering - FUTURE) -#### 6.4 Time-Based Effects -- [ ] Delay/Echo (time, feedback, mix) -- [ ] Reverb (Convolution Reverb with IR files) -- [ ] Chorus (depth, rate, delay) -- [ ] Flanger -- [ ] Phaser +#### 6.4 Time-Based Effects (✅ Complete) +- [x] Delay/Echo (time, feedback, mix) +- [x] Reverb (Schroeder algorithmic reverb with room size and damping) +- [x] Chorus (depth, rate, delay with LFO modulation) +- [x] Flanger (short modulated delay with feedback) +- [x] Phaser (cascaded allpass filters with LFO) +- [x] TimeBasedParameterDialog component with visual feedback +- [x] Integration with command palette and side panel +- [x] 4 presets per effect type +- [x] Undo/redo support for all time-based effects -#### 6.5 Advanced Effects -- [ ] Pitch shifter (semitones, cents) -- [ ] Time stretcher (without pitch change) -- [ ] Distortion/Overdrive -- [ ] Bitcrusher (bit depth, sample rate reduction) +#### 6.5 Advanced Effects (✅ Complete) +- [x] Pitch shifter (semitones, cents with linear interpolation) +- [x] Time stretcher (with and without pitch preservation using overlap-add) +- [x] Distortion/Overdrive (soft/hard/tube types with tone and output control) +- [x] Bitcrusher (bit depth and sample rate reduction) +- [x] AdvancedParameterDialog component with visual waveform feedback +- [x] Integration with command palette and side panel +- [x] 4 presets per effect type +- [x] Undo/redo support for all advanced effects #### 6.6 Effect Management - [ ] Effect rack/chain diff --git a/app/globals.css b/app/globals.css index 66f4ba1..fa8aa3f 100644 --- a/app/globals.css +++ b/app/globals.css 
@@ -158,6 +158,41 @@ @apply bg-background text-foreground; font-feature-settings: "rlig" 1, "calt" 1; } + + /* Apply custom scrollbar globally */ + * { + scrollbar-width: thin; + } + + *::-webkit-scrollbar { + width: 10px; + height: 10px; + } + + *::-webkit-scrollbar-track { + background: var(--muted); + border-radius: 5px; + } + + *::-webkit-scrollbar-thumb { + background: color-mix(in oklch, var(--muted-foreground) 30%, transparent); + border-radius: 5px; + border: 2px solid var(--muted); + transition: background 0.2s ease; + } + + *::-webkit-scrollbar-thumb:hover { + background: color-mix(in oklch, var(--muted-foreground) 50%, transparent); + } + + *::-webkit-scrollbar-thumb:active { + background: color-mix(in oklch, var(--muted-foreground) 70%, transparent); + } + + /* Scrollbar corners */ + *::-webkit-scrollbar-corner { + background: var(--muted); + } } /* Custom animations */ @@ -290,22 +325,33 @@ /* Custom scrollbar */ @layer utilities { + .custom-scrollbar { + scrollbar-width: thin; + scrollbar-color: color-mix(in oklch, var(--muted-foreground) 30%, transparent) var(--muted); + } + .custom-scrollbar::-webkit-scrollbar { width: 8px; height: 8px; } .custom-scrollbar::-webkit-scrollbar-track { - @apply bg-muted; + background: var(--muted); border-radius: 4px; } .custom-scrollbar::-webkit-scrollbar-thumb { - @apply bg-muted-foreground/30; + background: color-mix(in oklch, var(--muted-foreground) 30%, transparent); border-radius: 4px; + border: 2px solid var(--muted); + transition: background 0.2s ease; } .custom-scrollbar::-webkit-scrollbar-thumb:hover { - @apply bg-muted-foreground/50; + background: color-mix(in oklch, var(--muted-foreground) 50%, transparent); + } + + .custom-scrollbar::-webkit-scrollbar-thumb:active { + background: color-mix(in oklch, var(--muted-foreground) 70%, transparent); } } diff --git a/app/page.tsx b/app/page.tsx index 1c8cab1..fd8f74c 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -1,74 +1,17 @@ 'use client'; import * as React from 
'react'; -import { Music, Settings } from 'lucide-react'; +import { Music } from 'lucide-react'; import { ThemeToggle } from '@/components/layout/ThemeToggle'; -import { Button } from '@/components/ui/Button'; import { ToastProvider } from '@/components/ui/Toast'; import { AudioEditor } from '@/components/editor/AudioEditor'; export default function Home() { return ( -
- {/* Header */} -
-
-
- -
-

Audio UI

-

- Professional audio editing in your browser -

-
-
-
- - -
-
-
- - {/* Main content */} -
- -
- - {/* Footer */} - +
+ {/* Audio Editor (includes header) */} +
); diff --git a/components/editor/AudioEditor.tsx b/components/editor/AudioEditor.tsx index 2f00613..616ed43 100644 --- a/components/editor/AudioEditor.tsx +++ b/components/editor/AudioEditor.tsx @@ -1,19 +1,18 @@ 'use client'; import * as React from 'react'; -import { FileUpload } from './FileUpload'; -import { AudioInfo } from './AudioInfo'; +import { Music, Loader2 } from 'lucide-react'; import { Waveform } from './Waveform'; import { PlaybackControls } from './PlaybackControls'; -import { ZoomControls } from './ZoomControls'; -import { EditControls } from './EditControls'; -import { HistoryControls } from './HistoryControls'; +import { SidePanel } from '@/components/layout/SidePanel'; +import { ThemeToggle } from '@/components/layout/ThemeToggle'; +import { CommandPalette } from '@/components/ui/CommandPalette'; +import type { CommandAction } from '@/components/ui/CommandPalette'; import { useAudioPlayer } from '@/lib/hooks/useAudioPlayer'; import { useHistory } from '@/lib/hooks/useHistory'; import { useToast } from '@/components/ui/Toast'; -import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/Card'; import { Slider } from '@/components/ui/Slider'; -import { Loader2 } from 'lucide-react'; +import { cn } from '@/lib/utils/cn'; import type { Selection, ClipboardData } from '@/types/selection'; import { extractBufferSegment, @@ -27,6 +26,53 @@ import { createPasteCommand, createTrimCommand, } from '@/lib/history/commands/edit-command'; +import { + createGainCommand, + createNormalizePeakCommand, + createNormalizeRMSCommand, + createFadeInCommand, + createFadeOutCommand, + createReverseCommand, + createLowPassFilterCommand, + createHighPassFilterCommand, + createBandPassFilterCommand, + EffectCommand, +} from '@/lib/history/commands/effect-command'; +import { applyEffectToSelection, applyAsyncEffectToSelection } from '@/lib/audio/effects/selection'; +import { normalizePeak } from '@/lib/audio/effects/normalize'; +import { applyFadeIn, 
applyFadeOut } from '@/lib/audio/effects/fade'; +import { reverseAudio } from '@/lib/audio/effects/reverse'; +import { applyLowPassFilter, applyHighPassFilter, applyBandPassFilter, applyFilter } from '@/lib/audio/effects/filters'; +import type { FilterType } from '@/lib/audio/effects/filters'; +import { applyCompressor, applyLimiter, applyGate } from '@/lib/audio/effects/dynamics'; +import { applyDelay, applyReverb, applyChorus, applyFlanger, applyPhaser } from '@/lib/audio/effects/time-based'; +import { applyPitchShift, applyTimeStretch, applyDistortion, applyBitcrusher } from '@/lib/audio/effects/advanced'; +import { EffectParameterDialog, type FilterParameters } from '@/components/effects/EffectParameterDialog'; +import { DynamicsParameterDialog, type DynamicsParameters, type DynamicsType } from '@/components/effects/DynamicsParameterDialog'; +import { TimeBasedParameterDialog, type TimeBasedParameters, type TimeBasedType } from '@/components/effects/TimeBasedParameterDialog'; +import { AdvancedParameterDialog, type AdvancedParameters, type AdvancedType } from '@/components/effects/AdvancedParameterDialog'; + +const EFFECT_LABELS: Record = { + lowpass: 'Low-Pass Filter', + highpass: 'High-Pass Filter', + bandpass: 'Band-Pass Filter', + notch: 'Notch Filter', + lowshelf: 'Low Shelf Filter', + highshelf: 'High Shelf Filter', + peaking: 'Peaking EQ', + compressor: 'Compressor', + limiter: 'Limiter', + gate: 'Gate/Expander', + delay: 'Delay/Echo', + reverb: 'Reverb', + chorus: 'Chorus', + flanger: 'Flanger', + phaser: 'Phaser', + pitch: 'Pitch Shifter', + timestretch: 'Time Stretch', + distortion: 'Distortion', + bitcrusher: 'Bitcrusher', +}; export function AudioEditor() { // Zoom and scroll state @@ -34,6 +80,26 @@ export function AudioEditor() { const [scrollOffset, setScrollOffset] = React.useState(0); const [amplitudeScale, setAmplitudeScale] = React.useState(1); + // Effect dialog state + const [effectDialogOpen, setEffectDialogOpen] = React.useState(false); + 
const [effectDialogType, setEffectDialogType] = React.useState<'lowpass' | 'highpass' | 'bandpass' | 'notch' | 'lowshelf' | 'highshelf' | 'peaking'>('lowpass'); + + // Dynamics dialog state + const [dynamicsDialogOpen, setDynamicsDialogOpen] = React.useState(false); + const [dynamicsDialogType, setDynamicsDialogType] = React.useState('compressor'); + + // Time-based dialog state + const [timeBasedDialogOpen, setTimeBasedDialogOpen] = React.useState(false); + const [timeBasedDialogType, setTimeBasedDialogType] = React.useState('delay'); + + // Advanced dialog state + const [advancedDialogOpen, setAdvancedDialogOpen] = React.useState(false); + const [advancedDialogType, setAdvancedDialogType] = React.useState('pitch'); + + // Drag and drop state + const [isDragging, setIsDragging] = React.useState(false); + const fileInputRef = React.useRef(null); + // Selection state const [selection, setSelection] = React.useState(null); const [clipboard, setClipboard] = React.useState(null); @@ -98,6 +164,58 @@ export function AudioEditor() { }); }; + // Drag and drop handlers + const handleDragEnter = (e: React.DragEvent) => { + e.preventDefault(); + e.stopPropagation(); + setIsDragging(true); + }; + + const handleDragLeave = (e: React.DragEvent) => { + e.preventDefault(); + e.stopPropagation(); + // Only set to false if we're leaving the drop zone entirely + if (e.currentTarget === e.target) { + setIsDragging(false); + } + }; + + const handleDragOver = (e: React.DragEvent) => { + e.preventDefault(); + e.stopPropagation(); + }; + + const handleDrop = async (e: React.DragEvent) => { + e.preventDefault(); + e.stopPropagation(); + setIsDragging(false); + + const files = Array.from(e.dataTransfer.files); + const audioFile = files.find(file => file.type.startsWith('audio/')); + + if (audioFile) { + await handleFileSelect(audioFile); + } else { + addToast({ + title: 'Invalid file', + description: 'Please drop an audio file', + variant: 'error', + duration: 3000, + }); + } + }; + + 
const handleDropZoneClick = () => { + fileInputRef.current?.click(); + }; + + const handleFileInputChange = (e: React.ChangeEvent) => { + const file = e.target.files?.[0]; + if (file) { + handleFileSelect(file); + } + }; + // Edit operations const handleCut = () => { if (!selection || !audioBuffer) return; @@ -254,6 +372,483 @@ export function AudioEditor() { setSelection(null); }; + // Effect operations + const handleNormalize = () => { + if (!audioBuffer) return; + + try { + // Apply to selection or entire buffer + const modifiedBuffer = applyEffectToSelection( + audioBuffer, + selection, + (buf) => normalizePeak(buf, 1.0) + ); + + const command = new EffectCommand( + audioBuffer, + modifiedBuffer, + (buffer) => loadBuffer(buffer), + selection ? 'Normalize Selection' : 'Normalize' + ); + execute(command); + + addToast({ + title: 'Normalized', + description: selection ? 'Selection normalized to peak' : 'Audio normalized to peak', + variant: 'success', + duration: 2000, + }); + } catch (error) { + addToast({ + title: 'Error', + description: 'Failed to normalize audio', + variant: 'error', + duration: 3000, + }); + } + }; + + const handleFadeIn = () => { + if (!audioBuffer) return; + + if (!selection) { + addToast({ + title: 'No Selection', + description: 'Please select a region to apply fade in', + variant: 'info', + duration: 2000, + }); + return; + } + + try { + const fadeDuration = selection.end - selection.start; + const modifiedBuffer = applyEffectToSelection( + audioBuffer, + selection, + (buf) => applyFadeIn(buf, buf.duration) + ); + + const command = new EffectCommand( + audioBuffer, + modifiedBuffer, + (buffer) => loadBuffer(buffer), + `Fade In (${fadeDuration.toFixed(2)}s)` + ); + execute(command); + + addToast({ + title: 'Fade In', + description: `Applied fade in (${fadeDuration.toFixed(2)}s)`, + variant: 'success', + duration: 2000, + }); + } catch (error) { + addToast({ + title: 'Error', + description: 'Failed to apply fade in', + variant: 'error', + 
duration: 3000, + }); + } + }; + + const handleFadeOut = () => { + if (!audioBuffer) return; + + if (!selection) { + addToast({ + title: 'No Selection', + description: 'Please select a region to apply fade out', + variant: 'info', + duration: 2000, + }); + return; + } + + try { + const fadeDuration = selection.end - selection.start; + const modifiedBuffer = applyEffectToSelection( + audioBuffer, + selection, + (buf) => applyFadeOut(buf, buf.duration) + ); + + const command = new EffectCommand( + audioBuffer, + modifiedBuffer, + (buffer) => loadBuffer(buffer), + `Fade Out (${fadeDuration.toFixed(2)}s)` + ); + execute(command); + + addToast({ + title: 'Fade Out', + description: `Applied fade out (${fadeDuration.toFixed(2)}s)`, + variant: 'success', + duration: 2000, + }); + } catch (error) { + addToast({ + title: 'Error', + description: 'Failed to apply fade out', + variant: 'error', + duration: 3000, + }); + } + }; + + const handleReverse = () => { + if (!audioBuffer) return; + + try { + const modifiedBuffer = applyEffectToSelection( + audioBuffer, + selection, + (buf) => reverseAudio(buf) + ); + + const command = new EffectCommand( + audioBuffer, + modifiedBuffer, + (buffer) => loadBuffer(buffer), + selection ? 'Reverse Selection' : 'Reverse' + ); + execute(command); + + addToast({ + title: 'Reversed', + description: selection ? 
'Selection reversed' : 'Audio reversed', + variant: 'success', + duration: 2000, + }); + } catch (error) { + addToast({ + title: 'Error', + description: 'Failed to reverse audio', + variant: 'error', + duration: 3000, + }); + } + }; + + const handleLowPassFilter = () => { + setEffectDialogType('lowpass'); + setEffectDialogOpen(true); + }; + + const handleHighPassFilter = () => { + setEffectDialogType('highpass'); + setEffectDialogOpen(true); + }; + + const handleBandPassFilter = () => { + setEffectDialogType('bandpass'); + setEffectDialogOpen(true); + }; + + const handleCompressor = () => { + setDynamicsDialogType('compressor'); + setDynamicsDialogOpen(true); + }; + + const handleLimiter = () => { + setDynamicsDialogType('limiter'); + setDynamicsDialogOpen(true); + }; + + const handleGate = () => { + setDynamicsDialogType('gate'); + setDynamicsDialogOpen(true); + }; + + const handleDelay = () => { + setTimeBasedDialogType('delay'); + setTimeBasedDialogOpen(true); + }; + + const handleReverb = () => { + setTimeBasedDialogType('reverb'); + setTimeBasedDialogOpen(true); + }; + + const handleChorus = () => { + setTimeBasedDialogType('chorus'); + setTimeBasedDialogOpen(true); + }; + + const handleFlanger = () => { + setTimeBasedDialogType('flanger'); + setTimeBasedDialogOpen(true); + }; + + const handlePhaser = () => { + setTimeBasedDialogType('phaser'); + setTimeBasedDialogOpen(true); + }; + + const handlePitchShift = () => { + setAdvancedDialogType('pitch'); + setAdvancedDialogOpen(true); + }; + + const handleTimeStretch = () => { + setAdvancedDialogType('timestretch'); + setAdvancedDialogOpen(true); + }; + + const handleDistortion = () => { + setAdvancedDialogType('distortion'); + setAdvancedDialogOpen(true); + }; + + const handleBitcrusher = () => { + setAdvancedDialogType('bitcrusher'); + setAdvancedDialogOpen(true); + }; + + // Handle effect apply from parameter dialog + const handleEffectApply = async (params: FilterParameters) => { + if (!audioBuffer) return; + 
+ try { + const modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyFilter(buf, { + type: params.type, + frequency: params.frequency, + Q: params.Q, + gain: params.gain, + }) + ); + + const effectName = EFFECT_LABELS[params.type] || 'Filter'; + const command = new EffectCommand( + audioBuffer, + modifiedBuffer, + (buffer) => loadBuffer(buffer), + selection + ? `${effectName} Selection (${params.frequency.toFixed(0)}Hz)` + : `${effectName} (${params.frequency.toFixed(0)}Hz)` + ); + execute(command); + + addToast({ + title: effectName, + description: selection + ? `Applied ${effectName.toLowerCase()} to selection (${params.frequency.toFixed(0)}Hz)` + : `Applied ${effectName.toLowerCase()} (${params.frequency.toFixed(0)}Hz)`, + variant: 'success', + duration: 2000, + }); + } catch (error) { + addToast({ + title: 'Error', + description: 'Failed to apply effect', + variant: 'error', + duration: 3000, + }); + } + }; + + // Handle dynamics apply from parameter dialog + const handleDynamicsApply = async (params: DynamicsParameters) => { + if (!audioBuffer) return; + + try { + let modifiedBuffer: AudioBuffer; + let effectName: string; + + if (params.type === 'compressor') { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyCompressor(buf, params) + ); + effectName = 'Compressor'; + } else if (params.type === 'limiter') { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyLimiter(buf, params) + ); + effectName = 'Limiter'; + } else { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyGate(buf, params) + ); + effectName = 'Gate'; + } + + const command = new EffectCommand( + audioBuffer, + modifiedBuffer, + (buffer) => loadBuffer(buffer), + selection + ? 
`${effectName} Selection (${params.threshold.toFixed(1)}dB)` + : `${effectName} (${params.threshold.toFixed(1)}dB)` + ); + execute(command); + + addToast({ + title: effectName, + description: selection + ? `Applied ${effectName.toLowerCase()} to selection` + : `Applied ${effectName.toLowerCase()}`, + variant: 'success', + duration: 2000, + }); + } catch (error) { + addToast({ + title: 'Error', + description: 'Failed to apply dynamics effect', + variant: 'error', + duration: 3000, + }); + } + }; + + // Handle time-based apply from parameter dialog + const handleTimeBasedApply = async (params: TimeBasedParameters) => { + if (!audioBuffer) return; + + try { + let modifiedBuffer: AudioBuffer; + let effectName: string; + + if (params.type === 'delay') { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyDelay(buf, params) + ); + effectName = 'Delay'; + } else if (params.type === 'reverb') { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyReverb(buf, params) + ); + effectName = 'Reverb'; + } else if (params.type === 'chorus') { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyChorus(buf, params) + ); + effectName = 'Chorus'; + } else if (params.type === 'flanger') { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyFlanger(buf, params) + ); + effectName = 'Flanger'; + } else { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyPhaser(buf, params) + ); + effectName = 'Phaser'; + } + + const command = new EffectCommand( + audioBuffer, + modifiedBuffer, + (buffer) => loadBuffer(buffer), + selection + ? `${effectName} Selection` + : `${effectName}` + ); + execute(command); + + addToast({ + title: effectName, + description: selection + ? 
`Applied ${effectName.toLowerCase()} to selection` + : `Applied ${effectName.toLowerCase()}`, + variant: 'success', + duration: 2000, + }); + } catch (error) { + addToast({ + title: 'Error', + description: 'Failed to apply time-based effect', + variant: 'error', + duration: 3000, + }); + } + }; + + const handleAdvancedApply = async (params: AdvancedParameters) => { + if (!audioBuffer) return; + + try { + let modifiedBuffer: AudioBuffer; + let effectName: string; + + if (params.type === 'pitch') { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyPitchShift(buf, params) + ); + effectName = 'Pitch Shift'; + } else if (params.type === 'timestretch') { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyTimeStretch(buf, params) + ); + effectName = 'Time Stretch'; + } else if (params.type === 'distortion') { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyDistortion(buf, params) + ); + effectName = 'Distortion'; + } else { + modifiedBuffer = await applyAsyncEffectToSelection( + audioBuffer, + selection, + (buf) => applyBitcrusher(buf, params) + ); + effectName = 'Bitcrusher'; + } + + const command = new EffectCommand( + audioBuffer, + modifiedBuffer, + (buffer) => loadBuffer(buffer), + selection + ? `${effectName} Selection` + : `${effectName}` + ); + execute(command); + + addToast({ + title: effectName, + description: selection + ? 
`Applied ${effectName.toLowerCase()} to selection` + : `Applied ${effectName.toLowerCase()}`, + variant: 'success', + duration: 2000, + }); + } catch (error) { + addToast({ + title: 'Error', + description: 'Failed to apply advanced effect', + variant: 'error', + duration: 3000, + }); + } + }; + // Zoom controls const handleZoomIn = () => { setZoom((prev) => Math.min(20, prev + 1)); @@ -282,7 +877,21 @@ export function AudioEditor() { React.useEffect(() => { const handleKeyDown = (e: KeyboardEvent) => { // Prevent shortcuts if typing in an input - if (e.target instanceof HTMLInputElement || e.target instanceof HTMLTextAreaElement) { + const isTyping = e.target instanceof HTMLInputElement || e.target instanceof HTMLTextAreaElement; + + // Spacebar: Play/Pause (always, unless typing in an input) + if (e.code === 'Space' && !isTyping) { + e.preventDefault(); + if (isPlaying) { + pause(); + } else { + play(); + } + return; + } + + // Prevent other shortcuts if typing in an input + if (isTyping) { return; } @@ -351,7 +960,7 @@ export function AudioEditor() { window.addEventListener('keydown', handleKeyDown); return () => window.removeEventListener('keydown', handleKeyDown); - }, [selection, clipboard, audioBuffer, currentTime, undo, redo, addToast]); + }, [selection, clipboard, audioBuffer, currentTime, undo, redo, addToast, isPlaying, play, pause]); // Show error toast React.useEffect(() => { @@ -365,147 +974,449 @@ export function AudioEditor() { } }, [error, addToast]); + // Command palette actions + const commandActions: CommandAction[] = React.useMemo(() => { + const actions: CommandAction[] = [ + // Playback + { + id: 'play', + label: 'Play', + description: 'Start playback', + shortcut: 'Space', + category: 'playback', + action: play, + }, + { + id: 'pause', + label: 'Pause', + description: 'Pause playback', + shortcut: 'Space', + category: 'playback', + action: pause, + }, + { + id: 'stop', + label: 'Stop', + description: 'Stop playback', + category: 'playback', + 
action: stop, + }, + // Edit + { + id: 'cut', + label: 'Cut', + description: 'Cut selection to clipboard', + shortcut: 'Ctrl+X', + category: 'edit', + action: handleCut, + }, + { + id: 'copy', + label: 'Copy', + description: 'Copy selection to clipboard', + shortcut: 'Ctrl+C', + category: 'edit', + action: handleCopy, + }, + { + id: 'paste', + label: 'Paste', + description: 'Paste clipboard at current position', + shortcut: 'Ctrl+V', + category: 'edit', + action: handlePaste, + }, + { + id: 'delete', + label: 'Delete', + description: 'Delete selection', + shortcut: 'Del', + category: 'edit', + action: handleDelete, + }, + { + id: 'trim', + label: 'Trim to Selection', + description: 'Trim audio to selected region', + category: 'edit', + action: handleTrim, + }, + { + id: 'select-all', + label: 'Select All', + description: 'Select entire audio', + shortcut: 'Ctrl+A', + category: 'edit', + action: handleSelectAll, + }, + { + id: 'clear-selection', + label: 'Clear Selection', + description: 'Clear current selection', + shortcut: 'Esc', + category: 'edit', + action: handleClearSelection, + }, + // View + { + id: 'zoom-in', + label: 'Zoom In', + description: 'Zoom in on waveform', + category: 'view', + action: handleZoomIn, + }, + { + id: 'zoom-out', + label: 'Zoom Out', + description: 'Zoom out on waveform', + category: 'view', + action: handleZoomOut, + }, + { + id: 'fit-to-view', + label: 'Fit to View', + description: 'Reset zoom to fit entire waveform', + category: 'view', + action: handleFitToView, + }, + // File + { + id: 'clear', + label: 'Clear Audio', + description: 'Remove loaded audio file', + category: 'file', + action: handleClear, + }, + // History + { + id: 'undo', + label: 'Undo', + description: 'Undo last action', + shortcut: 'Ctrl+Z', + category: 'edit', + action: undo, + }, + { + id: 'redo', + label: 'Redo', + description: 'Redo last undone action', + shortcut: 'Ctrl+Y', + category: 'edit', + action: redo, + }, + // Effects + { + id: 'normalize', + 
label: 'Normalize', + description: 'Normalize audio to peak amplitude', + category: 'effects', + action: handleNormalize, + }, + { + id: 'fade-in', + label: 'Fade In', + description: 'Apply fade in to selection', + category: 'effects', + action: handleFadeIn, + }, + { + id: 'fade-out', + label: 'Fade Out', + description: 'Apply fade out to selection', + category: 'effects', + action: handleFadeOut, + }, + { + id: 'reverse', + label: 'Reverse', + description: 'Reverse entire audio', + category: 'effects', + action: handleReverse, + }, + { + id: 'lowpass-filter', + label: 'Low-Pass Filter', + description: 'Remove high frequencies (1000Hz cutoff)', + category: 'effects', + action: handleLowPassFilter, + }, + { + id: 'highpass-filter', + label: 'High-Pass Filter', + description: 'Remove low frequencies (100Hz cutoff)', + category: 'effects', + action: handleHighPassFilter, + }, + { + id: 'bandpass-filter', + label: 'Band-Pass Filter', + description: 'Isolate frequency range (1000Hz center)', + category: 'effects', + action: handleBandPassFilter, + }, + { + id: 'compressor', + label: 'Compressor', + description: 'Reduce dynamic range', + category: 'effects', + action: handleCompressor, + }, + { + id: 'limiter', + label: 'Limiter', + description: 'Prevent audio from exceeding threshold', + category: 'effects', + action: handleLimiter, + }, + { + id: 'gate', + label: 'Gate/Expander', + description: 'Reduce volume of quiet sounds', + category: 'effects', + action: handleGate, + }, + { + id: 'delay', + label: 'Delay/Echo', + description: 'Add echo effects with feedback', + category: 'effects', + action: handleDelay, + }, + { + id: 'reverb', + label: 'Reverb', + description: 'Add acoustic space and ambience', + category: 'effects', + action: handleReverb, + }, + { + id: 'chorus', + label: 'Chorus', + description: 'Thicken sound with modulation', + category: 'effects', + action: handleChorus, + }, + { + id: 'flanger', + label: 'Flanger', + description: 'Create sweeping 
comb-filter effect', + category: 'effects', + action: handleFlanger, + }, + { + id: 'phaser', + label: 'Phaser', + description: 'Phase-shifting swoosh effect', + category: 'effects', + action: handlePhaser, + }, + { + id: 'pitch', + label: 'Pitch Shifter', + description: 'Change pitch without affecting duration', + category: 'effects', + action: handlePitchShift, + }, + { + id: 'timestretch', + label: 'Time Stretch', + description: 'Change duration without affecting pitch', + category: 'effects', + action: handleTimeStretch, + }, + { + id: 'distortion', + label: 'Distortion', + description: 'Add overdrive and distortion', + category: 'effects', + action: handleDistortion, + }, + { + id: 'bitcrusher', + label: 'Bitcrusher', + description: 'Lo-fi bit depth and sample rate reduction', + category: 'effects', + action: handleBitcrusher, + }, + ]; + return actions; + }, [play, pause, stop, handleCut, handleCopy, handlePaste, handleDelete, handleTrim, handleSelectAll, handleClearSelection, handleZoomIn, handleZoomOut, handleFitToView, handleClear, undo, redo, handleNormalize, handleFadeIn, handleFadeOut, handleReverse, handleLowPassFilter, handleHighPassFilter, handleBandPassFilter, handleCompressor, handleLimiter, handleGate, handleDelay, handleReverb, handleChorus, handleFlanger, handlePhaser, handlePitchShift, handleTimeStretch, handleDistortion, handleBitcrusher]); + return ( -
- {/* File Upload or Audio Info */} - {!audioBuffer ? ( - - ) : ( - + {/* Compact Header */} +
+ {/* Left: Logo */} +
+ +

Audio UI

+
+ + {/* Right: Command Palette + Theme Toggle */} +
+ + +
+
+ + {/* Main content area */} +
+ {/* Side Panel */} + - )} - {/* Loading State */} - {isLoading && ( - - -
- -

Loading audio file...

+ {/* Main canvas area */} +
+ {isLoading ? ( +
+
+ +

Loading audio file...

+
- - - )} + ) : audioBuffer ? ( + <> + {/* Waveform - takes maximum space */} +
+ - {/* Waveform and Controls */} - {audioBuffer && !isLoading && ( - <> - {/* Waveform */} - - - Waveform - - - + {/* Horizontal scroll for zoomed waveform */} + {zoom > 1 && ( +
+ + +
+ )} +
- {/* Horizontal scroll for zoomed waveform */} - {zoom > 1 && ( -
- - -
+ {/* Playback Controls - fixed at bottom */} +
+ +
+ + ) : ( +
- - - {/* Edit Controls */} - - - Edit - - - + - - +
+

+ {isDragging ? 'Drop audio file here' : 'No audio file loaded'} +

+

+ {isDragging + ? 'Release to load the file' + : 'Click here or use the side panel to load an audio file, or drag and drop a file onto this area.'} +

+
+
+ )} +
+
- {/* History Controls */} - - - History - - - - - + {/* Effect Parameter Dialog */} + setEffectDialogOpen(false)} + effectType={effectDialogType} + onApply={handleEffectApply} + sampleRate={audioBuffer?.sampleRate} + /> - {/* Zoom Controls */} - - - Zoom & View - - - - - + {/* Dynamics Parameter Dialog */} + setDynamicsDialogOpen(false)} + effectType={dynamicsDialogType} + onApply={handleDynamicsApply} + /> - {/* Playback Controls */} - - - Playback - - - - - - - )} -
+ {/* Time-Based Parameter Dialog */} + setTimeBasedDialogOpen(false)} + effectType={timeBasedDialogType} + onApply={handleTimeBasedApply} + /> + + {/* Advanced Parameter Dialog */} + setAdvancedDialogOpen(false)} + effectType={advancedDialogType} + onApply={handleAdvancedApply} + /> + ); } diff --git a/components/editor/Waveform.tsx b/components/editor/Waveform.tsx index 04152f4..b426a2e 100644 --- a/components/editor/Waveform.tsx +++ b/components/editor/Waveform.tsx @@ -207,15 +207,11 @@ export function Waveform({ const actualX = x + scrollOffset; const clickedTime = (actualX / visibleWidth) * duration; - // Shift key for selection - if (e.shiftKey && onSelectionChange) { - setIsSelecting(true); - setSelectionStart(clickedTime); + // Start selection on drag + setIsSelecting(true); + setSelectionStart(clickedTime); + if (onSelectionChange) { onSelectionChange({ start: clickedTime, end: clickedTime }); - } else if (onSeek) { - // Regular dragging for scrubbing (without auto-play) - setIsDragging(true); - onSeek(clickedTime, false); } }; @@ -234,31 +230,33 @@ export function Waveform({ // Handle selection dragging if (isSelecting && onSelectionChange && selectionStart !== null) { + setIsDragging(true); // Mark that we're dragging const start = Math.min(selectionStart, clampedTime); const end = Math.max(selectionStart, clampedTime); onSelectionChange({ start, end }); } - // Handle scrubbing (without auto-play during drag) - else if (isDragging && onSeek) { - onSeek(clampedTime, false); - } }; const handleMouseUp = (e: React.MouseEvent) => { - // If we were dragging (scrubbing), trigger auto-play on mouse up - if (isDragging && onSeek && !isSelecting) { + // If we didn't drag (just clicked), seek to that position and clear selection + if (!isDragging && onSeek) { const canvas = canvasRef.current; if (canvas) { const rect = canvas.getBoundingClientRect(); const x = e.clientX - rect.left; const visibleWidth = width * zoom; const actualX = x + scrollOffset; - const 
releaseTime = (actualX / visibleWidth) * duration; - const clampedTime = Math.max(0, Math.min(duration, releaseTime)); - // Auto-play on mouse up after dragging + const clickTime = (actualX / visibleWidth) * duration; + const clampedTime = Math.max(0, Math.min(duration, clickTime)); + // Seek and auto-play onSeek(clampedTime, true); + // Clear selection on click + if (onSelectionChange) { + onSelectionChange(null); + } } } + // If we dragged, the selection is already set via handleMouseMove setIsDragging(false); setIsSelecting(false); diff --git a/components/effects/AdvancedParameterDialog.tsx b/components/effects/AdvancedParameterDialog.tsx new file mode 100644 index 0000000..6dc9961 --- /dev/null +++ b/components/effects/AdvancedParameterDialog.tsx @@ -0,0 +1,445 @@ +'use client'; + +import * as React from 'react'; +import { Modal } from '@/components/ui/Modal'; +import { Button } from '@/components/ui/Button'; +import { Slider } from '@/components/ui/Slider'; +import type { + PitchShifterParameters, + TimeStretchParameters, + DistortionParameters, + BitcrusherParameters, +} from '@/lib/audio/effects/advanced'; + +export type AdvancedType = 'pitch' | 'timestretch' | 'distortion' | 'bitcrusher'; + +export type AdvancedParameters = + | (PitchShifterParameters & { type: 'pitch' }) + | (TimeStretchParameters & { type: 'timestretch' }) + | (DistortionParameters & { type: 'distortion' }) + | (BitcrusherParameters & { type: 'bitcrusher' }); + +interface EffectPreset { + name: string; + parameters: Omit; +} + +const PRESETS: Record = { + pitch: [ + { name: 'Octave Up', parameters: { semitones: 12, cents: 0, mix: 1.0 } }, + { name: 'Fifth Up', parameters: { semitones: 7, cents: 0, mix: 1.0 } }, + { name: 'Octave Down', parameters: { semitones: -12, cents: 0, mix: 1.0 } }, + { name: 'Subtle Shift', parameters: { semitones: 2, cents: 0, mix: 0.5 } }, + ], + timestretch: [ + { name: 'Half Speed', parameters: { rate: 0.5, preservePitch: true, mix: 1.0 } }, + { name: 'Double 
Speed', parameters: { rate: 2.0, preservePitch: true, mix: 1.0 } }, + { name: 'Slow Motion', parameters: { rate: 0.75, preservePitch: true, mix: 1.0 } }, + { name: 'Fast Forward', parameters: { rate: 1.5, preservePitch: true, mix: 1.0 } }, + ], + distortion: [ + { name: 'Light Overdrive', parameters: { drive: 0.3, tone: 0.7, output: 0.8, type: 'soft' as const, mix: 1.0 } }, + { name: 'Heavy Distortion', parameters: { drive: 0.8, tone: 0.5, output: 0.6, type: 'hard' as const, mix: 1.0 } }, + { name: 'Tube Warmth', parameters: { drive: 0.4, tone: 0.6, output: 0.75, type: 'tube' as const, mix: 0.8 } }, + { name: 'Extreme Fuzz', parameters: { drive: 1.0, tone: 0.3, output: 0.5, type: 'hard' as const, mix: 1.0 } }, + ], + bitcrusher: [ + { name: 'Lo-Fi', parameters: { bitDepth: 8, sampleRate: 8000, mix: 1.0 } }, + { name: 'Telephone', parameters: { bitDepth: 4, sampleRate: 4000, mix: 1.0 } }, + { name: 'Subtle Crunch', parameters: { bitDepth: 12, sampleRate: 22050, mix: 0.6 } }, + { name: 'Extreme Crush', parameters: { bitDepth: 2, sampleRate: 2000, mix: 1.0 } }, + ], +}; + +const DEFAULT_PARAMS: Record> = { + pitch: { semitones: 0, cents: 0, mix: 1.0 }, + timestretch: { rate: 1.0, preservePitch: true, mix: 1.0 }, + distortion: { drive: 0.5, tone: 0.5, output: 0.7, type: 'soft', mix: 1.0 }, + bitcrusher: { bitDepth: 8, sampleRate: 8000, mix: 1.0 }, +}; + +const EFFECT_LABELS: Record = { + pitch: 'Pitch Shifter', + timestretch: 'Time Stretch', + distortion: 'Distortion', + bitcrusher: 'Bitcrusher', +}; + +export interface AdvancedParameterDialogProps { + open: boolean; + onClose: () => void; + effectType: AdvancedType; + onApply: (params: AdvancedParameters) => void; +} + +export function AdvancedParameterDialog({ + open, + onClose, + effectType, + onApply, +}: AdvancedParameterDialogProps) { + const [parameters, setParameters] = React.useState>( + DEFAULT_PARAMS[effectType] + ); + + const canvasRef = React.useRef(null); + + // Reset parameters when effect type changes + 
React.useEffect(() => { + setParameters(DEFAULT_PARAMS[effectType]); + }, [effectType]); + + // Draw visual feedback + React.useEffect(() => { + const canvas = canvasRef.current; + if (!canvas) return; + + const ctx = canvas.getContext('2d'); + if (!ctx) return; + + const width = canvas.width; + const height = canvas.height; + + // Clear canvas + ctx.clearRect(0, 0, width, height); + + // Draw background + ctx.fillStyle = 'rgb(15, 23, 42)'; + ctx.fillRect(0, 0, width, height); + + // Draw visualization based on effect type + ctx.strokeStyle = 'rgb(59, 130, 246)'; + ctx.lineWidth = 2; + + if (effectType === 'pitch') { + const pitchParams = parameters as PitchShifterParameters; + const totalCents = (pitchParams.semitones ?? 0) * 100 + (pitchParams.cents ?? 0); + const pitchRatio = Math.pow(2, totalCents / 1200); + + // Draw waveform with pitch shift + ctx.beginPath(); + for (let x = 0; x < width; x++) { + const t = (x / width) * 4 * Math.PI * pitchRatio; + const y = height / 2 + Math.sin(t) * (height / 3); + if (x === 0) ctx.moveTo(x, y); + else ctx.lineTo(x, y); + } + ctx.stroke(); + + // Draw reference waveform + ctx.strokeStyle = 'rgba(148, 163, 184, 0.3)'; + ctx.beginPath(); + for (let x = 0; x < width; x++) { + const t = (x / width) * 4 * Math.PI; + const y = height / 2 + Math.sin(t) * (height / 3); + if (x === 0) ctx.moveTo(x, y); + else ctx.lineTo(x, y); + } + ctx.stroke(); + } else if (effectType === 'timestretch') { + const stretchParams = parameters as TimeStretchParameters; + + // Draw time-stretched waveform + ctx.beginPath(); + for (let x = 0; x < width; x++) { + const t = (x / width) * 4 * Math.PI / (stretchParams.rate ?? 
1.0); + const y = height / 2 + Math.sin(t) * (height / 3); + if (x === 0) ctx.moveTo(x, y); + else ctx.lineTo(x, y); + } + ctx.stroke(); + } else if (effectType === 'distortion') { + const distParams = parameters as DistortionParameters; + + // Draw distorted waveform + ctx.beginPath(); + for (let x = 0; x < width; x++) { + const t = (x / width) * 4 * Math.PI; + let sample = Math.sin(t); + + // Apply distortion + const drive = 1 + (distParams.drive ?? 0.5) * 10; + sample *= drive; + + const distType = distParams.type ?? 'soft'; + if (distType === 'soft') { + sample = Math.tanh(sample); + } else if (distType === 'hard') { + sample = Math.max(-1, Math.min(1, sample)); + } else { + sample = sample > 0 ? 1 - Math.exp(-sample) : -1 + Math.exp(sample); + } + + const y = height / 2 - sample * (height / 3); + if (x === 0) ctx.moveTo(x, y); + else ctx.lineTo(x, y); + } + ctx.stroke(); + } else if (effectType === 'bitcrusher') { + const crushParams = parameters as BitcrusherParameters; + const bitLevels = Math.pow(2, crushParams.bitDepth ?? 8); + const step = 2 / bitLevels; + + // Draw bitcrushed waveform + ctx.beginPath(); + let lastY = height / 2; + for (let x = 0; x < width; x++) { + const t = (x / width) * 4 * Math.PI; + let sample = Math.sin(t); + + // Quantize + sample = Math.floor(sample / step) * step; + + const y = height / 2 - sample * (height / 3); + + // Sample and hold effect + if (x % Math.max(1, Math.floor(width / ((crushParams.sampleRate ?? 
8000) / 1000))) === 0) { + lastY = y; + } + + if (x === 0) ctx.moveTo(x, lastY); + else ctx.lineTo(x, lastY); + } + ctx.stroke(); + } + + // Draw center line + ctx.strokeStyle = 'rgba(148, 163, 184, 0.2)'; + ctx.lineWidth = 1; + ctx.beginPath(); + ctx.moveTo(0, height / 2); + ctx.lineTo(width, height / 2); + ctx.stroke(); + }, [parameters, effectType]); + + const handleApply = () => { + onApply({ ...parameters, type: effectType } as AdvancedParameters); + onClose(); + }; + + const handlePreset = (preset: EffectPreset) => { + setParameters(preset.parameters); + }; + + return ( + +
+ {/* Visual Feedback */} +
+ +
+ + {/* Presets */} +
+ +
+ {PRESETS[effectType].map((preset) => ( + + ))} +
+
+ + {/* Effect-specific parameters */} + {effectType === 'pitch' && ( + <> +
+ + + setParameters({ ...parameters, semitones: value }) + } + min={-12} + max={12} + step={1} + /> +
+
+ + + setParameters({ ...parameters, cents: value }) + } + min={-100} + max={100} + step={1} + /> +
+ + )} + + {effectType === 'timestretch' && ( + <> +
+ + + setParameters({ ...parameters, rate: value }) + } + min={0.5} + max={2.0} + step={0.1} + /> +
+
+ + setParameters({ ...parameters, preservePitch: e.target.checked }) + } + className="h-4 w-4 rounded border-border" + /> + +
+ + )} + + {effectType === 'distortion' && ( + <> +
+ +
+ {(['soft', 'hard', 'tube'] as const).map((type) => ( + + ))} +
+
+
+ + + setParameters({ ...parameters, drive: value }) + } + min={0} + max={1} + step={0.01} + /> +
+
+ + + setParameters({ ...parameters, tone: value }) + } + min={0} + max={1} + step={0.01} + /> +
+
+ + + setParameters({ ...parameters, output: value }) + } + min={0} + max={1} + step={0.01} + /> +
+ + )} + + {effectType === 'bitcrusher' && ( + <> +
+ + + setParameters({ ...parameters, bitDepth: Math.round(value) }) + } + min={1} + max={16} + step={1} + /> +
+
+ + + setParameters({ ...parameters, sampleRate: Math.round(value) }) + } + min={100} + max={48000} + step={100} + /> +
+ + )} + + {/* Mix control */} +
+ + + setParameters({ ...parameters, mix: value }) + } + min={0} + max={1} + step={0.01} + /> +
+ + {/* Actions */} +
+ + +
+
+
+ ); +} diff --git a/components/effects/DynamicsParameterDialog.tsx b/components/effects/DynamicsParameterDialog.tsx new file mode 100644 index 0000000..312bed0 --- /dev/null +++ b/components/effects/DynamicsParameterDialog.tsx @@ -0,0 +1,522 @@ +'use client'; + +import * as React from 'react'; +import { Modal } from '@/components/ui/Modal'; +import { Button } from '@/components/ui/Button'; +import { Slider } from '@/components/ui/Slider'; +import { cn } from '@/lib/utils/cn'; +import type { + CompressorParameters, + LimiterParameters, + GateParameters, +} from '@/lib/audio/effects/dynamics'; + +export type DynamicsType = 'compressor' | 'limiter' | 'gate'; + +export type DynamicsParameters = + | (CompressorParameters & { type: 'compressor' }) + | (LimiterParameters & { type: 'limiter' }) + | (GateParameters & { type: 'gate' }); + +export interface EffectPreset { + name: string; + parameters: Partial; +} + +export interface DynamicsParameterDialogProps { + open: boolean; + onClose: () => void; + effectType: DynamicsType; + onApply: (params: DynamicsParameters) => void; +} + +const EFFECT_LABELS: Record = { + compressor: 'Compressor', + limiter: 'Limiter', + gate: 'Gate/Expander', +}; + +const EFFECT_DESCRIPTIONS: Record = { + compressor: 'Reduces dynamic range by lowering loud sounds', + limiter: 'Prevents audio from exceeding threshold', + gate: 'Reduces volume of quiet sounds below threshold', +}; + +const PRESETS: Record = { + compressor: [ + { name: 'Gentle', parameters: { threshold: -20, ratio: 2, attack: 10, release: 100, knee: 6, makeupGain: 3 } }, + { name: 'Medium', parameters: { threshold: -18, ratio: 4, attack: 5, release: 50, knee: 3, makeupGain: 6 } }, + { name: 'Heavy', parameters: { threshold: -15, ratio: 8, attack: 1, release: 30, knee: 0, makeupGain: 10 } }, + { name: 'Vocal', parameters: { threshold: -16, ratio: 3, attack: 5, release: 80, knee: 4, makeupGain: 5 } }, + ], + limiter: [ + { name: 'Transparent', parameters: { threshold: -3, attack: 
0.5, release: 50, makeupGain: 0 } }, + { name: 'Loud', parameters: { threshold: -1, attack: 0.1, release: 20, makeupGain: 2 } }, + { name: 'Broadcast', parameters: { threshold: -0.5, attack: 0.1, release: 10, makeupGain: 0 } }, + { name: 'Mastering', parameters: { threshold: -2, attack: 0.3, release: 30, makeupGain: 1 } }, + ], + gate: [ + { name: 'Gentle', parameters: { threshold: -40, ratio: 2, attack: 5, release: 100, knee: 6 } }, + { name: 'Medium', parameters: { threshold: -50, ratio: 4, attack: 1, release: 50, knee: 3 } }, + { name: 'Hard', parameters: { threshold: -60, ratio: 10, attack: 0.5, release: 20, knee: 0 } }, + { name: 'Noise Reduction', parameters: { threshold: -45, ratio: 6, attack: 1, release: 80, knee: 4 } }, + ], +}; + +export function DynamicsParameterDialog({ + open, + onClose, + effectType, + onApply, +}: DynamicsParameterDialogProps) { + const [parameters, setParameters] = React.useState(() => { + if (effectType === 'compressor') { + return { + type: 'compressor', + threshold: -20, + ratio: 4, + attack: 5, + release: 50, + knee: 3, + makeupGain: 6, + }; + } else if (effectType === 'limiter') { + return { + type: 'limiter', + threshold: -3, + attack: 0.5, + release: 50, + makeupGain: 0, + }; + } else { + return { + type: 'gate', + threshold: -40, + ratio: 4, + attack: 5, + release: 50, + knee: 3, + }; + } + }); + + const canvasRef = React.useRef(null); + + // Get appropriate presets for this effect type + const presets = PRESETS[effectType] || []; + + // Update parameters when effect type changes + React.useEffect(() => { + if (effectType === 'compressor') { + setParameters({ + type: 'compressor', + threshold: -20, + ratio: 4, + attack: 5, + release: 50, + knee: 3, + makeupGain: 6, + }); + } else if (effectType === 'limiter') { + setParameters({ + type: 'limiter', + threshold: -3, + attack: 0.5, + release: 50, + makeupGain: 0, + }); + } else { + setParameters({ + type: 'gate', + threshold: -40, + ratio: 4, + attack: 5, + release: 50, + knee: 
3, + }); + } + }, [effectType]); + + // Draw transfer curve (input level vs output level) + React.useEffect(() => { + if (!canvasRef.current) return; + + const canvas = canvasRef.current; + const ctx = canvas.getContext('2d'); + if (!ctx) return; + + // Get actual dimensions + const rect = canvas.getBoundingClientRect(); + const dpr = window.devicePixelRatio || 1; + + // Set actual size in memory (scaled to account for extra pixel density) + canvas.width = rect.width * dpr; + canvas.height = rect.height * dpr; + + // Normalize coordinate system to use CSS pixels + ctx.scale(dpr, dpr); + + // Clear any previous drawings first + ctx.clearRect(0, 0, canvas.width, canvas.height); + + const width = rect.width; + const height = rect.height; + const padding = 40; + const graphWidth = width - padding * 2; + const graphHeight = height - padding * 2; + + // Clear canvas + ctx.fillStyle = getComputedStyle(canvas).getPropertyValue('background-color') || '#1a1a1a'; + ctx.fillRect(0, 0, width, height); + + // Draw axes + ctx.strokeStyle = 'rgba(128, 128, 128, 0.5)'; + ctx.lineWidth = 1; + + // Horizontal and vertical grid lines + ctx.beginPath(); + for (let db = -60; db <= 0; db += 10) { + const x = padding + ((db + 60) / 60) * graphWidth; + const y = padding + graphHeight - ((db + 60) / 60) * graphHeight; + + // Vertical grid line + ctx.moveTo(x, padding); + ctx.lineTo(x, padding + graphHeight); + + // Horizontal grid line + ctx.moveTo(padding, y); + ctx.lineTo(padding + graphWidth, y); + } + ctx.stroke(); + + // Draw unity line (input = output) + ctx.strokeStyle = 'rgba(128, 128, 128, 0.3)'; + ctx.lineWidth = 1; + ctx.setLineDash([5, 5]); + ctx.beginPath(); + ctx.moveTo(padding, padding + graphHeight); + ctx.lineTo(padding + graphWidth, padding); + ctx.stroke(); + ctx.setLineDash([]); + + // Draw threshold line + const threshold = parameters.threshold; + const thresholdX = padding + ((threshold + 60) / 60) * graphWidth; + ctx.strokeStyle = 'rgba(255, 165, 0, 0.5)'; + 
ctx.lineWidth = 1; + ctx.setLineDash([3, 3]); + ctx.beginPath(); + ctx.moveTo(thresholdX, padding); + ctx.lineTo(thresholdX, padding + graphHeight); + ctx.stroke(); + ctx.setLineDash([]); + + // Draw transfer curve + ctx.strokeStyle = '#3b82f6'; // Primary blue + ctx.lineWidth = 2; + ctx.beginPath(); + + for (let inputDb = -60; inputDb <= 0; inputDb += 0.5) { + let outputDb = inputDb; + + if (effectType === 'compressor' || effectType === 'limiter') { + const ratio = parameters.type === 'limiter' ? 100 : (parameters as CompressorParameters).ratio; + const knee = parameters.type === 'limiter' ? 0 : (parameters as CompressorParameters).knee; + const makeupGain = (parameters as CompressorParameters | LimiterParameters).makeupGain; + + if (inputDb > threshold) { + const overThreshold = inputDb - threshold; + + // Soft knee calculation + if (knee > 0 && overThreshold < knee / 2) { + const kneeRatio = overThreshold / (knee / 2); + const compressionAmount = (1 - 1 / ratio) * kneeRatio; + outputDb = inputDb - overThreshold * compressionAmount; + } else { + // Above knee - full compression + outputDb = threshold + overThreshold / ratio; + } + + outputDb += makeupGain; + } else { + outputDb += makeupGain; + } + } else if (effectType === 'gate') { + const { ratio, knee } = parameters as GateParameters; + + if (inputDb < threshold) { + const belowThreshold = threshold - inputDb; + + // Soft knee calculation + if (knee > 0 && belowThreshold < knee / 2) { + const kneeRatio = belowThreshold / (knee / 2); + const expansionAmount = (ratio - 1) * kneeRatio; + outputDb = inputDb - belowThreshold * expansionAmount; + } else { + // Below knee - full expansion + outputDb = threshold - belowThreshold * ratio; + } + } + } + + // Clamp output + outputDb = Math.max(-60, Math.min(0, outputDb)); + + const x = padding + ((inputDb + 60) / 60) * graphWidth; + const y = padding + graphHeight - ((outputDb + 60) / 60) * graphHeight; + + if (inputDb === -60) { + ctx.moveTo(x, y); + } else { + 
ctx.lineTo(x, y); + } + } + + ctx.stroke(); + + // Draw axis labels + ctx.fillStyle = 'rgba(156, 163, 175, 0.8)'; + ctx.font = '10px sans-serif'; + ctx.textAlign = 'center'; + + // X-axis label + ctx.fillText('Input Level (dB)', width / 2, height - 5); + + // Y-axis label (rotated) + ctx.save(); + ctx.translate(10, height / 2); + ctx.rotate(-Math.PI / 2); + ctx.fillText('Output Level (dB)', 0, 0); + ctx.restore(); + + // Tick labels + ctx.textAlign = 'center'; + for (let db = -60; db <= 0; db += 20) { + const x = padding + ((db + 60) / 60) * graphWidth; + ctx.fillText(db.toString(), x, height - 20); + } + + }, [parameters, effectType]); + + const handleApply = () => { + onApply(parameters); + onClose(); + }; + + const handlePresetClick = (preset: EffectPreset) => { + setParameters((prev) => ({ + ...prev, + ...preset.parameters, + })); + }; + + return ( + + + + + } + > +
+ {/* Transfer Curve Visualization */} +
+ + +

+ Shows input vs output levels. Threshold (orange line), ratio, knee, and makeup gain affect this curve. + Attack and release control timing (not shown here). +

+
+ + {/* Presets */} + {presets.length > 0 && ( +
+ +
+ {presets.map((preset) => ( + + ))} +
+
+ )} + + {/* Threshold Parameter */} +
+ + + setParameters((prev) => ({ ...prev, threshold: value })) + } + min={-60} + max={0} + step={0.5} + className="w-full" + /> +
+ -60 dB + 0 dB +
+
+ + {/* Ratio Parameter (Compressor and Gate only) */} + {(effectType === 'compressor' || effectType === 'gate') && ( +
+ + + setParameters((prev) => ({ ...prev, ratio: value })) + } + min={1} + max={20} + step={0.5} + className="w-full" + /> +
+ 1:1 (None) + 20:1 (Hard) +
+
+ )} + + {/* Attack Parameter */} +
+ + + setParameters((prev) => ({ ...prev, attack: Math.pow(10, value) })) + } + min={-1} + max={2} + step={0.01} + className="w-full" + /> +
+ 0.1 ms (Fast) + 100 ms (Slow) +
+
+ + {/* Release Parameter */} +
+ + + setParameters((prev) => ({ ...prev, release: Math.pow(10, value) })) + } + min={1} + max={3} + step={0.01} + className="w-full" + /> +
+ 10 ms (Fast) + 1000 ms (Slow) +
+
+ + {/* Knee Parameter (Compressor and Gate only) */} + {(effectType === 'compressor' || effectType === 'gate') && ( +
+ + + setParameters((prev) => ({ ...prev, knee: value })) + } + min={0} + max={12} + step={0.5} + className="w-full" + /> +
+ 0 dB (Hard) + 12 dB (Soft) +
+
+ )} + + {/* Makeup Gain Parameter (Compressor and Limiter only) */} + {(effectType === 'compressor' || effectType === 'limiter') && ( +
+ + + setParameters((prev) => ({ ...prev, makeupGain: value })) + } + min={0} + max={24} + step={0.5} + className="w-full" + /> +
+ 0 dB + +24 dB +
+
+ )} +
+
+ ); +} diff --git a/components/effects/EffectParameterDialog.tsx b/components/effects/EffectParameterDialog.tsx new file mode 100644 index 0000000..15bafce --- /dev/null +++ b/components/effects/EffectParameterDialog.tsx @@ -0,0 +1,391 @@ +'use client'; + +import * as React from 'react'; +import { Modal } from '@/components/ui/Modal'; +import { Button } from '@/components/ui/Button'; +import { Slider } from '@/components/ui/Slider'; +import { cn } from '@/lib/utils/cn'; +import type { FilterType } from '@/lib/audio/effects/filters'; + +export interface FilterParameters { + type: FilterType; + frequency: number; + Q?: number; + gain?: number; +} + +export interface EffectPreset { + name: string; + parameters: Partial; +} + +export interface EffectParameterDialogProps { + open: boolean; + onClose: () => void; + effectType: 'lowpass' | 'highpass' | 'bandpass' | 'notch' | 'lowshelf' | 'highshelf' | 'peaking'; + onApply: (params: FilterParameters) => void; + sampleRate?: number; +} + +const EFFECT_LABELS: Record = { + lowpass: 'Low-Pass Filter', + highpass: 'High-Pass Filter', + bandpass: 'Band-Pass Filter', + notch: 'Notch Filter', + lowshelf: 'Low Shelf Filter', + highshelf: 'High Shelf Filter', + peaking: 'Peaking EQ', +}; + +const EFFECT_DESCRIPTIONS: Record = { + lowpass: 'Removes high frequencies above the cutoff', + highpass: 'Removes low frequencies below the cutoff', + bandpass: 'Isolates frequencies around the center frequency', + notch: 'Removes frequencies around the center frequency', + lowshelf: 'Boosts or cuts low frequencies', + highshelf: 'Boosts or cuts high frequencies', + peaking: 'Boosts or cuts a specific frequency band', +}; + +const PRESETS: Record = { + lowpass: [ + { name: 'Telephone', parameters: { frequency: 3000, Q: 0.7 } }, + { name: 'Radio', parameters: { frequency: 5000, Q: 1.0 } }, + { name: 'Warm', parameters: { frequency: 8000, Q: 0.5 } }, + { name: 'Muffled', parameters: { frequency: 1000, Q: 1.5 } }, + ], + highpass: [ + { name: 
'Rumble Removal', parameters: { frequency: 80, Q: 0.7 } }, + { name: 'Voice Clarity', parameters: { frequency: 150, Q: 1.0 } }, + { name: 'Thin', parameters: { frequency: 300, Q: 0.5 } }, + ], + bandpass: [ + { name: 'Telephone', parameters: { frequency: 1000, Q: 2.0 } }, + { name: 'Vocal Range', parameters: { frequency: 2000, Q: 1.0 } }, + { name: 'Narrow', parameters: { frequency: 1000, Q: 10.0 } }, + ], + notch: [ + { name: '60Hz Hum', parameters: { frequency: 60, Q: 10.0 } }, + { name: '50Hz Hum', parameters: { frequency: 50, Q: 10.0 } }, + { name: 'Narrow Notch', parameters: { frequency: 1000, Q: 20.0 } }, + ], + lowshelf: [ + { name: 'Bass Boost', parameters: { frequency: 200, gain: 6 } }, + { name: 'Bass Cut', parameters: { frequency: 200, gain: -6 } }, + { name: 'Warmth', parameters: { frequency: 150, gain: 3 } }, + ], + highshelf: [ + { name: 'Treble Boost', parameters: { frequency: 3000, gain: 6 } }, + { name: 'Treble Cut', parameters: { frequency: 3000, gain: -6 } }, + { name: 'Brightness', parameters: { frequency: 5000, gain: 3 } }, + ], + peaking: [ + { name: 'Presence Boost', parameters: { frequency: 3000, Q: 1.0, gain: 4 } }, + { name: 'Vocal Cut', parameters: { frequency: 2000, Q: 2.0, gain: -3 } }, + { name: 'Narrow Boost', parameters: { frequency: 1000, Q: 5.0, gain: 6 } }, + ], +}; + +export function EffectParameterDialog({ + open, + onClose, + effectType, + onApply, + sampleRate = 48000, +}: EffectParameterDialogProps) { + const [parameters, setParameters] = React.useState(() => ({ + type: effectType, + frequency: effectType === 'lowpass' ? 1000 : effectType === 'highpass' ? 
100 : 1000, + Q: 1.0, + gain: 0, + })); + + const canvasRef = React.useRef(null); + + // Get appropriate presets for this effect type + const presets = PRESETS[effectType] || []; + + // Update parameters when effect type changes + React.useEffect(() => { + setParameters((prev) => ({ ...prev, type: effectType })); + }, [effectType]); + + // Draw frequency response curve + React.useEffect(() => { + if (!canvasRef.current) return; + + const canvas = canvasRef.current; + const ctx = canvas.getContext('2d'); + if (!ctx) return; + + // Get actual dimensions + const rect = canvas.getBoundingClientRect(); + const dpr = window.devicePixelRatio || 1; + + // Set actual size in memory (scaled to account for extra pixel density) + canvas.width = rect.width * dpr; + canvas.height = rect.height * dpr; + + // Normalize coordinate system to use CSS pixels + ctx.scale(dpr, dpr); + + const width = rect.width; + const height = rect.height; + const nyquist = sampleRate / 2; + + // Clear canvas + ctx.fillStyle = getComputedStyle(canvas).getPropertyValue('background-color') || '#1a1a1a'; + ctx.fillRect(0, 0, width, height); + + // Draw grid + ctx.strokeStyle = 'rgba(128, 128, 128, 0.2)'; + ctx.lineWidth = 1; + + // Horizontal grid lines (dB) + for (let db = -24; db <= 24; db += 6) { + const y = height / 2 - (db / 24) * (height / 2); + ctx.beginPath(); + ctx.moveTo(0, y); + ctx.lineTo(width, y); + ctx.stroke(); + } + + // Vertical grid lines (frequency) + const frequencies = [100, 1000, 10000]; + frequencies.forEach((freq) => { + const x = (Math.log10(freq) - 1) / (Math.log10(nyquist) - 1) * width; + ctx.beginPath(); + ctx.moveTo(x, 0); + ctx.lineTo(x, height); + ctx.stroke(); + }); + + // Draw frequency response curve + ctx.strokeStyle = '#3b82f6'; // Primary blue + ctx.lineWidth = 2; + ctx.beginPath(); + + for (let x = 0; x < width; x++) { + const freq = Math.pow(10, 1 + (x / width) * (Math.log10(nyquist) - 1)); + const magnitude = getFilterMagnitude(freq, parameters, sampleRate); + 
const db = 20 * Math.log10(Math.max(magnitude, 0.0001)); // Prevent log(0) + const y = height / 2 - (db / 24) * (height / 2); + + if (x === 0) { + ctx.moveTo(x, y); + } else { + ctx.lineTo(x, y); + } + } + + ctx.stroke(); + + // Draw 0dB line + ctx.strokeStyle = 'rgba(156, 163, 175, 0.5)'; // Muted foreground + ctx.lineWidth = 1; + ctx.setLineDash([5, 5]); + ctx.beginPath(); + ctx.moveTo(0, height / 2); + ctx.lineTo(width, height / 2); + ctx.stroke(); + ctx.setLineDash([]); + + }, [parameters, sampleRate]); + + const handleApply = () => { + onApply(parameters); + onClose(); + }; + + const handlePresetClick = (preset: EffectPreset) => { + setParameters((prev) => ({ + ...prev, + ...preset.parameters, + })); + }; + + const needsQ = ['lowpass', 'highpass', 'bandpass', 'notch', 'peaking'].includes(effectType); + const needsGain = ['lowshelf', 'highshelf', 'peaking'].includes(effectType); + + return ( + + + + + } + > +
+ {/* Frequency Response Visualization */} +
+ + +
+ 100 Hz + 1 kHz + 10 kHz +
+
+ + {/* Presets */} + {presets.length > 0 && ( +
+ +
+ {presets.map((preset) => ( + + ))} +
+
+ )} + + {/* Frequency Parameter */} +
+ + + setParameters((prev) => ({ ...prev, frequency: Math.pow(10, value) })) + } + min={1} + max={Math.log10(sampleRate / 2)} + step={0.01} + className="w-full" + /> +
+ 10 Hz + {(sampleRate / 2).toFixed(0)} Hz +
+
+ + {/* Q Parameter */} + {needsQ && ( +
+ + + setParameters((prev) => ({ ...prev, Q: value })) + } + min={0.1} + max={20} + step={0.1} + className="w-full" + /> +
+ 0.1 (Gentle) + 20 (Sharp) +
+
+ )} + + {/* Gain Parameter */} + {needsGain && ( +
+ + + setParameters((prev) => ({ ...prev, gain: value })) + } + min={-24} + max={24} + step={0.5} + className="w-full" + /> +
+ -24 dB + +24 dB +
+
+ )} +
+
+ ); +} + +/** + * Calculate filter magnitude at a given frequency + */ +function getFilterMagnitude( + freq: number, + params: FilterParameters, + sampleRate: number +): number { + const w = (2 * Math.PI * freq) / sampleRate; + const w0 = (2 * Math.PI * params.frequency) / sampleRate; + const Q = params.Q || 1.0; + const gain = params.gain || 0; + const A = Math.pow(10, gain / 40); + + // Simplified magnitude calculation for different filter types + switch (params.type) { + case 'lowpass': { + const ratio = freq / params.frequency; + return 1 / Math.sqrt(1 + Math.pow(ratio * Q, 2 * 2)); + } + case 'highpass': { + const ratio = params.frequency / freq; + return 1 / Math.sqrt(1 + Math.pow(ratio * Q, 2 * 2)); + } + case 'bandpass': { + const ratio = Math.abs(freq - params.frequency) / (params.frequency / Q); + return 1 / Math.sqrt(1 + Math.pow(ratio, 2)); + } + case 'notch': { + const ratio = Math.abs(freq - params.frequency) / (params.frequency / Q); + return Math.abs(ratio) / Math.sqrt(1 + Math.pow(ratio, 2)); + } + case 'lowshelf': + case 'highshelf': + case 'peaking': { + // Simplified for visualization + const dist = Math.abs(Math.log(freq / params.frequency)); + const influence = Math.exp(-dist * Q); + return 1 + (A - 1) * influence; + } + default: + return 1; + } +} diff --git a/components/effects/TimeBasedParameterDialog.tsx b/components/effects/TimeBasedParameterDialog.tsx new file mode 100644 index 0000000..d327aa7 --- /dev/null +++ b/components/effects/TimeBasedParameterDialog.tsx @@ -0,0 +1,673 @@ +'use client'; + +import * as React from 'react'; +import { Modal } from '@/components/ui/Modal'; +import { Button } from '@/components/ui/Button'; +import { Slider } from '@/components/ui/Slider'; +import { cn } from '@/lib/utils/cn'; +import type { + DelayParameters, + ReverbParameters, + ChorusParameters, + FlangerParameters, + PhaserParameters, +} from '@/lib/audio/effects/time-based'; + +export type TimeBasedType = 'delay' | 'reverb' | 'chorus' | 'flanger' 
| 'phaser'; + +export type TimeBasedParameters = + | (DelayParameters & { type: 'delay' }) + | (ReverbParameters & { type: 'reverb' }) + | (ChorusParameters & { type: 'chorus' }) + | (FlangerParameters & { type: 'flanger' }) + | (PhaserParameters & { type: 'phaser' }); + +export interface EffectPreset { + name: string; + parameters: Partial; +} + +export interface TimeBasedParameterDialogProps { + open: boolean; + onClose: () => void; + effectType: TimeBasedType; + onApply: (params: TimeBasedParameters) => void; +} + +const EFFECT_LABELS: Record = { + delay: 'Delay/Echo', + reverb: 'Reverb', + chorus: 'Chorus', + flanger: 'Flanger', + phaser: 'Phaser', +}; + +const EFFECT_DESCRIPTIONS: Record = { + delay: 'Creates echo effects by repeating the audio signal', + reverb: 'Simulates acoustic space and ambience', + chorus: 'Thickens sound by adding modulated copies', + flanger: 'Creates sweeping comb-filter effect', + phaser: 'Creates a phase-shifting swoosh effect', +}; + +const PRESETS: Record = { + delay: [ + { name: 'Short Slap', parameters: { time: 80, feedback: 0.2, mix: 0.3 } }, + { name: 'Medium Echo', parameters: { time: 250, feedback: 0.4, mix: 0.4 } }, + { name: 'Long Echo', parameters: { time: 500, feedback: 0.5, mix: 0.5 } }, + { name: 'Ping Pong', parameters: { time: 375, feedback: 0.6, mix: 0.4 } }, + ], + reverb: [ + { name: 'Small Room', parameters: { roomSize: 0.3, damping: 0.5, mix: 0.2 } }, + { name: 'Medium Hall', parameters: { roomSize: 0.6, damping: 0.3, mix: 0.3 } }, + { name: 'Large Hall', parameters: { roomSize: 0.8, damping: 0.2, mix: 0.4 } }, + { name: 'Cathedral', parameters: { roomSize: 1.0, damping: 0.1, mix: 0.5 } }, + ], + chorus: [ + { name: 'Subtle', parameters: { rate: 0.5, depth: 0.2, delay: 20, mix: 0.3 } }, + { name: 'Classic', parameters: { rate: 1.0, depth: 0.5, delay: 25, mix: 0.5 } }, + { name: 'Deep', parameters: { rate: 1.5, depth: 0.7, delay: 30, mix: 0.6 } }, + { name: 'Lush', parameters: { rate: 0.8, depth: 0.6, delay: 35, 
mix: 0.7 } }, + ], + flanger: [ + { name: 'Subtle', parameters: { rate: 0.3, depth: 0.3, feedback: 0.2, delay: 2, mix: 0.4 } }, + { name: 'Classic', parameters: { rate: 0.5, depth: 0.5, feedback: 0.4, delay: 3, mix: 0.5 } }, + { name: 'Jet', parameters: { rate: 0.2, depth: 0.7, feedback: 0.6, delay: 1.5, mix: 0.6 } }, + { name: 'Extreme', parameters: { rate: 1.0, depth: 0.8, feedback: 0.7, delay: 2.5, mix: 0.7 } }, + ], + phaser: [ + { name: 'Gentle', parameters: { rate: 0.4, depth: 0.3, feedback: 0.2, stages: 4, mix: 0.4 } }, + { name: 'Classic', parameters: { rate: 0.6, depth: 0.5, feedback: 0.4, stages: 6, mix: 0.5 } }, + { name: 'Deep', parameters: { rate: 0.3, depth: 0.7, feedback: 0.5, stages: 8, mix: 0.6 } }, + { name: 'Vintage', parameters: { rate: 0.5, depth: 0.6, feedback: 0.6, stages: 4, mix: 0.7 } }, + ], +}; + +export function TimeBasedParameterDialog({ + open, + onClose, + effectType, + onApply, +}: TimeBasedParameterDialogProps) { + const [parameters, setParameters] = React.useState(() => { + if (effectType === 'delay') { + return { type: 'delay', time: 250, feedback: 0.4, mix: 0.4 }; + } else if (effectType === 'reverb') { + return { type: 'reverb', roomSize: 0.6, damping: 0.3, mix: 0.3 }; + } else if (effectType === 'chorus') { + return { type: 'chorus', rate: 1.0, depth: 0.5, delay: 25, mix: 0.5 }; + } else if (effectType === 'flanger') { + return { type: 'flanger', rate: 0.5, depth: 0.5, feedback: 0.4, delay: 3, mix: 0.5 }; + } else { + return { type: 'phaser', rate: 0.6, depth: 0.5, feedback: 0.4, stages: 6, mix: 0.5 }; + } + }); + + const canvasRef = React.useRef(null); + + // Get appropriate presets for this effect type + const presets = PRESETS[effectType] || []; + + // Update parameters when effect type changes + React.useEffect(() => { + if (effectType === 'delay') { + setParameters({ type: 'delay', time: 250, feedback: 0.4, mix: 0.4 }); + } else if (effectType === 'reverb') { + setParameters({ type: 'reverb', roomSize: 0.6, damping: 0.3, 
mix: 0.3 }); + } else if (effectType === 'chorus') { + setParameters({ type: 'chorus', rate: 1.0, depth: 0.5, delay: 25, mix: 0.5 }); + } else if (effectType === 'flanger') { + setParameters({ type: 'flanger', rate: 0.5, depth: 0.5, feedback: 0.4, delay: 3, mix: 0.5 }); + } else { + setParameters({ type: 'phaser', rate: 0.6, depth: 0.5, feedback: 0.4, stages: 6, mix: 0.5 }); + } + }, [effectType]); + + // Draw visualization + React.useEffect(() => { + if (!canvasRef.current) return; + + const canvas = canvasRef.current; + const ctx = canvas.getContext('2d'); + if (!ctx) return; + + // Get actual dimensions + const rect = canvas.getBoundingClientRect(); + const dpr = window.devicePixelRatio || 1; + + // Set actual size in memory + canvas.width = rect.width * dpr; + canvas.height = rect.height * dpr; + + // Normalize coordinate system + ctx.scale(dpr, dpr); + + // Clear canvas + ctx.clearRect(0, 0, canvas.width, canvas.height); + + const width = rect.width; + const height = rect.height; + + // Clear with background + ctx.fillStyle = getComputedStyle(canvas).getPropertyValue('background-color') || '#1a1a1a'; + ctx.fillRect(0, 0, width, height); + + if (effectType === 'delay') { + // Draw delay echoes + const delayParams = parameters as DelayParameters & { type: 'delay' }; + const maxTime = 2000; // ms + const echoCount = 5; + + ctx.strokeStyle = 'rgba(128, 128, 128, 0.3)'; + ctx.lineWidth = 1; + ctx.setLineDash([2, 2]); + for (let i = 0; i <= 4; i++) { + const x = (i / 4) * width; + ctx.beginPath(); + ctx.moveTo(x, 0); + ctx.lineTo(x, height); + ctx.stroke(); + } + ctx.setLineDash([]); + + let gain = 1.0; + for (let i = 0; i < echoCount; i++) { + const x = (i * delayParams.time / maxTime) * width; + const barHeight = height * gain * 0.8; + const y = (height - barHeight) / 2; + + ctx.fillStyle = `rgba(59, 130, 246, ${gain})`; + ctx.fillRect(x - 3, y, 6, barHeight); + + gain *= delayParams.feedback; + if (gain < 0.01) break; + } + } else if (effectType === 'reverb') { + 
// Draw reverb decay + const reverbParams = parameters as ReverbParameters & { type: 'reverb' }; + const decayTime = reverbParams.roomSize * 3000; // ms + + ctx.strokeStyle = '#3b82f6'; + ctx.lineWidth = 2; + ctx.beginPath(); + + for (let x = 0; x < width; x++) { + const time = (x / width) * 3000; + const decay = Math.exp(-time / (decayTime * (1 - reverbParams.damping * 0.5))); + const y = height / 2 + (height / 2 - 20) * (1 - decay); + + if (x === 0) ctx.moveTo(x, y); + else ctx.lineTo(x, y); + } + ctx.stroke(); + + // Draw reference line + ctx.strokeStyle = 'rgba(128, 128, 128, 0.3)'; + ctx.lineWidth = 1; + ctx.setLineDash([5, 5]); + ctx.beginPath(); + ctx.moveTo(0, height / 2); + ctx.lineTo(width, height / 2); + ctx.stroke(); + ctx.setLineDash([]); + } else { + // Draw LFO waveform for chorus, flanger, phaser + let rate = 1.0; + let depth = 0.5; + + if (effectType === 'chorus') { + const chorusParams = parameters as ChorusParameters & { type: 'chorus' }; + rate = chorusParams.rate; + depth = chorusParams.depth; + } else if (effectType === 'flanger') { + const flangerParams = parameters as FlangerParameters & { type: 'flanger' }; + rate = flangerParams.rate; + depth = flangerParams.depth; + } else if (effectType === 'phaser') { + const phaserParams = parameters as PhaserParameters & { type: 'phaser' }; + rate = phaserParams.rate; + depth = phaserParams.depth; + } + + ctx.strokeStyle = '#3b82f6'; + ctx.lineWidth = 2; + ctx.beginPath(); + + const cycles = rate * 2; // Show 2 seconds worth + for (let x = 0; x < width; x++) { + const phase = (x / width) * cycles * 2 * Math.PI; + const lfo = Math.sin(phase); + const y = height / 2 - (lfo * depth * height * 0.4); + + if (x === 0) ctx.moveTo(x, y); + else ctx.lineTo(x, y); + } + ctx.stroke(); + + // Draw center line + ctx.strokeStyle = 'rgba(128, 128, 128, 0.3)'; + ctx.lineWidth = 1; + ctx.setLineDash([5, 5]); + ctx.beginPath(); + ctx.moveTo(0, height / 2); + ctx.lineTo(width, height / 2); + ctx.stroke(); + 
ctx.setLineDash([]); + } + + }, [parameters, effectType]); + + const handleApply = () => { + onApply(parameters); + onClose(); + }; + + const handlePresetClick = (preset: EffectPreset) => { + setParameters((prev) => ({ + ...prev, + ...preset.parameters, + })); + }; + + return ( + + + + + } + > +
+ {/* Visualization */} +
+ + +
+ + {/* Presets */} + {presets.length > 0 && ( +
+ +
+ {presets.map((preset) => ( + + ))} +
+
+ )} + + {/* Effect-specific parameters */} + {effectType === 'delay' && ( + <> + {/* Delay Time */} +
+ + + setParameters((prev) => ({ ...prev, time: value })) + } + min={10} + max={2000} + step={10} + className="w-full" + /> +
+ + {/* Feedback */} +
+ + + setParameters((prev) => ({ ...prev, feedback: value })) + } + min={0} + max={0.95} + step={0.01} + className="w-full" + /> +
+ + )} + + {effectType === 'reverb' && ( + <> + {/* Room Size */} +
+ + + setParameters((prev) => ({ ...prev, roomSize: value })) + } + min={0.1} + max={1} + step={0.01} + className="w-full" + /> +
+ + {/* Damping */} +
+ + + setParameters((prev) => ({ ...prev, damping: value })) + } + min={0} + max={1} + step={0.01} + className="w-full" + /> +
+ + )} + + {effectType === 'chorus' && ( + <> + {/* Rate */} +
+ + + setParameters((prev) => ({ ...prev, rate: value })) + } + min={0.1} + max={5} + step={0.1} + className="w-full" + /> +
+ + {/* Depth */} +
+ + + setParameters((prev) => ({ ...prev, depth: value })) + } + min={0} + max={1} + step={0.01} + className="w-full" + /> +
+ + {/* Base Delay */} +
+ + + setParameters((prev) => ({ ...prev, delay: value })) + } + min={5} + max={50} + step={0.5} + className="w-full" + /> +
+ + )} + + {effectType === 'flanger' && ( + <> + {/* Rate */} +
+ + + setParameters((prev) => ({ ...prev, rate: value })) + } + min={0.1} + max={5} + step={0.1} + className="w-full" + /> +
+ + {/* Depth */} +
+ + + setParameters((prev) => ({ ...prev, depth: value })) + } + min={0} + max={1} + step={0.01} + className="w-full" + /> +
+ + {/* Feedback */} +
+ + + setParameters((prev) => ({ ...prev, feedback: value })) + } + min={0} + max={0.95} + step={0.01} + className="w-full" + /> +
+ + {/* Base Delay */} +
+ + + setParameters((prev) => ({ ...prev, delay: value })) + } + min={0.5} + max={10} + step={0.1} + className="w-full" + /> +
+ + )} + + {effectType === 'phaser' && ( + <> + {/* Rate */} +
+ + + setParameters((prev) => ({ ...prev, rate: value })) + } + min={0.1} + max={5} + step={0.1} + className="w-full" + /> +
+ + {/* Depth */} +
+ + + setParameters((prev) => ({ ...prev, depth: value })) + } + min={0} + max={1} + step={0.01} + className="w-full" + /> +
+ + {/* Feedback */} +
+ + + setParameters((prev) => ({ ...prev, feedback: value })) + } + min={0} + max={0.95} + step={0.01} + className="w-full" + /> +
+ + {/* Stages */} +
+ + + setParameters((prev) => ({ ...prev, stages: Math.floor(value) })) + } + min={2} + max={12} + step={1} + className="w-full" + /> +
+ + )} + + {/* Mix (common to all) */} +
+ + + setParameters((prev) => ({ ...prev, mix: value })) + } + min={0} + max={1} + step={0.01} + className="w-full" + /> +
+ 0% (Dry) + 100% (Wet) +
+
+
+
+ ); +} diff --git a/components/layout/SidePanel.tsx b/components/layout/SidePanel.tsx new file mode 100644 index 0000000..ef8a1b1 --- /dev/null +++ b/components/layout/SidePanel.tsx @@ -0,0 +1,515 @@ +'use client'; + +import * as React from 'react'; +import { + FileAudio, + History, + Info, + ChevronLeft, + ChevronRight, + Upload, + Download, + X, + Sparkles, +} from 'lucide-react'; +import { Button } from '@/components/ui/Button'; +import { cn } from '@/lib/utils/cn'; +import { formatDuration } from '@/lib/audio/decoder'; +import type { Selection } from '@/types/selection'; +import type { HistoryState } from '@/lib/history/history-manager'; + +export interface SidePanelProps { + // File info + fileName: string | null; + audioBuffer: AudioBuffer | null; + onFileSelect: (file: File) => void; + onClear: () => void; + + // Selection info + selection: Selection | null; + + // History info + historyState: HistoryState; + + // Effects handlers + onNormalize: () => void; + onFadeIn: () => void; + onFadeOut: () => void; + onReverse: () => void; + onLowPassFilter: () => void; + onHighPassFilter: () => void; + onBandPassFilter: () => void; + onCompressor: () => void; + onLimiter: () => void; + onGate: () => void; + onDelay: () => void; + onReverb: () => void; + onChorus: () => void; + onFlanger: () => void; + onPhaser: () => void; + onPitchShift: () => void; + onTimeStretch: () => void; + onDistortion: () => void; + onBitcrusher: () => void; + + className?: string; +} + +export function SidePanel({ + fileName, + audioBuffer, + onFileSelect, + onClear, + selection, + historyState, + onNormalize, + onFadeIn, + onFadeOut, + onReverse, + onLowPassFilter, + onHighPassFilter, + onBandPassFilter, + onCompressor, + onLimiter, + onGate, + onDelay, + onReverb, + onChorus, + onFlanger, + onPhaser, + onPitchShift, + onTimeStretch, + onDistortion, + onBitcrusher, + className, +}: SidePanelProps) { + const [isCollapsed, setIsCollapsed] = React.useState(false); + const [activeTab, 
setActiveTab] = React.useState<'file' | 'history' | 'info' | 'effects'>('file'); + const fileInputRef = React.useRef(null); + + const handleFileClick = () => { + fileInputRef.current?.click(); + }; + + const handleFileChange = (e: React.ChangeEvent) => { + const file = e.target.files?.[0]; + if (file) { + onFileSelect(file); + } + }; + + if (isCollapsed) { + return ( +
+ +
+ ); + } + + return ( +
+ {/* Header */} +
+
+ + + + +
+ +
+ + {/* Content */} +
+ {activeTab === 'file' && ( + <> +
+

+ Audio File +

+ {audioBuffer ? ( +
+
+
+ {fileName || 'Unknown'} +
+
+ Duration: {formatDuration(audioBuffer.duration)} +
+
+ Channels: {audioBuffer.numberOfChannels} +
+
+ Sample Rate: {audioBuffer.sampleRate} Hz +
+
+ +
+ ) : ( +
+ + +
+ Or drag and drop an audio file onto the waveform area. +
+
+ )} +
+ + )} + + {activeTab === 'history' && ( +
+

+ Edit History +

+ {historyState.historySize > 0 ? ( +
+
+
+ {historyState.historySize} action{historyState.historySize !== 1 ? 's' : ''} +
+ {historyState.undoDescription && ( +
+ Next undo: {historyState.undoDescription} +
+ )} + {historyState.redoDescription && ( +
+ Next redo: {historyState.redoDescription} +
+ )} +
+
+ ) : ( +
+ No history available. Edit operations will appear here. +
+ )} +
+ )} + + {activeTab === 'info' && ( +
+

+ Selection Info +

+ {selection ? ( +
+
Selection Active
+
+ Duration: {formatDuration(selection.end - selection.start)} +
+
+ Start: {formatDuration(selection.start)} +
+
+ End: {formatDuration(selection.end)} +
+
+ ) : ( +
+ No selection. Drag on the waveform to select a region. +
+ )} +
+ )} + + {activeTab === 'effects' && ( +
+
+

+ Basic Effects +

+ {audioBuffer ? ( +
+ + + + +
+ ) : ( +
+ Load an audio file to apply effects. +
+ )} +
+ +
+

+ Filters +

+ {audioBuffer ? ( +
+ + + +
+ ) : ( +
+ Load an audio file to apply filters. +
+ )} +
+ +
+

+ Dynamics Processing +

+ {audioBuffer ? ( +
+ + + +
+ ) : ( +
+ Load an audio file to apply dynamics processing. +
+ )} +
+ +
+

+ Time-Based Effects +

+ {audioBuffer ? ( +
+ + + + + +
+ ) : ( +
+ Load an audio file to apply time-based effects. +
+ )} +
+ +
+

+ Advanced Effects +

+ {audioBuffer ? ( +
+ + + + +
+ ) : ( +
+ Load an audio file to apply advanced effects. +
+ )} +
+
+ )} +
+
+ ); +} diff --git a/components/layout/Toolbar.tsx b/components/layout/Toolbar.tsx new file mode 100644 index 0000000..7b9ce6c --- /dev/null +++ b/components/layout/Toolbar.tsx @@ -0,0 +1,237 @@ +'use client'; + +import * as React from 'react'; +import { + Play, + Pause, + Square, + SkipBack, + Scissors, + Copy, + Clipboard, + Trash2, + CropIcon, + Undo2, + Redo2, + ZoomIn, + ZoomOut, + Maximize2, +} from 'lucide-react'; +import { Button } from '@/components/ui/Button'; +import { cn } from '@/lib/utils/cn'; + +export interface ToolbarProps { + // Playback + isPlaying: boolean; + isPaused: boolean; + onPlay: () => void; + onPause: () => void; + onStop: () => void; + + // Edit + hasSelection: boolean; + hasClipboard: boolean; + onCut: () => void; + onCopy: () => void; + onPaste: () => void; + onDelete: () => void; + onTrim: () => void; + + // History + canUndo: boolean; + canRedo: boolean; + onUndo: () => void; + onRedo: () => void; + + // Zoom + onZoomIn: () => void; + onZoomOut: () => void; + onFitToView: () => void; + + disabled?: boolean; + className?: string; +} + +export function Toolbar({ + isPlaying, + isPaused, + onPlay, + onPause, + onStop, + hasSelection, + hasClipboard, + onCut, + onCopy, + onPaste, + onDelete, + onTrim, + canUndo, + canRedo, + onUndo, + onRedo, + onZoomIn, + onZoomOut, + onFitToView, + disabled = false, + className, +}: ToolbarProps) { + const handlePlayPause = () => { + if (isPlaying) { + onPause(); + } else { + onPlay(); + } + }; + + return ( +
+ {/* Transport Controls */} +
+ + + + + +
+ + {/* Edit Tools */} +
+ + + + + + + + + +
+ + {/* History */} +
+ + + +
+ + {/* Zoom Controls */} +
+ + + + + +
+
+ ); +} diff --git a/components/ui/Button.tsx b/components/ui/Button.tsx index 57b692d..e0d6af7 100644 --- a/components/ui/Button.tsx +++ b/components/ui/Button.tsx @@ -4,7 +4,7 @@ import { cn } from '@/lib/utils/cn'; export interface ButtonProps extends React.ButtonHTMLAttributes { variant?: 'default' | 'destructive' | 'outline' | 'secondary' | 'ghost' | 'link'; - size?: 'default' | 'sm' | 'lg' | 'icon'; + size?: 'default' | 'sm' | 'lg' | 'icon' | 'icon-sm'; } const Button = React.forwardRef( @@ -32,6 +32,7 @@ const Button = React.forwardRef( 'h-9 rounded-md px-3': size === 'sm', 'h-11 rounded-md px-8': size === 'lg', 'h-10 w-10': size === 'icon', + 'h-8 w-8': size === 'icon-sm', }, className )} diff --git a/components/ui/CommandPalette.tsx b/components/ui/CommandPalette.tsx new file mode 100644 index 0000000..6d10c09 --- /dev/null +++ b/components/ui/CommandPalette.tsx @@ -0,0 +1,195 @@ +'use client'; + +import * as React from 'react'; +import { Command } from 'lucide-react'; +import { cn } from '@/lib/utils/cn'; + +export interface CommandAction { + id: string; + label: string; + description?: string; + shortcut?: string; + category: 'edit' | 'playback' | 'file' | 'view' | 'effects'; + action: () => void; +} + +export interface CommandPaletteProps { + actions: CommandAction[]; + className?: string; +} + +export function CommandPalette({ actions, className }: CommandPaletteProps) { + const [isOpen, setIsOpen] = React.useState(false); + const [search, setSearch] = React.useState(''); + const [selectedIndex, setSelectedIndex] = React.useState(0); + const inputRef = React.useRef(null); + + const filteredActions = React.useMemo(() => { + if (!search) return actions; + const query = search.toLowerCase(); + return actions.filter( + (action) => + action.label.toLowerCase().includes(query) || + action.description?.toLowerCase().includes(query) || + action.category.toLowerCase().includes(query) + ); + }, [actions, search]); + + const groupedActions = React.useMemo(() => { 
+ const groups: Record = {}; + filteredActions.forEach((action) => { + if (!groups[action.category]) { + groups[action.category] = []; + } + groups[action.category].push(action); + }); + return groups; + }, [filteredActions]); + + React.useEffect(() => { + const handleKeyDown = (e: KeyboardEvent) => { + // Ctrl+K or Cmd+K to open + if ((e.ctrlKey || e.metaKey) && e.key === 'k') { + e.preventDefault(); + setIsOpen(true); + } + // Escape to close + if (e.key === 'Escape') { + setIsOpen(false); + setSearch(''); + setSelectedIndex(0); + } + }; + + window.addEventListener('keydown', handleKeyDown); + return () => window.removeEventListener('keydown', handleKeyDown); + }, []); + + React.useEffect(() => { + if (isOpen && inputRef.current) { + inputRef.current.focus(); + } + }, [isOpen]); + + const handleKeyDown = (e: React.KeyboardEvent) => { + if (e.key === 'ArrowDown') { + e.preventDefault(); + setSelectedIndex((prev) => Math.min(prev + 1, filteredActions.length - 1)); + } else if (e.key === 'ArrowUp') { + e.preventDefault(); + setSelectedIndex((prev) => Math.max(prev - 1, 0)); + } else if (e.key === 'Enter') { + e.preventDefault(); + if (filteredActions[selectedIndex]) { + filteredActions[selectedIndex].action(); + setIsOpen(false); + setSearch(''); + setSelectedIndex(0); + } + } + }; + + const executeAction = (action: CommandAction) => { + action.action(); + setIsOpen(false); + setSearch(''); + setSelectedIndex(0); + }; + + if (!isOpen) { + return ( + + ); + } + + return ( +
+
+ {/* Search Input */} +
+ + { + setSearch(e.target.value); + setSelectedIndex(0); + }} + onKeyDown={handleKeyDown} + placeholder="Type a command or search..." + className="flex-1 bg-transparent border-none outline-none text-foreground placeholder:text-muted-foreground" + /> + + ESC + +
+ + {/* Results */} +
+ {Object.keys(groupedActions).length === 0 ? ( +
+ No commands found +
+ ) : ( + Object.entries(groupedActions).map(([category, categoryActions]) => ( +
+
+ {category} +
+ {categoryActions.map((action, index) => { + const globalIndex = filteredActions.indexOf(action); + return ( + + ); + })} +
+ )) + )} +
+
+
+ ); +} diff --git a/components/ui/Modal.tsx b/components/ui/Modal.tsx new file mode 100644 index 0000000..a602242 --- /dev/null +++ b/components/ui/Modal.tsx @@ -0,0 +1,118 @@ +'use client'; + +import * as React from 'react'; +import { X } from 'lucide-react'; +import { Button } from './Button'; +import { cn } from '@/lib/utils/cn'; + +export interface ModalProps { + open: boolean; + onClose: () => void; + title: string; + description?: string; + children: React.ReactNode; + footer?: React.ReactNode; + size?: 'sm' | 'md' | 'lg' | 'xl'; + className?: string; +} + +export function Modal({ + open, + onClose, + title, + description, + children, + footer, + size = 'md', + className, +}: ModalProps) { + // Close on Escape key + React.useEffect(() => { + const handleEscape = (e: KeyboardEvent) => { + if (e.key === 'Escape' && open) { + onClose(); + } + }; + + if (open) { + document.addEventListener('keydown', handleEscape); + // Prevent body scroll when modal is open + document.body.style.overflow = 'hidden'; + } + + return () => { + document.removeEventListener('keydown', handleEscape); + document.body.style.overflow = 'unset'; + }; + }, [open, onClose]); + + if (!open) return null; + + const sizeClasses = { + sm: 'max-w-sm', + md: 'max-w-md', + lg: 'max-w-lg', + xl: 'max-w-xl', + }; + + return ( +
+ {/* Backdrop */} + + ); +} diff --git a/components/ui/Slider.tsx b/components/ui/Slider.tsx index c33ff49..bf4e975 100644 --- a/components/ui/Slider.tsx +++ b/components/ui/Slider.tsx @@ -4,9 +4,10 @@ import * as React from 'react'; import { cn } from '@/lib/utils/cn'; export interface SliderProps - extends Omit, 'onChange' | 'value'> { - value?: number; + extends Omit, 'onChange' | 'value' | 'onValueChange'> { + value?: number | number[]; onChange?: (value: number) => void; + onValueChange?: (value: number[]) => void; min?: number; max?: number; step?: number; @@ -20,6 +21,7 @@ const Slider = React.forwardRef( className, value = 0, onChange, + onValueChange, min = 0, max = 100, step = 1, @@ -30,8 +32,13 @@ const Slider = React.forwardRef( }, ref ) => { + // Support both value formats (number or number[]) + const currentValue = Array.isArray(value) ? value[0] : value; + const handleChange = (e: React.ChangeEvent) => { - onChange?.(parseFloat(e.target.value)); + const numValue = parseFloat(e.target.value); + onChange?.(numValue); + onValueChange?.([numValue]); }; return ( @@ -44,7 +51,7 @@ const Slider = React.forwardRef( )} {showValue && ( - {value} + {currentValue} )}
)} @@ -54,7 +61,7 @@ const Slider = React.forwardRef( min={min} max={max} step={step} - value={value} + value={currentValue} onChange={handleChange} disabled={disabled} className={cn( diff --git a/lib/audio/effects/advanced.ts b/lib/audio/effects/advanced.ts new file mode 100644 index 0000000..a77a56d --- /dev/null +++ b/lib/audio/effects/advanced.ts @@ -0,0 +1,281 @@ +/** + * Advanced effects (Pitch Shifter, Time Stretcher, Distortion, Bitcrusher) + */ + +import { getAudioContext } from '../context'; + +export interface PitchShifterParameters { + semitones: number; // -12 to +12 - pitch shift in semitones + cents: number; // -100 to +100 - fine tuning in cents + mix: number; // 0-1 - dry/wet mix +} + +export interface TimeStretchParameters { + rate: number; // 0.5-2.0 - playback rate (0.5 = half speed, 2 = double speed) + preservePitch: boolean; // whether to preserve pitch + mix: number; // 0-1 - dry/wet mix +} + +export interface DistortionParameters { + drive: number; // 0-1 - amount of distortion + tone: number; // 0-1 - pre-distortion tone control + output: number; // 0-1 - output level + type: 'soft' | 'hard' | 'tube'; // distortion type + mix: number; // 0-1 - dry/wet mix +} + +export interface BitcrusherParameters { + bitDepth: number; // 1-16 - bit depth + sampleRate: number; // 100-48000 - sample rate reduction + mix: number; // 0-1 - dry/wet mix +} + +/** + * Apply pitch shifting to audio buffer + * Uses simple time-domain pitch shifting (overlap-add) + */ +export async function applyPitchShift( + buffer: AudioBuffer, + params: PitchShifterParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const sampleRate = buffer.sampleRate; + + // Calculate pitch shift ratio + const totalCents = params.semitones * 100 + params.cents; + const pitchRatio = Math.pow(2, totalCents / 1200); + + // For pitch shifting, we change the playback rate then resample + const newLength = Math.floor(buffer.length / 
pitchRatio); + const outputBuffer = audioContext.createBuffer(channels, newLength, sampleRate); + + // Simple linear interpolation resampling + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + for (let i = 0; i < newLength; i++) { + const srcIndex = i * pitchRatio; + const srcIndexInt = Math.floor(srcIndex); + const srcIndexFrac = srcIndex - srcIndexInt; + + if (srcIndexInt < buffer.length - 1) { + const sample1 = inputData[srcIndexInt]; + const sample2 = inputData[srcIndexInt + 1]; + const interpolated = sample1 + (sample2 - sample1) * srcIndexFrac; + + // Mix dry/wet + const dry = i < buffer.length ? inputData[i] : 0; + outputData[i] = dry * (1 - params.mix) + interpolated * params.mix; + } else if (srcIndexInt < buffer.length) { + const dry = i < buffer.length ? inputData[i] : 0; + outputData[i] = dry * (1 - params.mix) + inputData[srcIndexInt] * params.mix; + } + } + } + + return outputBuffer; +} + +/** + * Apply time stretching to audio buffer + * Changes duration without affecting pitch (basic implementation) + */ +export async function applyTimeStretch( + buffer: AudioBuffer, + params: TimeStretchParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const sampleRate = buffer.sampleRate; + + if (params.preservePitch) { + // Time stretch with pitch preservation (overlap-add) + const newLength = Math.floor(buffer.length / params.rate); + const outputBuffer = audioContext.createBuffer(channels, newLength, sampleRate); + + const windowSize = 2048; + const hopSize = Math.floor(windowSize / 4); + + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + let readPos = 0; + let writePos = 0; + + while (writePos < newLength) { + // Simple overlap-add + for (let i = 0; i < 
windowSize && writePos + i < newLength; i++) { + const readIndex = Math.floor(readPos + i); + if (readIndex < buffer.length) { + // Hanning window + const window = 0.5 * (1 - Math.cos((2 * Math.PI * i) / windowSize)); + outputData[writePos + i] += inputData[readIndex] * window; + } + } + + readPos += hopSize * params.rate; + writePos += hopSize; + } + + // Normalize + let maxVal = 0; + for (let i = 0; i < newLength; i++) { + maxVal = Math.max(maxVal, Math.abs(outputData[i])); + } + if (maxVal > 0) { + for (let i = 0; i < newLength; i++) { + outputData[i] /= maxVal; + } + } + } + + return outputBuffer; + } else { + // Simple speed change (changes pitch) + const newLength = Math.floor(buffer.length / params.rate); + const outputBuffer = audioContext.createBuffer(channels, newLength, sampleRate); + + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + for (let i = 0; i < newLength; i++) { + const srcIndex = i * params.rate; + const srcIndexInt = Math.floor(srcIndex); + const srcIndexFrac = srcIndex - srcIndexInt; + + if (srcIndexInt < buffer.length - 1) { + const sample1 = inputData[srcIndexInt]; + const sample2 = inputData[srcIndexInt + 1]; + outputData[i] = sample1 + (sample2 - sample1) * srcIndexFrac; + } else if (srcIndexInt < buffer.length) { + outputData[i] = inputData[srcIndexInt]; + } + } + } + + return outputBuffer; + } +} + +/** + * Apply distortion/overdrive effect + */ +export async function applyDistortion( + buffer: AudioBuffer, + params: DistortionParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const length = buffer.length; + const sampleRate = buffer.sampleRate; + + const outputBuffer = audioContext.createBuffer(channels, length, sampleRate); + + // Distortion function based on type + const distort = (sample: number, drive: number, type: string): number => { + const x = 
sample * (1 + drive * 10); + + switch (type) { + case 'soft': + // Soft clipping (tanh) + return Math.tanh(x); + + case 'hard': + // Hard clipping + return Math.max(-1, Math.min(1, x)); + + case 'tube': + // Tube-like distortion (asymmetric) + if (x > 0) { + return 1 - Math.exp(-x); + } else { + return -1 + Math.exp(x); + } + + default: + return x; + } + }; + + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + // Simple low-pass filter for tone control + let filterState = 0; + const filterCutoff = params.tone; + + for (let i = 0; i < length; i++) { + let sample = inputData[i]; + + // Pre-distortion tone filter + filterState = filterState * (1 - filterCutoff) + sample * filterCutoff; + sample = filterState; + + // Apply distortion + const distorted = distort(sample, params.drive, params.type); + + // Output level + const processed = distorted * params.output; + + // Mix dry/wet + outputData[i] = inputData[i] * (1 - params.mix) + processed * params.mix; + } + } + + return outputBuffer; +} + +/** + * Apply bitcrusher effect + */ +export async function applyBitcrusher( + buffer: AudioBuffer, + params: BitcrusherParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const length = buffer.length; + const sampleRate = buffer.sampleRate; + + const outputBuffer = audioContext.createBuffer(channels, length, sampleRate); + + // Calculate bit depth quantization step + const bitLevels = Math.pow(2, params.bitDepth); + const step = 2 / bitLevels; + + // Calculate sample rate reduction ratio + const srRatio = sampleRate / params.sampleRate; + + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + let holdSample = 0; + let holdCounter = 0; + + for (let i = 0; i < length; i++) { + // Sample 
rate reduction (sample and hold) + if (holdCounter <= 0) { + let sample = inputData[i]; + + // Bit depth reduction + sample = Math.floor(sample / step) * step; + + holdSample = sample; + holdCounter = srRatio; + } + + holdCounter--; + + // Mix dry/wet + outputData[i] = inputData[i] * (1 - params.mix) + holdSample * params.mix; + } + } + + return outputBuffer; +} diff --git a/lib/audio/effects/dynamics.ts b/lib/audio/effects/dynamics.ts new file mode 100644 index 0000000..3721f0a --- /dev/null +++ b/lib/audio/effects/dynamics.ts @@ -0,0 +1,205 @@ +/** + * Dynamics processing effects (Compressor, Limiter, Gate/Expander) + */ + +import { getAudioContext } from '../context'; + +export interface CompressorParameters { + threshold: number; // dB - level where compression starts + ratio: number; // Compression ratio (e.g., 4 = 4:1) + attack: number; // ms - how quickly to compress + release: number; // ms - how quickly to stop compressing + knee: number; // dB - width of soft knee (0 = hard knee) + makeupGain: number; // dB - gain to apply after compression +} + +export interface LimiterParameters { + threshold: number; // dB - maximum level + attack: number; // ms - how quickly to limit + release: number; // ms - how quickly to stop limiting + makeupGain: number; // dB - gain to apply after limiting +} + +export interface GateParameters { + threshold: number; // dB - level below which gate activates + ratio: number; // Expansion ratio (e.g., 2 = 2:1) + attack: number; // ms - how quickly to close gate + release: number; // ms - how quickly to open gate + knee: number; // dB - width of soft knee +} + +/** + * Apply compression to audio buffer + */ +export async function applyCompressor( + buffer: AudioBuffer, + params: CompressorParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const length = buffer.length; + const sampleRate = buffer.sampleRate; + + // Create output buffer + const outputBuffer = 
audioContext.createBuffer(channels, length, sampleRate); + + // Convert time constants to samples + const attackSamples = (params.attack / 1000) * sampleRate; + const releaseSamples = (params.release / 1000) * sampleRate; + + // Convert dB to linear + const thresholdLinear = dbToLinear(params.threshold); + const makeupGainLinear = dbToLinear(params.makeupGain); + const kneeLinear = dbToLinear(params.knee); + + // Process each channel + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + let envelope = 0; + + for (let i = 0; i < length; i++) { + const input = inputData[i]; + const inputAbs = Math.abs(input); + + // Envelope follower with attack/release + if (inputAbs > envelope) { + envelope = envelope + (inputAbs - envelope) / attackSamples; + } else { + envelope = envelope + (inputAbs - envelope) / releaseSamples; + } + + // Calculate gain reduction + let gain = 1.0; + + if (envelope > thresholdLinear) { + // Soft knee calculation + const overThreshold = envelope - thresholdLinear; + const kneeRange = kneeLinear / 2; + + if (params.knee > 0 && overThreshold < kneeRange) { + // In the knee region - smooth transition + const kneeRatio = overThreshold / kneeRange; + const compressionAmount = (1 - 1 / params.ratio) * kneeRatio; + gain = 1 - compressionAmount * (overThreshold / envelope); + } else { + // Above knee - full compression + const exceededDb = linearToDb(envelope) - params.threshold; + const gainReductionDb = exceededDb * (1 - 1 / params.ratio); + gain = dbToLinear(-gainReductionDb); + } + } + + // Apply gain reduction and makeup gain + outputData[i] = input * gain * makeupGainLinear; + } + } + + return outputBuffer; +} + +/** + * Apply limiting to audio buffer + */ +export async function applyLimiter( + buffer: AudioBuffer, + params: LimiterParameters +): Promise { + // Limiter is essentially a compressor with infinite ratio + return 
applyCompressor(buffer, { + threshold: params.threshold, + ratio: 100, // Very high ratio approximates infinity:1 + attack: params.attack, + release: params.release, + knee: 0, // Hard knee for brick-wall limiting + makeupGain: params.makeupGain, + }); +} + +/** + * Apply gate/expander to audio buffer + */ +export async function applyGate( + buffer: AudioBuffer, + params: GateParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const length = buffer.length; + const sampleRate = buffer.sampleRate; + + // Create output buffer + const outputBuffer = audioContext.createBuffer(channels, length, sampleRate); + + // Convert time constants to samples + const attackSamples = (params.attack / 1000) * sampleRate; + const releaseSamples = (params.release / 1000) * sampleRate; + + // Convert dB to linear + const thresholdLinear = dbToLinear(params.threshold); + const kneeLinear = dbToLinear(params.knee); + + // Process each channel + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + let envelope = 0; + + for (let i = 0; i < length; i++) { + const input = inputData[i]; + const inputAbs = Math.abs(input); + + // Envelope follower with attack/release + if (inputAbs > envelope) { + envelope = envelope + (inputAbs - envelope) / attackSamples; + } else { + envelope = envelope + (inputAbs - envelope) / releaseSamples; + } + + // Calculate gain reduction + let gain = 1.0; + + if (envelope < thresholdLinear) { + // Below threshold - apply expansion/gating + const belowThreshold = thresholdLinear - envelope; + const kneeRange = kneeLinear / 2; + + if (params.knee > 0 && belowThreshold < kneeRange) { + // In the knee region - smooth transition + const kneeRatio = belowThreshold / kneeRange; + const expansionAmount = (1 - params.ratio) * kneeRatio; + gain = 1 + expansionAmount * (belowThreshold / 
thresholdLinear); + } else { + // Below knee - full expansion + const belowDb = params.threshold - linearToDb(envelope); + const gainReductionDb = belowDb * (params.ratio - 1); + gain = dbToLinear(-gainReductionDb); + } + + // Clamp to prevent extreme amplification + gain = Math.max(0, Math.min(1, gain)); + } + + // Apply gain + outputData[i] = input * gain; + } + } + + return outputBuffer; +} + +/** + * Convert decibels to linear gain + */ +function dbToLinear(db: number): number { + return Math.pow(10, db / 20); +} + +/** + * Convert linear gain to decibels + */ +function linearToDb(linear: number): number { + return 20 * Math.log10(Math.max(linear, 0.00001)); // Prevent log(0) +} diff --git a/lib/audio/effects/fade.ts b/lib/audio/effects/fade.ts new file mode 100644 index 0000000..d1c882a --- /dev/null +++ b/lib/audio/effects/fade.ts @@ -0,0 +1,116 @@ +/** + * Fade in/out effects + */ + +import { getAudioContext } from '../context'; + +export type FadeType = 'linear' | 'exponential' | 'logarithmic'; + +/** + * Apply fade in to audio buffer + * @param buffer - Source audio buffer + * @param duration - Fade duration in seconds + * @param type - Fade curve type + * @returns New audio buffer with fade in applied + */ +export function applyFadeIn( + buffer: AudioBuffer, + duration: number, + type: FadeType = 'linear' +): AudioBuffer { + const audioContext = getAudioContext(); + const fadeSamples = Math.min( + Math.floor(duration * buffer.sampleRate), + buffer.length + ); + + const outputBuffer = audioContext.createBuffer( + buffer.numberOfChannels, + buffer.length, + buffer.sampleRate + ); + + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + for (let i = 0; i < buffer.length; i++) { + if (i < fadeSamples) { + const progress = i / fadeSamples; + const gain = calculateFadeGain(progress, type); + outputData[i] = inputData[i] * gain; + } 
else { + outputData[i] = inputData[i]; + } + } + } + + return outputBuffer; +} + +/** + * Apply fade out to audio buffer + * @param buffer - Source audio buffer + * @param duration - Fade duration in seconds + * @param type - Fade curve type + * @returns New audio buffer with fade out applied + */ +export function applyFadeOut( + buffer: AudioBuffer, + duration: number, + type: FadeType = 'linear' +): AudioBuffer { + const audioContext = getAudioContext(); + const fadeSamples = Math.min( + Math.floor(duration * buffer.sampleRate), + buffer.length + ); + const fadeStartSample = buffer.length - fadeSamples; + + const outputBuffer = audioContext.createBuffer( + buffer.numberOfChannels, + buffer.length, + buffer.sampleRate + ); + + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + for (let i = 0; i < buffer.length; i++) { + if (i >= fadeStartSample) { + const progress = (i - fadeStartSample) / fadeSamples; + const gain = calculateFadeGain(1 - progress, type); + outputData[i] = inputData[i] * gain; + } else { + outputData[i] = inputData[i]; + } + } + } + + return outputBuffer; +} + +/** + * Calculate fade gain based on progress and curve type + * @param progress - Progress from 0 to 1 + * @param type - Fade curve type + * @returns Gain value from 0 to 1 + */ +function calculateFadeGain(progress: number, type: FadeType): number { + switch (type) { + case 'linear': + return progress; + + case 'exponential': + // Exponential curve: faster at the start, slower at the end + return progress * progress; + + case 'logarithmic': + // Logarithmic curve: slower at the start, faster at the end + return Math.sqrt(progress); + + default: + return progress; + } +} diff --git a/lib/audio/effects/filters.ts b/lib/audio/effects/filters.ts new file mode 100644 index 0000000..2729867 --- /dev/null +++ b/lib/audio/effects/filters.ts @@ -0,0 +1,168 @@ +/** + 
* Audio filter effects using BiquadFilterNode + */ + +import { getAudioContext } from '../context'; + +export type FilterType = 'lowpass' | 'highpass' | 'bandpass' | 'lowshelf' | 'highshelf' | 'peaking' | 'notch' | 'allpass'; + +export interface FilterOptions { + type: FilterType; + frequency: number; + Q?: number; + gain?: number; +} + +/** + * Apply a filter to an audio buffer using offline audio processing + * @param buffer - Source audio buffer + * @param options - Filter options + * @returns New audio buffer with filter applied + */ +export async function applyFilter( + buffer: AudioBuffer, + options: FilterOptions +): Promise { + const audioContext = getAudioContext(); + + // Create offline context for processing + const offlineContext = new OfflineAudioContext( + buffer.numberOfChannels, + buffer.length, + buffer.sampleRate + ); + + // Create source from buffer + const source = offlineContext.createBufferSource(); + source.buffer = buffer; + + // Create and configure filter + const filter = offlineContext.createBiquadFilter(); + filter.type = options.type; + filter.frequency.setValueAtTime(options.frequency, offlineContext.currentTime); + + if (options.Q !== undefined) { + filter.Q.setValueAtTime(options.Q, offlineContext.currentTime); + } + + if (options.gain !== undefined) { + filter.gain.setValueAtTime(options.gain, offlineContext.currentTime); + } + + // Connect nodes + source.connect(filter); + filter.connect(offlineContext.destination); + + // Start playback and render + source.start(0); + const renderedBuffer = await offlineContext.startRendering(); + + return renderedBuffer; +} + +/** + * Apply low-pass filter (cuts high frequencies) + * @param buffer - Source audio buffer + * @param frequency - Cutoff frequency in Hz (default: 1000) + * @param Q - Quality factor (default: 1.0) + * @returns New audio buffer with filter applied + */ +export async function applyLowPassFilter( + buffer: AudioBuffer, + frequency: number = 1000, + Q: number = 1.0 +): 
Promise { + return applyFilter(buffer, { type: 'lowpass', frequency, Q }); +} + +/** + * Apply high-pass filter (cuts low frequencies) + * @param buffer - Source audio buffer + * @param frequency - Cutoff frequency in Hz (default: 100) + * @param Q - Quality factor (default: 1.0) + * @returns New audio buffer with filter applied + */ +export async function applyHighPassFilter( + buffer: AudioBuffer, + frequency: number = 100, + Q: number = 1.0 +): Promise { + return applyFilter(buffer, { type: 'highpass', frequency, Q }); +} + +/** + * Apply band-pass filter (isolates a frequency range) + * @param buffer - Source audio buffer + * @param frequency - Center frequency in Hz (default: 1000) + * @param Q - Quality factor/bandwidth (default: 1.0) + * @returns New audio buffer with filter applied + */ +export async function applyBandPassFilter( + buffer: AudioBuffer, + frequency: number = 1000, + Q: number = 1.0 +): Promise { + return applyFilter(buffer, { type: 'bandpass', frequency, Q }); +} + +/** + * Apply notch filter (removes a specific frequency) + * @param buffer - Source audio buffer + * @param frequency - Notch frequency in Hz (default: 1000) + * @param Q - Quality factor/bandwidth (default: 1.0) + * @returns New audio buffer with filter applied + */ +export async function applyNotchFilter( + buffer: AudioBuffer, + frequency: number = 1000, + Q: number = 1.0 +): Promise { + return applyFilter(buffer, { type: 'notch', frequency, Q }); +} + +/** + * Apply low shelf filter (boost/cut low frequencies) + * @param buffer - Source audio buffer + * @param frequency - Shelf frequency in Hz (default: 200) + * @param gain - Gain in dB (default: 6) + * @returns New audio buffer with filter applied + */ +export async function applyLowShelfFilter( + buffer: AudioBuffer, + frequency: number = 200, + gain: number = 6 +): Promise { + return applyFilter(buffer, { type: 'lowshelf', frequency, gain }); +} + +/** + * Apply high shelf filter (boost/cut high frequencies) + * @param 
buffer - Source audio buffer + * @param frequency - Shelf frequency in Hz (default: 3000) + * @param gain - Gain in dB (default: 6) + * @returns New audio buffer with filter applied + */ +export async function applyHighShelfFilter( + buffer: AudioBuffer, + frequency: number = 3000, + gain: number = 6 +): Promise { + return applyFilter(buffer, { type: 'highshelf', frequency, gain }); +} + +/** + * Apply peaking EQ filter (boost/cut a specific frequency band) + * @param buffer - Source audio buffer + * @param frequency - Center frequency in Hz (default: 1000) + * @param Q - Quality factor/bandwidth (default: 1.0) + * @param gain - Gain in dB (default: 6) + * @returns New audio buffer with filter applied + */ +export async function applyPeakingFilter( + buffer: AudioBuffer, + frequency: number = 1000, + Q: number = 1.0, + gain: number = 6 +): Promise { + return applyFilter(buffer, { type: 'peaking', frequency, Q, gain }); +} diff --git a/lib/audio/effects/gain.ts b/lib/audio/effects/gain.ts new file mode 100644 index 0000000..bd3238c --- /dev/null +++ b/lib/audio/effects/gain.ts @@ -0,0 +1,52 @@ +/** + * Gain/Volume adjustment effect + */ + +import { getAudioContext } from '../context'; + +/** + * Apply gain to an audio buffer + * @param buffer - Source audio buffer + * @param gainValue - Gain multiplier (1.0 = no change, 0.5 = -6dB, 2.0 = +6dB) + * @returns New audio buffer with gain applied + */ +export function applyGain(buffer: AudioBuffer, gainValue: number): AudioBuffer { + const audioContext = getAudioContext(); + const outputBuffer = audioContext.createBuffer( + buffer.numberOfChannels, + buffer.length, + buffer.sampleRate + ); + + // Apply gain to each channel + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + for (let i = 0; i < buffer.length; i++) { + outputData[i] = inputData[i] * gainValue; + // Clamp to prevent 
distortion + outputData[i] = Math.max(-1, Math.min(1, outputData[i])); + } + } + + return outputBuffer; +} + +/** + * Convert dB to gain multiplier + * @param db - Decibels + * @returns Gain multiplier + */ +export function dbToGain(db: number): number { + return Math.pow(10, db / 20); +} + +/** + * Convert gain multiplier to dB + * @param gain - Gain multiplier + * @returns Decibels + */ +export function gainToDb(gain: number): number { + return 20 * Math.log10(gain); +} diff --git a/lib/audio/effects/normalize.ts b/lib/audio/effects/normalize.ts new file mode 100644 index 0000000..f17348f --- /dev/null +++ b/lib/audio/effects/normalize.ts @@ -0,0 +1,132 @@ +/** + * Normalization effects + */ + +import { getAudioContext } from '../context'; + +/** + * Normalize audio to peak amplitude + * @param buffer - Source audio buffer + * @param targetPeak - Target peak amplitude (0.0 to 1.0, default 1.0) + * @returns New audio buffer with normalized audio + */ +export function normalizePeak(buffer: AudioBuffer, targetPeak: number = 1.0): AudioBuffer { + const audioContext = getAudioContext(); + + // Find the absolute peak across all channels + let maxPeak = 0; + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const channelData = buffer.getChannelData(channel); + for (let i = 0; i < buffer.length; i++) { + const abs = Math.abs(channelData[i]); + if (abs > maxPeak) { + maxPeak = abs; + } + } + } + + // Calculate gain factor + const gainFactor = maxPeak > 0 ? 
targetPeak / maxPeak : 1.0; + + // Create output buffer and apply gain + const outputBuffer = audioContext.createBuffer( + buffer.numberOfChannels, + buffer.length, + buffer.sampleRate + ); + + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + for (let i = 0; i < buffer.length; i++) { + outputData[i] = inputData[i] * gainFactor; + } + } + + return outputBuffer; +} + +/** + * Normalize audio to RMS (loudness) + * @param buffer - Source audio buffer + * @param targetRMS - Target RMS level (0.0 to 1.0, default 0.5) + * @returns New audio buffer with normalized audio + */ +export function normalizeRMS(buffer: AudioBuffer, targetRMS: number = 0.5): AudioBuffer { + const audioContext = getAudioContext(); + + // Calculate RMS across all channels + let sumSquares = 0; + let totalSamples = 0; + + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const channelData = buffer.getChannelData(channel); + for (let i = 0; i < buffer.length; i++) { + sumSquares += channelData[i] * channelData[i]; + totalSamples++; + } + } + + const currentRMS = Math.sqrt(sumSquares / totalSamples); + const gainFactor = currentRMS > 0 ? 
targetRMS / currentRMS : 1.0; + + // Create output buffer and apply gain + const outputBuffer = audioContext.createBuffer( + buffer.numberOfChannels, + buffer.length, + buffer.sampleRate + ); + + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + for (let i = 0; i < buffer.length; i++) { + outputData[i] = inputData[i] * gainFactor; + // Clamp to prevent distortion + outputData[i] = Math.max(-1, Math.min(1, outputData[i])); + } + } + + return outputBuffer; +} + +/** + * Get peak amplitude of audio buffer + * @param buffer - Audio buffer + * @returns Peak amplitude (0.0 to 1.0) + */ +export function getPeakAmplitude(buffer: AudioBuffer): number { + let maxPeak = 0; + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const channelData = buffer.getChannelData(channel); + for (let i = 0; i < buffer.length; i++) { + const abs = Math.abs(channelData[i]); + if (abs > maxPeak) { + maxPeak = abs; + } + } + } + return maxPeak; +} + +/** + * Get RMS amplitude of audio buffer + * @param buffer - Audio buffer + * @returns RMS amplitude + */ +export function getRMSAmplitude(buffer: AudioBuffer): number { + let sumSquares = 0; + let totalSamples = 0; + + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const channelData = buffer.getChannelData(channel); + for (let i = 0; i < buffer.length; i++) { + sumSquares += channelData[i] * channelData[i]; + totalSamples++; + } + } + + return Math.sqrt(sumSquares / totalSamples); +} diff --git a/lib/audio/effects/reverse.ts b/lib/audio/effects/reverse.ts new file mode 100644 index 0000000..18e6079 --- /dev/null +++ b/lib/audio/effects/reverse.ts @@ -0,0 +1,31 @@ +/** + * Reverse audio effect + */ + +import { getAudioContext } from '../context'; + +/** + * Reverse audio buffer + * @param buffer - Source audio buffer + * @returns New audio buffer with reversed audio + 
*/ +export function reverseAudio(buffer: AudioBuffer): AudioBuffer { + const audioContext = getAudioContext(); + const outputBuffer = audioContext.createBuffer( + buffer.numberOfChannels, + buffer.length, + buffer.sampleRate + ); + + // Reverse each channel + for (let channel = 0; channel < buffer.numberOfChannels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + for (let i = 0; i < buffer.length; i++) { + outputData[i] = inputData[buffer.length - 1 - i]; + } + } + + return outputBuffer; +} diff --git a/lib/audio/effects/selection.ts b/lib/audio/effects/selection.ts new file mode 100644 index 0000000..d237249 --- /dev/null +++ b/lib/audio/effects/selection.ts @@ -0,0 +1,128 @@ +/** + * Utilities for applying effects to audio selections + */ + +import type { Selection } from '@/types/selection'; +import { getAudioContext } from '../context'; + +/** + * Extract a region from an audio buffer + */ +export function extractRegion( + buffer: AudioBuffer, + startTime: number, + endTime: number +): AudioBuffer { + const audioContext = getAudioContext(); + const sampleRate = buffer.sampleRate; + const numberOfChannels = buffer.numberOfChannels; + + const startSample = Math.floor(startTime * sampleRate); + const endSample = Math.floor(endTime * sampleRate); + const length = endSample - startSample; + + const regionBuffer = audioContext.createBuffer( + numberOfChannels, + length, + sampleRate + ); + + for (let channel = 0; channel < numberOfChannels; channel++) { + const sourceData = buffer.getChannelData(channel); + const targetData = regionBuffer.getChannelData(channel); + + for (let i = 0; i < length; i++) { + targetData[i] = sourceData[startSample + i]; + } + } + + return regionBuffer; +} + +/** + * Replace a region in an audio buffer with processed audio + */ +export function replaceRegion( + originalBuffer: AudioBuffer, + processedRegion: AudioBuffer, + startTime: number +): AudioBuffer { + 
const audioContext = getAudioContext(); + const sampleRate = originalBuffer.sampleRate; + const numberOfChannels = originalBuffer.numberOfChannels; + + // Create new buffer with same length as original + const newBuffer = audioContext.createBuffer( + numberOfChannels, + originalBuffer.length, + sampleRate + ); + + const startSample = Math.floor(startTime * sampleRate); + + for (let channel = 0; channel < numberOfChannels; channel++) { + const originalData = originalBuffer.getChannelData(channel); + const processedData = processedRegion.getChannelData(channel); + const newData = newBuffer.getChannelData(channel); + + // Copy everything from original + for (let i = 0; i < originalBuffer.length; i++) { + newData[i] = originalData[i]; + } + + // Replace the selected region with processed data + for (let i = 0; i < processedRegion.length; i++) { + if (startSample + i < newBuffer.length) { + newData[startSample + i] = processedData[i]; + } + } + } + + return newBuffer; +} + +/** + * Apply an effect function to a selection, or entire buffer if no selection + */ +export function applyEffectToSelection( + buffer: AudioBuffer, + selection: Selection | null, + effectFn: (buffer: AudioBuffer) => AudioBuffer +): AudioBuffer { + if (!selection || selection.start === selection.end) { + // No selection, apply to entire buffer + return effectFn(buffer); + } + + // Extract the selected region + const region = extractRegion(buffer, selection.start, selection.end); + + // Apply effect to the region + const processedRegion = effectFn(region); + + // Replace the region in the original buffer + return replaceRegion(buffer, processedRegion, selection.start); +} + +/** + * Apply an async effect function to a selection, or entire buffer if no selection + */ +export async function applyAsyncEffectToSelection( + buffer: AudioBuffer, + selection: Selection | null, + effectFn: (buffer: AudioBuffer) => Promise +): Promise { + if (!selection || selection.start === selection.end) { + // No 
selection, apply to entire buffer + return await effectFn(buffer); + } + + // Extract the selected region + const region = extractRegion(buffer, selection.start, selection.end); + + // Apply effect to the region + const processedRegion = await effectFn(region); + + // Replace the region in the original buffer + return replaceRegion(buffer, processedRegion, selection.start); +} diff --git a/lib/audio/effects/time-based.ts b/lib/audio/effects/time-based.ts new file mode 100644 index 0000000..0f5c435 --- /dev/null +++ b/lib/audio/effects/time-based.ts @@ -0,0 +1,340 @@ +/** + * Time-based effects (Delay, Reverb, Chorus, Flanger, Phaser) + */ + +import { getAudioContext } from '../context'; + +export interface DelayParameters { + time: number; // ms - delay time + feedback: number; // 0-1 - amount of delayed signal fed back + mix: number; // 0-1 - dry/wet mix (0 = dry, 1 = wet) +} + +export interface ReverbParameters { + roomSize: number; // 0-1 - size of the reverb room + damping: number; // 0-1 - high frequency damping + mix: number; // 0-1 - dry/wet mix +} + +export interface ChorusParameters { + rate: number; // Hz - LFO rate + depth: number; // 0-1 - modulation depth + delay: number; // ms - base delay time + mix: number; // 0-1 - dry/wet mix +} + +export interface FlangerParameters { + rate: number; // Hz - LFO rate + depth: number; // 0-1 - modulation depth + feedback: number; // 0-1 - feedback amount + delay: number; // ms - base delay time + mix: number; // 0-1 - dry/wet mix +} + +export interface PhaserParameters { + rate: number; // Hz - LFO rate + depth: number; // 0-1 - modulation depth + feedback: number; // 0-1 - feedback amount + stages: number; // 2-12 - number of allpass filters + mix: number; // 0-1 - dry/wet mix +} + +/** + * Apply delay/echo effect to audio buffer + */ +export async function applyDelay( + buffer: AudioBuffer, + params: DelayParameters +): Promise { + const audioContext = getAudioContext(); + const channels = 
buffer.numberOfChannels; + const length = buffer.length; + const sampleRate = buffer.sampleRate; + + // Calculate delay in samples + const delaySamples = Math.floor((params.time / 1000) * sampleRate); + + // Create output buffer (needs extra length for delay tail) + const outputLength = length + delaySamples * 5; // Allow for multiple echoes + const outputBuffer = audioContext.createBuffer(channels, outputLength, sampleRate); + + // Process each channel + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + // Copy input and add delayed copies with feedback + for (let i = 0; i < outputLength; i++) { + let sample = 0; + + // Add original signal + if (i < length) { + sample += inputData[i] * (1 - params.mix); + } + + // Add delayed signal with feedback + let delayIndex = i; + let feedbackGain = params.mix; + + for (let echo = 0; echo < 10; echo++) { + delayIndex -= delaySamples; + if (delayIndex >= 0 && delayIndex < length) { + sample += inputData[delayIndex] * feedbackGain; + } + feedbackGain *= params.feedback; + if (feedbackGain < 0.001) break; // Stop when feedback is negligible + } + + outputData[i] = sample; + } + } + + return outputBuffer; +} + +/** + * Apply simple algorithmic reverb to audio buffer + */ +export async function applyReverb( + buffer: AudioBuffer, + params: ReverbParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const length = buffer.length; + const sampleRate = buffer.sampleRate; + + // Reverb uses multiple delay lines (Schroeder reverb algorithm) + const combDelays = [1557, 1617, 1491, 1422, 1277, 1356, 1188, 1116].map( + d => Math.floor(d * params.roomSize * (sampleRate / 44100)) + ); + const allpassDelays = [225, 556, 441, 341].map( + d => Math.floor(d * (sampleRate / 44100)) + ); + + // Create output buffer with reverb tail + const outputLength = length + 
Math.floor(sampleRate * 3 * params.roomSize); + const outputBuffer = audioContext.createBuffer(channels, outputLength, sampleRate); + + // Process each channel + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + // Comb filter buffers + const combBuffers = combDelays.map(delay => new Float32Array(delay)); + const combIndices = combDelays.map(() => 0); + + // Allpass filter buffers + const allpassBuffers = allpassDelays.map(delay => new Float32Array(delay)); + const allpassIndices = allpassDelays.map(() => 0); + + // Process samples + for (let i = 0; i < outputLength; i++) { + let input = i < length ? inputData[i] : 0; + let combSum = 0; + + // Parallel comb filters + for (let c = 0; c < combDelays.length; c++) { + const delayedSample = combBuffers[c][combIndices[c]]; + combSum += delayedSample; + + // Feedback with damping + const feedback = delayedSample * (0.84 - params.damping * 0.2); + combBuffers[c][combIndices[c]] = input + feedback; + + combIndices[c] = (combIndices[c] + 1) % combDelays[c]; + } + + // Average comb outputs + let sample = combSum / combDelays.length; + + // Series allpass filters + for (let a = 0; a < allpassDelays.length; a++) { + const delayed = allpassBuffers[a][allpassIndices[a]]; + const output = -sample + delayed; + allpassBuffers[a][allpassIndices[a]] = sample + delayed * 0.5; + sample = output; + allpassIndices[a] = (allpassIndices[a] + 1) % allpassDelays[a]; + } + + // Mix dry and wet + outputData[i] = input * (1 - params.mix) + sample * params.mix * 0.5; + } + } + + return outputBuffer; +} + +/** + * Apply chorus effect to audio buffer + */ +export async function applyChorus( + buffer: AudioBuffer, + params: ChorusParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const length = buffer.length; + const sampleRate = buffer.sampleRate; + + // Create output 
buffer + const outputBuffer = audioContext.createBuffer(channels, length, sampleRate); + + // Base delay in samples + const baseDelaySamples = (params.delay / 1000) * sampleRate; + const maxDelaySamples = baseDelaySamples + (params.depth * sampleRate * 0.005); + + // Process each channel + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + // Create delay buffer + const delayBuffer = new Float32Array(Math.ceil(maxDelaySamples) + 1); + let delayIndex = 0; + + for (let i = 0; i < length; i++) { + const input = inputData[i]; + + // Calculate LFO (Low Frequency Oscillator) + const lfoPhase = (i / sampleRate) * params.rate * 2 * Math.PI; + const lfo = Math.sin(lfoPhase); + + // Modulated delay time + const modulatedDelay = baseDelaySamples + (lfo * params.depth * sampleRate * 0.005); + + // Read from delay buffer with interpolation + const readIndex = (delayIndex - modulatedDelay + delayBuffer.length) % delayBuffer.length; + const readIndexInt = Math.floor(readIndex); + const readIndexFrac = readIndex - readIndexInt; + + const sample1 = delayBuffer[readIndexInt]; + const sample2 = delayBuffer[(readIndexInt + 1) % delayBuffer.length]; + const delayedSample = sample1 + (sample2 - sample1) * readIndexFrac; + + // Write to delay buffer + delayBuffer[delayIndex] = input; + delayIndex = (delayIndex + 1) % delayBuffer.length; + + // Mix dry and wet + outputData[i] = input * (1 - params.mix) + delayedSample * params.mix; + } + } + + return outputBuffer; +} + +/** + * Apply flanger effect to audio buffer + */ +export async function applyFlanger( + buffer: AudioBuffer, + params: FlangerParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const length = buffer.length; + const sampleRate = buffer.sampleRate; + + // Create output buffer + const outputBuffer = audioContext.createBuffer(channels, length, 
sampleRate); + + // Base delay in samples (shorter than chorus) + const baseDelaySamples = (params.delay / 1000) * sampleRate; + const maxDelaySamples = baseDelaySamples + (params.depth * sampleRate * 0.002); + + // Process each channel + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + // Create delay buffer + const delayBuffer = new Float32Array(Math.ceil(maxDelaySamples) + 1); + let delayIndex = 0; + + for (let i = 0; i < length; i++) { + const input = inputData[i]; + + // Calculate LFO + const lfoPhase = (i / sampleRate) * params.rate * 2 * Math.PI; + const lfo = Math.sin(lfoPhase); + + // Modulated delay time + const modulatedDelay = baseDelaySamples + (lfo * params.depth * sampleRate * 0.002); + + // Read from delay buffer with interpolation + const readIndex = (delayIndex - modulatedDelay + delayBuffer.length) % delayBuffer.length; + const readIndexInt = Math.floor(readIndex); + const readIndexFrac = readIndex - readIndexInt; + + const sample1 = delayBuffer[readIndexInt]; + const sample2 = delayBuffer[(readIndexInt + 1) % delayBuffer.length]; + const delayedSample = sample1 + (sample2 - sample1) * readIndexFrac; + + // Write to delay buffer with feedback + delayBuffer[delayIndex] = input + delayedSample * params.feedback; + delayIndex = (delayIndex + 1) % delayBuffer.length; + + // Mix dry and wet + outputData[i] = input * (1 - params.mix) + delayedSample * params.mix; + } + } + + return outputBuffer; +} + +/** + * Apply phaser effect to audio buffer + */ +export async function applyPhaser( + buffer: AudioBuffer, + params: PhaserParameters +): Promise { + const audioContext = getAudioContext(); + const channels = buffer.numberOfChannels; + const length = buffer.length; + const sampleRate = buffer.sampleRate; + + // Create output buffer + const outputBuffer = audioContext.createBuffer(channels, length, sampleRate); + + // Process each 
channel + for (let channel = 0; channel < channels; channel++) { + const inputData = buffer.getChannelData(channel); + const outputData = outputBuffer.getChannelData(channel); + + // Allpass filter state for each stage + const stages = Math.floor(params.stages); + const allpassStates = new Array(stages).fill(0); + + for (let i = 0; i < length; i++) { + let input = inputData[i]; + let output = input; + + // Calculate LFO + const lfoPhase = (i / sampleRate) * params.rate * 2 * Math.PI; + const lfo = Math.sin(lfoPhase); + + // Modulated allpass frequency (200Hz to 2000Hz) + const baseFreq = 200 + (lfo + 1) * 0.5 * 1800 * params.depth; + const omega = (2 * Math.PI * baseFreq) / sampleRate; + const alpha = (1 - Math.tan(omega / 2)) / (1 + Math.tan(omega / 2)); + + // Apply cascaded allpass filters + for (let stage = 0; stage < stages; stage++) { + const filtered = alpha * output + allpassStates[stage]; + allpassStates[stage] = output - alpha * filtered; + output = filtered; + } + + // Add feedback + output = output + output * params.feedback; + + // Mix dry and wet + outputData[i] = input * (1 - params.mix) + output * params.mix; + } + } + + return outputBuffer; +} diff --git a/lib/audio/player.ts b/lib/audio/player.ts index 40dbafd..0c33c51 100644 --- a/lib/audio/player.ts +++ b/lib/audio/player.ts @@ -75,8 +75,10 @@ export class AudioPlayer { pause(): void { if (!this.isPlaying) return; - this.pauseTime = this.getCurrentTime(); + // Save current time BEFORE calling stop (which resets it) + const savedTime = this.getCurrentTime(); this.stop(); + this.pauseTime = savedTime; this.isPaused = true; } diff --git a/lib/history/commands/effect-command.ts b/lib/history/commands/effect-command.ts new file mode 100644 index 0000000..3acfc63 --- /dev/null +++ b/lib/history/commands/effect-command.ts @@ -0,0 +1,242 @@ +/** + * Effect commands for undo/redo system + */ + +import { BaseCommand } from '../command'; + +export class EffectCommand extends BaseCommand { + private 
originalBuffer: AudioBuffer; + private modifiedBuffer: AudioBuffer; + private applyCallback: (buffer: AudioBuffer) => void; + private description: string; + + constructor( + originalBuffer: AudioBuffer, + modifiedBuffer: AudioBuffer, + applyCallback: (buffer: AudioBuffer) => void, + description: string + ) { + super(); + this.originalBuffer = originalBuffer; + this.modifiedBuffer = modifiedBuffer; + this.applyCallback = applyCallback; + this.description = description; + } + + getDescription(): string { + return this.description; + } + + execute(): void { + this.applyCallback(this.modifiedBuffer); + } + + undo(): void { + this.applyCallback(this.originalBuffer); + } + + redo(): void { + this.execute(); + } +} + +/** + * Factory function to create effect commands + */ +export function createEffectCommand( + originalBuffer: AudioBuffer, + effectFunction: (buffer: AudioBuffer) => AudioBuffer | Promise, + applyCallback: (buffer: AudioBuffer) => void, + description: string +): EffectCommand { + const result = effectFunction(originalBuffer); + const modifiedBuffer = result instanceof Promise ? 
originalBuffer : result; + return new EffectCommand(originalBuffer, modifiedBuffer, applyCallback, description); +} + +/** + * Factory function to create async effect commands + */ +export async function createAsyncEffectCommand( + originalBuffer: AudioBuffer, + effectFunction: (buffer: AudioBuffer) => Promise, + applyCallback: (buffer: AudioBuffer) => void, + description: string +): Promise { + const modifiedBuffer = await effectFunction(originalBuffer); + return new EffectCommand(originalBuffer, modifiedBuffer, applyCallback, description); +} + +/** + * Factory for gain effect command + */ +export function createGainCommand( + buffer: AudioBuffer, + gainValue: number, + applyCallback: (buffer: AudioBuffer) => void +): EffectCommand { + return createEffectCommand( + buffer, + (buf) => { + // Import will happen at runtime + const { applyGain } = require('@/lib/audio/effects/gain'); + return applyGain(buf, gainValue); + }, + applyCallback, + `Apply Gain (${gainValue.toFixed(2)}x)` + ); +} + +/** + * Factory for normalize peak command + */ +export function createNormalizePeakCommand( + buffer: AudioBuffer, + targetPeak: number, + applyCallback: (buffer: AudioBuffer) => void +): EffectCommand { + return createEffectCommand( + buffer, + (buf) => { + const { normalizePeak } = require('@/lib/audio/effects/normalize'); + return normalizePeak(buf, targetPeak); + }, + applyCallback, + `Normalize to Peak (${targetPeak.toFixed(2)})` + ); +} + +/** + * Factory for normalize RMS command + */ +export function createNormalizeRMSCommand( + buffer: AudioBuffer, + targetRMS: number, + applyCallback: (buffer: AudioBuffer) => void +): EffectCommand { + return createEffectCommand( + buffer, + (buf) => { + const { normalizeRMS } = require('@/lib/audio/effects/normalize'); + return normalizeRMS(buf, targetRMS); + }, + applyCallback, + `Normalize to RMS (${targetRMS.toFixed(2)})` + ); +} + +/** + * Factory for fade in command + */ +export function createFadeInCommand( + buffer: 
AudioBuffer, + duration: number, + applyCallback: (buffer: AudioBuffer) => void +): EffectCommand { + return createEffectCommand( + buffer, + (buf) => { + const { applyFadeIn } = require('@/lib/audio/effects/fade'); + return applyFadeIn(buf, duration); + }, + applyCallback, + `Fade In (${duration.toFixed(2)}s)` + ); +} + +/** + * Factory for fade out command + */ +export function createFadeOutCommand( + buffer: AudioBuffer, + duration: number, + applyCallback: (buffer: AudioBuffer) => void +): EffectCommand { + return createEffectCommand( + buffer, + (buf) => { + const { applyFadeOut } = require('@/lib/audio/effects/fade'); + return applyFadeOut(buf, duration); + }, + applyCallback, + `Fade Out (${duration.toFixed(2)}s)` + ); +} + +/** + * Factory for reverse command + */ +export function createReverseCommand( + buffer: AudioBuffer, + applyCallback: (buffer: AudioBuffer) => void +): EffectCommand { + return createEffectCommand( + buffer, + (buf) => { + const { reverseAudio } = require('@/lib/audio/effects/reverse'); + return reverseAudio(buf); + }, + applyCallback, + 'Reverse Audio' + ); +} + +/** + * Factory for low-pass filter command + */ +export async function createLowPassFilterCommand( + buffer: AudioBuffer, + frequency: number, + Q: number, + applyCallback: (buffer: AudioBuffer) => void +): Promise { + return createAsyncEffectCommand( + buffer, + async (buf) => { + const { applyLowPassFilter } = require('@/lib/audio/effects/filters'); + return await applyLowPassFilter(buf, frequency, Q); + }, + applyCallback, + `Low-Pass Filter (${frequency}Hz)` + ); +} + +/** + * Factory for high-pass filter command + */ +export async function createHighPassFilterCommand( + buffer: AudioBuffer, + frequency: number, + Q: number, + applyCallback: (buffer: AudioBuffer) => void +): Promise { + return createAsyncEffectCommand( + buffer, + async (buf) => { + const { applyHighPassFilter } = require('@/lib/audio/effects/filters'); + return await applyHighPassFilter(buf, frequency, 
Q); + }, + applyCallback, + `High-Pass Filter (${frequency}Hz)` + ); +} + +/** + * Factory for band-pass filter command + */ +export async function createBandPassFilterCommand( + buffer: AudioBuffer, + frequency: number, + Q: number, + applyCallback: (buffer: AudioBuffer) => void +): Promise { + return createAsyncEffectCommand( + buffer, + async (buf) => { + const { applyBandPassFilter } = require('@/lib/audio/effects/filters'); + return await applyBandPassFilter(buf, frequency, Q); + }, + applyCallback, + `Band-Pass Filter (${frequency}Hz)` + ); +}