diff --git a/roadmap.md b/roadmap.md index 3fb60ad..26fac21 100644 --- a/roadmap.md +++ b/roadmap.md @@ -23,7 +23,7 @@ The `Metronome` is the heart of the app. The BPM, measure, current tick, and tim - BPM - time signature - measure count -- [ ] when Writer updates Metronome, current tick and measure reset to 0 +- [x] ~when Writer updates Metronome, current tick and measure reset to 0~ decided this is not important - [x] Metronome Component must use Writer to set properties https://github.com/ericyd/loop-supreme/pull/3 - [x] Metronome Component must use Reader to display properties https://github.com/ericyd/loop-supreme/pull/3 - [x] Metronome must play an audible click on each tick https://github.com/ericyd/loop-supreme/pull/4 @@ -53,16 +53,16 @@ A `Track` is a single mono or stereo audio buffer that contains audio data. A `T - [ ] audio data can be cleared from component without deleting it (to preserve track name) - [x] deleting a track stops playback https://github.com/ericyd/loop-supreme/pull/13 - [x] Component can record data from user device https://github.com/ericyd/loop-supreme/pull/8 -- [ ] Component shows waveform of recorded audio +- [x] Component shows waveform of recorded audio https://github.com/ericyd/loop-supreme/pull/20 - [x] Component can adjust volume of playback https://github.com/ericyd/loop-supreme/pull/13 - [x] Component has mute toggle button https://github.com/ericyd/loop-supreme/pull/13 - [x] Audio input can be monitored, or not https://github.com/ericyd/loop-supreme/pull/13 - [x] When Component is armed for recording, audio data is recorded starting at the beginning of the next loop, and automatically stops at the beginning of the following loop https://github.com/ericyd/loop-supreme/pull/9 - [x] recording accounts for audio latency https://github.com/ericyd/loop-supreme/pull/12 -- [x] Component gets confirmation before deleting track -- [x] Fix Recording button styling/class (use Tailwind) +- [x] Component gets confirmation before deleting track https://github.com/ericyd/loop-supreme/pull/15 +- [x] Fix Recording button styling/class (use Tailwind) https://github.com/ericyd/loop-supreme/pull/15 - [ ] Ensure the audio buffer is always exactly as long as it needs to be to fill the loop -- [ ] clean up functionality from recorder worklet that isn't being used (might want to hold off until I know how visualization will work) +- [x] clean up functionality from recorder worklet that isn't being used (might want to hold off until I know how visualization will work) https://github.com/ericyd/loop-supreme/pull/20 ## Saving audio @@ -80,17 +80,14 @@ A `Track` is a single mono or stereo audio buffer that contains audio data. A `T ## HTML -- [x] flesh out header (add links to blog, etc) +- [x] flesh out header (add links to blog, etc) https://github.com/ericyd/loop-supreme/pull/16 - [x] track page views (done automatically through Cloudflare) -- [x] OG tags, SEO +- [x] OG tags, SEO https://github.com/ericyd/loop-supreme/pull/16 ## Deploy -- [ ] building (GH Actions) - - probably will need to "eject" CRA so I can customize webpack resolve hook. - - https://webpack.js.org/configuration/resolve/. - - Currently getting this error in built app: "Error: Module resolve hook not set" -- [ ] hosting (Cloudflare) +- [x] building (GH Actions) https://github.com/ericyd/loop-supreme/pull/17 and https://github.com/ericyd/loop-supreme/pull/19 +- [x] hosting (Cloudflare) ## Misc @@ -112,3 +109,4 @@ A `Track` is a single mono or stereo audio buffer that contains audio data. 
A `T
 - [x] show alert to user if latency cannot be detected due to their environment
 - [ ] show alert if track latency cannot be detected, or if it seems wildly out of the norm (~100ms +/- 20ms ???). Consider adding a "custom latency" input option???
 - [x] remove useInterval hook (not used)
+- [ ] investigate network calls to workers. Is this expected? It looks like the actual worker is being re-downloaded too frequently. Should these be in useMemo instead of useRef?
diff --git a/src/ControlPanel/MeasureCount.tsx b/src/ControlPanel/MeasureCount.tsx
deleted file mode 100644
index 167e94f..0000000
--- a/src/ControlPanel/MeasureCount.tsx
+++ /dev/null
@@ -1,34 +0,0 @@
-import ControlPanelItem from './ControlPanelItem'
-
-type MeasureCountProps = {
-  onChange(measureCount: number): void
-  measureCount: number
-}
-export default function MeasureCount(props: MeasureCountProps) {
-  const handleChange: React.ChangeEventHandler<HTMLInputElement> = (event) => {
-    const measureCount = Number(event.target.value)
-    if (Number.isNaN(measureCount)) {
-      throw new Error(`measure count "${event.target.value}" is not a number`)
-    }
-    props.onChange(measureCount)
-  }
-
-  return (
-
- {props.measureCount} - - measure{props.measureCount === 1 ? '' : 's'} - -
- -
-  )
-}
diff --git a/src/ControlPanel/MeasuresPerLoop.tsx b/src/ControlPanel/MeasuresPerLoop.tsx
new file mode 100644
index 0000000..a9953ad
--- /dev/null
+++ b/src/ControlPanel/MeasuresPerLoop.tsx
@@ -0,0 +1,36 @@
+import ControlPanelItem from './ControlPanelItem'
+
+type MeasuresPerLoopProps = {
+  onChange(measuresPerLoop: number): void
+  measuresPerLoop: number
+}
+export default function MeasuresPerLoop(props: MeasuresPerLoopProps) {
+  const handleChange: React.ChangeEventHandler<HTMLInputElement> = (event) => {
+    const measuresPerLoop = Number(event.target.value)
+    if (Number.isNaN(measuresPerLoop)) {
+      throw new Error(
+        `measures per loop "${event.target.value}" is not a number`
+      )
+    }
+    props.onChange(measuresPerLoop)
+  }
+
+  return (
+
+ + {props.measuresPerLoop} + + + measure{props.measuresPerLoop === 1 ? '' : 's'} + +
+ +
+ ) +} diff --git a/src/ControlPanel/index.tsx b/src/ControlPanel/index.tsx index 93a6440..1bae7c1 100644 --- a/src/ControlPanel/index.tsx +++ b/src/ControlPanel/index.tsx @@ -1,7 +1,7 @@ import React from 'react' import { useDebouncedCallback } from 'use-debounce' import { MetronomeReader, MetronomeWriter } from '../Metronome' -import MeasureCount from './MeasureCount' +import MeasuresPerLoop from './MeasuresPerLoop' import TimeSignature from './TimeSignature' import Tempo from './Tempo' import BeatCounter from './BeatCounter' @@ -37,9 +37,9 @@ export const ControlPanel: React.FC = ({ beatUnit={metronome.timeSignature.beatUnit} /> - diff --git a/src/Metronome/index.tsx b/src/Metronome/index.tsx index 091451f..36e68e4 100644 --- a/src/Metronome/index.tsx +++ b/src/Metronome/index.tsx @@ -2,7 +2,7 @@ import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react' import { useAudioRouter } from '../AudioRouter' import { ControlPanel } from '../ControlPanel' import { Scene } from '../Scene' -import { ClockConsumerMessage } from '../worklets/ClockWorker' +import type { ClockControllerMessage } from '../worklets/clock' import { decayingSine } from './waveforms' export type TimeSignature = { @@ -17,7 +17,7 @@ export type MetronomeReader = { bpm: number currentTick: number timeSignature: TimeSignature - measureCount: number + measuresPerLoop: number currentMeasure: number playing: boolean clock: Worker @@ -28,7 +28,7 @@ export type MetronomeReader = { export type MetronomeWriter = { setBpm: (bpm: number) => void setTimeSignature: (timeSignature: TimeSignature) => void - setMeasureCount: (count: number) => void + setMeasuresPerLoop: (count: number) => void togglePlaying: () => Promise setGain: (gain: number) => void setMuted: React.Dispatch> @@ -46,7 +46,7 @@ export const Metronome: React.FC = () => { beatsPerMeasure: 4, beatUnit: 4, }) - const [measureCount, setMeasureCount] = useState(2) + const [measuresPerLoop, setMeasuresPerLoop] = useState(2) const [playing, setPlaying] = useState(false) const [gain, setGain] = useState(0.5) const [muted, setMuted] = useState(false) @@ -102,9 +102,9 @@ export const Metronome: React.FC = () => { * because it can only be played once. 
*/ const clockMessageHandler = useCallback( - (event: MessageEvent) => { + (event: MessageEvent) => { // console.log(event.data) // this is really noisy - if (event.data.message === 'tick') { + if (event.data.message === 'TICK') { const { currentTick } = event.data setCurrentTick(currentTick) @@ -133,7 +133,7 @@ export const Metronome: React.FC = () => { if (playing) { await audioContext.suspend() clock.current.postMessage({ - message: 'stop', + message: 'STOP', }) setPlaying(false) } else { @@ -141,8 +141,8 @@ export const Metronome: React.FC = () => { clock.current.postMessage({ bpm, beatsPerMeasure: timeSignature.beatsPerMeasure, - measureCount, - message: 'start', + measuresPerLoop, + message: 'START', }) setPlaying(true) } @@ -152,10 +152,10 @@ export const Metronome: React.FC = () => { clock.current.postMessage({ bpm, beatsPerMeasure: timeSignature.beatsPerMeasure, - measureCount, - message: 'update', + measuresPerLoop, + message: 'UPDATE', }) - }, [bpm, timeSignature.beatsPerMeasure, measureCount]) + }, [bpm, timeSignature.beatsPerMeasure, measuresPerLoop]) const reader: MetronomeReader = { bpm, @@ -163,7 +163,7 @@ export const Metronome: React.FC = () => { // but we don't want to *show* -1 to the user currentTick: Math.max(currentTick % timeSignature.beatsPerMeasure, 0), timeSignature, - measureCount, + measuresPerLoop, currentMeasure: Math.max( Math.floor(currentTick / timeSignature.beatsPerMeasure), 0 @@ -176,7 +176,7 @@ export const Metronome: React.FC = () => { const writer: MetronomeWriter = { setBpm, setTimeSignature, - setMeasureCount, + setMeasuresPerLoop, togglePlaying, setGain, setMuted, diff --git a/src/Track/Waveform.tsx b/src/Track/Waveform.tsx new file mode 100644 index 0000000..b8f6e22 --- /dev/null +++ b/src/Track/Waveform.tsx @@ -0,0 +1,54 @@ +import { useEffect, useState } from 'react' +import { + WaveformControllerMessage, + WaveformWorkerInitializeMessage, +} from '../worklets/waveform' + +type Props = { + worker: Worker + sampleRate: number +} +export default function Waveform(props: Props) { + const [path, setPath] = useState('M 0 0') + + const yMax = 2 + const xMax = 20 + useEffect(() => { + props.worker.postMessage({ + message: 'INITIALIZE', + yMax, + xMax, + sampleRate: props.sampleRate, + } as WaveformWorkerInitializeMessage) + }) + + useEffect(() => { + props.worker.addEventListener( + 'message', + (event: MessageEvent) => { + if (event.data.message === 'WAVEFORM_PATH') { + setPath(event.data.path) + } + } + ) + }) + + return ( + + + + ) +} diff --git a/src/Track/index.tsx b/src/Track/index.tsx index 7ebe9fc..7ab82e8 100644 --- a/src/Track/index.tsx +++ b/src/Track/index.tsx @@ -9,11 +9,16 @@ import { useAudioRouter } from '../AudioRouter' import { MetronomeReader } from '../Metronome' import { logger } from '../util/logger' import { VolumeControl } from '../VolumeControl' -import { ClockConsumerMessage } from '../worklets/ClockWorker' +import type { ClockControllerMessage } from '../worklets/clock' +import type { + WaveformWorkerFrameMessage, + WaveformWorkerMetronomeMessage, +} from '../worklets/waveform' import ArmTrackRecording from './ArmTrackRecording' import { getLatencySamples } from './get-latency-samples' import MonitorInput from './MonitorInput' import RemoveTrack from './RemoveTrack' +import Waveform from './Waveform' type Props = { id: number @@ -24,7 +29,7 @@ type Props = { type RecordingProperties = { numberOfChannels: number sampleRate: number - maxRecordingFrames: number + maxRecordingSamples: number latencySamples: number /** * default: 
false @@ -36,20 +41,22 @@ type MaxRecordingLengthReachedMessage = { message: 'MAX_RECORDING_LENGTH_REACHED' } -type UpdateRecordingLengthMessage = { - message: 'UPDATE_RECORDING_LENGTH' +type ShareRecordingBufferMessage = { + message: 'SHARE_RECORDING_BUFFER' + channelsData: Array recordingLength: number } -type ShareRecordingBufferMessage = { - message: 'SHARE_RECORDING_BUFFER' - buffer: Array +type UpdateWaveformMessage = { + message: 'UPDATE_WAVEFORM' + gain: number + samplesPerFrame: number } type RecordingMessage = | MaxRecordingLengthReachedMessage - | UpdateRecordingLengthMessage | ShareRecordingBufferMessage + | UpdateWaveformMessage export const Track: React.FC = ({ id, onRemove, metronome }) => { const { audioContext, stream } = useAudioRouter() @@ -57,6 +64,9 @@ export const Track: React.FC = ({ id, onRemove, metronome }) => { const [armed, setArmed] = useState(false) const toggleArmRecording = () => setArmed((value) => !value) const [recording, setRecording] = useState(false) + const waveformWorker = useRef( + new Worker(new URL('../worklets/waveform', import.meta.url)) + ) /** * Set up track gain. @@ -103,18 +113,24 @@ export const Track: React.FC = ({ id, onRemove, metronome }) => { */ const buildRecorderMessageHandler = useCallback( (recordingProperties: RecordingProperties) => { - let recordingLength = 0 - - // If the max length is reached, we can no longer record. return (event: MessageEvent) => { + logger.debug({ recordingProcessorEventData: event.data }) + // If the max length is reached, we can no longer record. if (event.data.message === 'MAX_RECORDING_LENGTH_REACHED') { - // isRecording = false; - logger.log(event.data) + // TODO: stop recording, or show alert or something + logger.error(event.data) } - if (event.data.message === 'UPDATE_RECORDING_LENGTH') { - recordingLength = event.data.recordingLength + + if (event.data.message === 'UPDATE_WAVEFORM') { + waveformWorker.current.postMessage({ + message: 'FRAME', + gain: event.data.gain, + samplesPerFrame: event.data.samplesPerFrame, + } as WaveformWorkerFrameMessage) } + if (event.data.message === 'SHARE_RECORDING_BUFFER') { + const recordingLength = event.data.recordingLength const recordingBuffer = audioContext.createBuffer( recordingProperties.numberOfChannels, // TODO: I'm also not sure if constructing the audioBuffer from the "recordingLength" indicated from the worklet is the best way. @@ -125,7 +141,7 @@ export const Track: React.FC = ({ id, onRemove, metronome }) => { ) for (let i = 0; i < recordingProperties.numberOfChannels; i++) { - // buffer is an Array of Float32Arrays; + // channelsData is an Array of Float32Arrays; // each element of Array is a channel, // which contains the raw samples for the audio data of that channel recordingBuffer.copyToChannel( @@ -140,7 +156,7 @@ export const Track: React.FC = ({ id, onRemove, metronome }) => { // [1] https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer/copyToChannel // [2] https://jsfiddle.net/y7qL9wr4/7 // TODO: should this be sliced to a maximum of buffer size? Maybe a non-issue? 
- event.data.buffer[i], + event.data.channelsData[i], i, 0 ) @@ -171,13 +187,14 @@ export const Track: React.FC = ({ id, onRemove, metronome }) => { const recordingProperties: RecordingProperties = { numberOfChannels: mediaSource.channelCount, sampleRate: audioContext.sampleRate, - maxRecordingFrames: audioContext.sampleRate * 10, + maxRecordingSamples: audioContext.sampleRate * 10, latencySamples: getLatencySamples( audioContext.sampleRate, stream, audioContext ), } + logger.debug({ recordingProperties }) recorderWorklet.current = new AudioWorkletNode(audioContext, 'recorder', { processorOptions: recordingProperties, }) @@ -205,6 +222,19 @@ export const Track: React.FC = ({ id, onRemove, metronome }) => { } }, [audioContext, buildRecorderMessageHandler, stream]) + /** + * Update waveform worker when metronome parameters change, + * so waveforms can be scaled properly + */ + useEffect(() => { + waveformWorker.current.postMessage({ + message: 'UPDATE_METRONOME', + beatsPerSecond: metronome.bpm / 60, + measuresPerLoop: metronome.measuresPerLoop, + beatsPerMeasure: metronome.timeSignature.beatsPerMeasure, + } as WaveformWorkerMetronomeMessage) + }) + const handleChangeTitle: ChangeEventHandler = (event) => { setTitle(event.target.value) } @@ -227,7 +257,7 @@ export const Track: React.FC = ({ id, onRemove, metronome }) => { } } - function delegateClockMessage(event: MessageEvent) { + function delegateClockMessage(event: MessageEvent) { if (event.data.loopStart) { handleLoopstart() } @@ -241,7 +271,7 @@ export const Track: React.FC = ({ id, onRemove, metronome }) => { }) return ( -
+
{/* Controls */}
{/* Title */} @@ -281,8 +311,11 @@ export const Track: React.FC = ({ id, onRemove, metronome }) => {
{/* Waveform */} -
- This is where the waveform will go +
+
) diff --git a/src/util/logger.ts b/src/util/logger.ts index 057dbdd..dac81df 100644 --- a/src/util/logger.ts +++ b/src/util/logger.ts @@ -1,10 +1,12 @@ +const isDebug = + new URLSearchParams(window.location.search).get('debug') !== null // a simple wrapper around "console" so debug logs can be left in place export const logger = { log: console.log.bind(console), info: console.info.bind(console), error: console.error.bind(console), debug: (...args: unknown[]) => { - if (new URLSearchParams(window.location.search).get('debug') !== null) { + if (isDebug) { console.debug(...args) } }, diff --git a/src/worklets/ClockWorker.ts b/src/worklets/ClockWorker.ts deleted file mode 100644 index eafdaab..0000000 --- a/src/worklets/ClockWorker.ts +++ /dev/null @@ -1,31 +0,0 @@ -type ClockWorkerStartMessage = { - message: 'start' - bpm: number - beatsPerMeasure: number - measureCount: number -} - -type ClockWorkerUpdateMessage = { - message: 'update' - bpm: number - beatsPerMeasure: number - measureCount: number -} - -type ClockWorkerStopMessage = { - message: 'stop' -} - -export type ClockWorkerMessage = - | ClockWorkerStartMessage - | ClockWorkerUpdateMessage - | ClockWorkerStopMessage - -export type ClockConsumerMessage = { - currentTick: number - // true on the first beat of each measure - downbeat: boolean - // true on the first beat of each loop - loopStart: boolean - message: 'tick' -} diff --git a/src/worklets/clock.ts b/src/worklets/clock.ts index 65595ac..c25f2e0 100644 --- a/src/worklets/clock.ts +++ b/src/worklets/clock.ts @@ -22,27 +22,61 @@ /* eslint-disable no-restricted-globals */ -import type { ClockWorkerMessage } from './ClockWorker' +type ClockWorkerStartMessage = { + message: 'START' + bpm: number + beatsPerMeasure: number + measuresPerLoop: number +} + +type ClockWorkerUpdateMessage = { + message: 'UPDATE' + bpm: number + beatsPerMeasure: number + measuresPerLoop: number +} + +type ClockWorkerStopMessage = { + message: 'STOP' +} + +type ClockWorkerMessage = + | ClockWorkerStartMessage + | ClockWorkerUpdateMessage + | ClockWorkerStopMessage + +export type ClockControllerMessage = { + message: 'TICK' + currentTick: number + // true on the first beat of each measure + downbeat: boolean + // true on the first beat of each loop + loopStart: boolean +} -postMessage({ message: 'ready' }) +postMessage({ message: 'clock ready' }) let timeoutId: NodeJS.Timer | null = null let currentTick = -1 self.onmessage = (e: MessageEvent) => { - function start(bpm: number, beatsPerMeasure: number, measureCount: number) { + function start( + bpm: number, + beatsPerMeasure: number, + measuresPerLoop: number + ) { // post one message immediately so the start doesn't appear delayed by one beat - currentTick = (currentTick + 1) % (beatsPerMeasure * measureCount) + currentTick = (currentTick + 1) % (beatsPerMeasure * measuresPerLoop) postMessage({ - message: 'tick', + message: 'TICK', currentTick, downbeat: currentTick % beatsPerMeasure === 0, loopStart: currentTick === 0, }) timeoutId = setInterval(() => { - currentTick = (currentTick + 1) % (beatsPerMeasure * measureCount) + currentTick = (currentTick + 1) % (beatsPerMeasure * measuresPerLoop) postMessage({ - message: 'tick', + message: 'TICK', currentTick, downbeat: currentTick % beatsPerMeasure === 0, loopStart: currentTick === 0, @@ -50,16 +84,16 @@ self.onmessage = (e: MessageEvent) => { }, (60 / bpm) * 1000) } - if (e.data.message === 'start') { - start(e.data.bpm, e.data.beatsPerMeasure, e.data.measureCount) - } else if (e.data.message === 'stop') { + 
if (e.data.message === 'START') {
+    start(e.data.bpm, e.data.beatsPerMeasure, e.data.measuresPerLoop)
+  } else if (e.data.message === 'STOP') {
     clearInterval(timeoutId!)
     timeoutId = null
-  } else if (e.data.message === 'update') {
+  } else if (e.data.message === 'UPDATE') {
     // only start if it was already running
     if (timeoutId) {
       clearInterval(timeoutId)
-      start(e.data.bpm, e.data.beatsPerMeasure, e.data.measureCount)
+      start(e.data.bpm, e.data.beatsPerMeasure, e.data.measuresPerLoop)
     }
   }
 }
diff --git a/src/worklets/recorder.js b/src/worklets/recorder.js
index 957aa32..ac439c9 100644
--- a/src/worklets/recorder.js
+++ b/src/worklets/recorder.js
@@ -1,66 +1,166 @@
-// Copyright (c) 2022 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// (mostly) copied wholesale from https://github.com/GoogleChromeLabs/web-audio-samples/blob/eed2a8613af551f2b1d166a01c834e8431fdf3c6/src/audio-worklet/migration/worklet-recorder/recording-processor.js
-
 /**
  * Why JS?
- * It makes our lives vastly easier
- * The code in this file was heavily inspired by this Google example, and Monica Dinculescu's fantastic metronome test/example code:
+ * It makes our lives vastly easier. See scripts/postbuild.sh for more insight.
+ * What does this file do?
+ * It defines an AudioWorkletProcessor (much like a Worker) which processes audio input data
+ * and saves it to a buffer which can be used to play back the audio.
+ * Although the functionality here is fairly simple, this frees up a lot of
+ * processing power since AudioWorkletProcessors don't run on the main thread.
+ * The code in this file was heavily inspired by (and lifted from) this Google example:
  * https://github.com/GoogleChromeLabs/web-audio-samples/blob/eed2a8613af551f2b1d166a01c834e8431fdf3c6/src/audio-worklet/migration/worklet-recorder/recording-processor.js
- * https://glitch.com/edit/#!/metronomes?path=worker.js%3A1%3A0
  */
+
+/**
+ * @typedef Channel
+ * @type {Float32Array}
+ */
+
+/**
+ * @typedef Input
+ * @type {Channel[]}
+ */
+
+/**
+ * @typedef Output
+ * @type {Channel[]}
+ */
+
+// from https://github.com/microsoft/TypeScript/blob/d8aced98d9f68d5f036edba18075ac453432a4a2/lib/lib.dom.d.ts#L129-L135
+/**
+ * @typedef AudioWorkletNodeOptions
+ * @type {object}
+ * @property {number} numberOfInputs
+ * @property {number} numberOfOutputs
+ * @property {number[]} outputChannelCount
+ * @property {Record<string, number>} parameterData
+ * @property {any} processorOptions
+ */
+
+/**
+ * Worklet to record data to a buffer.
+ * Terms
+ * - sample: a datum of audio
+ * - block: a group of samples
+ * - frame: a group of blocks
+ */
 class RecordingProcessor extends AudioWorkletProcessor {
+  /**
+   * @param {AudioWorkletNodeOptions} options
+   */
   constructor(options) {
     super()
     this.sampleRate = options.processorOptions?.sampleRate ?? 0
-    this.maxRecordingFrames = options.processorOptions?.maxRecordingFrames ?? 0
+    this.maxRecordingSamples =
+      options.processorOptions?.maxRecordingSamples ?? 0
     this.numberOfChannels = options.processorOptions?.numberOfChannels ?? 0
     this.latencySamples = options.processorOptions?.latencySamples ?? 0
-    this._recordingBuffer = new Array(this.numberOfChannels).fill(
-      new Float32Array(this.maxRecordingFrames)
-    )
-    this.recordedFrames = 0
+    // Allocate one Float32Array per channel.
+    // Note: `new Array(n).fill(new Float32Array(len))` would fill every slot
+    // with a reference to the *same* Float32Array, so all channels would share one buffer.
+    this.channelsData = Array.from(
+      { length: this.numberOfChannels },
+      () => new Float32Array(this.maxRecordingSamples)
+    )
+
+    // recordedSamples is incremented by the blockSize when input blocks are processed.
+    // Since Float32Arrays must be initialized with a known length,
+    // this mutable property keeps track of how much we've processed, so we don't try to overflow the buffer
+    this.recordedSamples = 0
+
+    // Simple boolean to indicate whether or not we are currently recording.
+    // Even when this is false, we still process the input -> output in case
+    // the input is being monitored.
     this.recording = false
 
-    // We will use a timer to gate our messages; this one will publish at 60hz
-    this.framesSinceLastPublish = 0
-    this.publishInterval = this.sampleRate / 60
+    // Sending "update" messages every time the processor receives a block would be too frequent.
+    // We can gate the messages by number of blocks processed to reduce processing demand on the app listeners.
+    this.samplesSinceLastPublish = 0
+    const publishingCadenceHz = 60
+    this.targetSamplesPerFrame = this.sampleRate / publishingCadenceHz
 
-    // We will keep a live sum for rendering the visualizer.
-    this.sampleSum = 0
+    // The gain sum is the sum of the gain of each sample, for a given message.
+    // This gets averaged over the number of samples in the message when published back to the app.
+    // This provides a reasonable approximation of "gain" for a single message,
+    // which can be used to update the waveform visualizer.
+    this.gainSum = 0
 
+    // Consider defining a typedef for MessagePort, to constrain the types of messages it will send/receive
+    // From https://github.com/microsoft/TypeScript-DOM-lib-generator/blob/b929eb7863a3bf73f4a887fb97063276b10b92bc/baselines/audioworklet.generated.d.ts#L463-L482
     this.port.onmessage = (event) => {
       if (event.data.message === 'UPDATE_RECORDING_STATE') {
         this.recording = event.data.recording
 
+        // When the recording ends, send the buffer back to the Track
         if (this.recording === false) {
           this.port.postMessage({
             message: 'SHARE_RECORDING_BUFFER',
-            buffer: this._recordingBuffer,
+            channelsData: this.channelsData,
+            recordingLength: this.recordedSamples,
           })
         }
       }
     }
   }
 
-  process(inputs, outputs, params) {
+  /**
+   * @param {Input[]} inputs
+   * @param {Output[]} outputs
+   * @returns boolean "Returning true forces the Web Audio API to keep the node alive,
+   *                   while returning false allows the browser to terminate the node if
+   *                   it is neither generating new audio data nor receiving data through
+   *                   its inputs that it is processing" - MDN
+   */
+  process(inputs, outputs) {
     // I *think* this is required because of a short delay between when the AudioWorkletProcessor is registered, and when the source stream is connected to it
     if (inputs.length === 0 || inputs[0].length === 0) {
-      return
+      return true
     }
+
+    const blockSize = this.handleInput(inputs, outputs)
+    const shouldPublish =
+      this.samplesSinceLastPublish >= this.targetSamplesPerFrame
+
+    // Returned in a chain because `process` must return a boolean.
+    // If any method returns false, the downstream methods should not be called.
+    return (
+      this.handleMaxRecordingLength(blockSize) &&
+      this.incrementRecordedSamples(blockSize) &&
+      this.updateWaveform(shouldPublish, blockSize)
+    )
+  }
+
+  /**
+   * Processes input;
+   * passes it directly to output, for monitoring,
+   * and pushes it to the recording buffer if the worklet is currently recording.
+   *
+   * Note: Currently, audio data blocks are always 128 frames long—that is,
+   * they contain 128 32-bit floating-point samples for each of the inputs' channels.
+ * However, plans are already in place to revise the specification to allow + * the size of the audio blocks to be changed depending on circumstances + * (for example, if the audio hardware or CPU utilization is more efficient with larger block sizes). + * Therefore, you must always check the size of the sample array rather than assuming a particular size. + * This size may even be allowed to change over time, so you mustn't look at just the first block + * and assume the sample buffers will always be the same size. + * - https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor/process + * + * + * @param {Input[]} inputs + * @param {Output[]} outputs + * @returns number block size + */ + handleInput(inputs, outputs) { + const channelSampleLengths = [] for (let input = 0; input < inputs.length; input++) { for (let channel = 0; channel < inputs[input].length; channel++) { + channelSampleLengths.push(inputs[input][channel].length) for (let sample = 0; sample < inputs[input][channel].length; sample++) { const currentSample = inputs[input][channel][sample] // Copy data to recording buffer. if (this.recording) { - this._recordingBuffer[channel][ + this.channelsData[channel][ // The input hardware will have some recording latency. // To account for that latency, we shift the input data left by `latencySamples` samples. // Alternatives: @@ -68,59 +168,88 @@ class RecordingProcessor extends AudioWorkletProcessor { // However, to keep everything synchronized (including visuals eventually), // it made sense for the recording processor to automatically account for input latency. // See Track.tsx for latency determination - Math.max(sample + this.recordedFrames - this.latencySamples, 0) + Math.max(sample + this.recordedSamples - this.latencySamples, 0) ] = currentSample + + // Sum values for visualizer + this.gainSum += currentSample } // Monitor in the input by passing data directly to output, unchanged. // The output of the monitor is controlled in the Track via the monitorNode outputs[input][channel][sample] = currentSample - - // Sum values for visualizer - this.sampleSum += currentSample } } } + // I assume that the block sizes for each channel would never be different, though who knows! + return Math.max(...channelSampleLengths) + } - const shouldPublish = this.framesSinceLastPublish >= this.publishInterval + /** + * Validate that recording hasn't reached its limit. + * If it has, broadcast to the rest of the app. + * @param {number} blockSize + * @returns boolean + */ + handleMaxRecordingLength(blockSize) { + if ( + this.recording && + this.recordedSamples + blockSize >= this.maxRecordingSamples + ) { + this.recording = false + this.port.postMessage({ + message: 'MAX_RECORDING_LENGTH_REACHED', + }) - // Validate that recording hasn't reached its limit. - if (this.recording) { - if (this.recordedFrames + 128 < this.maxRecordingFrames) { - this.recordedFrames += 128 + return false + } + return true + } - // Post a recording recording length update on the clock's schedule - if (shouldPublish) { - this.port.postMessage({ - message: 'UPDATE_RECORDING_LENGTH', - recordingLength: this.recordedFrames, - }) - } - } else { - // Let the rest of the app know the limit was reached. 
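// A worked example of the publish-cadence math used by the methods around
// here (an illustrative sketch; the 48 kHz sample rate and 128-sample block
// size are assumed example values, not taken from this diff):
const exampleSampleRate = 48000
const examplePublishCadenceHz = 60
const exampleBlockSize = 128
// publish roughly 60 times per second => a target of 800 samples per frame
const exampleTarget = exampleSampleRate / examplePublishCadenceHz // 800
// the effective frame size rounds up to whole blocks:
// ceil(800 / 128) * 128 = 7 * 128 = 896 samples actually elapse between
// UPDATE_WAVEFORM messages, which is why the worklet reports samplesPerFrame
// instead of letting consumers assume the target value
const exampleSamplesPerFrame =
  Math.ceil(exampleTarget / exampleBlockSize) * exampleBlockSize // 896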
-        this.recording = false
-        this.port.postMessage({
-          message: 'MAX_RECORDING_LENGTH_REACHED',
-        })
-
-        return false
-      }
+  /**
+   * Increment the count of recorded samples, if it hasn't exceeded the max
+   * @param {number} blockSize
+   * @returns boolean
+   */
+  incrementRecordedSamples(blockSize) {
+    if (
+      this.recording &&
+      this.recordedSamples + blockSize < this.maxRecordingSamples
+    ) {
+      this.recordedSamples += blockSize
     }
+    return true
+  }
 
-    // Handle message clock.
-    // If we should publish, post message and reset clock.
+  /**
+   * If the processor should publish,
+   * publish a message with the average sample gain and reset the publish interval.
+   * The samples since last publish are incremented regardless.
+   * @param {boolean} shouldPublish
+   * @param {number} blockSize
+   * @returns boolean
+   */
+  updateWaveform(shouldPublish, blockSize) {
+    if (!this.recording) {
+      return true
+    }
     if (shouldPublish) {
       this.port.postMessage({
-        message: 'UPDATE_VISUALIZERS',
-        gain: this.sampleSum / this.framesSinceLastPublish,
+        message: 'UPDATE_WAVEFORM',
+        gain: this.gainSum / this.samplesSinceLastPublish,
+        // if samplesPerFrame is not evenly divisible by blockSize, then
+        // the actual samplesPerFrame will be higher than the calculated value in this.targetSamplesPerFrame
+        samplesPerFrame:
+          Math.ceil(this.targetSamplesPerFrame / blockSize) * blockSize,
       })
-      this.framesSinceLastPublish = 0
-      this.sampleSum = 0
-    } else {
-      this.framesSinceLastPublish += 128
+      this.samplesSinceLastPublish = 0
+      this.gainSum = 0
     }
-
+    // A block was still processed; this should be incremented regardless
+    // I think this source is incorrect: https://github.com/GoogleChromeLabs/web-audio-samples/blob/eed2a8613af551f2b1d166a01c834e8431fdf3c6/src/audio-worklet/migration/worklet-recorder/recording-processor.js#L108-L110
+    this.samplesSinceLastPublish += blockSize
     return true
   }
 }
diff --git a/src/worklets/waveform.ts b/src/worklets/waveform.ts
new file mode 100644
index 0000000..7961baf
--- /dev/null
+++ b/src/worklets/waveform.ts
@@ -0,0 +1,146 @@
+/* eslint-disable no-restricted-globals */
+
+export type WaveformWorkerFrameMessage = {
+  message: 'FRAME'
+  gain: number
+  samplesPerFrame: number
+}
+
+export type WaveformWorkerInitializeMessage = {
+  message: 'INITIALIZE'
+  yMax: number
+  xMax: number
+  sampleRate: number
+}
+
+export type WaveformWorkerMetronomeMessage = {
+  message: 'UPDATE_METRONOME'
+  beatsPerSecond: number
+  measuresPerLoop: number
+  beatsPerMeasure: number
+}
+
+export type WaveformWorkerMessage =
+  | WaveformWorkerFrameMessage
+  | WaveformWorkerInitializeMessage
+  | WaveformWorkerMetronomeMessage
+
+export type WaveformControllerMessage = {
+  message: 'WAVEFORM_PATH'
+  path: string
+  minGain: number
+  maxGain: number
+}
+
+postMessage({ message: 'waveform processor ready' })
+
+// an array of each "gain" value for every frame in the loop
+const samples: number[] = []
+
+// values corresponding to the waveform scaling
+const yMin = 0
+let yMax = 1
+let xMax = 1
+let samplesPerSecond = 1
+let minGain = 0
+let maxGain = 0
+let samplesPerLoop = 1
+
+self.onmessage = (e: MessageEvent<WaveformWorkerMessage>) => {
+  if (e.data.message === 'FRAME') {
+    // Absolute value gives us the "top" of the waveform; the "bottom" is calculated below in pathNegative
+    const frameGain = Math.abs(e.data.gain)
+    samples.push(frameGain)
+    minGain = Math.min(minGain, frameGain)
+    maxGain = Math.max(maxGain, frameGain)
+    const framesPerLoop = Math.ceil(samplesPerLoop / e.data.samplesPerFrame)
+
+    postMessage({
+      message:
'WAVEFORM_PATH', + path: constructPath(samples, framesPerLoop), + minGain, + maxGain, + } as WaveformControllerMessage) + } + + if (e.data.message === 'INITIALIZE') { + // yMax is actually the height; since we have a "top" and "bottom" waveform, we only need half for our scaling + yMax = e.data.yMax / 2 + xMax = e.data.xMax + samplesPerSecond = e.data.sampleRate + } + + if (e.data.message === 'UPDATE_METRONOME') { + // This is all to get the number of frames per loop. + // Each data point on the waveform corresponds to a frame (many samples). + // To correctly position the point along the x axis, we need to know how many frames to expect for the whole loop. + const beatsPerLoop = e.data.beatsPerMeasure * e.data.measuresPerLoop + samplesPerLoop = (samplesPerSecond * beatsPerLoop) / e.data.beatsPerSecond + } +} + +function constructPath(samples: number[], framesPerLoop: number) { + // on the first pass, we create [x, y] pairs for each point. + // x is a simple fraction of the total x-axis length. + // y is normalized to the scale of the waveform. + // This represents the "top" of the closed waveform shape; the bottom is created below + const pathPositive = samples.map((gain, i) => [ + (i / framesPerLoop) * xMax, + map(minGain, maxGain, yMin, yMax, gain), + ]) + + // pathNegative is reversed because the end of the waveform top (pathPositive) + // should connect to the end of the waveform bottom (pathNegative) + const pathNegative = pathPositive.map(([x, y]) => [x, y * -1]).reverse() + + // construct the SVG path command + // Since the positive and negative paths are drawn in reverse order, + // the "xControlPointOffset" must be "* -1", so the control point is on the correct side of the end point. + return [ + `M ${pathPositive[0][0]} ${pathPositive[0][1]}`, + smoothCubicBezierPoints( + pathPositive.slice(1), + xMax / pathPositive.length / 4 + ), + smoothCubicBezierPoints(pathNegative, -xMax / pathNegative.length / 4), + `Z`, + ].join(' ') +} + +// algorithm taken from p5js https://github.com/processing/p5.js/blob/689359331166d085430146d4b6776a12d6a9c588/src/math/calculation.js#L448-L459 +// with naming taken from openrndr https://github.com/openrndr/openrndr/blob/2ca048076f6999cd79aee0d5b3db471152f59063/openrndr-math/src/commonMain/kotlin/org/openrndr/math/Mapping.kt#L8-L33 +function map( + beforeLeft: number, + beforeRight: number, + afterLeft: number, + afterRight: number, + value: number +) { + const newval = + ((value - beforeLeft) / (beforeRight - beforeLeft)) * + (afterRight - afterLeft) + + afterLeft + if (afterLeft < afterRight) { + return clamp(afterLeft, afterRight, newval) + } else { + return clamp(afterRight, afterLeft, newval) + } +} + +// https://github.com/processing/p5.js/blob/689359331166d085430146d4b6776a12d6a9c588/src/math/calculation.js#L110-L113 +function clamp(low: number, high: number, value: number) { + return Math.max(Math.min(value, high), low) +} + +// function straightLines(points: number[][]): string { +// return points.map(([x, y]) => `L ${x} ${y}`).join(' ') +// } + +function smoothCubicBezierPoints( + points: number[][], + xControlPointOffset: number +): string { + return points + .map(([x, y]) => `S ${x - xControlPointOffset},${y} ${x},${y}`) + .join(' ') +}
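// A minimal usage sketch of the waveform worker protocol defined above, as
// seen from the main thread (Waveform.tsx and Track/index.tsx do this for
// real). The metronome numbers here are assumed example values, and the
// message types are assumed to be imported from '../worklets/waveform'.
const waveformWorker = new Worker(
  new URL('../worklets/waveform', import.meta.url)
)
waveformWorker.postMessage({
  message: 'INITIALIZE',
  yMax: 2, // total height of the drawing area; the worker halves it for the top/bottom split
  xMax: 20,
  sampleRate: 48000,
} as WaveformWorkerInitializeMessage)
waveformWorker.postMessage({
  message: 'UPDATE_METRONOME',
  beatsPerSecond: 120 / 60, // 120 bpm
  measuresPerLoop: 2,
  beatsPerMeasure: 4,
} as WaveformWorkerMetronomeMessage)
// the worker now expects 48000 * (4 * 2) / 2 = 192000 samples per loop;
// each UPDATE_WAVEFORM frame forwarded to it as a FRAME message appends one
// point and yields a fresh path
waveformWorker.addEventListener(
  'message',
  (event: MessageEvent<WaveformControllerMessage>) => {
    if (event.data.message === 'WAVEFORM_PATH') {
      console.log(event.data.path) // "M ... S ... Z", ready for an SVG path element's `d` attribute
    }
  }
)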