diff --git a/src/bench.ts b/src/bench.ts
index 4c9e844..6f1ad15 100644
--- a/src/bench.ts
+++ b/src/bench.ts
@@ -20,7 +20,7 @@ import {
 } from './constants'
 import { createBenchEvent } from './event'
 import { Task } from './task'
-import { type JSRuntime, mToNs, now, runtime, runtimeVersion } from './utils'
+import { invariant, type JSRuntime, mToNs, now, runtime, runtimeVersion } from './utils'
 
 /**
  * The Bench class keeps track of the benchmark tasks and controls them.
@@ -207,6 +207,20 @@ export class Bench extends EventTarget {
     return values
   }
 
+  runSync (): Task[] {
+    invariant(this.concurrency === null, 'Cannot use `concurrency` option when using `runSync`')
+    if (this.opts.warmup) {
+      this.warmupTasksSync()
+    }
+    const values: Task[] = []
+    this.dispatchEvent(createBenchEvent('start'))
+    for (const task of this._tasks.values()) {
+      values.push(task.runSync())
+    }
+    this.dispatchEvent(createBenchEvent('complete'))
+    return values
+  }
+
   /**
    * table of the tasks results
    * @param convert - an optional callback to convert the task result to a table record
@@ -258,4 +272,14 @@ export class Bench extends EventTarget {
       }
     }
   }
+
+  /**
+   * warmup the benchmark tasks (sync version)
+   */
+  private warmupTasksSync (): void {
+    this.dispatchEvent(createBenchEvent('warmup'))
+    for (const task of this._tasks.values()) {
+      task.warmupSync()
+    }
+  }
 }
diff --git a/src/task.ts b/src/task.ts
index d720e78..fb21c77 100644
--- a/src/task.ts
+++ b/src/task.ts
@@ -12,7 +12,7 @@ import type {
 } from './types'
 
 import { createBenchEvent, createErrorEvent } from './event'
-import { getStatisticsSorted, isFnAsyncResource } from './utils'
+import { getStatisticsSorted, invariant, isFnAsyncResource, isPromiseLike } from './utils'
 
 /**
  * A class that represents each benchmark task in Tinybench. It keeps track of the
@@ -109,66 +109,38 @@ export class Task extends EventTarget {
     )) as { error?: Error; samples?: number[] }
     await this.bench.opts.teardown?.(this, 'run')
 
-    if (latencySamples) {
-      this.runs = latencySamples.length
-      const totalTime = latencySamples.reduce((a, b) => a + b, 0)
+    this.processRunResult({ error, latencySamples })
 
-      // Latency statistics
-      const latencyStatistics = getStatisticsSorted(
-        latencySamples.sort((a, b) => a - b)
-      )
+    return this
+  }
 
-      // Throughput statistics
-      const throughputSamples = latencySamples
-        .map(sample =>
-          sample !== 0 ? 1000 / sample : 1000 / latencyStatistics.mean
-        ) // Use latency average as imputed sample
-        .sort((a, b) => a - b)
-      const throughputStatistics = getStatisticsSorted(throughputSamples)
+  /**
+   * run the current task and write the results in `Task.result` object property
+   * @returns the current task
+   * @internal
+   */
+  runSync (): this {
+    if (this.result?.error) {
+      return this
+    }
 
-      if (this.bench.opts.signal?.aborted) {
-        return this
-      }
+    invariant(this.bench.concurrency === null, 'Cannot use `concurrency` option when using `runSync`')
+    this.dispatchEvent(createBenchEvent('start', this))
 
-      this.mergeTaskResult({
-        critical: latencyStatistics.critical,
-        df: latencyStatistics.df,
-        hz: throughputStatistics.mean,
-        latency: latencyStatistics,
-        max: latencyStatistics.max,
-        mean: latencyStatistics.mean,
-        min: latencyStatistics.min,
-        moe: latencyStatistics.moe,
-        p75: latencyStatistics.p75,
-        p99: latencyStatistics.p99,
-        p995: latencyStatistics.p995,
-        p999: latencyStatistics.p999,
-        period: totalTime / this.runs,
-        rme: latencyStatistics.rme,
-        runtime: this.bench.runtime,
-        runtimeVersion: this.bench.runtimeVersion,
-        samples: latencyStatistics.samples,
-        sd: latencyStatistics.sd,
-        sem: latencyStatistics.sem,
-        throughput: throughputStatistics,
-        totalTime,
-        variance: latencyStatistics.variance,
-      })
-    }
+    const setupResult = this.bench.opts.setup?.(this, 'run')
+    invariant(!isPromiseLike(setupResult), '`setup` function must be sync when using `runSync()`')
 
-    if (error) {
-      this.mergeTaskResult({ error })
-      this.dispatchEvent(createErrorEvent(this, error))
-      this.bench.dispatchEvent(createErrorEvent(this, error))
-      if (this.bench.opts.throws) {
-        throw error
-      }
-    }
+    const { error, samples: latencySamples } = (this.benchmarkSync(
+      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+      this.bench.opts.time!,
+      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+      this.bench.opts.iterations!
+    )) as { error?: Error; samples?: number[] }
 
-    this.dispatchEvent(createBenchEvent('cycle', this))
-    this.bench.dispatchEvent(createBenchEvent('cycle', this))
-    // cycle and complete are equal in Task
-    this.dispatchEvent(createBenchEvent('complete', this))
+    const teardownResult = this.bench.opts.teardown?.(this, 'run')
+    invariant(!isPromiseLike(teardownResult), '`teardown` function must be sync when using `runSync()`')
+
+    this.processRunResult({ error, latencySamples })
 
     return this
   }
@@ -191,14 +163,34 @@
     )) as { error?: Error }
     await this.bench.opts.teardown?.(this, 'warmup')
 
-    if (error) {
-      this.mergeTaskResult({ error })
-      this.dispatchEvent(createErrorEvent(this, error))
-      this.bench.dispatchEvent(createErrorEvent(this, error))
-      if (this.bench.opts.throws) {
-        throw error
-      }
+    this.postWarmup(error)
+  }
+
+  /**
+   * warmup the current task (sync version)
+   * @internal
+   */
+  warmupSync (): void {
+    if (this.result?.error) {
+      return
     }
+
+    this.dispatchEvent(createBenchEvent('warmup', this))
+
+    const setupResult = this.bench.opts.setup?.(this, 'warmup')
+    invariant(!isPromiseLike(setupResult), '`setup` function must be sync when using `runSync()`')
+
+    const { error } = (this.benchmarkSync(
+      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+      this.bench.opts.warmupTime!,
+      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+      this.bench.opts.warmupIterations!
+    )) as { error?: Error }
+
+    const teardownResult = this.bench.opts.teardown?.(this, 'warmup')
+    invariant(!isPromiseLike(teardownResult), '`teardown` function must be sync when using `runSync()`')
+
+    this.postWarmup(error)
   }
 
   private async benchmark (
@@ -278,6 +270,69 @@
     return { samples }
   }
 
+  private benchmarkSync (
+    time: number,
+    iterations: number
+  ): { error?: unknown; samples?: number[] } {
+    if (this.fnOpts.beforeAll != null) {
+      try {
+        const beforeAllResult = this.fnOpts.beforeAll.call(this)
+        invariant(!isPromiseLike(beforeAllResult), '`beforeAll` function must be sync when using `runSync()`')
+      } catch (error) {
+        return { error }
+      }
+    }
+
+    // TODO: factor out
+    let totalTime = 0 // ms
+    const samples: number[] = []
+    const benchmarkTask = () => {
+      if (this.fnOpts.beforeEach != null) {
+        const beforeEachResult = this.fnOpts.beforeEach.call(this)
+        invariant(!isPromiseLike(beforeEachResult), '`beforeEach` function must be sync when using `runSync()`')
+      }
+
+      let taskTime = 0 // ms
+
+      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+      const taskStart = this.bench.opts.now!()
+      // eslint-disable-next-line no-useless-call
+      const result = this.fn.call(this)
+      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+      taskTime = this.bench.opts.now!() - taskStart
+
+      invariant(!isPromiseLike(result), 'task function must be sync when using `runSync()`')
+
+      samples.push(taskTime)
+      totalTime += taskTime
+
+      if (this.fnOpts.afterEach != null) {
+        const afterEachResult = this.fnOpts.afterEach.call(this)
+        invariant(!isPromiseLike(afterEachResult), '`afterEach` function must be sync when using `runSync()`')
+      }
+    }
+
+    try {
+      while (
+        // eslint-disable-next-line no-unmodified-loop-condition
+        (totalTime < time || samples.length < iterations)) {
+        benchmarkTask()
+      }
+    } catch (error) {
+      return { error }
+    }
+
+    if (this.fnOpts.afterAll != null) {
+      try {
+        const afterAllResult = this.fnOpts.afterAll.call(this)
+        invariant(!isPromiseLike(afterAllResult), '`afterAll` function must be sync when using `runSync()`')
+      } catch (error) {
+        return { error }
+      }
+    }
+    return { samples }
+  }
+
   /**
    * merge into the result object values
    * @param result - the task result object to merge with the current result object values
@@ -288,4 +343,78 @@ export class Task extends EventTarget {
       ...result,
     }) as Readonly<TaskResult>
   }
+
+  private postWarmup (error: Error | undefined): void {
+    if (error) {
+      this.mergeTaskResult({ error })
+      this.dispatchEvent(createErrorEvent(this, error))
+      this.bench.dispatchEvent(createErrorEvent(this, error))
+      if (this.bench.opts.throws) {
+        throw error
+      }
+    }
+  }
+
+  private processRunResult ({ error, latencySamples }: { error?: Error, latencySamples?: number[] }): void {
+    if (latencySamples) {
+      this.runs = latencySamples.length
+      const totalTime = latencySamples.reduce((a, b) => a + b, 0)
+
+      // Latency statistics
+      const latencyStatistics = getStatisticsSorted(
+        latencySamples.sort((a, b) => a - b)
+      )
+
+      // Throughput statistics
+      const throughputSamples = latencySamples
+        .map(sample =>
+          sample !== 0 ? 1000 / sample : 1000 / latencyStatistics.mean
+        ) // Use latency average as imputed sample
+        .sort((a, b) => a - b)
+      const throughputStatistics = getStatisticsSorted(throughputSamples)
+
+      if (this.bench.opts.signal?.aborted) {
+        return
+      }
+
+      this.mergeTaskResult({
+        critical: latencyStatistics.critical,
+        df: latencyStatistics.df,
+        hz: throughputStatistics.mean,
+        latency: latencyStatistics,
+        max: latencyStatistics.max,
+        mean: latencyStatistics.mean,
+        min: latencyStatistics.min,
+        moe: latencyStatistics.moe,
+        p75: latencyStatistics.p75,
+        p99: latencyStatistics.p99,
+        p995: latencyStatistics.p995,
+        p999: latencyStatistics.p999,
+        period: totalTime / this.runs,
+        rme: latencyStatistics.rme,
+        runtime: this.bench.runtime,
+        runtimeVersion: this.bench.runtimeVersion,
+        samples: latencyStatistics.samples,
+        sd: latencyStatistics.sd,
+        sem: latencyStatistics.sem,
+        throughput: throughputStatistics,
+        totalTime,
+        variance: latencyStatistics.variance,
+      })
+    }
+
+    if (error) {
+      this.mergeTaskResult({ error })
+      this.dispatchEvent(createErrorEvent(this, error))
+      this.bench.dispatchEvent(createErrorEvent(this, error))
+      if (this.bench.opts.throws) {
+        throw error
+      }
+    }
+
+    this.dispatchEvent(createBenchEvent('cycle', this))
+    this.bench.dispatchEvent(createBenchEvent('cycle', this))
+    // cycle and complete are equal in Task
+    this.dispatchEvent(createBenchEvent('complete', this))
+  }
 }
diff --git a/src/utils.ts b/src/utils.ts
index 29ab80c..f8df398 100644
--- a/src/utils.ts
+++ b/src/utils.ts
@@ -158,7 +158,7 @@ export const now = performanceNow
  * @param maybePromiseLike - the value to check
  * @returns true if the value is a promise-like object
  */
-const isPromiseLike = <T>(
+export const isPromiseLike = <T>(
   maybePromiseLike: unknown
 ): maybePromiseLike is PromiseLike<T> =>
   maybePromiseLike !== null &&
@@ -335,3 +335,9 @@ export const getStatisticsSorted = (samples: number[]): Statistics => {
     variance: vr,
   }
 }
+
+export const invariant = (condition: boolean, message: string): void => {
+  if (!condition) {
+    throw new Error(message)
+  }
+}
diff --git a/test/index.test.ts b/test/index.test.ts
index a73dade..b1b08ff 100644
--- a/test/index.test.ts
+++ b/test/index.test.ts
@@ -3,10 +3,20 @@ import { expect, test, vi } from 'vitest'
 
 import { Bench, hrtimeNow, now, type Task } from '../src'
 
+/**
+ * @param ms amount of time to sleep in milliseconds
+ */
+function sleep (ms: number): void {
+  const start = performance.now()
+  while (performance.now() - start < ms) {
+    // noop
+  }
+}
+
 test.each([
   ['now()', now],
   ['hrtimeNow()', hrtimeNow],
-])('%s basic', { skip: platform() !== 'linux' }, async (_, _now) => {
+])('%s basic (async)', { skip: platform() !== 'linux' }, async (_, _now) => {
   const bench = new Bench({ iterations: 16, now: _now, time: 100 })
   bench
     .add('foo', async () => {
@@ -41,6 +51,44 @@
   ).toBeCloseTo(1000, 1)
 })
 
+test.each([
+  ['now()', now],
+  ['hrtimeNow()', hrtimeNow],
+])('%s basic (sync)', { skip: platform() !== 'linux' }, (_, _now) => {
+  const bench = new Bench({ iterations: 16, now: _now, time: 100 })
+  bench
+    .add('foo', () => {
+      sleep(50)
+    })
+    .add('bar', () => {
+      sleep(100)
+    })
+
+  bench.runSync()
+
+  const { tasks } = bench
+
+  expect(tasks.length).toEqual(2)
+
+  expect(tasks[0]?.name).toEqual('foo')
+  expect(tasks[0]?.result?.totalTime).toBeGreaterThan(50)
+  expect(tasks[0]?.result?.latency.mean).toBeGreaterThan(50)
+  // throughput mean is ops/s, period is ms unit value
+  expect(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    tasks[0]!.result!.throughput.mean * tasks[0]!.result!.period
+  ).toBeCloseTo(1000, 1)
+
+  expect(tasks[1]?.name).toEqual('bar')
+  expect(tasks[1]?.result?.totalTime).toBeGreaterThan(100)
+  expect(tasks[1]?.result?.latency.mean).toBeGreaterThan(100)
+  // throughput mean is ops/s, period is ms unit value
+  expect(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    tasks[1]!.result!.throughput.mean * tasks[1]!.result!.period
+  ).toBeCloseTo(1000, 1)
+})
+
 test('cannot add duplicate task', () => {
   const bench = new Bench()
   bench.add('foo', () => {
@@ -53,7 +101,7 @@
   ).toThrowError('Task "foo" already exists')
 })
 
-test('bench table', async () => {
+test('bench table (async)', async () => {
   const bench = new Bench({ iterations: 32, time: 100 })
   bench.add('foo', async () => {
     await new Promise(resolve => setTimeout(resolve, 1))
@@ -100,7 +148,54 @@
   ])
 })
 
-test('bench task runs and time consistency', async () => {
+test('bench table (sync)', () => {
+  const bench = new Bench({ iterations: 32, time: 100 })
+  bench.add('foo', () => {
+    // noop
+  })
+
+  bench.runSync()
+
+  expect(bench.table()).toStrictEqual([
+    /* eslint-disable perfectionist/sort-objects */
+    {
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+      'Task name': expect.any(String),
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+      'Latency average (ns)': expect.any(String),
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+      'Latency median (ns)': expect.any(String),
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+      'Throughput average (ops/s)': expect.any(String),
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+      'Throughput median (ops/s)': expect.any(String),
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+      Samples: expect.any(Number),
+    },
+    /* eslint-enable perfectionist/sort-objects */
+  ])
+
+  bench.remove('foo').add('bar', () => {
+    throw new Error('fake')
+  })
+
+  bench.runSync()
+
+  expect(bench.table()).toStrictEqual([
+    /* eslint-disable perfectionist/sort-objects */
+    {
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+      'Task name': expect.any(String),
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+      Error: expect.any(String),
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+      Stack: expect.any(String),
+    },
+    /* eslint-enable perfectionist/sort-objects */
+  ])
+})
+
+test('bench task runs and time consistency (async)', async () => {
   const bench = new Bench({ iterations: 32, time: 100 })
   bench.add('foo', async () => {
     await new Promise(resolve => setTimeout(resolve, 50))
   })
@@ -116,7 +211,23 @@
   expect(fooTask?.result?.totalTime).toBeGreaterThanOrEqual(bench.opts.time!)
 })
 
-test('events order', async () => {
+test('bench task runs and time consistency (sync)', () => {
+  const bench = new Bench({ iterations: 32, time: 100 })
+  bench.add('foo', () => {
+    sleep(50)
+  })
+
+  bench.runSync()
+
+  const fooTask = bench.getTask('foo')
+
+  // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+  expect(fooTask?.runs).toBeGreaterThanOrEqual(bench.opts.iterations!)
+  // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+  expect(fooTask?.result?.totalTime).toBeGreaterThanOrEqual(bench.opts.time!)
+})
+
+test('events order (async)', async () => {
   const controller = new AbortController()
   const bench = new Bench({
     iterations: 32,
@@ -126,10 +237,12 @@
     warmupTime: 0,
   })
   bench
-    // eslint-disable-next-line @typescript-eslint/no-empty-function
-    .add('foo', async () => {})
-    // eslint-disable-next-line @typescript-eslint/no-empty-function
-    .add('bar', async () => {})
+    .add('foo', async () => {
+      // noop
+    })
+    .add('bar', async () => {
+      // noop
+    })
     .add('error', () => {
       throw new Error('fake')
     })
@@ -194,8 +307,9 @@
     events.push('complete')
   })
 
-  // eslint-disable-next-line @typescript-eslint/no-empty-function
-  bench.add('temporary', () => {}).remove('temporary')
+  bench.add('temporary', () => {
+    // noop
+  }).remove('temporary')
 
   setTimeout(() => {
     controller.abort()
@@ -227,7 +341,103 @@
   expect(abortTask?.result).toBeUndefined()
 }, 10000)
 
-test('events order at task completion', async () => {
+test('events order (sync)', () => {
+  const bench = new Bench({
+    iterations: 32,
+    time: 100,
+    warmupIterations: 0,
+    warmupTime: 0,
+  })
+  bench
+    .add('foo', () => {
+      // noop
+    })
+    .add('bar', () => {
+      // noop
+    })
+    .add('error', () => {
+      throw new Error('fake')
+    })
+
+  const events: string[] = []
+
+  const error = bench.getTask('error')
+
+  error?.addEventListener('start', () => {
+    events.push('error-start')
+  })
+
+  error?.addEventListener('error', () => {
+    events.push('error-error')
+  })
+
+  error?.addEventListener('cycle', () => {
+    events.push('error-cycle')
+  })
+
+  error?.addEventListener('complete', () => {
+    events.push('error-complete')
+  })
+
+  bench.addEventListener('warmup', () => {
+    events.push('warmup')
+  })
+
+  bench.addEventListener('start', () => {
+    events.push('start')
+  })
+
+  bench.addEventListener('error', () => {
+    events.push('error')
+  })
+
+  bench.addEventListener('reset', () => {
+    events.push('reset')
+  })
+
+  bench.addEventListener('cycle', evt => {
+    expect(evt.task?.name.trim()).not.toBe('')
+    events.push('cycle')
+  })
+
+  bench.addEventListener('add', () => {
+    events.push('add')
+  })
+
+  bench.addEventListener('remove', () => {
+    events.push('remove')
+  })
+
+  bench.addEventListener('complete', () => {
+    events.push('complete')
+  })
+
+  bench.add('temporary', () => {
+    // noop
+  }).remove('temporary')
+
+  bench.runSync()
+  bench.reset()
+
+  expect(events).toStrictEqual([
+    'add',
+    'remove',
+    'warmup',
+    'start',
+    'cycle',
+    'cycle',
+    'error-start',
+    'error-error',
+    'error',
+    'error-cycle',
+    'cycle',
+    'error-complete',
+    'complete',
+    'reset',
+  ])
+}, 10000)
+
+test('events order at task completion (async)', async () => {
   const bench = new Bench({ iterations: 16, time: 100 })
 
   bench
@@ -258,7 +468,38 @@
   expect(tasks[1]?.name).toBe('bar')
 })
 
-test.each(['warmup', 'run'])('%s error event', async mode => {
+test('events order at task completion (sync)', () => {
+  const bench = new Bench({ iterations: 16, time: 100 })
+
+  bench
+    .add('foo', () => {
+      sleep(25)
+    })
+    .add('bar', () => {
+      sleep(50)
+    })
+
+  const events: string[] = []
+
+  const fooTask = bench.getTask('foo')
+  const barTask = bench.getTask('bar')
+  fooTask?.addEventListener('complete', () => {
+    events.push('foo-complete')
+    expect(events).toStrictEqual(['foo-complete'])
+  })
+  barTask?.addEventListener('complete', () => {
+    events.push('bar-complete')
+    expect(events).toStrictEqual(['foo-complete', 'bar-complete'])
+  })
+
+  const tasks = bench.runSync()
+
+  expect(tasks.length).toBe(2)
+  expect(tasks[0]?.name).toBe('foo')
+  expect(tasks[1]?.name).toBe('bar')
+})
+
+test.each(['warmup', 'run'])('%s error event (async)', async mode => {
   const bench = new Bench({
     iterations: 32,
     time: 100,
@@ -283,7 +524,32 @@
   expect(task?.result?.error).toStrictEqual(error)
 })
 
-test.each(['warmup', 'run'])('%s throws', async mode => {
+test.each(['warmup', 'run'])('%s error event (sync)', mode => {
+  const bench = new Bench({
+    iterations: 32,
+    time: 100,
+    warmup: mode === 'warmup',
+  })
+  const error = new Error()
+
+  bench.add('error', () => {
+    throw error
+  })
+
+  let err: Error | undefined
+  let task: Task | undefined
+  bench.addEventListener('error', evt => {
+    const { error: e, task: t } = evt
+    err = e
+    task = t
+  })
+
+  expect(bench.runSync()).toBeDefined()
+  expect(err).toStrictEqual(error)
+  expect(task?.result?.error).toStrictEqual(error)
+})
+
+test.each(['warmup', 'run'])('%s throws (async)', async mode => {
   const iterations = 1
   const bench = new Bench({
     iterations,
@@ -310,22 +576,103 @@
   expect(task?.result?.error).toStrictEqual(error)
 })
 
-test('detect faster task', { skip: platform() !== 'linux' }, async () => {
-  const bench = new Bench({ iterations: 32, time: 100 })
-  bench
-    .add('faster', async () => {
-      await new Promise(resolve => setTimeout(resolve, 0))
-    })
-    .add('slower', async () => {
-      await new Promise(resolve => setTimeout(resolve, 50))
-    })
-
-  await bench.run()
+test.each(['warmup', 'run'])('%s throws (sync)', mode => {
+  const iterations = 1
+  const bench = new Bench({
+    iterations,
+    throws: true,
+    warmup: mode === 'warmup',
+    warmupIterations: iterations,
+  })
+  const error = new Error()
 
-  const fasterTask = bench.getTask('faster')
-  const slowerTask = bench.getTask('slower')
+  bench.add('error', () => {
+    throw error
+  })
 
-  expect(fasterTask?.result?.latency.mean).toBeLessThan(
+  let err: Error | undefined
+  let task: Task | undefined
+  bench.addEventListener('error', evt => {
+    const { error: e, task: t } = evt
+    err = e
+    task = t
+  })
+
+  expect(() => {
+    bench.runSync()
+  }).toThrowError(error)
+  expect(err).toStrictEqual(error)
+  expect(task?.result?.error).toStrictEqual(error)
+})
+
+test('detect faster task (async)', { skip: platform() !== 'linux' }, async () => {
+  const bench = new Bench({ iterations: 32, time: 100 })
+  bench
+    .add('faster', async () => {
+      await new Promise(resolve => setTimeout(resolve, 0))
+    })
+    .add('slower', async () => {
+      await new Promise(resolve => setTimeout(resolve, 50))
+    })
+
+  await bench.run()
+
+  const fasterTask = bench.getTask('faster')
+  const slowerTask = bench.getTask('slower')
+
+  expect(fasterTask?.result?.latency.mean).toBeLessThan(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    slowerTask!.result!.latency.mean
+  )
+  expect(fasterTask?.result?.latency.min).toBeLessThan(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    slowerTask!.result!.latency.min
+  )
+  expect(fasterTask?.result?.latency.max).toBeLessThan(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    slowerTask!.result!.latency.max
+  )
+  // latency moe should be lesser since it's faster
+  expect(fasterTask?.result?.latency.moe).toBeLessThan(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    slowerTask!.result!.latency.moe
+  )
+
+  expect(fasterTask?.result?.throughput.mean).toBeGreaterThan(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    slowerTask!.result!.throughput.mean
+  )
+  expect(fasterTask?.result?.throughput.min).toBeGreaterThan(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    slowerTask!.result!.throughput.min
+  )
+  expect(fasterTask?.result?.throughput.max).toBeGreaterThan(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    slowerTask!.result!.throughput.max
+  )
+  // throughput moe should be greater since it's faster
+  expect(fasterTask?.result?.throughput.moe).toBeGreaterThan(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    slowerTask!.result!.throughput.moe
+  )
+})
+
+test('detect faster task (sync)', { skip: platform() !== 'linux' }, () => {
+  const bench = new Bench({ iterations: 32, time: 100 })
+  bench
+    .add('faster', () => {
+      // noop
+    })
+    .add('slower', () => {
+      sleep(50)
+    })
+
+  bench.runSync()
+
+  const fasterTask = bench.getTask('faster')
+  const slowerTask = bench.getTask('slower')
+
+  expect(fasterTask?.result?.latency.mean).toBeLessThan(
     // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
     slowerTask!.result!.latency.mean
   )
@@ -362,7 +709,7 @@
   )
 })
 
-test('statistics', async () => {
+test('statistics (async)', async () => {
   const bench = new Bench({ iterations: 32, time: 100 })
   bench.add('foo', async () => {
     await new Promise(resolve => setTimeout(resolve, 0))
@@ -450,7 +797,95 @@
   expect(fooTask?.result?.throughput.p999).toBeTypeOf('number')
 })
 
-test('setup and teardown', async () => {
+test('statistics (sync)', () => {
+  const bench = new Bench({ iterations: 32, time: 100 })
+  bench.add('foo', () => {
+    // noop
+  })
+  bench.runSync()
+
+  const fooTask = bench.getTask('foo')
+
+  expect(fooTask?.result).toBeDefined()
+  expect(fooTask?.result?.runtime).toStrictEqual(bench.runtime)
+  expect(fooTask?.result?.runtimeVersion).toStrictEqual(bench.runtimeVersion)
+  expect(fooTask?.result?.totalTime).toBeTypeOf('number')
+  expect(fooTask?.result?.period).toBeTypeOf('number')
+  // deprecated
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(Array.isArray(fooTask?.result?.samples)).toBe(true)
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.hz).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.min).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.max).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.mean).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.variance).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.sd).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.sem).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.df).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.critical).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.moe).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.rme).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.p75).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.p99).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.p995).toBeTypeOf('number')
+  // eslint-disable-next-line @typescript-eslint/no-deprecated
+  expect(fooTask?.result?.p999).toBeTypeOf('number')
+  // latency statistics
+  expect(fooTask?.result?.latency).toBeTypeOf('object')
+  expect(Array.isArray(fooTask?.result?.latency.samples)).toBe(true)
+  expect(fooTask?.result?.latency.min).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.max).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.mean).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.variance).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.sd).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.sem).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.df).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.critical).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.moe).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.rme).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.aad).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.mad).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.p50).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.p75).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.p99).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.p995).toBeTypeOf('number')
+  expect(fooTask?.result?.latency.p999).toBeTypeOf('number')
+  // throughput statistics
+  expect(fooTask?.result?.throughput).toBeTypeOf('object')
+  expect(Array.isArray(fooTask?.result?.throughput.samples)).toBe(true)
+  expect(fooTask?.result?.throughput.max).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.mean).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.variance).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.sd).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.sem).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.df).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.critical).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.moe).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.rme).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.aad).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.mad).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.p50).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.p75).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.p99).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.p995).toBeTypeOf('number')
+  expect(fooTask?.result?.throughput.p999).toBeTypeOf('number')
+})
+
+test('setup and teardown (async)', async () => {
   const calls: string[] = []
   const setup = vi.fn(() => {
     calls.push('setup')
@@ -480,7 +915,37 @@
   expect(calls).toStrictEqual(['setup', 'teardown', 'setup', 'teardown'])
 })
 
-test('task beforeAll, afterAll, beforeEach, afterEach', async () => {
+test('setup and teardown (sync)', () => {
+  const calls: string[] = []
+  const setup = vi.fn(() => {
+    calls.push('setup')
+  })
+  const teardown = vi.fn(() => {
+    calls.push('teardown')
+  })
+  const bench = new Bench({
+    iterations: 32,
+    setup,
+    teardown,
+    time: 100,
+  })
+  bench.add('foo', () => {
+    // noop
+  })
+  const fooTask = bench.getTask('foo')
+
+  bench.runSync()
+
+  expect(setup).toBeCalledWith(fooTask, 'warmup')
+  expect(setup).toBeCalledWith(fooTask, 'run')
+  expect(setup).toHaveBeenCalledTimes(2)
+  expect(teardown).toBeCalledWith(fooTask, 'warmup')
+  expect(teardown).toBeCalledWith(fooTask, 'run')
+  expect(teardown).toHaveBeenCalledTimes(2)
+  expect(calls).toStrictEqual(['setup', 'teardown', 'setup', 'teardown'])
+})
+
+test('task beforeAll, afterAll, beforeEach, afterEach (async)', async () => {
   const iterations = 128
   const bench = new Bench({
     iterations,
@@ -524,8 +989,52 @@
   expect(beforeEach.mock.calls.length).toBe(afterEach.mock.calls.length)
 })
 
+test('task beforeAll, afterAll, beforeEach, afterEach (sync)', () => {
+  const iterations = 128
+  const bench = new Bench({
+    iterations,
+    time: 0,
+    warmupIterations: iterations,
+    warmupTime: 0,
+  })
+
+  const beforeAll = vi.fn(function hook (this: Task) {
+    expect(this).toBe(bench.getTask('foo'))
+  })
+  const afterAll = vi.fn(function hook (this: Task) {
+    expect(this).toBe(bench.getTask('foo'))
+  })
+  const beforeEach = vi.fn(function hook (this: Task) {
+    expect(this).toBe(bench.getTask('foo'))
+  })
+  const afterEach = vi.fn(function hook (this: Task) {
+    expect(this).toBe(bench.getTask('foo'))
+  })
+  bench.add(
+    'foo',
+    () => {
+      // noop
+    },
+    {
+      afterAll,
+      afterEach,
+      beforeAll,
+      beforeEach,
+    }
+  )
+
+  bench.runSync()
+
+  expect(beforeAll).toHaveBeenCalledTimes(2 /* warmup + run */)
+  expect(afterAll).toHaveBeenCalledTimes(2 /* warmup + run */)
+  expect(beforeAll.mock.calls.length).toBe(afterAll.mock.calls.length)
+  expect(beforeEach).toHaveBeenCalledTimes(iterations * 2 /* warmup + run */)
+  expect(afterEach).toHaveBeenCalledTimes(iterations * 2 /* warmup + run */)
+  expect(beforeEach.mock.calls.length).toBe(afterEach.mock.calls.length)
+})
+
 test(
-  'task with promiseLike return',
+  'task with promiseLike return (async)',
   { skip: platform() !== 'linux' },
   async () => {
     const bench = new Bench({ iterations: 16, time: 100 })
@@ -546,7 +1055,73 @@
   }
 )
 
-test.each(['warmup', 'run'])('%s error handling', async mode => {
+test(
+  'task with promiseLike return (sync)',
+  () => {
+    const bench = new Bench({ iterations: 16, time: 100 })
+
+    bench
+      .add('foo', async () => {
+        // noop
+      })
+      .add('fum', () => ({
+        then: (resolve: () => void) => Promise.resolve(setTimeout(resolve, 50)),
+      }))
+      .add('bar', () => new Promise(resolve => setTimeout(resolve, 50)))
+
+    bench.runSync()
+
+    expect(bench.getTask('foo')?.result?.error?.message).toStrictEqual('task function must be sync when using `runSync()`')
+    expect(bench.getTask('fum')?.result?.error?.message).toStrictEqual('task function must be sync when using `runSync()`')
+    expect(bench.getTask('bar')?.result?.error?.message).toStrictEqual('task function must be sync when using `runSync()`')
+  }
)

+test(
+  'async hooks in sync tests',
+  () => {
+    const bench = new Bench({ iterations: 16, time: 100 })
+
+    bench
+      .add('async-beforeAll', () => {
+        // noop
+      }, {
+        beforeAll: async () => {
+          // noop
+        },
+      })
+      .add('async-beforeEach', () => {
+        // noop
+      }, {
+        beforeEach: async () => {
+          // noop
+        },
+      })
+      .add('async-afterAll', () => {
+        // noop
+      }, {
+        afterAll: async () => {
+          // noop
+        },
+      })
+      .add('async-afterEach', () => {
+        // noop
+      }, {
+        afterEach: async () => {
+          // noop
+        },
+      })
+
+    bench.runSync()
+
+    expect(bench.getTask('async-beforeAll')?.result?.error?.message).toStrictEqual('`beforeAll` function must be sync when using `runSync()`')
+    expect(bench.getTask('async-beforeEach')?.result?.error?.message).toStrictEqual('`beforeEach` function must be sync when using `runSync()`')
+    expect(bench.getTask('async-afterAll')?.result?.error?.message).toStrictEqual('`afterAll` function must be sync when using `runSync()`')
+    expect(bench.getTask('async-afterEach')?.result?.error?.message).toStrictEqual('`afterEach` function must be sync when using `runSync()`')
+  }
+)
+
+test.each(['warmup', 'run'])('%s error handling (async)', async mode => {
   const bench = new Bench({ warmup: mode === 'warmup' })
 
   const error = new Error('error')
@@ -565,7 +1140,30 @@
   expect(bench.getTask('baz')?.result?.error).toStrictEqual(promiseError)
 })
 
-test('throw error in beforeAll, afterAll, beforeEach, afterEach', async () => {
+test.each(['warmup', 'run'])('%s error handling (sync)', mode => {
+  const bench = new Bench({ warmup: mode === 'warmup' })
+
+  const error = new Error('error')
+
+  bench
+    .add('foo', () => {
+      throw error
+    })
+    .add('bar', () => {
+      throw error
+    })
+    .add('baz', () => {
+      throw error
+    })
+
+  bench.runSync()
+
+  expect(bench.getTask('foo')?.result?.error).toStrictEqual(error)
+  expect(bench.getTask('bar')?.result?.error).toStrictEqual(error)
+  expect(bench.getTask('baz')?.result?.error).toStrictEqual(error)
+})
+
+test('throw error in beforeAll, afterAll, beforeEach, afterEach (async)', async () => {
   const bench = new Bench()
 
   const BAerror = new Error('BeforeAll')
@@ -594,6 +1192,43 @@
   expect(bench.getTask('AA test')?.result?.error).toStrictEqual(AAerror)
 })
 
+test('throw error in beforeAll, afterAll, beforeEach, afterEach (sync)', () => {
+  const bench = new Bench()
+
+  const BAerror = new Error('BeforeAll')
+  const BEerror = new Error('BeforeEach')
+  const AEerror = new Error('AfterEach')
+  const AAerror = new Error('AfterAll')
+
+  bench
+    .add('BA test', () => 1, {
+      beforeAll: () => {
+        throw BAerror
+      }
+    })
+    .add('BE test', () => 1, {
+      beforeEach: () => {
+        throw BEerror
+      }
+    })
+    .add('AE test', () => 1, {
+      afterEach: () => {
+        throw AEerror
+      }
+    })
+    .add('AA test', () => 1, {
+      afterAll: () => {
+        throw AAerror
+      }
+    })
+  bench.runSync()
+
+  expect(bench.getTask('BA test')?.result?.error).toStrictEqual(BAerror)
+  expect(bench.getTask('BE test')?.result?.error).toStrictEqual(BEerror)
+  expect(bench.getTask('AE test')?.result?.error).toStrictEqual(AEerror)
+  expect(bench.getTask('AA test')?.result?.error).toStrictEqual(AAerror)
+})
+
 test('removing non-existing task should not throw', () => {
   const bench = new Bench()
   bench.addEventListener('remove', () => {
@@ -602,3 +1237,23 @@
 
   bench.remove('non-existent')
 })
+
+test('using concurrency should throw (sync)', () => {
+  const bench = new Bench({
+    throws: true,
+  })
+
+  bench.add('foo', () => 1)
+
+  bench.concurrency = 'task'
+
+  expect(() => {
+    bench.runSync()
+  }).toThrowError('Cannot use `concurrency` option when using `runSync`')
+
+  bench.concurrency = 'bench'
+
+  expect(() => {
+    bench.runSync()
+  }).toThrowError('Cannot use `concurrency` option when using `runSync`')
+})
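
Reviewer note: a minimal usage sketch of the synchronous API this patch adds. `Bench`, `runSync()`, the sync-only hook checks, and the concurrency invariant all come from the diff above; the `tinybench` import specifier is an assumption for illustration.

```ts
// Usage sketch for the new synchronous runner (assumes the package is
// consumed as `tinybench`; the API names are taken from this diff).
import { Bench } from 'tinybench'

const bench = new Bench({ iterations: 16, time: 100 })

bench
  // Task functions must be synchronous: a promise-like return value is
  // recorded as 'task function must be sync when using `runSync()`'.
  .add('sum', () => {
    let total = 0
    for (let i = 0; i < 1_000; i++) total += i
    return total
  }, {
    // Hooks must be synchronous too; async hooks are reported as task errors.
    beforeEach: () => { /* sync-only work */ },
  })

// `bench.concurrency` must stay `null`; otherwise `runSync()` throws
// 'Cannot use `concurrency` option when using `runSync`'.
const tasks = bench.runSync() // Task[], same shape as `await bench.run()`
console.table(bench.table())
```

Design-wise, `runSync()` mirrors `run()` but swaps the awaited `setup`/`teardown` and per-task hooks for `invariant(!isPromiseLike(...))` checks, so any promise-like value surfaces as a task error, or is thrown when `throws: true`.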