diff --git a/benchmarks.ps1 b/benchmarks.ps1
new file mode 100755
index 0000000000..fb2381c9ba
--- /dev/null
+++ b/benchmarks.ps1
@@ -0,0 +1,25 @@
+#! /usr/bin/env pwsh
+
+Param(
+    [string]$Configuration = "Release",
+    [switch]$Interactive
+)
+
+$ErrorActionPreference = "Stop"
+$ProgressPreference = "SilentlyContinue"
+
+Write-Host "Running benchmarks..."
+
+$additionalArgs = @()
+
+if ($Interactive -ne $true) {
+    $additionalArgs += "--"
+    $additionalArgs += "--filter"
+    $additionalArgs += "*"
+}
+
+$project = Join-Path "src" "Polly.Core.Benchmarks" "Polly.Core.Benchmarks.csproj"
+
+dotnet run --configuration $Configuration --framework net7.0 --project $project $additionalArgs
+
+exit $LASTEXITCODE
diff --git a/src/Polly.Core.Benchmarks/Program.cs b/src/Polly.Core.Benchmarks/Program.cs
index 5b9850ebe8..12a6e91cfb 100644
--- a/src/Polly.Core.Benchmarks/Program.cs
+++ b/src/Polly.Core.Benchmarks/Program.cs
@@ -10,4 +10,4 @@
     .AddJob(Job.MediumRun.WithToolchain(InProcessEmitToolchain.Instance))
     .AddDiagnoser(MemoryDiagnoser.Default);

-BenchmarkRunner.Run(typeof(PollyVersion).Assembly, config);
+BenchmarkSwitcher.FromAssembly(typeof(PollyVersion).Assembly).Run(args, config);
diff --git a/src/Polly.Core.Benchmarks/README.md b/src/Polly.Core.Benchmarks/README.md
index 8522847b9c..c14171e766 100644
--- a/src/Polly.Core.Benchmarks/README.md
+++ b/src/Polly.Core.Benchmarks/README.md
@@ -1,15 +1,15 @@
 # Benchmarks

-To run the benchmarks:
+To run the benchmarks, use the `benchmarks.ps1` script in the root of the repository:

 ``` powershell
-# run all benchmarks
-dotnet run -c release -f net7.0
+# Run all benchmarks
+./benchmarks.ps1

-# pick benchmarks to run
-dotnet run -c release -f net7.0 -- pick
+# Pick benchmarks to run
+./benchmarks.ps1 -Interactive
 ```

 The benchmark results are stored in [`BenchmarkDotNet.Artifacts/results`](BenchmarkDotNet.Artifacts/results/) folder.

-Run the benchmarks when your changes are significant enough to make sense running them. We do not use fixed hardware so your numbers might differ, however the important is the `Ratio` and `Alloc Ratio` which stays around the same or improves (ideally) between runs.
+Run the benchmarks when your changes are significant enough to justify running them. We do not use fixed hardware, so your absolute numbers might differ; what matters is that the `Ratio` and `Alloc Ratio` columns stay roughly the same or (ideally) improve between runs.
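
For reviewers, the sketch below is illustrative only (it is not part of the patch) and assumes the current directory is the repository root and the script's defaults are used (`-Configuration Release`, target framework `net7.0`). It spells out the `dotnet run` invocations that `benchmarks.ps1` composes in its two modes:

```powershell
# Illustrative expansion of what benchmarks.ps1 runs with its defaults;
# assumes the working directory is the repository root.

# Default (non-interactive): "--filter *" is forwarded to the benchmark app so
# every benchmark in the assembly runs without prompting.
dotnet run --configuration Release --framework net7.0 `
    --project src/Polly.Core.Benchmarks/Polly.Core.Benchmarks.csproj `
    -- --filter "*"

# With -Interactive: no extra arguments are forwarded, so BenchmarkSwitcher
# prompts for which benchmarks to run.
dotnet run --configuration Release --framework net7.0 `
    --project src/Polly.Core.Benchmarks/Polly.Core.Benchmarks.csproj
```

The `Program.cs` change is what makes the forwarded arguments take effect: `BenchmarkSwitcher.FromAssembly(...).Run(args, config)` parses command-line arguments such as `--filter` (and falls back to an interactive prompt when none are given), whereas the previous `BenchmarkRunner.Run(assembly, config)` call never saw `args`.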