Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update benchmarking macros #3934

Merged
merged 77 commits into from
Apr 10, 2024
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
Show all changes
77 commits
Select commit Hold shift + click to select a range
a3ddd5e
Update benchmarking
pgherveou Apr 2, 2024
db22ac0
Fix tests
pgherveou Apr 2, 2024
d3a6ccb
PR doc
pgherveou Apr 2, 2024
03ba18e
fix staking benchmark
pgherveou Apr 2, 2024
31e7f4c
fix prdoc
pgherveou Apr 2, 2024
7b4d0c8
Add on_before_start closure
pgherveou Apr 2, 2024
a560470
Fix fix
pgherveou Apr 2, 2024
c92091b
fix comment
pgherveou Apr 2, 2024
d701524
fix
pgherveou Apr 2, 2024
f311681
fix
pgherveou Apr 2, 2024
e52c535
PR comment
pgherveou Apr 3, 2024
f8f14ce
nit
pgherveou Apr 3, 2024
9bf84f8
fix bug
pgherveou Apr 3, 2024
c98c102
Merge branch 'master' into pg/bench_update
pgherveou Apr 3, 2024
cd46b3f
nit
pgherveou Apr 3, 2024
7623c2a
Merge branch 'master' of https://github.com/paritytech/polkadot-sdk i…
Apr 3, 2024
aa523d6
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
57d6d62
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
86e8349
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
b8f79bb
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
c07f03e
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
bbd3969
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
fafc4f7
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
81b98f9
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
6af2ae4
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
e0cd224
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
ae2ad48
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
9fe72a1
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
30b417c
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
b73ce1a
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
bef9679
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
9a8c341
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
2aa4306
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
f2d6a65
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
f55563a
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
183537e
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
141982f
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
50c12d2
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
a16e8b5
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
e39d75d
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
1f22c66
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
5da51b2
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
23e41ef
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
a3ed19c
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
c11f98b
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
ab93a87
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
5aa62a8
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
a513333
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
b94810f
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
93b1050
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
a4472d7
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
6ee198d
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
038ab82
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
e3ce75b
fixes
pgherveou Apr 3, 2024
b85041b
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
3368aae
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
dd3406e
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
b9459bb
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
2172ac9
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
7f5c8bb
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
2ceaa1a
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
3c1ff83
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
96f8123
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
a7a0f64
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
3450dc9
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
5209cd7
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
d15781f
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
5d57d63
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
7d2cdf4
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
8448264
".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime…
Apr 3, 2024
9197593
Ensure benchmark does not have forbidden variable names
pgherveou Apr 8, 2024
59c44a5
Merge branch 'master' into pg/bench_update
ggwpez Apr 8, 2024
e3bffd2
".git/.scripts/commands/bench-all/bench-all.sh" --target_dir=substrate
Apr 9, 2024
6823690
Merge branch 'master' into pg/bench_update
athei Apr 9, 2024
3c87518
Update constant
pgherveou Apr 9, 2024
253c2cf
Merge branch 'master' into pg/bench_update
pgherveou Apr 9, 2024
3575b22
Merge branch 'master' into pg/bench_update
pgherveou Apr 9, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions prdoc/pr_3934.prdoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
title: Update benchmarking macros

doc:
- audience: Runtime Dev
description: |
The current benchmarking macros return a closure with the captured benchmarked code.
This could cause compilation issues when the benchmarked code has complex lifetime requirements.
This PR updates the existing macros by injecting the recording parameter and invoking the start / stop methods around the benchmarked block instead of returning a closure.

crates:
- name: frame-benchmarking

77 changes: 29 additions & 48 deletions substrate/frame/benchmarking/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -262,15 +262,11 @@ mod benchmarks {
let components = <SelectedBenchmark as BenchmarkingSetup<Test>>::components(&selected);
assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]);

let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
&selected,
&[(BenchmarkParameter::b, 1)],
true,
)
.expect("failed to create closure");

new_test_ext().execute_with(|| {
assert_ok!(closure());
assert_ok!(<SelectedBenchmark as BenchmarkingSetup<Test>>::test_instance(
&selected,
&[(BenchmarkParameter::b, 1)],
));
});
}

Expand All @@ -281,15 +277,11 @@ mod benchmarks {
let components = <SelectedBenchmark as BenchmarkingSetup<Test>>::components(&selected);
assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]);

let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
&selected,
&[(BenchmarkParameter::b, 1)],
true,
)
.expect("failed to create closure");

new_test_ext().execute_with(|| {
assert_ok!(closure());
assert_ok!(<SelectedBenchmark as BenchmarkingSetup<Test>>::test_instance(
&selected,
&[(BenchmarkParameter::b, 1)],
));
});
}

Expand All @@ -300,60 +292,49 @@ mod benchmarks {
let components = <SelectedBenchmark as BenchmarkingSetup<Test>>::components(&selected);
assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]);

let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
&selected,
&[(BenchmarkParameter::x, 1)],
true,
)
.expect("failed to create closure");

assert_ok!(closure());
new_test_ext().execute_with(|| {
assert_ok!(<SelectedBenchmark as BenchmarkingSetup<Test>>::test_instance(
&selected,
&[(BenchmarkParameter::x, 1)],
));
});
}

#[test]
fn benchmarks_macro_verify_works() {
// Check postcondition for benchmark `set_value` is valid.
let selected = SelectedBenchmark::set_value;

let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
&selected,
&[(BenchmarkParameter::b, 1)],
true,
)
.expect("failed to create closure");

new_test_ext().execute_with(|| {
assert_ok!(closure());
assert_ok!(<SelectedBenchmark as BenchmarkingSetup<Test>>::test_instance(
&selected,
&[(BenchmarkParameter::b, 1)],
));
});

// Check postcondition for benchmark `bad_verify` is invalid.
let selected = SelectedBenchmark::bad_verify;

let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
&selected,
&[(BenchmarkParameter::x, 10000)],
true,
)
.expect("failed to create closure");

new_test_ext().execute_with(|| {
assert_err!(closure(), "You forgot to sort!");
assert_err!(
<SelectedBenchmark as BenchmarkingSetup<Test>>::test_instance(
&selected,
&[(BenchmarkParameter::x, 10000)],
),
"You forgot to sort!"
);
});
}

#[test]
fn benchmark_override_works() {
let selected = SelectedBenchmark::override_benchmark;

let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
&selected,
&[(BenchmarkParameter::b, 1)],
true,
)
.expect("failed to create closure");

new_test_ext().execute_with(|| {
let result = closure();
let result = <SelectedBenchmark as BenchmarkingSetup<Test>>::test_instance(
&selected,
&[(BenchmarkParameter::b, 1)],
);
assert!(matches!(result, Err(BenchmarkError::Override(_))));
});
}
Expand Down
68 changes: 66 additions & 2 deletions substrate/frame/benchmarking/src/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ use scale_info::TypeInfo;
use serde::{Deserialize, Serialize};
use sp_io::hashing::blake2_256;
use sp_runtime::{traits::TrailingZeroInput, DispatchError};
use sp_std::{prelude::Box, vec::Vec};
use sp_std::vec::Vec;
use sp_storage::TrackedStorageKey;

/// An alphabet of possible parameters to use for benchmarking.
Expand Down Expand Up @@ -342,6 +342,61 @@ pub trait Benchmarking {
) -> Result<Vec<BenchmarkResult>, BenchmarkError>;
}

/// The recording trait used to record the time and proof size of a benchmark iteration.
pub trait Recording {
/// Start the recording.
fn start(&mut self) {}

/// Stop the recording.
fn stop(&mut self) {}
}

/// A no-op recording, used when instantiating the benchmark test instance.
pub struct NoopRecording;
impl Recording for NoopRecording {}

/// Records the time and proof size of a single benchmark iteration.
#[derive(Default)]
pgherveou marked this conversation as resolved.
Show resolved Hide resolved
pub struct BenchmarkRecording {
start_extrinsic: u128,
finish_extrinsic: u128,
pgherveou marked this conversation as resolved.
Show resolved Hide resolved
start_pov: Option<u32>,
end_pov: Option<u32>,
}

impl Recording for BenchmarkRecording {
fn start(&mut self) {
self.start_pov = crate::benchmarking::proof_size();
self.start_extrinsic = crate::benchmarking::current_time();
}

fn stop(&mut self) {
self.finish_extrinsic = crate::benchmarking::current_time();
self.end_pov = crate::benchmarking::proof_size();
}
}

impl BenchmarkRecording {
pub fn elapsed_extrinsic(&self) -> u128 {
self.finish_extrinsic.saturating_sub(self.start_extrinsic)
}

pub fn start_pov(&self) -> Option<u32> {
self.start_pov
}

pub fn end_pov(&self) -> Option<u32> {
self.end_pov
}

pub fn diff_pov(&self) -> u32 {
match (self.start_pov, self.end_pov) {
(Some(start), Some(end)) => end.saturating_sub(start),
_ => Default::default(),
}
}
}

/// The required setup for creating a benchmark.
///
/// Instance generic parameter is optional and can be used in order to capture unused generics for
Expand All @@ -353,9 +408,18 @@ pub trait BenchmarkingSetup<T, I = ()> {
/// Set up the storage, and prepare a closure to run the benchmark.
fn instance(
&self,
recording: &mut impl Recording,
components: &[(BenchmarkParameter, u32)],
verify: bool,
) -> Result<Box<dyn FnOnce() -> Result<(), BenchmarkError>>, BenchmarkError>;
) -> Result<(), BenchmarkError>;

/// Same as `instance` but passing a no-op recording.
fn test_instance(
&self,
components: &[(BenchmarkParameter, u32)],
) -> Result<(), BenchmarkError> {
return self.instance(&mut NoopRecording {}, components, true);
}
}

/// Grab an account, seeded by a name and index.
Expand Down
56 changes: 21 additions & 35 deletions substrate/frame/benchmarking/src/v1.rs
Original file line number Diff line number Diff line change
Expand Up @@ -786,9 +786,10 @@ macro_rules! benchmark_backend {

fn instance(
&self,
recording: &mut impl $crate::Recording,
components: &[($crate::BenchmarkParameter, u32)],
verify: bool
) -> Result<$crate::__private::Box<dyn FnOnce() -> Result<(), $crate::BenchmarkError>>, $crate::BenchmarkError> {
) -> Result<(), $crate::BenchmarkError> {
$(
// Prepare instance
let $param = components.iter()
Expand All @@ -802,13 +803,14 @@ macro_rules! benchmark_backend {
$( $param_instancer ; )*
$( $post )*

Ok($crate::__private::Box::new(move || -> Result<(), $crate::BenchmarkError> {
$eval;
if verify {
$postcode;
}
Ok(())
}))
recording.start();
$eval;
recording.stop();
ggwpez marked this conversation as resolved.
Show resolved Hide resolved

if verify {
$postcode;
}
Ok(())
}
}
};
Expand Down Expand Up @@ -960,14 +962,15 @@ macro_rules! selected_benchmark {

fn instance(
&self,
recording: &mut impl $crate::Recording,
components: &[($crate::BenchmarkParameter, u32)],
verify: bool
) -> Result<$crate::__private::Box<dyn FnOnce() -> Result<(), $crate::BenchmarkError>>, $crate::BenchmarkError> {
) -> Result<(), $crate::BenchmarkError> {
match self {
$(
Self::$bench => <
$bench as $crate::BenchmarkingSetup<T $(, $bench_inst)? >
>::instance(&$bench, components, verify),
>::instance(&$bench, recording, components, verify),
)*
}
}
Expand Down Expand Up @@ -1075,12 +1078,6 @@ macro_rules! impl_benchmark {
// Always reset the state after the benchmark.
$crate::__private::defer!($crate::benchmarking::wipe_db());

// Set up the externalities environment for the setup we want to
// benchmark.
let closure_to_benchmark = <
SelectedBenchmark as $crate::BenchmarkingSetup<T $(, $instance)?>
>::instance(&selected_benchmark, c, verify)?;

// Set the block number to at least 1 so events are deposited.
if $crate::__private::Zero::is_zero(&frame_system::Pallet::<T>::block_number()) {
frame_system::Pallet::<T>::set_block_number(1u32.into());
Expand Down Expand Up @@ -1108,20 +1105,14 @@ macro_rules! impl_benchmark {
verify
);

let start_pov = $crate::benchmarking::proof_size();
let start_extrinsic = $crate::benchmarking::current_time();

closure_to_benchmark()?;

let finish_extrinsic = $crate::benchmarking::current_time();
let end_pov = $crate::benchmarking::proof_size();
// Set up the externalities environment for the setup we want to
// benchmark.
let mut recording = $crate::BenchmarkRecording::default();
<SelectedBenchmark as $crate::BenchmarkingSetup<T $(, $instance)?>>::instance(&selected_benchmark, &mut recording, c, verify)?;

// Calculate the diff caused by the benchmark.
let elapsed_extrinsic = finish_extrinsic.saturating_sub(start_extrinsic);
let diff_pov = match (start_pov, end_pov) {
(Some(start), Some(end)) => end.saturating_sub(start),
_ => Default::default(),
};
let elapsed_extrinsic = recording.elapsed_extrinsic();
let diff_pov = recording.diff_pov();

// Commit the changes to get proper write count
$crate::benchmarking::commit_db();
Expand All @@ -1136,7 +1127,7 @@ macro_rules! impl_benchmark {
);
$crate::__private::log::trace!(
target: "benchmark",
"Proof sizes: before {:?} after {:?} diff {}", &start_pov, &end_pov, &diff_pov
"Proof sizes: before {:?} after {:?} diff {}", recording.start_pov(), recording.end_pov(), &diff_pov
);

// Time the storage root recalculation.
Expand Down Expand Up @@ -1230,18 +1221,13 @@ macro_rules! impl_benchmark_test {
// Always reset the state after the benchmark.
$crate::__private::defer!($crate::benchmarking::wipe_db());

// Set up the benchmark, return execution + verification function.
let closure_to_verify = <
SelectedBenchmark as $crate::BenchmarkingSetup<T, _>
>::instance(&selected_benchmark, &c, true)?;

// Set the block number to at least 1 so events are deposited.
if $crate::__private::Zero::is_zero(&frame_system::Pallet::<T>::block_number()) {
frame_system::Pallet::<T>::set_block_number(1u32.into());
}

// Run execution + verification
closure_to_verify()
<SelectedBenchmark as $crate::BenchmarkingSetup<T, _>>::test_instance(&selected_benchmark, &c)
};

if components.is_empty() {
Expand Down
10 changes: 4 additions & 6 deletions substrate/frame/staking/src/benchmarking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1077,15 +1077,13 @@ mod tests {
(frame_benchmarking::BenchmarkParameter::v, v),
(frame_benchmarking::BenchmarkParameter::n, n),
];
let closure_to_benchmark =
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can this test be removed, it should be executed when running the benchmark tests anyway ?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yea looks a bit weird.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ah nvm it's marked as #[extra] so I guess this is to ensure the test runs everytime..

<SelectedBenchmark as frame_benchmarking::BenchmarkingSetup<Test>>::instance(

assert_ok!(
<SelectedBenchmark as frame_benchmarking::BenchmarkingSetup<Test>>::test_instance(
&selected_benchmark,
&c,
true,
)
.unwrap();

assert_ok!(closure_to_benchmark());
);
});
}
}
Loading
Loading