Skip to content

Commit

Permalink
test: update VID bench code (EspressoSystems#392)
Browse files Browse the repository at this point in the history
* nix voodoo

* bench panic with useful message if needed features are missing

* ignore benchmark reports put in primitives/target

* use power-of-2 node counts to accommodate advz vid scheme constraints

* [no-ci] clarifying comment
  • Loading branch information
ggutoski authored Nov 1, 2023
1 parent 5c74b9a commit d99c22d
Show file tree
Hide file tree
Showing 4 changed files with 136 additions and 114 deletions.
2 changes: 2 additions & 0 deletions .envrc
Original file line number Diff line number Diff line change
@@ -1 +1,3 @@
use nix
watch_file flake.nix
watch_file flake.lock
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -11,3 +11,7 @@ Cargo.lock
# Test coverage (grcov)
default.profraw
/.pre-commit-config.yaml

# benchmark reports placed here for some reason:
# https://doc.rust-lang.org/cargo/commands/cargo-bench.html#working-directory-of-benchmarks
/primitives/target
2 changes: 2 additions & 0 deletions flake.nix
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,8 @@
llvm_15
] ++ lib.optionals stdenv.isDarwin [ darwin.apple_sdk.frameworks.Security ];

CARGO_TARGET_DIR = "target/nix_rustc";

shellHook = ''
export RUST_BACKTRACE=full
export PATH="$PATH:$(pwd)/target/debug:$(pwd)/target/release"
Expand Down
242 changes: 128 additions & 114 deletions primitives/benches/advz.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,130 +4,144 @@
// You should have received a copy of the MIT License
// along with the Jellyfish library. If not, see <https://mit-license.org/>.

#![cfg(feature = "test-srs")]
use ark_bls12_381::Bls12_381;
use ark_bn254::Bn254;
use ark_ec::pairing::Pairing;
use ark_serialize::Write;
use ark_std::rand::RngCore;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use digest::{crypto_common::generic_array::ArrayLength, Digest, DynDigest, OutputSizeUser};
use jf_primitives::{
pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme},
vid::{advz::Advz, VidScheme},
};
use sha2::Sha256;
/// Fallback entry point used when the crate is built without the
/// `test-srs` feature: fail fast with an actionable message instead of
/// silently benchmarking nothing.
#[cfg(not(feature = "test-srs"))]
fn main() {
    // Keep the message in one place so the guard reads as documentation.
    const MSG: &str = "need `test-srs` feature to run this benchmark";
    panic!("{}", MSG);
}

const KB: usize = 1 << 10;
const MB: usize = KB << 10;
#[cfg(feature = "test-srs")]
criterion::criterion_main!(feature_gated::benches);

fn advz<E, H>(c: &mut Criterion, pairing_name: &str)
where
E: Pairing,
// TODO(Gus) clean up nasty trait bounds upstream
H: Digest + DynDigest + Default + Clone + Write,
<<H as OutputSizeUser>::OutputSize as ArrayLength<u8>>::ArrayType: Copy,
{
// play with these items
const RATE: usize = 4; // ratio of num_storage_nodes : polynomial_degree
let storage_node_counts = [600, 700, 800, 900, 1000];
let payload_byte_lens = [1 * MB];
#[cfg(feature = "test-srs")]
mod feature_gated {
use ark_bls12_381::Bls12_381;
use ark_bn254::Bn254;
use ark_ec::pairing::Pairing;
use ark_serialize::Write;
use ark_std::rand::RngCore;
use criterion::{BenchmarkId, Criterion, Throughput};
use digest::{crypto_common::generic_array::ArrayLength, Digest, DynDigest, OutputSizeUser};
use jf_primitives::{
pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme},
vid::{advz::Advz, VidScheme},
};
use sha2::Sha256;

// more items as a function of the above
let poly_degrees_iter = storage_node_counts.iter().map(|c| c / RATE);
let supported_degree = poly_degrees_iter.clone().max().unwrap();
let vid_sizes_iter = poly_degrees_iter.zip(storage_node_counts);
let mut rng = jf_utils::test_rng();
let srs = UnivariateKzgPCS::<E>::gen_srs_for_testing(
&mut rng,
checked_fft_size(supported_degree).unwrap(),
)
.unwrap();
const KB: usize = 1 << 10;
const MB: usize = KB << 10;

// run all benches for each payload_byte_lens
for len in payload_byte_lens {
// random payload data
let mut payload_bytes = vec![0u8; len];
rng.fill_bytes(&mut payload_bytes);
fn advz<E, H>(c: &mut Criterion, pairing_name: &str)
where
E: Pairing,
// TODO(Gus) clean up nasty trait bounds upstream
H: Digest + DynDigest + Default + Clone + Write,
<<H as OutputSizeUser>::OutputSize as ArrayLength<u8>>::ArrayType: Copy,
{
// play with these items
//
// CODE_RATE is merely a convenient way to automatically choose polynomial
// degree as a function of storage node count.
// If desired, you could set polynomial degrees independent of storage node
// count.
const CODE_RATE: usize = 4; // ratio of num_storage_nodes : polynomial_degree
let storage_node_counts = [512, 1024];
let payload_byte_lens = [1 * MB];

let benchmark_group_name =
|op_name| format!("advz_{}_{}_{}KB", pairing_name, op_name, len / KB);
// more items as a function of the above
let poly_degrees_iter = storage_node_counts.iter().map(|c| c / CODE_RATE);
let supported_degree = poly_degrees_iter.clone().max().unwrap();
let vid_sizes_iter = poly_degrees_iter.zip(storage_node_counts);
let mut rng = jf_utils::test_rng();
let srs = UnivariateKzgPCS::<E>::gen_srs_for_testing(
&mut rng,
checked_fft_size(supported_degree).unwrap(),
)
.unwrap();

// commit
let mut grp = c.benchmark_group(benchmark_group_name("commit"));
grp.throughput(Throughput::Bytes(len as u64));
for (poly_degree, num_storage_nodes) in vid_sizes_iter.clone() {
let advz = Advz::<E, H>::new(poly_degree, num_storage_nodes, &srs).unwrap();
grp.bench_with_input(
BenchmarkId::from_parameter(num_storage_nodes),
&num_storage_nodes,
|b, _| {
b.iter(|| advz.commit_only(&payload_bytes).unwrap());
},
);
}
grp.finish();
// run all benches for each payload_byte_lens
for len in payload_byte_lens {
// random payload data
let mut payload_bytes = vec![0u8; len];
rng.fill_bytes(&mut payload_bytes);

// disperse
let mut grp = c.benchmark_group(benchmark_group_name("disperse"));
grp.throughput(Throughput::Bytes(len as u64));
for (poly_degree, num_storage_nodes) in vid_sizes_iter.clone() {
let advz = Advz::<E, H>::new(poly_degree, num_storage_nodes, &srs).unwrap();
grp.bench_with_input(
BenchmarkId::from_parameter(num_storage_nodes),
&num_storage_nodes,
|b, _| {
b.iter(|| advz.disperse(&payload_bytes).unwrap());
},
);
}
grp.finish();
let benchmark_group_name =
|op_name| format!("advz_{}_{}_{}KB", pairing_name, op_name, len / KB);

// verify
let mut grp = c.benchmark_group(benchmark_group_name("verify"));
grp.throughput(Throughput::Bytes(len as u64));
for (poly_degree, num_storage_nodes) in vid_sizes_iter.clone() {
let advz = Advz::<E, H>::new(poly_degree, num_storage_nodes, &srs).unwrap();
let disperse = advz.disperse(&payload_bytes).unwrap();
let (shares, common) = (disperse.shares, disperse.common);
grp.bench_with_input(
BenchmarkId::from_parameter(num_storage_nodes),
&num_storage_nodes,
|b, _| {
// verify only the 0th share
b.iter(|| advz.verify_share(&shares[0], &common).unwrap().unwrap());
},
);
}
grp.finish();
// commit
let mut grp = c.benchmark_group(benchmark_group_name("commit"));
grp.throughput(Throughput::Bytes(len as u64));
for (poly_degree, num_storage_nodes) in vid_sizes_iter.clone() {
let advz = Advz::<E, H>::new(poly_degree, num_storage_nodes, &srs).unwrap();
grp.bench_with_input(
BenchmarkId::from_parameter(num_storage_nodes),
&num_storage_nodes,
|b, _| {
b.iter(|| advz.commit_only(&payload_bytes).unwrap());
},
);
}
grp.finish();

// disperse
let mut grp = c.benchmark_group(benchmark_group_name("disperse"));
grp.throughput(Throughput::Bytes(len as u64));
for (poly_degree, num_storage_nodes) in vid_sizes_iter.clone() {
let advz = Advz::<E, H>::new(poly_degree, num_storage_nodes, &srs).unwrap();
grp.bench_with_input(
BenchmarkId::from_parameter(num_storage_nodes),
&num_storage_nodes,
|b, _| {
b.iter(|| advz.disperse(&payload_bytes).unwrap());
},
);
}
grp.finish();

// recover
let mut grp = c.benchmark_group(benchmark_group_name("recover"));
grp.throughput(Throughput::Bytes(len as u64));
for (poly_degree, num_storage_nodes) in vid_sizes_iter.clone() {
let advz = Advz::<E, H>::new(poly_degree, num_storage_nodes, &srs).unwrap();
let disperse = advz.disperse(&payload_bytes).unwrap();
let (shares, common) = (disperse.shares, disperse.common);
grp.bench_with_input(
BenchmarkId::from_parameter(num_storage_nodes),
&num_storage_nodes,
|b, _| {
// recover from only the first poly_degree shares
b.iter(|| {
advz.recover_payload(&shares[..poly_degree], &common)
.unwrap()
});
},
);
// verify
let mut grp = c.benchmark_group(benchmark_group_name("verify"));
grp.throughput(Throughput::Bytes(len as u64));
for (poly_degree, num_storage_nodes) in vid_sizes_iter.clone() {
let advz = Advz::<E, H>::new(poly_degree, num_storage_nodes, &srs).unwrap();
let disperse = advz.disperse(&payload_bytes).unwrap();
let (shares, common) = (disperse.shares, disperse.common);
grp.bench_with_input(
BenchmarkId::from_parameter(num_storage_nodes),
&num_storage_nodes,
|b, _| {
// verify only the 0th share
b.iter(|| advz.verify_share(&shares[0], &common).unwrap().unwrap());
},
);
}
grp.finish();

// recover
let mut grp = c.benchmark_group(benchmark_group_name("recover"));
grp.throughput(Throughput::Bytes(len as u64));
for (poly_degree, num_storage_nodes) in vid_sizes_iter.clone() {
let advz = Advz::<E, H>::new(poly_degree, num_storage_nodes, &srs).unwrap();
let disperse = advz.disperse(&payload_bytes).unwrap();
let (shares, common) = (disperse.shares, disperse.common);
grp.bench_with_input(
BenchmarkId::from_parameter(num_storage_nodes),
&num_storage_nodes,
|b, _| {
// recover from only the first poly_degree shares
b.iter(|| {
advz.recover_payload(&shares[..poly_degree], &common)
.unwrap()
});
},
);
}
grp.finish();
}
grp.finish();
}
}

fn advz_main(c: &mut Criterion) {
advz::<Bls12_381, Sha256>(c, "Bls381");
advz::<Bn254, Sha256>(c, "Bn254");
}
// Top-level bench driver: run the ADVZ VID benchmarks once per supported
// pairing curve. The curve order here fixes the order of the criterion
// report groups, so keep BLS12-381 first for comparability with past runs.
fn advz_main(c: &mut Criterion) {
advz::<Bls12_381, Sha256>(c, "Bls381");
advz::<Bn254, Sha256>(c, "Bn254");
}

criterion_group!(name = benches; config = Criterion::default().sample_size(10); targets = advz_main);
criterion_main!(benches);
criterion::criterion_group!(name = benches; config = Criterion::default().sample_size(10); targets = advz_main);
}

0 comments on commit d99c22d

Please sign in to comment.