From 13967bf8b7f92217eaf01c72d01f3552ae67e5f3 Mon Sep 17 00:00:00 2001
From: nemo
Date: Mon, 23 Aug 2021 14:11:26 -0400
Subject: [PATCH 1/2] Serialize parent's cache generation and access (#1496)

* feat: add a test for parallel parent's cache generation

* feat: serialize parent's cache generation/access

* feat: add a test for this to ensure there's no corruption

* fix: ensure test doesn't remove a file another thread is using

* fix: isolate the tests instead of adding locking
---
 .circleci/config.yml                 |   4 +-
 storage-proofs-porep/Cargo.toml      |   3 +-
 .../src/stacked/vanilla/cache.rs     | 115 +++++++++++++++++-
 .../src/stacked/vanilla/cores.rs     |   5 +-
 4 files changed, 119 insertions(+), 8 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 4a88aaf576..1f567ebefc 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -140,7 +140,9 @@ jobs:
             ulimit -u 20000
             ulimit -n 20000
             cargo +<< pipeline.parameters.nightly-toolchain >> test --all --verbose --release lifecycle -- --ignored --nocapture
-            cargo +<< pipeline.parameters.nightly-toolchain >> test -p storage-proofs-porep --features single-threaded --release checkout_cores -- --test-threads=1
+            cargo +<< pipeline.parameters.nightly-toolchain >> test -p storage-proofs-porep --features isolated-testing --release checkout_cores -- --test-threads=1
+            cargo +<< pipeline.parameters.nightly-toolchain >> test -p storage-proofs-porep --features isolated-testing --release test_parallel_generation_and_read_partial_range_v1_0
+            cargo +<< pipeline.parameters.nightly-toolchain >> test -p storage-proofs-porep --features isolated-testing --release test_parallel_generation_and_read_partial_range_v1_1
           no_output_timeout: 30m
           environment:
             RUST_TEST_THREADS: 1

diff --git a/storage-proofs-porep/Cargo.toml b/storage-proofs-porep/Cargo.toml
index a9bf9dc03d..7ab51caab8 100644
--- a/storage-proofs-porep/Cargo.toml
+++ b/storage-proofs-porep/Cargo.toml
@@ -40,6 +40,7 @@ libc = "0.2"
 fdlimit = "0.2.0"
 fr32 = { path = "../fr32", version = "^2.0.0", default-features = false }
 yastl = "0.1.2"
+fil_logger = "0.1"
 
 [target."cfg(target_arch = \"aarch64\")".dependencies]
 sha2 = { version = "0.9.3", features = ["compress", "asm"] }
@@ -59,7 +60,7 @@ default = ["blst", "gpu", "multicore-sdr"]
 gpu = ["storage-proofs-core/gpu", "filecoin-hashers/gpu", "neptune/opencl", "bellperson/gpu", "fr32/gpu"]
 pairing = ["storage-proofs-core/pairing", "bellperson/pairing", "neptune/pairing", "filecoin-hashers/pairing", "fr32/pairing"]
 blst = ["storage-proofs-core/blst", "bellperson/blst", "neptune/blst", "filecoin-hashers/blst", "fr32/blst"]
-single-threaded = []
+isolated-testing = []
 multicore-sdr = ["hwloc"]
 
 [[bench]]

diff --git a/storage-proofs-porep/src/stacked/vanilla/cache.rs b/storage-proofs-porep/src/stacked/vanilla/cache.rs
index cd92e57673..8bd6bd81bf 100644
--- a/storage-proofs-porep/src/stacked/vanilla/cache.rs
+++ b/storage-proofs-porep/src/stacked/vanilla/cache.rs
@@ -1,7 +1,8 @@
-use std::collections::BTreeMap;
+use std::collections::{BTreeMap, HashSet};
 use std::fs::{remove_file, File};
 use std::io;
 use std::path::{Path, PathBuf};
+use std::sync::Mutex;
 
 use anyhow::{bail, ensure, Context};
 use byteorder::{ByteOrder, LittleEndian};
@@ -38,6 +39,7 @@ pub struct ParentCacheData {
 lazy_static! {
     pub static ref PARENT_CACHE: ParentCacheDataMap =
         serde_json::from_str(PARENT_CACHE_DATA).expect("Invalid parent_cache.json");
+    static ref PARENT_CACHE_ACCESS_LOCK: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
 }
 
 // StackedGraph will hold two different (but related) `ParentCache`,
@@ -155,11 +157,25 @@ impl ParentCache {
         G: Graph<H> + ParameterSetMetadata + Send + Sync,
     {
         let path = cache_path(cache_entries, graph);
+        let generation_key = path.display().to_string();
+        let mut generated = PARENT_CACHE_ACCESS_LOCK
+            .lock()
+            .expect("parent cache generation lock failed");
+
         if path.exists() {
+            // If the cache file exists and we've got the lock, generation has previously been
+            // completed. Insert that it no longer needs generation at this point unconditionally.
+            if generated.get(&generation_key).is_none() {
+                generated.insert(generation_key);
+            }
             Self::open(len, cache_entries, graph, &path)
         } else {
             match Self::generate(len, cache_entries, graph, &path) {
-                Ok(c) => Ok(c),
+                Ok(c) => {
+                    generated.insert(generation_key);
+
+                    Ok(c)
+                }
                 Err(err) => {
                     match err.downcast::<io::Error>() {
                         Ok(error) if error.kind() == io::ErrorKind::AlreadyExists => {
@@ -435,13 +451,23 @@ where
 mod tests {
     use super::*;
 
+    use std::sync::Once;
+
     use filecoin_hashers::poseidon::PoseidonHasher;
     use storage_proofs_core::api_version::ApiVersion;
 
     use crate::stacked::vanilla::graph::{StackedBucketGraph, EXP_DEGREE};
 
+    static INIT_LOGGER: Once = Once::new();
+    fn init_logger() {
+        INIT_LOGGER.call_once(|| {
+            fil_logger::init();
+        });
+    }
+
     #[test]
     fn test_read_full_range() {
+        init_logger();
         let nodes = 24u32;
         let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
             nodes as usize,
@@ -466,14 +492,93 @@ mod tests {
     }
 
     #[test]
-    fn test_read_partial_range() {
+    #[cfg(feature = "isolated-testing")]
+    fn test_parallel_generation_and_read_partial_range_v1_0() {
+        let porep_id = [0u8; 32];
+        test_parallel_generation_and_read_partial_range(ApiVersion::V1_0_0, &porep_id);
+    }
+
+    #[test]
+    #[cfg(feature = "isolated-testing")]
+    fn test_parallel_generation_and_read_partial_range_v1_1() {
+        let porep_id = [1u8; 32]; //needs to be different than v1_0 for a separate graph
+        test_parallel_generation_and_read_partial_range(ApiVersion::V1_1_0, &porep_id);
+    }
+
+    // This removes the parent cache file for the test, then tries to
+    // open or generate it in parallel. Then we perform the
+    // read_partial_range test, which should pass if the parallel
+    // generation was not corrupted.
+    //
+    // This test should not be run while other tests that use the
+    // parent's cache are running, as it may remove the parent cache
+    // file while another thread is using it.
+    #[cfg(feature = "isolated-testing")]
+    fn test_parallel_generation_and_read_partial_range(
+        api_version: ApiVersion,
+        porep_id: &[u8; 32],
+    ) {
+        use yastl::Pool;
+
+        init_logger();
+        let pool = Pool::new(3);
         let nodes = 48u32;
         let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
             nodes as usize,
             BASE_DEGREE,
             EXP_DEGREE,
-            [0u8; 32],
-            ApiVersion::V1_0_0,
+            *porep_id,
+            api_version,
+        )
+        .expect("new_stacked failure");
+
+        let path = cache_path(nodes, &graph);
+
+        // If this cache file exists, remove it so that we can be sure
+        // at least one thread will generate it in this test.
+        if std::fs::remove_file(&path).is_ok() {};
+
+        pool.scoped(|s| {
+            for _ in 0..3 {
+                s.execute(move || {
+                    let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
+                        nodes as usize,
+                        BASE_DEGREE,
+                        EXP_DEGREE,
+                        *porep_id,
+                        api_version,
+                    )
+                    .expect("new_stacked failure");
+
+                    ParentCache::new(nodes, nodes, &graph).expect("parent cache new failure");
+                });
+            }
+        });
+
+        test_read_partial_range(api_version, porep_id);
+    }
+
+    #[test]
+    fn test_read_partial_range_v1_0() {
+        let porep_id = [0u8; 32];
+        test_read_partial_range(ApiVersion::V1_0_0, &porep_id);
+    }
+
+    #[test]
+    fn test_read_partial_range_v1_1() {
+        let porep_id = [1u8; 32]; //needs to be different than v1_0 for a separate graph
+        test_read_partial_range(ApiVersion::V1_1_0, &porep_id);
+    }
+
+    fn test_read_partial_range(api_version: ApiVersion, porep_id: &[u8; 32]) {
+        init_logger();
+        let nodes = 48u32;
+        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
+            nodes as usize,
+            BASE_DEGREE,
+            EXP_DEGREE,
+            *porep_id,
+            api_version,
         )
         .expect("new_stacked failure");
 

diff --git a/storage-proofs-porep/src/stacked/vanilla/cores.rs b/storage-proofs-porep/src/stacked/vanilla/cores.rs
index d7e4605de3..fc486c9233 100644
--- a/storage-proofs-porep/src/stacked/vanilla/cores.rs
+++ b/storage-proofs-porep/src/stacked/vanilla/cores.rs
@@ -200,7 +200,10 @@ mod tests {
     }
 
     #[test]
-    #[cfg(feature = "single-threaded")]
+    #[cfg(feature = "isolated-testing")]
+    // This test should not be run while other tests are running, as
+    // the cores we're working with may otherwise be busy and cause a
+    // failure.
     fn test_checkout_cores() {
         let checkout1 = checkout_core_group();
         dbg!(&checkout1);
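The core of this first patch is that ParentCache::new now takes one global lock (PARENT_CACHE_ACCESS_LOCK) before it checks whether the on-disk parent cache exists, so only one thread can be generating a given cache file while the others block and then open the finished file; the HashSet is bookkeeping, and the Mutex around it is what provides the serialization. The snippet below is a minimal standalone sketch of that pattern, not code from the repository: open_or_generate, generated_caches and the generate callback are hypothetical names, and std's OnceLock stands in for the lazy_static! block the crate actually uses.

use std::collections::HashSet;
use std::fs;
use std::io;
use std::path::Path;
use std::sync::{Mutex, OnceLock};

// Registry of cache files known to be fully generated, guarded by one global
// mutex. Holding the guard across the existence check and the generation step
// is what serializes concurrent callers.
fn generated_caches() -> &'static Mutex<HashSet<String>> {
    static LOCK: OnceLock<Mutex<HashSet<String>>> = OnceLock::new();
    LOCK.get_or_init(|| Mutex::new(HashSet::new()))
}

// Open the cache at `path`, generating it first if it does not exist yet.
// Only one thread at a time can be generating or opening any cache file.
fn open_or_generate(
    path: &Path,
    generate: impl FnOnce(&Path) -> io::Result<()>,
) -> io::Result<Vec<u8>> {
    let mut generated = generated_caches()
        .lock()
        .expect("parent cache generation lock failed");

    if !path.exists() {
        generate(path)?;
    }
    // Record that this cache no longer needs generation.
    generated.insert(path.display().to_string());

    fs::read(path)
}

Because the lock is held across both the existence check and the generation step, a thread that loses the race waits until the file is complete instead of reading a half-written cache, which is the corruption scenario the new test_parallel_generation_and_read_partial_range tests exercise.
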
From 9cc6444a78408a1fc19344a973d5ea8d91d63ad4 Mon Sep 17 00:00:00 2001
From: nemo
Date: Thu, 26 Aug 2021 08:05:38 -0400
Subject: [PATCH 2/2] feat: fail verification of empty proof bytes (#1498)

* feat: fail verification of empty proof bytes

* feat: update xcode version to a CI supported version
---
 .circleci/config.yml            |  2 +-
 filecoin-proofs/src/api/seal.rs |  5 +++++
 filecoin-proofs/tests/mod.rs    | 46 ++++++++++++++++++++++++++++++
 3 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 1f567ebefc..e7896cf167 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -330,7 +330,7 @@ jobs:
           command: cargo +$(cat rust-toolchain) clippy --all-targets --workspace -- -D warnings
   test_darwin:
     macos:
-      xcode: "10.0.0"
+      xcode: "12.5.0"
     working_directory: ~/crate
     resource_class: large
     environment: *setup-env

diff --git a/filecoin-proofs/src/api/seal.rs b/filecoin-proofs/src/api/seal.rs
index 45c3a8e0a9..6bbd1d0f78 100644
--- a/filecoin-proofs/src/api/seal.rs
+++ b/filecoin-proofs/src/api/seal.rs
@@ -925,8 +925,10 @@ pub fn verify_seal<Tree: 'static + MerkleTreeTrait>(
     proof_vec: &[u8],
 ) -> Result<bool> {
     info!("verify_seal:start: {:?}", sector_id);
+    ensure!(comm_d_in != [0; 32], "Invalid all zero commitment (comm_d)");
     ensure!(comm_r_in != [0; 32], "Invalid all zero commitment (comm_r)");
+    ensure!(!proof_vec.is_empty(), "Invalid proof bytes (empty vector)");
 
     let comm_r: <Tree::Hasher as Hasher>::Domain = as_safe_commitment(&comm_r_in, "comm_r")?;
     let comm_d: DefaultPieceDomain = as_safe_commitment(&comm_d_in, "comm_d")?;
@@ -1042,6 +1044,9 @@ pub fn verify_batch_seal<Tree: 'static + MerkleTreeTrait>(
             "Invalid all zero commitment (comm_r)"
         );
     }
+    for proofs in proof_vecs {
+        ensure!(!proofs.is_empty(), "Invalid proof (empty bytes) found");
+    }
 
     let sector_bytes = PaddedBytesAmount::from(porep_config);
 

diff --git a/filecoin-proofs/tests/mod.rs b/filecoin-proofs/tests/mod.rs
index 8bd38cd9f5..241c6ee721 100644
--- a/filecoin-proofs/tests/mod.rs
+++ b/filecoin-proofs/tests/mod.rs
@@ -22,6 +22,8 @@ fn test_verify_seal_fr32_validation() {
     assert!(out.is_err(), "tripwire");
 
     let arbitrary_porep_id = [87; 32];
+
+    // Test failure for invalid comm_r conversion.
     {
         let result = verify_seal::<SectorShape2KiB>(
             PoRepConfig {
@@ -60,6 +62,7 @@ fn test_verify_seal_fr32_validation() {
         }
     }
 
+    // Test failure for invalid comm_d conversion.
     {
         let result = verify_seal::<SectorShape2KiB>(
             PoRepConfig {
@@ -97,6 +100,49 @@ fn test_verify_seal_fr32_validation() {
             panic_any("should have failed comm_d to Fr32 conversion");
         }
     }
+
+    // Test failure for verifying an empty proof.
+    {
+        let non_zero_commitment_fr_bytes = [1; 32];
+        let out = bytes_into_fr(&non_zero_commitment_fr_bytes);
+        assert!(out.is_ok(), "tripwire");
+
+        let result = verify_seal::<SectorShape2KiB>(
+            PoRepConfig {
+                sector_size: SectorSize(SECTOR_SIZE_2_KIB),
+                partitions: PoRepProofPartitions(
+                    *POREP_PARTITIONS
+                        .read()
+                        .expect("POREP_PARTITIONS poisoned")
+                        .get(&SECTOR_SIZE_2_KIB)
+                        .expect("unknown sector size"),
+                ),
+                porep_id: arbitrary_porep_id,
+                api_version: ApiVersion::V1_1_0,
+            },
+            non_zero_commitment_fr_bytes,
+            non_zero_commitment_fr_bytes,
+            [0; 32],
+            SectorId::from(0),
+            [0; 32],
+            [0; 32],
+            &[],
+        );
+
+        if let Err(err) = result {
+            let needle = "Invalid proof bytes (empty vector)";
+            let haystack = format!("{}", err);
+
+            assert!(
+                haystack.contains(needle),
+                "\"{}\" did not contain \"{}\"",
+                haystack,
+                needle,
+            );
+        } else {
+            panic_any("should have failed due to empty proof bytes");
+        }
+    }
 }
 
 #[test]
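The guard this second patch adds is an early ensure! on the raw inputs of verify_seal (and a matching loop over proof_vecs in verify_batch_seal), so an empty proof vector is rejected with a readable error before any commitment conversion or proof verification is attempted. Below is a minimal, self-contained sketch of that validation step, not the filecoin-proofs API itself: check_verify_inputs is a hypothetical helper, but the error strings match the ones the patch adds and the new test asserts on.

use anyhow::{ensure, Result};

// Hypothetical stand-in for the early checks verify_seal performs before
// doing any real work: all-zero commitments and empty proof bytes are
// rejected up front with descriptive errors.
fn check_verify_inputs(comm_d: [u8; 32], comm_r: [u8; 32], proof_vec: &[u8]) -> Result<()> {
    ensure!(comm_d != [0; 32], "Invalid all zero commitment (comm_d)");
    ensure!(comm_r != [0; 32], "Invalid all zero commitment (comm_r)");
    ensure!(!proof_vec.is_empty(), "Invalid proof bytes (empty vector)");
    Ok(())
}

fn main() {
    // Mirrors the new test: valid-looking commitments plus an empty proof
    // must fail fast with the "empty vector" message.
    let err = check_verify_inputs([1; 32], [1; 32], &[]).unwrap_err();
    assert!(err.to_string().contains("Invalid proof bytes (empty vector)"));
}

In the batch API the same rule is applied per entry by iterating over proof_vecs, as shown in the seal.rs hunk above.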