Merge branch 'filecoin-project:master' into feat/decoupling-post-worker
lovel8 committed Aug 27, 2021
2 parents 425f800 + 9cc6444 commit 7ff6f49
Showing 6 changed files with 171 additions and 9 deletions.
6 changes: 4 additions & 2 deletions .circleci/config.yml
@@ -140,7 +140,9 @@ jobs:
ulimit -u 20000
ulimit -n 20000
cargo +<< pipeline.parameters.nightly-toolchain >> test --all --verbose --release lifecycle -- --ignored --nocapture
cargo +<< pipeline.parameters.nightly-toolchain >> test -p storage-proofs-porep --features single-threaded --release checkout_cores -- --test-threads=1
cargo +<< pipeline.parameters.nightly-toolchain >> test -p storage-proofs-porep --features isolated-testing --release checkout_cores -- --test-threads=1
cargo +<< pipeline.parameters.nightly-toolchain >> test -p storage-proofs-porep --features isolated-testing --release test_parallel_generation_and_read_partial_range_v1_0
cargo +<< pipeline.parameters.nightly-toolchain >> test -p storage-proofs-porep --features isolated-testing --release test_parallel_generation_and_read_partial_range_v1_1
no_output_timeout: 30m
environment:
RUST_TEST_THREADS: 1
@@ -328,7 +330,7 @@ jobs:
command: cargo +$(cat rust-toolchain) clippy --all-targets --workspace -- -D warnings
test_darwin:
macos:
xcode: "10.0.0"
xcode: "12.5.0"
working_directory: ~/crate
resource_class: large
environment: *setup-env
5 changes: 5 additions & 0 deletions filecoin-proofs/src/api/seal.rs
@@ -925,8 +925,10 @@ pub fn verify_seal<Tree: 'static + MerkleTreeTrait>(
proof_vec: &[u8],
) -> Result<bool> {
info!("verify_seal:start: {:?}", sector_id);

ensure!(comm_d_in != [0; 32], "Invalid all zero commitment (comm_d)");
ensure!(comm_r_in != [0; 32], "Invalid all zero commitment (comm_r)");
ensure!(!proof_vec.is_empty(), "Invalid proof bytes (empty vector)");

let comm_r: <Tree::Hasher as Hasher>::Domain = as_safe_commitment(&comm_r_in, "comm_r")?;
let comm_d: DefaultPieceDomain = as_safe_commitment(&comm_d_in, "comm_d")?;
@@ -1042,6 +1044,9 @@ pub fn verify_batch_seal<Tree: 'static + MerkleTreeTrait>(
"Invalid all zero commitment (comm_r)"
);
}
for proofs in proof_vecs {
ensure!(!proofs.is_empty(), "Invalid proof (empty bytes) found");
}

let sector_bytes = PaddedBytesAmount::from(porep_config);

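The two hunks above add cheap input validation to verify_seal and verify_batch_seal so that obviously malformed inputs are rejected before any expensive proof verification work begins. A minimal, self-contained sketch of the same guard pattern, using a hypothetical check_verify_inputs helper (not the crate's actual API):

use anyhow::{ensure, Result};

// Reject degenerate inputs up front: all-zero commitments and
// empty proof bytes can never correspond to a valid seal proof.
fn check_verify_inputs(comm_d: [u8; 32], comm_r: [u8; 32], proof_vec: &[u8]) -> Result<()> {
    ensure!(comm_d != [0; 32], "Invalid all zero commitment (comm_d)");
    ensure!(comm_r != [0; 32], "Invalid all zero commitment (comm_r)");
    ensure!(!proof_vec.is_empty(), "Invalid proof bytes (empty vector)");
    Ok(())
}

fn main() {
    // An empty proof vector now fails fast...
    assert!(check_verify_inputs([1; 32], [1; 32], &[]).is_err());
    // ...while plausible inputs pass the guards.
    assert!(check_verify_inputs([1; 32], [1; 32], &[0u8; 192]).is_ok());
}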
46 changes: 46 additions & 0 deletions filecoin-proofs/tests/mod.rs
@@ -22,6 +22,8 @@ fn test_verify_seal_fr32_validation() {
assert!(out.is_err(), "tripwire");

let arbitrary_porep_id = [87; 32];

// Test failure for invalid comm_r conversion.
{
let result = verify_seal::<DefaultOctLCTree>(
PoRepConfig {
@@ -60,6 +62,7 @@ fn test_verify_seal_fr32_validation() {
}
}

// Test failure for invalid comm_d conversion.
{
let result = verify_seal::<DefaultOctLCTree>(
PoRepConfig {
@@ -97,6 +100,49 @@ fn test_verify_seal_fr32_validation() {
panic_any("should have failed comm_d to Fr32 conversion");
}
}

// Test failure for verifying an empty proof.
{
let non_zero_commitment_fr_bytes = [1; 32];
let out = bytes_into_fr(&non_zero_commitment_fr_bytes);
assert!(out.is_ok(), "tripwire");

let result = verify_seal::<DefaultOctLCTree>(
PoRepConfig {
sector_size: SectorSize(SECTOR_SIZE_2_KIB),
partitions: PoRepProofPartitions(
*POREP_PARTITIONS
.read()
.expect("POREP_PARTITIONS poisoned")
.get(&SECTOR_SIZE_2_KIB)
.expect("unknown sector size"),
),
porep_id: arbitrary_porep_id,
api_version: ApiVersion::V1_1_0,
},
non_zero_commitment_fr_bytes,
non_zero_commitment_fr_bytes,
[0; 32],
SectorId::from(0),
[0; 32],
[0; 32],
&[],
);

if let Err(err) = result {
let needle = "Invalid proof bytes (empty vector)";
let haystack = format!("{}", err);

assert!(
haystack.contains(needle),
"\"{}\" did not contain \"{}\"",
haystack,
needle,
);
} else {
panic_any("should have failed due to empty proof bytes");
}
}
}

#[test]
3 changes: 2 additions & 1 deletion storage-proofs-porep/Cargo.toml
@@ -40,6 +40,7 @@ libc = "0.2"
fdlimit = "0.2.0"
fr32 = { path = "../fr32", version = "^2.0.0", default-features = false }
yastl = "0.1.2"
fil_logger = "0.1"

[target."cfg(target_arch = \"aarch64\")".dependencies]
sha2 = { version = "0.9.3", features = ["compress", "asm"] }
@@ -59,7 +60,7 @@ default = ["blst", "gpu", "multicore-sdr"]
gpu = ["storage-proofs-core/gpu", "filecoin-hashers/gpu", "neptune/opencl", "bellperson/gpu", "fr32/gpu"]
pairing = ["storage-proofs-core/pairing", "bellperson/pairing", "neptune/pairing", "filecoin-hashers/pairing", "fr32/pairing"]
blst = ["storage-proofs-core/blst", "bellperson/blst", "neptune/blst", "filecoin-hashers/blst", "fr32/blst"]
single-threaded = []
isolated-testing = []
multicore-sdr = ["hwloc"]

[[bench]]
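Renaming the feature from single-threaded to isolated-testing better reflects its purpose: it gates tests that must run with no other tests active, rather than saying anything about the crate's own threading. A sketch of how such a cfg-gated test looks (hypothetical module and test name):

#[cfg(test)]
mod isolated_tests {
    // Compiled and run only when the crate is built with the renamed
    // feature, e.g.:
    //   cargo test -p storage-proofs-porep --features isolated-testing -- --test-threads=1
    #[test]
    #[cfg(feature = "isolated-testing")]
    fn exclusive_resource_test() {
        // The body may assume exclusive access to shared resources
        // such as pinned CPU cores or on-disk parent cache files.
    }
}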
115 changes: 110 additions & 5 deletions storage-proofs-porep/src/stacked/vanilla/cache.rs
@@ -1,7 +1,8 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, HashSet};
use std::fs::{remove_file, File};
use std::io;
use std::path::{Path, PathBuf};
use std::sync::Mutex;

use anyhow::{bail, ensure, Context};
use byteorder::{ByteOrder, LittleEndian};
@@ -38,6 +39,7 @@ pub struct ParentCacheData {
lazy_static! {
pub static ref PARENT_CACHE: ParentCacheDataMap =
serde_json::from_str(PARENT_CACHE_DATA).expect("Invalid parent_cache.json");
static ref PARENT_CACHE_ACCESS_LOCK: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}

// StackedGraph will hold two different (but related) `ParentCache`,
@@ -155,11 +157,25 @@ impl ParentCache {
G: Graph<H> + ParameterSetMetadata + Send + Sync,
{
let path = cache_path(cache_entries, graph);
let generation_key = path.display().to_string();
let mut generated = PARENT_CACHE_ACCESS_LOCK
.lock()
.expect("parent cache generation lock failed");

if path.exists() {
// If the cache file exists and we hold the lock, generation has already
// completed, so record unconditionally that this entry no longer needs
// generation.
if generated.get(&generation_key).is_none() {
generated.insert(generation_key);
}
Self::open(len, cache_entries, graph, &path)
} else {
match Self::generate(len, cache_entries, graph, &path) {
Ok(c) => Ok(c),
Ok(c) => {
generated.insert(generation_key);

Ok(c)
}
Err(err) => {
match err.downcast::<io::Error>() {
Ok(error) if error.kind() == io::ErrorKind::AlreadyExists => {
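The hunk above serializes parent cache generation: only one thread at a time may decide whether a cache file needs to be generated, and the process-wide HashSet remembers paths whose generation already completed, while the AlreadyExists fallback still catches races with other processes. A minimal sketch of the same lock-then-check pattern, using a hypothetical generate_once helper (the real code keys the set on the cache file path and also handles opening an existing file):

use std::collections::HashSet;
use std::io;
use std::sync::Mutex;

use lazy_static::lazy_static;

lazy_static! {
    // Process-wide registry of cache keys already generated.
    static ref GENERATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}

// Hold the lock across both the membership check and the generation
// itself, so concurrent callers cannot both observe a missing entry
// and generate the same cache twice.
fn generate_once(key: &str, generate: impl FnOnce() -> io::Result<()>) -> io::Result<()> {
    let mut generated = GENERATED.lock().expect("generation lock poisoned");
    if !generated.contains(key) {
        generate()?;
        generated.insert(key.to_string());
    }
    Ok(())
}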
@@ -435,13 +451,23 @@ where
mod tests {
use super::*;

use std::sync::Once;

use filecoin_hashers::poseidon::PoseidonHasher;
use storage_proofs_core::api_version::ApiVersion;

use crate::stacked::vanilla::graph::{StackedBucketGraph, EXP_DEGREE};

static INIT_LOGGER: Once = Once::new();
fn init_logger() {
INIT_LOGGER.call_once(|| {
fil_logger::init();
});
}

#[test]
fn test_read_full_range() {
init_logger();
let nodes = 24u32;
let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
nodes as usize,
Expand All @@ -466,14 +492,93 @@ mod tests {
}

#[test]
fn test_read_partial_range() {
#[cfg(feature = "isolated-testing")]
fn test_parallel_generation_and_read_partial_range_v1_0() {
let porep_id = [0u8; 32];
test_parallel_generation_and_read_partial_range(ApiVersion::V1_0_0, &porep_id);
}

#[test]
#[cfg(feature = "isolated-testing")]
fn test_parallel_generation_and_read_partial_range_v1_1() {
let porep_id = [1u8; 32]; // needs to differ from v1_0 so a separate graph is used
test_parallel_generation_and_read_partial_range(ApiVersion::V1_1_0, &porep_id);
}

// This test removes the parent cache file, then tries to open or
// generate it from several threads in parallel. It then runs the
// read_partial_range test, which should pass only if the parallel
// generation did not corrupt the cache file.
//
// This test must not run while other tests that use the parent
// cache are running, as it may remove the cache file while another
// thread is using it.
#[cfg(feature = "isolated-testing")]
fn test_parallel_generation_and_read_partial_range(
api_version: ApiVersion,
porep_id: &[u8; 32],
) {
use yastl::Pool;

init_logger();
let pool = Pool::new(3);
let nodes = 48u32;
let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
nodes as usize,
BASE_DEGREE,
EXP_DEGREE,
[0u8; 32],
ApiVersion::V1_0_0,
*porep_id,
api_version,
)
.expect("new_stacked failure");

let path = cache_path(nodes, &graph);

// If this cache file exists, remove it so that we can be sure
// at least one thread will generate it in this test.
if std::fs::remove_file(&path).is_ok() {};

pool.scoped(|s| {
for _ in 0..3 {
s.execute(move || {
let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
nodes as usize,
BASE_DEGREE,
EXP_DEGREE,
*porep_id,
api_version,
)
.expect("new_stacked failure");

ParentCache::new(nodes, nodes, &graph).expect("parent cache new failure");
});
}
});

test_read_partial_range(api_version, porep_id);
}

#[test]
fn test_read_partial_range_v1_0() {
let porep_id = [0u8; 32];
test_read_partial_range(ApiVersion::V1_0_0, &porep_id);
}

#[test]
fn test_read_partial_range_v1_1() {
let porep_id = [1u8; 32]; // needs to differ from v1_0 so a separate graph is used
test_read_partial_range(ApiVersion::V1_1_0, &porep_id);
}

fn test_read_partial_range(api_version: ApiVersion, porep_id: &[u8; 32]) {
init_logger();
let nodes = 48u32;
let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
nodes as usize,
BASE_DEGREE,
EXP_DEGREE,
*porep_id,
api_version,
)
.expect("new_stacked failure");

5 changes: 4 additions & 1 deletion storage-proofs-porep/src/stacked/vanilla/cores.rs
@@ -200,7 +200,10 @@ mod tests {
}

#[test]
#[cfg(feature = "single-threaded")]
#[cfg(feature = "isolated-testing")]
// This test must not run while other tests are running, as the
// cores it checks out may otherwise be busy and cause a spurious
// failure.
fn test_checkout_cores() {
let checkout1 = checkout_core_group();
dbg!(&checkout1);
