Skip to content

Commit

Permalink
Merge pull request #1384 from filecoin-project/feat-fallback-parallelize-prove
Browse files Browse the repository at this point in the history

perf(storage-proofs-post): increase parallelism in fallback-post
  • Loading branch information
dignifiedquire authored Dec 3, 2020
2 parents 88d1af8 + 157aa73 commit 56c18bd
Showing 1 changed file with 177 additions and 155 deletions.
332 changes: 177 additions & 155 deletions storage-proofs/post/src/fallback/vanilla.rs
Original file line number Diff line number Diff line change
Expand Up @@ -248,11 +248,6 @@ pub fn generate_leaf_challenge_inner<T: Domain>(
leaf_challenge % (pub_params.sector_size / NODE_SIZE as u64)
}

/// Outcome of generating one inclusion proof for a challenged leaf.
///
/// `Proof` carries the successfully generated (and validated) Merkle
/// inclusion proof; `Fault` carries the id of the sector whose proof
/// either failed to generate or failed validation, so the caller can
/// record it in the set of faulty sectors instead of aborting.
enum ProofOrFault<T> {
    Proof(T),
    Fault(SectorId),
}

// Generates a single vanilla proof, given the private inputs and sector challenges.
pub fn vanilla_proof<Tree: MerkleTreeTrait>(
sector_id: SectorId,
Expand Down Expand Up @@ -376,84 +371,106 @@ impl<'a, Tree: 'a + MerkleTreeTrait> ProofScheme<'a> for FallbackPoSt<'a, Tree>
.zip(priv_inputs.sectors.chunks(num_sectors_per_chunk))
.enumerate()
{
trace!("proving partition {}", j);

let mut proofs = Vec::with_capacity(num_sectors_per_chunk);

for (i, (pub_sector, priv_sector)) in pub_sectors_chunk
.iter()
.zip(priv_sectors_chunk.iter())
let (mut proofs, mut faults) = pub_sectors_chunk
.par_iter()
.zip(priv_sectors_chunk.par_iter())
.enumerate()
{
let sector_id = pub_sector.id;
let tree = priv_sector.tree;
let tree_leafs = tree.leafs();
let rows_to_discard = default_rows_to_discard(tree_leafs, Tree::Arity::to_usize());

trace!(
"Generating proof for tree leafs {} and arity {}",
tree_leafs,
Tree::Arity::to_usize(),
);

// avoid rehashing fixed inputs
let mut challenge_hasher = Sha256::new();
challenge_hasher.update(AsRef::<[u8]>::as_ref(&pub_inputs.randomness));
challenge_hasher.update(&u64::from(sector_id).to_le_bytes()[..]);

let mut inclusion_proofs = Vec::new();
for proof_or_fault in (0..pub_params.challenge_count)
.into_par_iter()
.map(|n| {
let challenge_index = ((j * num_sectors_per_chunk + i)
* pub_params.challenge_count
+ n) as u64;
let challenged_leaf =
generate_leaf_challenge_inner::<<Tree::Hasher as Hasher>::Domain>(
challenge_hasher.clone(),
pub_params,
challenge_index,
);

let proof =
tree.gen_cached_proof(challenged_leaf as usize, Some(rows_to_discard));
match proof {
Ok(proof) => {
if proof.validate(challenged_leaf as usize)
&& proof.root() == priv_sector.comm_r_last
&& pub_sector.comm_r
== <Tree::Hasher as Hasher>::Function::hash2(
&priv_sector.comm_c,
&priv_sector.comm_r_last,
)
{
Ok(ProofOrFault::Proof(proof))
} else {
Ok(ProofOrFault::Fault(sector_id))
.map(|(i, (pub_sector, priv_sector))| {
let sector_id = pub_sector.id;
let tree = priv_sector.tree;
let tree_leafs = tree.leafs();
let rows_to_discard =
default_rows_to_discard(tree_leafs, Tree::Arity::to_usize());

trace!(
"Generating proof for tree leafs {} and arity {}",
tree_leafs,
Tree::Arity::to_usize(),
);

// avoid rehashing fixed inputs
let mut challenge_hasher = Sha256::new();
challenge_hasher.update(AsRef::<[u8]>::as_ref(&pub_inputs.randomness));
challenge_hasher.update(&u64::from(sector_id).to_le_bytes()[..]);

let (inclusion_proofs, faults) = (0..pub_params.challenge_count)
.into_par_iter()
.fold(
|| (Vec::new(), BTreeSet::new()),
|(mut inclusion_proofs, mut faults), n| {
let challenge_index =
((j * num_sectors_per_chunk + i) * pub_params.challenge_count
+ n) as u64;
let challenged_leaf = generate_leaf_challenge_inner::<
<Tree::Hasher as Hasher>::Domain,
>(
challenge_hasher.clone(),
pub_params,
challenge_index,
);
let proof = tree.gen_cached_proof(
challenged_leaf as usize,
Some(rows_to_discard),
);

match proof {
Ok(proof) => {
if proof.validate(challenged_leaf as usize)
&& proof.root() == priv_sector.comm_r_last
&& pub_sector.comm_r
== <Tree::Hasher as Hasher>::Function::hash2(
&priv_sector.comm_c,
&priv_sector.comm_r_last,
)
{
inclusion_proofs.push(proof);
} else {
error!("faulty sector: {:?}", sector_id);
faults.insert(sector_id);
}
}
Err(err) => {
error!("faulty sector: {:?} ({:?})", sector_id, err);
faults.insert(sector_id);
}
}
}
Err(_) => Ok(ProofOrFault::Fault(sector_id)),
}
})
.collect::<Result<Vec<_>>>()?
{
match proof_or_fault {
ProofOrFault::Proof(proof) => {
inclusion_proofs.push(proof);
}
ProofOrFault::Fault(sector_id) => {
error!("faulty sector: {:?}", sector_id);
faulty_sectors.insert(sector_id);
}
}
}

proofs.push(SectorProof {
inclusion_proofs,
comm_c: priv_sector.comm_c,
comm_r_last: priv_sector.comm_r_last,
});
}
(inclusion_proofs, faults)
},
)
.reduce(
|| (Vec::new(), BTreeSet::new()),
|(mut inclusion_proofs, mut faults), (p, f)| {
inclusion_proofs.extend(p);
faults.extend(f);
(inclusion_proofs, faults)
},
);

(
SectorProof {
inclusion_proofs,
comm_c: priv_sector.comm_c,
comm_r_last: priv_sector.comm_r_last,
},
faults,
)
})
.fold(
|| (Vec::new(), BTreeSet::new()),
|(mut sector_proofs, mut sector_faults), (sector_proof, mut faults)| {
sector_faults.append(&mut faults);
sector_proofs.push(sector_proof);
(sector_proofs, sector_faults)
},
)
.reduce(
|| (Vec::new(), BTreeSet::new()),
|(mut sector_proofs, mut sector_faults), (proofs, mut faults)| {
sector_proofs.extend(proofs);
sector_faults.append(&mut faults);
(sector_proofs, sector_faults)
},
);

// If there were less than the required number of sectors provided, we duplicate the last one
// to pad the proof out, such that it works in the circuit part.
Expand All @@ -462,6 +479,7 @@ impl<'a, Tree: 'a + MerkleTreeTrait> ProofScheme<'a> for FallbackPoSt<'a, Tree>
}

partition_proofs.push(Proof { sectors: proofs });
faulty_sectors.append(&mut faults);
}

if faulty_sectors.is_empty() {
Expand Down Expand Up @@ -507,82 +525,86 @@ impl<'a, Tree: 'a + MerkleTreeTrait> ProofScheme<'a> for FallbackPoSt<'a, Tree>
num_sectors_per_chunk,
);

for (i, (pub_sector, sector_proof)) in pub_sectors_chunk
.iter()
.zip(proof.sectors.iter())
let is_valid = pub_sectors_chunk
.par_iter()
.zip(proof.sectors.par_iter())
.enumerate()
{
let sector_id = pub_sector.id;
let comm_r = &pub_sector.comm_r;
let comm_c = sector_proof.comm_c;
let inclusion_proofs = &sector_proof.inclusion_proofs;

// Verify that H(Comm_c || Comm_r_last) == Comm_R

// comm_r_last is the root of the proof
let comm_r_last = inclusion_proofs[0].root();

if AsRef::<[u8]>::as_ref(&<Tree::Hasher as Hasher>::Function::hash2(
&comm_c,
&comm_r_last,
)) != AsRef::<[u8]>::as_ref(comm_r)
{
error!("hash(comm_c || comm_r_last) != comm_r: {:?}", sector_id);
return Ok(false);
}

ensure!(
challenge_count == inclusion_proofs.len(),
"unexpected number of inclusion proofs: {} != {}",
challenge_count,
inclusion_proofs.len()
);
.map(|(i, (pub_sector, sector_proof))| {
let sector_id = pub_sector.id;
let comm_r = &pub_sector.comm_r;
let comm_c = sector_proof.comm_c;
let inclusion_proofs = &sector_proof.inclusion_proofs;

// Verify that H(Comm_c || Comm_r_last) == Comm_R

// comm_r_last is the root of the proof
let comm_r_last = inclusion_proofs[0].root();

if AsRef::<[u8]>::as_ref(&<Tree::Hasher as Hasher>::Function::hash2(
&comm_c,
&comm_r_last,
)) != AsRef::<[u8]>::as_ref(comm_r)
{
error!("hash(comm_c || comm_r_last) != comm_r: {:?}", sector_id);
return Ok(false);
}

ensure!(
challenge_count == inclusion_proofs.len(),
"unexpected number of inclusion proofs: {} != {}",
challenge_count,
inclusion_proofs.len()
);

// avoid rehashing fixed inputs
let mut challenge_hasher = Sha256::new();
challenge_hasher.update(AsRef::<[u8]>::as_ref(&pub_inputs.randomness));
challenge_hasher.update(&u64::from(sector_id).to_le_bytes()[..]);

let is_valid_list = inclusion_proofs
.par_iter()
.enumerate()
.map(|(n, inclusion_proof)| -> Result<bool> {
let challenge_index =
(j * num_sectors_per_chunk + i) * pub_params.challenge_count + n;
let challenged_leaf =
generate_leaf_challenge_inner::<<Tree::Hasher as Hasher>::Domain>(
challenge_hasher.clone(),
pub_params,
challenge_index as u64,
);

// validate all comm_r_lasts match
if inclusion_proof.root() != comm_r_last {
error!("inclusion proof root != comm_r_last: {:?}", sector_id);
return Ok(false);
}

// avoid rehashing fixed inputs
let mut challenge_hasher = Sha256::new();
challenge_hasher.update(AsRef::<[u8]>::as_ref(&pub_inputs.randomness));
challenge_hasher.update(&u64::from(sector_id).to_le_bytes()[..]);

let is_valid_list = inclusion_proofs
.par_iter()
.enumerate()
.map(|(n, inclusion_proof)| -> Result<bool> {
let challenge_index =
(j * num_sectors_per_chunk + i) * pub_params.challenge_count + n;
let challenged_leaf =
generate_leaf_challenge_inner::<<Tree::Hasher as Hasher>::Domain>(
challenge_hasher.clone(),
pub_params,
challenge_index as u64,
);

// validate all comm_r_lasts match
if inclusion_proof.root() != comm_r_last {
error!("inclusion proof root != comm_r_last: {:?}", sector_id);
return Ok(false);
}

// validate the path length
let expected_path_length = inclusion_proof
.expected_len(pub_params.sector_size as usize / NODE_SIZE);

if expected_path_length != inclusion_proof.path().len() {
error!("wrong path length: {:?}", sector_id);
return Ok(false);
}

if !inclusion_proof.validate(challenged_leaf as usize) {
error!("invalid inclusion proof: {:?}", sector_id);
return Ok(false);
}
Ok(true)
})
.collect::<Result<Vec<bool>>>()?;

let is_valid = is_valid_list.into_iter().all(|v| v);
if !is_valid {
return Ok(false);
}
// validate the path length
let expected_path_length = inclusion_proof
.expected_len(pub_params.sector_size as usize / NODE_SIZE);

if expected_path_length != inclusion_proof.path().len() {
error!("wrong path length: {:?}", sector_id);
return Ok(false);
}

if !inclusion_proof.validate(challenged_leaf as usize) {
error!("invalid inclusion proof: {:?}", sector_id);
return Ok(false);
}
Ok(true)
})
.collect::<Result<Vec<bool>>>()?;

Ok(is_valid_list.into_iter().all(|v| v))
})
.reduce(
|| Ok(true),
|all_valid, is_valid| Ok(all_valid? && is_valid?),
)?;
if !is_valid {
return Ok(false);
}
}
Ok(true)
Expand Down

0 comments on commit 56c18bd

Please sign in to comment.