Skip to content

Commit

Permalink
feat: remove unnecessary scope/spawn in gpu2 code
Browse files (browse the repository at this point in the history)
feat: add some minor parallelization where we can
  • Loading branch information
cryptonemo committed Mar 18, 2021
1 parent 5006a7a commit 2ec461f
Show file tree
Hide file tree
Showing 4 changed files with 27 additions and 28 deletions.
3 changes: 1 addition & 2 deletions in filecoin-proofs/src/api/seal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -137,13 +137,12 @@ where
base_tree_leafs,
);

// MT for original data is always named tree-d, and it will be
// referenced later in the process as such.
let mut config = StoreConfig::new(
cache_path.as_ref(),
CacheKey::CommDTree.to_string(),
default_rows_to_discard(base_tree_leafs, BINARY_ARITY),
);

let data_tree = create_base_merkle_tree::<BinaryMerkleTree<DefaultPieceHasher>>(
Some(config.clone()),
base_tree_leafs,
Expand Down
2 changes: 1 addition & 1 deletion in storage-proofs-core/src/compound_proof.rs
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ where
groth_proofs
.into_iter()
.map(|groth_proof| {
let mut proof_vec = vec![];
let mut proof_vec = Vec::new();
groth_proof.write(&mut proof_vec)?;
let gp = groth16::Proof::<Bls12>::read(&proof_vec[..])?;
Ok(gp)
Expand Down
1 change: 1 addition & 0 deletions in storage-proofs-porep/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ storage-proofs-core = { path = "../storage-proofs-core", version = "^6.0.0", def
sha2raw = { path = "../sha2raw", version = "^2.0.0"}
filecoin-hashers = { path = "../filecoin-hashers", version = "1.0.0", default-features = false, features = ["poseidon", "sha256"]}
rand = "0.7"
memmap = "0.7"
merkletree = "0.21.0"
mapr = "0.8.0"
num-bigint = "0.2"
Expand Down
49 changes: 24 additions & 25 deletions in storage-proofs-porep/src/stacked/vanilla/proof.rs
Original file line number Diff line number Diff line change
Expand Up @@ -103,9 +103,12 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
let mut parents = vec![0; base_degree];
graph.base_parents(x, &mut parents)?;

for parent in &parents {
columns.push(t_aux.column(*parent)?);
}
columns.extend(
parents
.into_par_iter()
.map(|parent| t_aux.column(parent).expect("failed to get parent column"))
.collect::<Vec<Column<Tree::Hasher>>>(),
);

debug_assert!(columns.len() == base_degree);

Expand All @@ -116,7 +119,10 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
let mut parents = vec![0; graph.expansion_degree()];
graph.expanded_parents(x, &mut parents)?;

parents.iter().map(|parent| t_aux.column(*parent)).collect()
parents
.into_par_iter()
.map(|parent| t_aux.column(parent))
.collect()
};

(0..partition_count)
Expand Down Expand Up @@ -194,7 +200,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
graph.base_parents(challenge, &mut parents)?;

parents
.into_iter()
.into_par_iter()
.map(|parent| t_aux.domain_node_at_layer(layer, parent))
.collect::<Result<_>>()?
} else {
Expand All @@ -203,7 +209,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
let base_parents_count = graph.base_graph().degree();

parents
.into_iter()
.into_par_iter()
.enumerate()
.map(|(i, parent)| {
if i < base_parents_count {
Expand Down Expand Up @@ -556,25 +562,18 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
layers
];

rayon::scope(|s| {
// capture a shadowed version of layer_data.
let layer_data: &mut Vec<_> = &mut layer_data;

// gather all layer data in parallel.
s.spawn(move |_| {
for (layer_index, mut layer_bytes) in
layer_data.iter_mut().enumerate()
{
let store = labels.labels_for_layer(layer_index + 1);
let start = (i * nodes_count) + node_index;
let end = start + chunked_nodes_count;

store
.read_range_into(start, end, &mut layer_bytes)
.expect("failed to read store range");
}
});
});
// gather all layer data.
for (layer_index, mut layer_bytes) in
layer_data.iter_mut().enumerate()
{
let store = labels.labels_for_layer(layer_index + 1);
let start = (i * nodes_count) + node_index;
let end = start + chunked_nodes_count;

store
.read_range_into(start, end, &mut layer_bytes)
.expect("failed to read store range");
}

(0..chunked_nodes_count)
.into_par_iter()
Expand Down

0 comments on commit 2ec461f

Please sign in to comment.