fix: hard-code rows to discard #1724

Merged (1 commit, Oct 5, 2023)
11 changes: 3 additions & 8 deletions fil-proofs-tooling/src/shared.rs
@@ -9,7 +9,6 @@ use filecoin_proofs::{
PublicReplicaInfo, SealPreCommitOutput, SealPreCommitPhase1Output, SectorSize,
UnpaddedBytesAmount,
};
use generic_array::typenum::Unsigned;
use log::info;
use merkletree::store::StoreConfig;
use rand::{random, thread_rng, RngCore};
@@ -19,7 +18,7 @@ use rayon::prelude::{
use storage_proofs_core::{
api_version::{ApiFeature, ApiVersion},
sector::SectorId,
util::{default_rows_to_discard, NODE_SIZE},
util::NODE_SIZE,
};
use storage_proofs_porep::stacked::Labels;
use tempfile::{tempdir, NamedTempFile};
@@ -193,12 +192,8 @@ pub fn create_replicas<Tree: 'static + MerkleTreeTrait>(
.par_iter()
.zip(sector_ids.par_iter())
.map(|(cache_dir, sector_id)| {
let nodes = sector_size.0 as usize / NODE_SIZE;
let mut tmp_store_config = StoreConfig::new(
cache_dir.path(),
format!("tmp-config-{}", sector_id),
default_rows_to_discard(nodes, Tree::Arity::to_usize()),
);
let mut tmp_store_config =
StoreConfig::new(cache_dir.path(), format!("tmp-config-{}", sector_id), 0);
tmp_store_config.size = Some(u64::from(sector_size) as usize / NODE_SIZE);
let f = File::create(StoreConfig::data_path(
&tmp_store_config.path,
12 changes: 1 addition & 11 deletions filecoin-proofs/src/api/mod.rs
@@ -16,7 +16,6 @@ use storage_proofs_core::{
merkle::get_base_tree_count,
pieces::generate_piece_commitment_bytes_from_source,
sector::SectorId,
util::default_rows_to_discard,
};
use storage_proofs_porep::stacked::{
generate_replica_id, PersistentAux, PublicParams, StackedDrg, TemporaryAux,
@@ -356,16 +355,7 @@
{
trace!("unseal_range_inner:start");

let base_tree_size = get_base_tree_size::<DefaultBinaryTree>(porep_config.sector_size)?;
let base_tree_leafs = get_base_tree_leafs::<DefaultBinaryTree>(base_tree_size)?;
let config = StoreConfig::new(
cache_path.as_ref(),
CacheKey::CommDTree.to_string(),
default_rows_to_discard(
base_tree_leafs,
<DefaultBinaryTree as MerkleTreeTrait>::Arity::to_usize(),
),
);
let config = StoreConfig::new(cache_path.as_ref(), CacheKey::CommDTree.to_string(), 0);
let pp: PublicParams<Tree> = public_params(porep_config)?;

let offset_padded: PaddedBytesAmount = UnpaddedBytesAmount::from(offset).into();
20 changes: 5 additions & 15 deletions filecoin-proofs/src/api/seal.rs
@@ -158,11 +158,7 @@ where
base_tree_leafs,
);

let mut config = StoreConfig::new(
cache_path.as_ref(),
CacheKey::CommDTree.to_string(),
default_rows_to_discard(base_tree_leafs, BINARY_ARITY),
);
let mut config = StoreConfig::new(cache_path.as_ref(), CacheKey::CommDTree.to_string(), 0);

let data_tree = create_base_merkle_tree::<BinaryMerkleTree<DefaultPieceHasher>>(
Some(config.clone()),
@@ -273,11 +269,7 @@ where
"seal phase 2: base tree size {}, base tree leafs {}, rows to discard {}",
base_tree_size,
base_tree_leafs,
default_rows_to_discard(base_tree_leafs, BINARY_ARITY)
);
ensure!(
config.rows_to_discard == default_rows_to_discard(base_tree_leafs, BINARY_ARITY),
"Invalid cache size specified"
0
);

let store: DiskStore<DefaultPieceDomain> =
@@ -1202,7 +1194,6 @@ where
let base_tree_count = get_base_tree_count::<TreeR>();
let base_tree_leafs = leaf_count / base_tree_count;

let rows_to_discard = default_rows_to_discard(base_tree_leafs, TreeR::Arity::to_usize());
let size = get_base_tree_size::<TreeR>(SectorSize(sector_size))?;
let tree_r_last_config = StoreConfig {
path: PathBuf::from(output_dir.as_ref()),
@@ -1214,7 +1205,7 @@
// configuration. *Use with caution*. It must be noted that if/when this unchecked
// value is passed through merkle_light, merkle_light now does a check that does not
// allow us to discard more rows than is possible to discard.
rows_to_discard,
rows_to_discard: default_rows_to_discard(base_tree_leafs, TreeR::Arity::to_usize()),
};

let replica_base_tree_size = get_base_tree_size::<DefaultBinaryTree>(sector_size.into())?;
@@ -1257,13 +1248,12 @@ where
let base_tree_count = get_base_tree_count::<Tree>();
let base_tree_leafs = leaf_count / base_tree_count;

let rows_to_discard = default_rows_to_discard(base_tree_leafs, Tree::Arity::to_usize());
let size = get_base_tree_size::<Tree>(SectorSize(sector_size))?;
let tree_c_config = StoreConfig {
path: PathBuf::from(output_dir.as_ref()),
id: CacheKey::CommCTree.to_string(),
size: Some(size),
rows_to_discard,
rows_to_discard: 0,
Contributor:

If TreeR has discarded rows, shouldn't also TreeC? Admittedly, I don't fully understand rows_to_discard and its implications

Contributor Author:

> If TreeR has discarded rows, shouldn't also TreeC? Admittedly, I don't fully understand rows_to_discard and its implications

That's exactly the confusing thing, which motivated this change. The rows_to_discard really only ever applies to TreeR. Even if it's set, it would be ignored by a TreeC.

Collaborator:

To put it another way: we need to keep TreeR around for the long term for PoSt proving, whereas TreeC is removed after the initial PoRep proof usage. The idea was that if we need to keep TreeR around, how could we optimize (down) the amount of space it requires on disk (i.e. compaction/level cache store)?

};
let configs = split_config(tree_c_config, base_tree_count)?;

@@ -1275,7 +1265,7 @@ where
path: PathBuf::from(input_dir.as_ref()),
id: CacheKey::label_layer(layer),
size: Some(label_base_tree_leafs),
rows_to_discard: default_rows_to_discard(label_base_tree_leafs, BINARY_ARITY),
rows_to_discard: 0,
})
.collect();
let labels = Labels::new(label_configs);
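For context on the review thread above: rows_to_discard controls how many rows directly above a tree's base layer a level-cache store drops from disk and recomputes on demand, and that path only ever matters for tree_r_last, the one tree kept long term for PoSt. The sketch below is a standalone illustration of that idea, not the rust-fil-proofs code; the helper names, the fixed cap of 2, and the toy parameters are assumptions (the real helper is storage_proofs_core::util::default_rows_to_discard).

```rust
/// Number of rows in a merkle tree over `leafs` leaves with the given arity
/// (base layer and root row both counted).
fn row_count(leafs: usize, arity: usize) -> usize {
    let mut rows = 1;
    let mut width = leafs;
    while width > 1 {
        width = (width + arity - 1) / arity; // width of the next row up
        rows += 1;
    }
    rows
}

/// How many rows directly above the base layer a level-cache store could
/// drop, clamped to a fixed cap (2 is a stand-in chosen for this sketch).
fn rows_to_discard(leafs: usize, arity: usize) -> usize {
    let rows = row_count(leafs, arity);
    // Never discard the base layer or the root row.
    rows.saturating_sub(2).min(2)
}

fn main() {
    // A toy 8-ary tree over 512 leaves has rows of width 512, 64, 8 and 1,
    // so at most two rows are discardable.
    assert_eq!(row_count(512, 8), 4);
    assert_eq!(rows_to_discard(512, 8), 2);
}
```

Trees that are deleted right after the PoRep proof (tree_c, tree_d, the label layers) never use that cached-on-disk path, so any computed value would be ignored; passing a literal 0 for them, as this PR does, makes that explicit.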
@@ -8,10 +8,9 @@ use log::{info, warn};
use merkletree::{merkle::Element, store::StoreConfig};
use storage_proofs_core::{
cache_key::CacheKey, drgraph::Graph, error::Result, merkle::MerkleTreeTrait,
util::default_rows_to_discard,
};

use crate::stacked::vanilla::{proof::LayerState, StackedBucketGraph, BINARY_ARITY};
use crate::stacked::vanilla::{proof::LayerState, StackedBucketGraph};

#[cfg(feature = "multicore-sdr")]
pub mod multi;
@@ -31,7 +30,7 @@
path: cache_path.as_ref().to_path_buf(),
id: CacheKey::label_layer(layer),
size: Some(graph.size()),
rows_to_discard: default_rows_to_discard(graph.size(), BINARY_ARITY),
rows_to_discard: 0,
});

let mut states = Vec::with_capacity(layers);
7 changes: 3 additions & 4 deletions storage-proofs-porep/src/stacked/vanilla/proof.rs
@@ -1515,10 +1515,9 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
path: cache_path.clone(),
id: CacheKey::CommDTree.to_string(),
size: Some(get_merkle_tree_len(total_nodes_count, BINARY_ARITY)?),
rows_to_discard: default_rows_to_discard(total_nodes_count, BINARY_ARITY),
rows_to_discard: 0,
};

let rows_to_discard = default_rows_to_discard(nodes_count, Tree::Arity::to_usize());
let size = Some(get_merkle_tree_len(nodes_count, Tree::Arity::to_usize())?);

let tree_r_last_config = StoreConfig {
@@ -1531,7 +1530,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
// configuration. *Use with caution*. It must be noted that if/when this unchecked
// value is passed through merkle_light, merkle_light now does a check that does not
// allow us to discard more rows than is possible to discard.
rows_to_discard,
rows_to_discard: default_rows_to_discard(nodes_count, Tree::Arity::to_usize()),
};
trace!(
"tree_r_last using rows_to_discard={}",
@@ -1542,7 +1541,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
path: cache_path,
id: CacheKey::CommCTree.to_string(),
size,
rows_to_discard,
rows_to_discard: 0,
};

let labels =
22 changes: 5 additions & 17 deletions storage-proofs-porep/tests/stacked_vanilla.rs
@@ -19,12 +19,12 @@ use storage_proofs_core::{
proof::ProofScheme,
table_tests,
test_helper::setup_replica,
util::{default_rows_to_discard, NODE_SIZE},
util::NODE_SIZE,
TEST_SEED,
};
use storage_proofs_porep::stacked::{
LayerChallenges, PrivateInputs, PublicInputs, SetupParams, StackedBucketGraph, StackedDrg,
TemporaryAux, TemporaryAuxCache, BINARY_ARITY, EXP_DEGREE,
TemporaryAux, TemporaryAuxCache, EXP_DEGREE,
};
use tempfile::tempdir;

@@ -95,11 +95,7 @@ fn test_extract_all<Tree: 'static + MerkleTreeTrait>() {
// MT for original data is always named tree-d, and it will be
// referenced later in the process as such.
let cache_dir = tempdir().expect("tempdir failure");
let config = StoreConfig::new(
cache_dir.path(),
CacheKey::CommDTree.to_string(),
default_rows_to_discard(nodes, BINARY_ARITY),
);
let config = StoreConfig::new(cache_dir.path(), CacheKey::CommDTree.to_string(), 0);

// Generate a replica path.
let replica_path = cache_dir.path().join("replica-path");
@@ -190,11 +186,7 @@ fn test_stacked_porep_resume_seal() {
// MT for original data is always named tree-d, and it will be
// referenced later in the process as such.
let cache_dir = tempdir().expect("tempdir failure");
let config = StoreConfig::new(
cache_dir.path(),
CacheKey::CommDTree.to_string(),
default_rows_to_discard(nodes, BINARY_ARITY),
);
let config = StoreConfig::new(cache_dir.path(), CacheKey::CommDTree.to_string(), 0);

// Generate a replica path.
let replica_path1 = cache_dir.path().join("replica-path-1");
@@ -344,11 +336,7 @@ fn test_prove_verify<Tree: 'static + MerkleTreeTrait>(n: usize, challenges: Laye
// MT for original data is always named tree-d, and it will be
// referenced later in the process as such.
let cache_dir = tempdir().expect("tempdir failure");
let config = StoreConfig::new(
cache_dir.path(),
CacheKey::CommDTree.to_string(),
default_rows_to_discard(nodes, BINARY_ARITY),
);
let config = StoreConfig::new(cache_dir.path(), CacheKey::CommDTree.to_string(), 0);

// Generate a replica path.
let replica_path = cache_dir.path().join("replica-path");
1 change: 0 additions & 1 deletion storage-proofs-update/tests/circuit.rs
@@ -12,7 +12,6 @@ use rand::SeedableRng;
use rand_xorshift::XorShiftRng;
use storage_proofs_core::{
merkle::{MerkleTreeTrait, MerkleTreeWrapper},
util::default_rows_to_discard,
TEST_SEED,
};
use storage_proofs_update::{
1 change: 0 additions & 1 deletion storage-proofs-update/tests/circuit_poseidon.rs
@@ -11,7 +11,6 @@ use rand::SeedableRng;
use rand_xorshift::XorShiftRng;
use storage_proofs_core::{
merkle::{MerkleTreeTrait, MerkleTreeWrapper},
util::default_rows_to_discard,
TEST_SEED,
};
use storage_proofs_update::{
3 changes: 1 addition & 2 deletions storage-proofs-update/tests/compound.rs
@@ -108,13 +108,12 @@ where
let labels_d_new: Vec<TreeDDomain> = (0..sector_nodes)
.map(|_| TreeDDomain::random(&mut rng))
.collect();
let tree_d_rows_to_discard = default_rows_to_discard(sector_nodes, TreeDArity::to_usize());
let tree_d_nodes = get_merkle_tree_len(sector_nodes, TreeDArity::to_usize()).unwrap();
let tree_d_new_config = StoreConfig {
path: tmp_path.into(),
id: "tree-d-new".to_string(),
size: Some(tree_d_nodes),
rows_to_discard: tree_d_rows_to_discard,
rows_to_discard: 0,
};
let tree_d_new = TreeD::try_from_iter_with_config(
labels_d_new.iter().copied().map(Ok),
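Taken together, the hunks converge on a single pattern: only the tree_r_last store keeps a computed rows_to_discard, while every other StoreConfig is built with a literal 0. Below is a condensed sketch of that pattern; it assumes the workspace's merkletree and storage_proofs_core crates are available, and the path, sizes, and the CacheKey::CommRLastTree id are placeholders rather than the exact values the call sites above compute.

```rust
use std::path::PathBuf;

use merkletree::store::StoreConfig;
use storage_proofs_core::{cache_key::CacheKey, util::default_rows_to_discard};

/// Build the two kinds of store configs this PR leaves behind: a level-cache
/// config for tree_r_last and a plain one (rows_to_discard = 0) for tree_c.
fn example_configs(
    cache_path: PathBuf,
    base_tree_leafs: usize,
    tree_arity: usize,
    tree_size_nodes: usize,
) -> (StoreConfig, StoreConfig) {
    // tree_r_last is kept long term for PoSt proving, so discarding rows on
    // disk is worthwhile; the value is still computed here.
    let tree_r_last_config = StoreConfig {
        path: cache_path.clone(),
        id: CacheKey::CommRLastTree.to_string(),
        size: Some(tree_size_nodes),
        rows_to_discard: default_rows_to_discard(base_tree_leafs, tree_arity),
    };

    // tree_c (like tree_d and the label layers) is removed after the initial
    // PoRep proof, so a computed value would never be used: hard-code 0.
    let tree_c_config = StoreConfig {
        path: cache_path,
        id: CacheKey::CommCTree.to_string(),
        size: Some(tree_size_nodes),
        rows_to_discard: 0,
    };

    (tree_r_last_config, tree_c_config)
}
```

In the real seal.rs and proof.rs call sites the sizes come from get_base_tree_size and get_merkle_tree_len, as the hunks above show.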