diff --git a/fil-proofs-tooling/Cargo.toml b/fil-proofs-tooling/Cargo.toml
index 9f97b7917..d1623fe57 100644
--- a/fil-proofs-tooling/Cargo.toml
+++ b/fil-proofs-tooling/Cargo.toml
@@ -31,7 +31,7 @@ storage-proofs = { path = "../storage-proofs"}
 filecoin-proofs = { path = "../filecoin-proofs"}
 tempfile = "3.0.8"
 cpu-time = "1.0.0"
-git2 = "0.10.1"
+git2 = "0.13"
 heim = "0.0.9"
 futures-preview = "0.3.0-alpha.17"
 raw-cpuid = "7.0.3"
diff --git a/storage-proofs/core/src/parameter_cache.rs b/storage-proofs/core/src/parameter_cache.rs
index 5c8fa8d39..518989297 100644
--- a/storage-proofs/core/src/parameter_cache.rs
+++ b/storage-proofs/core/src/parameter_cache.rs
@@ -134,7 +134,7 @@ fn ensure_ancestor_dirs_exist(cache_entry_path: PathBuf) -> Result<PathBuf> {
     Ok(cache_entry_path)
 }

-pub trait ParameterSetMetadata: Clone {
+pub trait ParameterSetMetadata {
     fn identifier(&self) -> String;
     fn sector_size(&self) -> u64;
 }
diff --git a/storage-proofs/core/src/settings.rs b/storage-proofs/core/src/settings.rs
index 1b32d3abc..891312418 100644
--- a/storage-proofs/core/src/settings.rs
+++ b/storage-proofs/core/src/settings.rs
@@ -22,12 +22,13 @@ pub struct Settings {
     pub use_gpu_tree_builder: bool,
     pub max_gpu_tree_batch_size: u32,
     pub rows_to_discard: u32,
+    pub sdr_parents_cache_size: u32,
 }

 impl Default for Settings {
     fn default() -> Self {
         Settings {
-            maximize_caching: false,
+            maximize_caching: true,
             pedersen_hash_exp_window_size: 16,
             use_gpu_column_builder: false,
             max_gpu_column_batch_size: 400_000,
@@ -35,6 +36,7 @@
             use_gpu_tree_builder: false,
             max_gpu_tree_batch_size: 700_000,
             rows_to_discard: 2,
+            sdr_parents_cache_size: 2048,
         }
     }
 }
diff --git a/storage-proofs/porep/Cargo.toml b/storage-proofs/porep/Cargo.toml
index d011322bb..5ea2d6f90 100644
--- a/storage-proofs/porep/Cargo.toml
+++ b/storage-proofs/porep/Cargo.toml
@@ -27,9 +27,10 @@ log = "0.4.7"
 pretty_assertions = "0.6.1"
 generic-array = "0.13.2"
 anyhow = "1.0.23"
-once_cell = "1.3.1"
 neptune = { version = "1.0.1", features = ["gpu"] }
 num_cpus = "1.10.1"
+hex = "0.4.2"
+byteorder = "1.3.4"

 [dev-dependencies]
 tempdir = "0.3.7"
diff --git a/storage-proofs/porep/benches/encode.rs b/storage-proofs/porep/benches/encode.rs
index 686474e5f..8d3b557a1 100644
--- a/storage-proofs/porep/benches/encode.rs
+++ b/storage-proofs/porep/benches/encode.rs
@@ -53,7 +53,17 @@ fn kdf_benchmark(c: &mut Criterion) {
         let graph = &graph;
         let replica_id = replica_id.clone();

-        b.iter(|| black_box(create_label_exp(graph, &replica_id, &*exp_data, data, 1, 2)))
+        b.iter(|| {
+            black_box(create_label_exp(
+                graph,
+                None,
+                &replica_id,
+                &*exp_data,
+                data,
+                1,
+                2,
+            ))
+        })
     });

     group.bench_function("non-exp", |b| {
@@ -61,7 +71,7 @@ fn kdf_benchmark(c: &mut Criterion) {
         let graph = &graph;
         let replica_id = replica_id.clone();

-        b.iter(|| black_box(create_label(graph, &replica_id, &mut data, 1, 2)))
+        b.iter(|| black_box(create_label(graph, None, &replica_id, &mut data, 1, 2)))
     });

     group.finish();
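A note on the settings.rs hunk above: `maximize_caching` can now default to `true` because it no longer means "hold the entire parent table in RAM" — only a sliding window of at most `sdr_parents_cache_size` entries stays resident (see `parent_cache()` in the graph.rs hunk further down). A back-of-envelope sketch of what the new defaults cost; the 32GiB sector figure is an assumption for illustration, not something this diff pins down:

```rust
// Sketch: resident memory under the new window vs. the old all-in-RAM table.
fn main() {
    let degree = 6 + 8; // BASE_DEGREE + EXP_DEGREE parents per node
    let entry_bytes = degree * 4; // each parent index is a u32
    // New: a window of 2048 entries (the sdr_parents_cache_size default).
    assert_eq!(2048 * entry_bytes, 112 * 1024); // ~112 KiB resident
    // Old: a 32GiB sector has 2^30 nodes, so the full in-RAM table was
    // 2^30 * 56 bytes = 56 GiB — the reason the old default was `false`.
    assert_eq!((1u64 << 30) * entry_bytes as u64, 56 * (1u64 << 30));
}
```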
diff --git a/storage-proofs/porep/src/stacked/circuit/create_label.rs b/storage-proofs/porep/src/stacked/circuit/create_label.rs
index 431710d62..315707737 100644
--- a/storage-proofs/porep/src/stacked/circuit/create_label.rs
+++ b/storage-proofs/porep/src/stacked/circuit/create_label.rs
@@ -167,7 +167,7 @@ mod tests {
         assert_eq!(cs.num_constraints(), 532_025);

         let (l1, l2) = data.split_at_mut(size * NODE_SIZE);
-        create_label_exp(&graph, &id_fr.into(), &*l2, l1, layer, node).unwrap();
+        create_label_exp(&graph, None, &id_fr.into(), &*l2, l1, layer, node).unwrap();

         let expected_raw = data_at_node(&l1, node).unwrap();
         let expected = bytes_into_fr(expected_raw).unwrap();
diff --git a/storage-proofs/porep/src/stacked/circuit/proof.rs b/storage-proofs/porep/src/stacked/circuit/proof.rs
index bbe491430..2f9eb4f05 100644
--- a/storage-proofs/porep/src/stacked/circuit/proof.rs
+++ b/storage-proofs/porep/src/stacked/circuit/proof.rs
@@ -239,7 +239,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher>

         // exp parents
         let mut exp_parents = vec![0; graph.expansion_degree()];
-        graph.expanded_parents(challenge, &mut exp_parents);
+        graph.expanded_parents(challenge, &mut exp_parents)?;

         // Inclusion Proofs: expander parent node in comm_c
         for parent in exp_parents.into_iter() {
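The new `vanilla/cache.rs` below replaces the in-memory parent table with a file that is generated once and then memory-mapped: each node's parents occupy a fixed-size slot of `DEGREE` little-endian `u32`s, so any window of nodes is a contiguous byte range. A minimal layout sketch (constants restated here for the example; the real `DEGREE` lives in graph.rs):

```rust
// Fixed-slot layout: node i's parents live at a deterministic offset,
// which is what makes the sliding mmap window in cache.rs possible.
const DEGREE: usize = 6 + 8; // BASE_DEGREE + EXP_DEGREE
const NODE_BYTES: usize = 4; // parent indices are u32, little-endian

fn entry_range(node: u32) -> std::ops::Range<usize> {
    let start = node as usize * DEGREE * NODE_BYTES;
    start..start + DEGREE * NODE_BYTES
}

fn main() {
    assert_eq!(entry_range(0), 0..56);
    assert_eq!(entry_range(1), 56..112);
}
```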
diff --git a/storage-proofs/porep/src/stacked/vanilla/cache.rs b/storage-proofs/porep/src/stacked/vanilla/cache.rs
new file mode 100644
index 000000000..acede8be2
--- /dev/null
+++ b/storage-proofs/porep/src/stacked/vanilla/cache.rs
@@ -0,0 +1,347 @@
+use std::path::PathBuf;
+
+use anyhow::{bail, ensure, Context};
+use byteorder::{ByteOrder, LittleEndian};
+use log::info;
+use rayon::prelude::*;
+use sha2::{Digest, Sha256};
+
+use storage_proofs_core::{
+    drgraph::Graph,
+    drgraph::BASE_DEGREE,
+    error::Result,
+    hasher::Hasher,
+    parameter_cache::{ParameterSetMetadata, VERSION},
+};
+
+use super::graph::{StackedGraph, DEGREE};
+
+/// Path in which to store the parents caches.
+const PARENT_CACHE_DIR: &str = "/var/tmp/filecoin-parents";
+
+/// u32 = 4 bytes
+const NODE_BYTES: usize = 4;
+
+// StackedGraph will hold two different (but related) `ParentCache`s.
+#[derive(Debug)]
+pub struct ParentCache {
+    /// Disk path for the cache.
+    path: PathBuf,
+    /// The total number of cache entries.
+    num_cache_entries: u32,
+    cache: CacheData,
+}
+
+#[derive(Debug)]
+struct CacheData {
+    /// This is a large list of fixed (parent) sized arrays.
+    data: memmap::Mmap,
+    /// Offset in nodes.
+    offset: u32,
+    /// Len in nodes.
+    len: u32,
+    /// The underlying file.
+    file: std::fs::File,
+}
+
+impl CacheData {
+    /// Change the cache to point to the newly passed in offset.
+    ///
+    /// The `new_offset` must be set such that `new_offset + len` does not
+    /// overflow the underlying data.
+    fn shift(&mut self, new_offset: u32) -> Result<()> {
+        if self.offset == new_offset {
+            return Ok(());
+        }
+
+        let offset = new_offset as usize * DEGREE * NODE_BYTES;
+        let len = self.len as usize * DEGREE * NODE_BYTES;
+
+        self.data = unsafe {
+            memmap::MmapOptions::new()
+                .offset(offset as u64)
+                .len(len)
+                .map(&self.file)
+                .context("could not shift mmap")?
+        };
+        self.offset = new_offset;
+
+        Ok(())
+    }
+
+    /// Returns true if this node is in the cached range.
+    fn contains(&self, node: u32) -> bool {
+        node >= self.offset && node < self.offset + self.len
+    }
+
+    /// Read the parents for the given node from cache.
+    ///
+    /// Panics if the `node` is not in the cache.
+    fn read(&self, node: u32) -> [u32; DEGREE] {
+        assert!(node >= self.offset, "node not in cache");
+        let start = (node - self.offset) as usize * DEGREE * NODE_BYTES;
+        let end = start + DEGREE * NODE_BYTES;
+
+        let mut res = [0u32; DEGREE];
+        LittleEndian::read_u32_into(&self.data[start..end], &mut res);
+        res
+    }
+
+    fn reset(&mut self) -> Result<()> {
+        if self.offset == 0 {
+            return Ok(());
+        }
+
+        self.shift(0)
+    }
+
+    fn open(offset: u32, len: u32, path: &PathBuf) -> Result<Self> {
+        let min_cache_size = (offset + len) as usize * DEGREE * NODE_BYTES;
+
+        let file = std::fs::OpenOptions::new()
+            .read(true)
+            .open(&path)
+            .with_context(|| format!("could not open path={}", path.display()))?;
+
+        let actual_len = file.metadata()?.len();
+        if actual_len < min_cache_size as u64 {
+            bail!(
+                "corrupted cache: {}, expected at least {}, got {} bytes",
+                path.display(),
+                min_cache_size,
+                actual_len
+            );
+        }
+
+        let data = unsafe {
+            memmap::MmapOptions::new()
+                .offset((offset as usize * DEGREE * NODE_BYTES) as u64)
+                .len(len as usize * DEGREE * NODE_BYTES)
+                .map(&file)
+                .with_context(|| format!("could not mmap path={}", path.display()))?
+        };
+
+        Ok(Self {
+            data,
+            file,
+            len,
+            offset,
+        })
+    }
+}
+
+impl ParentCache {
+    pub fn new<H, G>(len: u32, cache_entries: u32, graph: &StackedGraph<H, G>) -> Result<Self>
+    where
+        H: Hasher,
+        G: Graph<H> + ParameterSetMetadata + Send + Sync,
+    {
+        let path = cache_path(cache_entries, graph);
+        if path.exists() {
+            Self::open(len, cache_entries, path)
+        } else {
+            Self::generate(len, cache_entries, graph, path)
+        }
+    }
+
+    /// Opens an existing cache from disk.
+    pub fn open(len: u32, cache_entries: u32, path: PathBuf) -> Result<Self> {
+        info!("parent cache: opening {}", path.display());
+
+        let cache = CacheData::open(0, len, &path)?;
+        info!("parent cache: opened");
+
+        Ok(ParentCache {
+            cache,
+            path,
+            num_cache_entries: cache_entries,
+        })
+    }
+
+    /// Generates a new cache and stores it on disk.
+    pub fn generate<H, G>(
+        len: u32,
+        cache_entries: u32,
+        graph: &StackedGraph<H, G>,
+        path: PathBuf,
+    ) -> Result<Self>
+    where
+        H: Hasher,
+        G: Graph<H> + ParameterSetMetadata + Send + Sync,
+    {
+        info!("parent cache: generating {}", path.display());
+
+        std::fs::create_dir_all(PARENT_CACHE_DIR).context("unable to create parent cache dir")?;
+
+        let file = std::fs::OpenOptions::new()
+            .read(true)
+            .write(true)
+            .create(true)
+            .open(&path)
+            .with_context(|| format!("could not open path={}", path.display()))?;
+
+        let cache_size = cache_entries as usize * NODE_BYTES * DEGREE;
+        file.set_len(cache_size as u64)
+            .with_context(|| format!("failed to set length: {}", cache_size))?;
+
+        let mut data = unsafe {
+            memmap::MmapOptions::new()
+                .map_mut(&file)
+                .with_context(|| format!("could not mmap path={}", path.display()))?
+        };
+
+        data.par_chunks_mut(DEGREE * NODE_BYTES)
+            .enumerate()
+            .try_for_each(|(node, entry)| -> Result<()> {
+                let mut parents = [0u32; DEGREE];
+                graph
+                    .base_graph()
+                    .parents(node, &mut parents[..BASE_DEGREE])?;
+                graph.generate_expanded_parents(node, &mut parents[BASE_DEGREE..]);
+
+                LittleEndian::write_u32_into(&parents, entry);
+                Ok(())
+            })?;
+
+        info!("parent cache: generated");
+        data.flush().context("failed to flush parent cache")?;
+        drop(data);
+
+        info!("parent cache: written to disk");
+
+        Ok(ParentCache {
+            cache: CacheData::open(0, len, &path)?,
+            path,
+            num_cache_entries: cache_entries,
+        })
+    }
+
+    /// Read a single cache element at position `node`.
+    pub fn read(&mut self, node: u32) -> Result<[u32; DEGREE]> {
+        if self.cache.contains(node) {
+            return Ok(self.cache.read(node));
+        }
+
+        // not in memory, shift cache
+        ensure!(
+            node >= self.cache.offset + self.cache.len,
+            "cache must be read in ascending order {} < {} + {}",
+            node,
+            self.cache.offset,
+            self.cache.len,
+        );
+
+        // Shift cache by its current size.
+        let new_offset =
+            (self.num_cache_entries - self.cache.len).min(self.cache.offset + self.cache.len);
+        self.cache.shift(new_offset)?;
+
+        Ok(self.cache.read(node))
+    }
+
+    /// Resets the partial cache to the beginning.
+    pub fn reset(&mut self) -> Result<()> {
+        self.cache.reset()
+    }
+}
+
+fn cache_path<H, G>(cache_entries: u32, graph: &StackedGraph<H, G>) -> PathBuf
+where
+    H: Hasher,
+    G: Graph<H> + ParameterSetMetadata + Send + Sync,
+{
+    let mut hasher = Sha256::default();
+
+    hasher.input(H::name());
+    hasher.input(graph.identifier());
+    for key in &graph.feistel_keys {
+        hasher.input(key.to_le_bytes());
+    }
+    hasher.input(cache_entries.to_le_bytes());
+    let h = hasher.result();
+    PathBuf::from(PARENT_CACHE_DIR).join(format!(
+        "v{}-sdr-parent-{}.cache",
+        VERSION,
+        hex::encode(h),
+    ))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use crate::stacked::vanilla::graph::{StackedBucketGraph, EXP_DEGREE};
+    use storage_proofs_core::hasher::PoseidonHasher;
+
+    #[test]
+    fn test_read_full_range() {
+        let nodes = 24u32;
+        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
+            nodes as usize,
+            BASE_DEGREE,
+            EXP_DEGREE,
+            [0u8; 32],
+        )
+        .unwrap();
+
+        let mut cache = ParentCache::new(nodes, nodes, &graph).unwrap();
+
+        for node in 0..nodes {
+            let mut expected_parents = [0; DEGREE];
+            graph.parents(node as usize, &mut expected_parents).unwrap();
+            let parents = cache.read(node).unwrap();
+
+            assert_eq!(expected_parents, parents);
+        }
+    }
+
+    #[test]
+    fn test_read_partial_range() {
+        let nodes = 48u32;
+        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(
+            nodes as usize,
+            BASE_DEGREE,
+            EXP_DEGREE,
+            [0u8; 32],
+        )
+        .unwrap();
+
+        let mut half_cache = ParentCache::new(nodes / 2, nodes, &graph).unwrap();
+        let mut quarter_cache = ParentCache::new(nodes / 4, nodes, &graph).unwrap();
+
+        for node in 0..nodes {
+            let mut expected_parents = [0; DEGREE];
+            graph.parents(node as usize, &mut expected_parents).unwrap();
+
+            let parents = half_cache.read(node).unwrap();
+            assert_eq!(expected_parents, parents);
+
+            let parents = quarter_cache.read(node).unwrap();
+            assert_eq!(expected_parents, parents);
+
+            // some internal checks to make sure the cache works as expected
+            assert_eq!(
+                half_cache.cache.data.len() / DEGREE / NODE_BYTES,
+                nodes as usize / 2
+            );
+            assert_eq!(
+                quarter_cache.cache.data.len() / DEGREE / NODE_BYTES,
+                nodes as usize / 4
+            );
+        }
+
+        half_cache.reset().unwrap();
+        quarter_cache.reset().unwrap();
+
+        for node in 0..nodes {
+            let mut expected_parents = [0; DEGREE];
+            graph.parents(node as usize, &mut expected_parents).unwrap();
+
+            let parents = half_cache.read(node).unwrap();
+            assert_eq!(expected_parents, parents);
+
+            let parents = quarter_cache.read(node).unwrap();
+            assert_eq!(expected_parents, parents);
+        }
+    }
+}
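The access contract, distilled from `ParentCache::read` and the tests above: reads must arrive in ascending node order; a read past the current window shifts the mmap forward by one window length (clamped to the end of the file), and going backwards is only possible via `reset()`. A sketch of the single access pattern labeling needs, written as it could appear inside this module:

```rust
// One full ascending sweep per layer (sketch; `ParentCache` and `Result`
// as defined in this file).
fn sweep_layer(cache: &mut ParentCache, total_nodes: u32) -> Result<()> {
    for node in 0..total_nodes {
        let _parents = cache.read(node)?; // window hit, or a forward shift
    }
    cache.reset() // rewind to node 0 before sweeping the next layer
}
```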
diff --git a/storage-proofs/porep/src/stacked/vanilla/create_label.rs b/storage-proofs/porep/src/stacked/vanilla/create_label.rs
index 65f8b5ec8..1299225df 100644
--- a/storage-proofs/porep/src/stacked/vanilla/create_label.rs
+++ b/storage-proofs/porep/src/stacked/vanilla/create_label.rs
@@ -10,10 +10,11 @@ use storage_proofs_core::{
     util::{data_at_node_offset, NODE_SIZE},
 };

-use super::graph::StackedBucketGraph;
+use super::{cache::ParentCache, graph::StackedBucketGraph};

 pub fn create_label<H: Hasher>(
     graph: &StackedBucketGraph<H>,
+    cache: Option<&mut ParentCache>,
     replica_id: &H::Domain,
     layer_labels: &mut [u8],
     layer_index: usize,
@@ -34,7 +35,7 @@ pub fn create_label<H: Hasher>(
             _mm_prefetch(prev.as_ptr() as *const i8, _MM_HINT_T0);
         }

-        graph.copy_parents_data(node as u32, &*layer_labels, hasher)
+        graph.copy_parents_data(node as u32, &*layer_labels, hasher, cache)?
     } else {
         hasher.finish()
     };
@@ -52,6 +53,7 @@ pub fn create_label<H: Hasher>(

 pub fn create_label_exp<H: Hasher>(
     graph: &StackedBucketGraph<H>,
+    cache: Option<&mut ParentCache>,
     replica_id: &H::Domain,
     exp_parents_data: &[u8],
     layer_labels: &mut [u8],
@@ -73,7 +75,7 @@ pub fn create_label_exp<H: Hasher>(
             _mm_prefetch(prev.as_ptr() as *const i8, _MM_HINT_T0);
         }

-        graph.copy_parents_data_exp(node as u32, &*layer_labels, exp_parents_data, hasher)
+        graph.copy_parents_data_exp(node as u32, &*layer_labels, exp_parents_data, hasher, cache)?
     } else {
         hasher.finish()
     };
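A note on the new `cache: Option<&mut ParentCache>` parameter: it is taken by value, and `Option<&mut T>` is not `Copy`, so a caller that loops (as `generate_labels` does in the proof.rs hunk at the end of this diff) must re-borrow with `cache.as_mut()` on every iteration rather than moving the option into the first call. A standalone illustration, with a plain `Vec` standing in for `ParentCache`:

```rust
// `Option<&mut T>` moves when passed by value; `as_mut()` produces a fresh
// re-borrow of the owned Option each time around the loop.
fn label_one(_cache: Option<&mut Vec<u32>>) {}

fn main() {
    let mut cache: Option<Vec<u32>> = Some(Vec::new());
    for _node in 0..4 {
        label_one(cache.as_mut()); // fresh Option<&mut Vec<u32>> each pass
    }
}
```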
diff --git a/storage-proofs/porep/src/stacked/vanilla/graph.rs b/storage-proofs/porep/src/stacked/vanilla/graph.rs
index deabb9e63..4b76ba63a 100644
--- a/storage-proofs/porep/src/stacked/vanilla/graph.rs
+++ b/storage-proofs/porep/src/stacked/vanilla/graph.rs
@@ -8,8 +8,6 @@ use std::arch::x86_64::*;

 use anyhow::ensure;
 use log::info;
-use once_cell::sync::OnceCell;
-use rayon::prelude::*;
 use sha2raw::Sha256;
 use storage_proofs_core::{
     crypto::{
@@ -26,89 +24,12 @@ use storage_proofs_core::{
     util::NODE_SIZE,
 };

+use super::cache::ParentCache;
+
 /// The expansion degree used for Stacked Graphs.
 pub const EXP_DEGREE: usize = 8;

-const DEGREE: usize = BASE_DEGREE + EXP_DEGREE;
-
-/// Returns a reference to the parent cache, initializing it lazily the first time this is called.
-fn parent_cache<H, G>(
-    cache_entries: u32,
-    graph: &StackedGraph<H, G>,
-) -> Result<&'static ParentCache>
-where
-    H: Hasher,
-    G: Graph<H> + ParameterSetMetadata + Send + Sync,
-{
-    static INSTANCE_32_GIB: OnceCell<ParentCache> = OnceCell::new();
-    static INSTANCE_64_GIB: OnceCell<ParentCache> = OnceCell::new();
-
-    const NODE_GIB: u32 = (1024 * 1024 * 1024) / NODE_SIZE as u32;
-    ensure!(
-        ((cache_entries == 32 * NODE_GIB) || (cache_entries == 64 * NODE_GIB)),
-        "Cache is only available for 32GiB and 64GiB sectors"
-    );
-    info!("using parent_cache[{}]", cache_entries);
-    if cache_entries == 32 * NODE_GIB {
-        Ok(INSTANCE_32_GIB.get_or_init(|| {
-            ParentCache::new(cache_entries, graph).expect("failed to fill 32GiB cache")
-        }))
-    } else {
-        Ok(INSTANCE_64_GIB.get_or_init(|| {
-            ParentCache::new(cache_entries, graph).expect("failed to fill 64GiB cache")
-        }))
-    }
-}
-
-// StackedGraph will hold two different (but related) `ParentCache`,
-#[derive(Debug, Clone)]
-struct ParentCache {
-    /// This is a large list of fixed (parent) sized arrays.
-    /// `Vec<Vec<u32>>` was showing quite a large memory overhead, so this is laid out as a fixed boxed slice of memory.
-    cache: Box<[u32]>,
-}
-
-impl ParentCache {
-    pub fn new<H, G>(cache_entries: u32, graph: &StackedGraph<H, G>) -> Result<Self>
-    where
-        H: Hasher,
-        G: Graph<H> + ParameterSetMetadata + Send + Sync,
-    {
-        info!("filling parents cache");
-        let mut cache = vec![0u32; DEGREE * cache_entries as usize];
-
-        let base_degree = BASE_DEGREE;
-        let exp_degree = EXP_DEGREE;
-
-        cache
-            .par_chunks_mut(DEGREE)
-            .enumerate()
-            .try_for_each(|(node, entry)| -> Result<()> {
-                graph
-                    .base_graph()
-                    .parents(node, &mut entry[..base_degree])?;
-                graph.generate_expanded_parents(
-                    node,
-                    &mut entry[base_degree..base_degree + exp_degree],
-                );
-                Ok(())
-            })?;
-
-        info!("cache filled");
-
-        Ok(ParentCache {
-            cache: cache.into_boxed_slice(),
-        })
-    }
-
-    /// Read a single cache element at position `node`.
-    #[inline]
-    pub fn read(&self, node: u32) -> &[u32] {
-        let start = node as usize * DEGREE;
-        let end = start + DEGREE;
-        &self.cache[start..end]
-    }
-}
+pub(crate) const DEGREE: usize = BASE_DEGREE + EXP_DEGREE;

 #[derive(Clone)]
 pub struct StackedGraph<H, G>
@@ -121,7 +42,6 @@ where
     pub(crate) feistel_keys: [feistel::Index; 4],
     feistel_precomputed: FeistelPrecomputed,
     id: String,
-    cache: Option<&'static ParentCache>,
     _h: PhantomData<H>,
 }

@@ -136,7 +56,6 @@ where
             .field("base_graph", &self.base_graph)
             .field("feistel_precomputed", &self.feistel_precomputed)
             .field("id", &self.id)
-            .field("cache", &self.cache)
             .finish()
     }
 }
@@ -188,8 +107,6 @@ where
         assert_eq!(expansion_degree, EXP_DEGREE);
         ensure!(nodes <= std::u32::MAX as usize, "too many nodes");

-        let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching;
-
         let base_graph = match base_graph {
             Some(graph) => graph,
             None => G::new(nodes, base_degree, 0, porep_id)?,
@@ -198,27 +115,31 @@

         let feistel_keys = derive_feistel_keys(porep_id);

-        let mut res = StackedGraph {
+        let res = StackedGraph {
             base_graph,
             id: format!(
                 "stacked_graph::StackedGraph{{expansion_degree: {} base_graph: {} }}",
                 expansion_degree, bg_id,
             ),
             expansion_degree,
-            cache: None,
             feistel_keys,
             feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index),
             _h: PhantomData,
         };

-        if use_cache {
-            info!("using parents cache of unlimited size");
+        Ok(res)
+    }

-            let cache = parent_cache(nodes as u32, &res)?;
-            res.cache = Some(cache);
-        }
+    /// Returns a reference to the parent cache.
+    pub fn parent_cache(&self) -> Result<ParentCache> {
+        // Number of nodes to be cached in memory
+        let default_cache_size = settings::SETTINGS.lock().unwrap().sdr_parents_cache_size;
+        let cache_entries = self.size() as u32;
+        let cache_size = cache_entries.min(default_cache_size);

-        Ok(res)
+        info!("using parent_cache[{} / {}]", cache_size, cache_entries);
+
+        ParentCache::new(cache_size, cache_entries, self)
     }

     pub fn copy_parents_data_exp(
@@ -227,27 +148,34 @@ where
         &self,
         node: u32,
         base_data: &[u8],
         exp_data: &[u8],
         hasher: Sha256,
-    ) -> [u8; 32] {
-        if let Some(cache) = self.cache {
-            let cache_parents = cache.read(node as u32);
-            self.copy_parents_data_inner_exp(&cache_parents, base_data, exp_data, hasher)
+        mut cache: Option<&mut ParentCache>,
+    ) -> Result<[u8; 32]> {
+        if let Some(ref mut cache) = cache {
+            let cache_parents = cache.read(node as u32)?;
+            Ok(self.copy_parents_data_inner_exp(&cache_parents, base_data, exp_data, hasher))
         } else {
             let mut cache_parents = [0u32; DEGREE];
             self.parents(node as usize, &mut cache_parents[..]).unwrap();
-            self.copy_parents_data_inner_exp(&cache_parents, base_data, exp_data, hasher)
+            Ok(self.copy_parents_data_inner_exp(&cache_parents, base_data, exp_data, hasher))
         }
     }

-    pub fn copy_parents_data(&self, node: u32, base_data: &[u8], hasher: Sha256) -> [u8; 32] {
-        if let Some(cache) = self.cache {
-            let cache_parents = cache.read(node as u32);
-            self.copy_parents_data_inner(&cache_parents, base_data, hasher)
+    pub fn copy_parents_data(
+        &self,
+        node: u32,
+        base_data: &[u8],
+        hasher: Sha256,
+        mut cache: Option<&mut ParentCache>,
+    ) -> Result<[u8; 32]> {
+        if let Some(ref mut cache) = cache {
+            let cache_parents = cache.read(node as u32)?;
+            Ok(self.copy_parents_data_inner(&cache_parents, base_data, hasher))
         } else {
             let mut cache_parents = [0u32; DEGREE];
             self.parents(node as usize, &mut cache_parents[..]).unwrap();
-            self.copy_parents_data_inner(&cache_parents, base_data, hasher)
+            Ok(self.copy_parents_data_inner(&cache_parents, base_data, hasher))
         }
     }
@@ -362,20 +290,15 @@ where
     #[inline]
     fn parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {
-        if let Some(cache) = self.cache {
-            // Read from the cache
-            let cache_parents = cache.read(node as u32);
-            parents.copy_from_slice(cache_parents);
-        } else {
-            self.base_parents(node, &mut parents[..self.base_graph().degree()])?;
-
-            // expanded_parents takes raw_node
-            self.expanded_parents(
-                node,
-                &mut parents[self.base_graph().degree()
-                    ..self.base_graph().degree() + self.expansion_degree()],
-            );
-        }
+        self.base_parents(node, &mut parents[..self.base_graph().degree()])?;
+
+        // expanded_parents takes raw_node
+        self.expanded_parents(
+            node,
+            &mut parents
+                [self.base_graph().degree()..self.base_graph().degree() + self.expansion_degree()],
+        )?;
+
         Ok(())
     }
@@ -450,7 +373,7 @@ where
         // back this function in the `reversed` direction).
     }

-    fn generate_expanded_parents(&self, node: usize, expanded_parents: &mut [u32]) {
+    pub fn generate_expanded_parents(&self, node: usize, expanded_parents: &mut [u32]) {
         debug_assert_eq!(expanded_parents.len(), self.expansion_degree);
         for (i, el) in expanded_parents.iter_mut().enumerate() {
             *el = self.correspondent(node, i);
@@ -475,30 +398,18 @@ where
     }

     pub fn base_parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {
-        if let Some(cache) = self.cache {
-            // Read from the cache
-            let cache_parents = cache.read(node as u32);
-            parents.copy_from_slice(&cache_parents[..self.base_graph().degree()]);
-            Ok(())
-        } else {
-            // No cache usage, generate on demand.
-            self.base_graph().parents(node, parents)
-        }
+        // No cache usage, generate on demand.
+        self.base_graph().parents(node, parents)
     }

     /// Assign `self.expansion_degree` parents to `node` using an invertible permutation
     /// that is applied one way for the forward layers and one way for the reversed
     /// ones.
     #[inline]
-    pub fn expanded_parents(&self, node: usize, parents: &mut [u32]) {
-        if let Some(cache) = self.cache {
-            // Read from the cache
-            let cache_parents = cache.read(node as u32);
-            parents.copy_from_slice(&cache_parents[self.base_graph().degree()..]);
-        } else {
-            // No cache usage, generate on demand.
-            self.generate_expanded_parents(node, parents);
-        }
+    pub fn expanded_parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {
+        // No cache usage, generate on demand.
+        self.generate_expanded_parents(node, parents);
+        Ok(())
+    }
 }
diff --git a/storage-proofs/porep/src/stacked/vanilla/mod.rs b/storage-proofs/porep/src/stacked/vanilla/mod.rs
index a1a7b97d9..306f50c89 100644
--- a/storage-proofs/porep/src/stacked/vanilla/mod.rs
+++ b/storage-proofs/porep/src/stacked/vanilla/mod.rs
@@ -1,6 +1,7 @@
 #[macro_use]
 mod macros;

+mod cache;
 mod challenges;
 mod column;
 mod column_proof;
diff --git a/storage-proofs/porep/src/stacked/vanilla/proof.rs b/storage-proofs/porep/src/stacked/vanilla/proof.rs
index 1bcd33413..87d646dfc 100644
--- a/storage-proofs/porep/src/stacked/vanilla/proof.rs
+++ b/storage-proofs/porep/src/stacked/vanilla/proof.rs
@@ -100,7 +100,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tree, G>
         let get_exp_parents_columns = |x: usize| -> Result<Vec<Column<Tree::Hasher>>> {
             let mut parents = vec![0; graph.expansion_degree()];
-            graph.expanded_parents(x, &mut parents);
+            graph.expanded_parents(x, &mut parents)?;

             parents.iter().map(|parent| t_aux.column(*parent)).collect()
         };
@@ -301,18 +301,36 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tree, G>
         // NOTE: this means we currently keep 2x sector size around, to improve speed.
         let mut labels_buffer = vec![0u8; 2 * layer_size];

+        let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching;
+        let mut cache = if use_cache {
+            Some(graph.parent_cache()?)
+        } else {
+            None
+        };
+
         for layer in 1..=layers {
             info!("generating layer: {}", layer);

+            if let Some(ref mut cache) = cache {
+                cache.reset()?;
+            }
+
             if layer == 1 {
                 let layer_labels = &mut labels_buffer[..layer_size];
                 for node in 0..graph.size() {
-                    create_label(graph, replica_id, layer_labels, layer, node)?;
+                    create_label(graph, cache.as_mut(), replica_id, layer_labels, layer, node)?;
                 }
             } else {
                 let (layer_labels, exp_labels) = labels_buffer.split_at_mut(layer_size);
                 for node in 0..graph.size() {
-                    create_label_exp(graph, replica_id, exp_labels, layer_labels, layer, node)?;
+                    create_label_exp(
+                        graph,
+                        cache.as_mut(),
+                        replica_id,
+                        exp_labels,
+                        layer_labels,
+                        layer,
+                        node,
+                    )?;
                 }
             }
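Finally, this diff does not touch how `Settings` is populated, so the new knob should be reachable through the crate's existing configuration path. Assuming that loader still honors `FIL_PROOFS_`-prefixed environment variables (an assumption about unchanged code in settings.rs, worth verifying), operators would interact with it roughly like this:

```rust
// Hypothetical overrides (env spelling assumed, see note above):
//   FIL_PROOFS_MAXIMIZE_CACHING=false       -> label without a parent cache
//   FIL_PROOFS_SDR_PARENTS_CACHE_SIZE=4096  -> widen the resident window
use storage_proofs_core::settings;

fn main() {
    let s = settings::SETTINGS.lock().unwrap();
    // Defaults introduced by this diff (absent overrides):
    println!("maximize_caching = {}", s.maximize_caching); // true
    println!("sdr_parents_cache_size = {}", s.sdr_parents_cache_size); // 2048
}
```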