From d14a184b3b5ae333e8c58a486b5ad08ec341d3d3 Mon Sep 17 00:00:00 2001
From: Ashwin Sekar
Date: Wed, 10 Apr 2024 15:46:33 -0700
Subject: [PATCH] blockstore: scaffolding for chained merkle root conflict
 detection (#719)

* blockstore: scaffolding for chained merkle root conflict detection

* pr feedback: use dummy feature key until final plumbing

(cherry picked from commit 411fdc93125f82b36415a2252fc534defecee4d5)

# Conflicts:
#	sdk/src/feature_set.rs
---
 core/src/window_service.rs    |  23 ++
 ledger/src/blockstore.rs      | 431 +++++++++++++++++++++++++++++++++-
 ledger/src/blockstore_db.rs   |   2 +
 ledger/src/blockstore_meta.rs |   8 +
 ledger/src/shred.rs           |   8 +-
 sdk/src/feature_set.rs        |   9 +
 6 files changed, 467 insertions(+), 14 deletions(-)

diff --git a/core/src/window_service.rs b/core/src/window_service.rs
index 504776db1e1a25..fdbc1894e804bd 100644
--- a/core/src/window_service.rs
+++ b/core/src/window_service.rs
@@ -169,6 +169,11 @@ fn run_check_duplicate(
             shred_slot,
             &root_bank,
         );
+        let chained_merkle_conflict_duplicate_proofs = cluster_nodes::check_feature_activation(
+            &feature_set::chained_merkle_conflict_duplicate_proofs::id(),
+            shred_slot,
+            &root_bank,
+        );
         let (shred1, shred2) = match shred {
             PossibleDuplicateShred::LastIndexConflict(shred, conflict)
             | PossibleDuplicateShred::ErasureConflict(shred, conflict) => {
@@ -196,6 +201,24 @@ fn run_check_duplicate(
                     return Ok(());
                 }
             }
+            PossibleDuplicateShred::ChainedMerkleRootConflict(shred, conflict) => {
+                if chained_merkle_conflict_duplicate_proofs {
+                    // Although this proof can be immediately stored on detection, we wait until
+                    // here in order to check the feature flag, as storage in blockstore can
+                    // preclude the detection of other duplicate proofs in this slot
+                    if blockstore.has_duplicate_shreds_in_slot(shred_slot) {
+                        return Ok(());
+                    }
+                    blockstore.store_duplicate_slot(
+                        shred_slot,
+                        conflict.clone(),
+                        shred.clone().into_payload(),
+                    )?;
+                    (shred, conflict)
+                } else {
+                    return Ok(());
+                }
+            }
             PossibleDuplicateShred::Exists(shred) => {
                 // Unlike the other cases we have to wait until here to decide to handle the duplicate and store
                 // in blockstore. This is because the duplicate could have been part of the same insert batch,
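[Review note] The ChainedMerkleRootConflict arm above intentionally defers the blockstore write until after the feature gate: storing a proof at detection time would make has_duplicate_shreds_in_slot() return true and suppress any other duplicate proof for the slot. A minimal standalone sketch of that gate-then-store ordering; Proof, Store, and MemStore here are hypothetical stand-ins, not the real blockstore API:

    // Sketch only: illustrates the ordering, not the production types.
    struct Proof { slot: u64, shred: Vec<u8>, conflict: Vec<u8> }

    trait Store {
        fn has_proof(&self, slot: u64) -> bool;
        fn put_proof(&mut self, proof: Proof);
    }

    fn handle_chained_conflict<S: Store>(
        store: &mut S,
        feature_active: bool, // chained_merkle_conflict_duplicate_proofs
        proof: Proof,
    ) -> Option<Proof> {
        // Feature-gated: drop the proof entirely until the flag activates.
        if !feature_active {
            return None;
        }
        // Only one duplicate proof is kept per slot; storing eagerly at
        // detection time could shadow a proof found later in the same slot.
        if store.has_proof(proof.slot) {
            return None;
        }
        store.put_proof(Proof {
            slot: proof.slot,
            shred: proof.shred.clone(),
            conflict: proof.conflict.clone(),
        });
        Some(proof) // forwarded onward, mirroring `(shred, conflict)`
    }

    struct MemStore(std::collections::HashMap<u64, Proof>);
    impl Store for MemStore {
        fn has_proof(&self, slot: u64) -> bool { self.0.contains_key(&slot) }
        fn put_proof(&mut self, proof: Proof) { self.0.insert(proof.slot, proof); }
    }

    fn main() {
        let mut store = MemStore(std::collections::HashMap::new());
        // Feature off: nothing stored, nothing forwarded.
        let p = Proof { slot: 7, shred: vec![1], conflict: vec![2] };
        assert!(handle_chained_conflict(&mut store, false, p).is_none());
        // Feature on: stored once; a second proof for the slot is dropped.
        let p = Proof { slot: 7, shred: vec![1], conflict: vec![2] };
        assert!(handle_chained_conflict(&mut store, true, p).is_some());
        let p = Proof { slot: 7, shred: vec![3], conflict: vec![4] };
        assert!(handle_chained_conflict(&mut store, true, p).is_none());
    }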
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 5c086d057dae38..3e045b422626e2 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -67,11 +67,15 @@ use {
         borrow::Cow,
         cell::RefCell,
         cmp,
-        collections::{hash_map::Entry as HashMapEntry, BTreeSet, HashMap, HashSet, VecDeque},
+        collections::{
+            btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, BTreeSet,
+            HashMap, HashSet, VecDeque,
+        },
         convert::TryInto,
         fmt::Write,
         fs,
         io::{Error as IoError, ErrorKind},
+        ops::Bound,
         path::{Path, PathBuf},
         rc::Rc,
         sync::{
@@ -146,6 +150,7 @@ pub enum PossibleDuplicateShred {
     LastIndexConflict(/* original */ Shred, /* conflict */ Vec<u8>), // The index of this shred conflicts with `slot_meta.last_index`
     ErasureConflict(/* original */ Shred, /* conflict */ Vec<u8>), // The coding shred has a conflict in the erasure_meta
     MerkleRootConflict(/* original */ Shred, /* conflict */ Vec<u8>), // Merkle root conflict in the same fec set
+    ChainedMerkleRootConflict(/* original */ Shred, /* conflict */ Vec<u8>), // Merkle root chaining conflict with previous fec set
 }
 
 impl PossibleDuplicateShred {
@@ -155,6 +160,7 @@ impl PossibleDuplicateShred {
             Self::LastIndexConflict(shred, _) => shred.slot(),
             Self::ErasureConflict(shred, _) => shred.slot(),
             Self::MerkleRootConflict(shred, _) => shred.slot(),
+            Self::ChainedMerkleRootConflict(shred, _) => shred.slot(),
         }
     }
 }
@@ -491,6 +497,85 @@ impl Blockstore {
         self.erasure_meta_cf.get((slot, u64::from(fec_set_index)))
     }
 
+    #[cfg(test)]
+    fn put_erasure_meta(
+        &self,
+        erasure_set: ErasureSetId,
+        erasure_meta: &ErasureMeta,
+    ) -> Result<()> {
+        let (slot, fec_set_index) = erasure_set.store_key();
+        self.erasure_meta_cf.put_bytes(
+            (slot, u64::from(fec_set_index)),
+            &bincode::serialize(erasure_meta).unwrap(),
+        )
+    }
+
+    /// Attempts to find the previous consecutive erasure set for `erasure_set`.
+    ///
+    /// Checks the map `erasure_metas`; if not present, scans the blockstore. Returns None
+    /// if the previous consecutive erasure set is not present in either.
+    fn previous_erasure_set(
+        &self,
+        erasure_set: ErasureSetId,
+        erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
+    ) -> Result<Option<ErasureSetId>> {
+        let (slot, fec_set_index) = erasure_set.store_key();
+
+        // Check the previous entry from the in memory map to see if it is the consecutive
+        // set to `erasure_set`
+        let candidate_erasure_entry = erasure_metas
+            .range((
+                Bound::Included(ErasureSetId::new(slot, 0)),
+                Bound::Excluded(erasure_set),
+            ))
+            .next_back();
+        let candidate_erasure_set = candidate_erasure_entry
+            .filter(|(_, candidate_erasure_meta)| {
+                candidate_erasure_meta.as_ref().next_fec_set_index() == Some(fec_set_index)
+            })
+            .map(|(candidate_erasure_set, _)| *candidate_erasure_set);
+        if candidate_erasure_set.is_some() {
+            return Ok(candidate_erasure_set);
+        }
+
+        // Consecutive set was not found in memory, scan blockstore for a potential candidate
+        let Some(((_, candidate_fec_set_index), candidate_erasure_meta)) = self
+            .erasure_meta_cf
+            .iter(IteratorMode::From(
+                (slot, u64::from(fec_set_index)),
+                IteratorDirection::Reverse,
+            ))?
+            // `find` here, to skip the first element in case the erasure meta for fec_set_index is already present
+            .find(|((_, candidate_fec_set_index), _)| {
+                *candidate_fec_set_index != u64::from(fec_set_index)
+            })
+            // Do not consider sets from the previous slot
+            .filter(|((candidate_slot, _), _)| *candidate_slot == slot)
+        else {
+            // No potential candidates
+            return Ok(None);
+        };
+        let candidate_fec_set_index = u32::try_from(candidate_fec_set_index)
+            .expect("fec_set_index from a previously inserted shred should fit in u32");
+        let candidate_erasure_set = ErasureSetId::new(slot, candidate_fec_set_index);
+        let candidate_erasure_meta: ErasureMeta = deserialize(candidate_erasure_meta.as_ref())?;
+
+        // Add this candidate to erasure metas to avoid blockstore lookup in future
+        erasure_metas.insert(
+            candidate_erasure_set,
+            WorkingEntry::Clean(candidate_erasure_meta),
+        );
+
+        // Check if this is actually the consecutive erasure set
+        let Some(next_fec_set_index) = candidate_erasure_meta.next_fec_set_index() else {
+            return Err(BlockstoreError::InvalidErasureConfig);
+        };
+        if next_fec_set_index == fec_set_index {
+            return Ok(Some(candidate_erasure_set));
+        }
+        Ok(None)
+    }
+
     fn merkle_root_meta(&self, erasure_set: ErasureSetId) -> Result<Option<MerkleRootMeta>> {
         self.merkle_root_meta_cf.get(erasure_set.store_key())
     }
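[Review note] previous_erasure_set() is two-tiered: a BTreeMap::range scan over the in-memory working set first, then a reverse column-family iteration as fallback. Either way the consecutiveness test reduces to prev.set_index + prev.num_data == current.fec_set_index. A self-contained sketch of the in-memory tier under simplified types (SetId tuples stand in for ErasureSetId, and a plain BTreeMap stands in for the erasure_meta working set):

    use std::collections::BTreeMap;
    use std::ops::Bound;

    type SetId = (u64 /* slot */, u32 /* fec_set_index */);

    // Where the following set must start for two sets to be consecutive.
    fn next_index(set: SetId, num_data: u32) -> u32 {
        set.1 + num_data
    }

    fn previous_consecutive(
        cache: &BTreeMap<SetId, u32 /* num_data */>,
        target: SetId,
    ) -> Option<SetId> {
        // Largest entry in the same slot strictly below `target`; because keys
        // sort by (slot, fec_set_index), this is the nearest earlier set.
        let (&cand, &num_data) = cache
            .range((Bound::Included((target.0, 0)), Bound::Excluded(target)))
            .next_back()?;
        // Consecutive only if the candidate ends exactly where `target` begins.
        (next_index(cand, num_data) == target.1).then_some(cand)
    }

    fn main() {
        let mut cache = BTreeMap::new();
        cache.insert((2, 0), 32u32); // set at index 0 holding 32 data shreds
        assert_eq!(previous_consecutive(&cache, (2, 32)), Some((2, 0)));
        assert_eq!(previous_consecutive(&cache, (2, 40)), None); // gap
        assert_eq!(previous_consecutive(&cache, (3, 32)), None); // other slot
    }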
@@ -763,7 +848,7 @@ impl Blockstore {
 
     fn try_shred_recovery(
         &self,
-        erasure_metas: &HashMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
+        erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
         index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
         prev_inserted_shreds: &HashMap<ShredId, Shred>,
         reed_solomon_cache: &ReedSolomonCache,
@@ -882,7 +967,7 @@ impl Blockstore {
         let mut write_batch = self.db.batch()?;
 
         let mut just_inserted_shreds = HashMap::with_capacity(shreds.len());
-        let mut erasure_metas = HashMap::new();
+        let mut erasure_metas = BTreeMap::new();
         let mut merkle_root_metas = HashMap::new();
         let mut slot_meta_working_set = HashMap::new();
         let mut index_working_set = HashMap::new();
@@ -1236,7 +1321,7 @@ impl Blockstore {
     fn check_insert_coding_shred(
         &self,
         shred: Shred,
-        erasure_metas: &mut HashMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
+        erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
         merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
         index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
         write_batch: &mut WriteBatch,
@@ -1444,7 +1529,7 @@ impl Blockstore {
     fn check_insert_data_shred(
         &self,
         shred: Shred,
-        erasure_metas: &mut HashMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
+        erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
         merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
         index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
         slot_meta_working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
@@ -1539,7 +1624,7 @@ impl Blockstore {
         just_inserted_shreds.insert(shred.id(), shred);
         index_meta_working_set_entry.did_insert_occur = true;
         slot_meta_entry.did_insert_occur = true;
-        if let HashMapEntry::Vacant(entry) = erasure_metas.entry(erasure_set) {
+        if let BTreeMapEntry::Vacant(entry) = erasure_metas.entry(erasure_set) {
             if let Some(meta) = self.erasure_meta(erasure_set).unwrap() {
                 entry.insert(WorkingEntry::Clean(meta));
             }
@@ -1655,6 +1740,177 @@ impl Blockstore {
         false
     }
 
+    /// Returns true if there is no chaining conflict between
+    /// the `shred` and `merkle_root_meta` of the next FEC set,
+    /// or if shreds from the next set are yet to be received.
+    ///
+    /// Otherwise return false and add duplicate proof to
+    /// `duplicate_shreds`.
+    ///
+    /// This is intended to be used right after `shred`'s `erasure_meta`
+    /// has been created for the first time and loaded into `erasure_metas`.
+    #[allow(dead_code)]
+    fn check_forward_chained_merkle_root_consistency(
+        &self,
+        shred: &Shred,
+        just_inserted_shreds: &HashMap<ShredId, Shred>,
+        erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
+        merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
+        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
+    ) -> bool {
+        let slot = shred.slot();
+        let erasure_set = shred.erasure_set();
+        let erasure_meta_entry = erasure_metas.get(&erasure_set).expect(
+            "Checking chained merkle root consistency on an erasure set {erasure_set:?}
+             that is not loaded in memory, programmer error",
+        );
+        let erasure_meta = erasure_meta_entry.as_ref();
+        debug_assert!(erasure_meta.check_coding_shred(shred));
+
+        // If a shred from the next fec set has already been inserted, check the chaining
+        let Some(next_fec_set_index) = erasure_meta.next_fec_set_index() else {
+            error!("Invalid erasure meta, unable to compute next fec set index {erasure_meta:?}");
+            return false;
+        };
+        let next_erasure_set = ErasureSetId::new(slot, next_fec_set_index);
+        let next_merkle_root_meta = match merkle_root_metas.entry(next_erasure_set) {
+            HashMapEntry::Vacant(entry) => self
+                .merkle_root_meta(next_erasure_set)
+                .unwrap()
+                .map(|meta| entry.insert(WorkingEntry::Clean(meta))),
+            HashMapEntry::Occupied(entry) => Some(entry.into_mut()),
+        };
+        if let Some(next_merkle_root_meta) = next_merkle_root_meta.as_deref().map(AsRef::as_ref) {
+            let next_shred_id = ShredId::new(
+                slot,
+                next_merkle_root_meta.first_received_shred_index(),
+                next_merkle_root_meta.first_received_shred_type(),
+            );
+            let next_shred =
+                Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, next_shred_id)
+                    .expect("Shred indicated by merkle root meta must exist")
+                    .into_owned();
+            let merkle_root = shred.merkle_root().ok();
+            let chained_merkle_root = shred::layout::get_chained_merkle_root(&next_shred);
+
+            if !self.check_chaining(merkle_root, chained_merkle_root) {
+                warn!(
+                    "Received conflicting chained merkle roots for slot: {slot},
+                    shred {erasure_set:?} type {:?} has merkle root {merkle_root:?}, however
+                    next fec set shred {next_erasure_set:?} type {:?} chains to merkle root
+                    {chained_merkle_root:?}. Reporting as duplicate",
+                    shred.shred_type(),
+                    next_merkle_root_meta.first_received_shred_type(),
+                );
+
+                if !self.has_duplicate_shreds_in_slot(shred.slot()) {
+                    duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
+                        shred.clone(),
+                        next_shred,
+                    ));
+                }
+                return false;
+            }
+        }
+
+        true
+    }
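[Review note] The forward check above and the backward check below enforce the same invariant from opposite directions: the merkle root of FEC set k must equal the chained merkle root recorded inside FEC set k+1. A standalone sketch of that invariant with simplified stand-in types (Hash32 and FecSet are not the real shred types):

    type Hash32 = [u8; 32];

    struct FecSet {
        merkle_root: Hash32,
        // Root of the previous set, as recorded inside this set's shreds.
        chained_merkle_root: Option<Hash32>, // None for legacy (unchained) shreds
    }

    // Forward check: given set k, inspect set k+1's recorded chained root.
    fn chains_forward(current: &FecSet, next: &FecSet) -> bool {
        match next.chained_merkle_root {
            None => true, // chained merkle shreds not enabled yet
            Some(root) => root == current.merkle_root,
        }
    }

    // Backward check: given set k, compare its recorded chained root
    // against set k-1's actual merkle root.
    fn chains_backward(prev: &FecSet, current: &FecSet) -> bool {
        match current.chained_merkle_root {
            None => true,
            Some(root) => root == prev.merkle_root,
        }
    }

    fn main() {
        let a = FecSet { merkle_root: [1; 32], chained_merkle_root: None };
        let b = FecSet { merkle_root: [2; 32], chained_merkle_root: Some([1; 32]) };
        let forged = FecSet { merkle_root: [3; 32], chained_merkle_root: Some([9; 32]) };
        assert!(chains_forward(&a, &b) && chains_backward(&a, &b));
        assert!(!chains_backward(&b, &forged)); // would yield a duplicate proof
    }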
+    /// Returns true if there is no chaining conflict between
+    /// the `shred` and `merkle_root_meta` of the previous FEC set,
+    /// or if shreds from the previous set are yet to be received.
+    ///
+    /// Otherwise return false and add duplicate proof to
+    /// `duplicate_shreds`.
+    ///
+    /// This is intended to be used right after `shred`'s `merkle_root_meta`
+    /// has been created for the first time.
+    #[allow(dead_code)]
+    fn check_backwards_chained_merkle_root_consistency(
+        &self,
+        shred: &Shred,
+        just_inserted_shreds: &HashMap<ShredId, Shred>,
+        erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
+        merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
+        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
+    ) -> bool {
+        let slot = shred.slot();
+        let erasure_set = shred.erasure_set();
+        let fec_set_index = shred.fec_set_index();
+
+        if fec_set_index == 0 {
+            // Although the first fec set chains to the last fec set of the parent block,
+            // if this chain is incorrect we do not know which block is the duplicate until votes
+            // are received. We instead delay this check until the block reaches duplicate
+            // confirmation.
+            return true;
+        }
+
+        // If a shred from the previous fec set has already been inserted, check the chaining.
+        // Since we cannot compute the previous fec set index, we check the in memory map, otherwise
+        // check the previous key from blockstore to see if it is consecutive with our current set.
+        let Some(prev_erasure_set) = self
+            .previous_erasure_set(erasure_set, erasure_metas)
+            .expect("Expect database operations to succeed")
+        else {
+            // No shreds from the previous erasure batch have been received,
+            // so nothing to check. Once the previous erasure batch is received,
+            // we will verify this chain through the forward check above.
+            return true;
+        };
+        let prev_merkle_root_meta_entry = match merkle_root_metas.entry(prev_erasure_set) {
+            HashMapEntry::Vacant(entry) => entry.insert(WorkingEntry::Clean(
+                self.merkle_root_meta(prev_erasure_set)
+                    .unwrap()
+                    .expect("merkle root meta must exist for erasure meta"),
+            )),
+            HashMapEntry::Occupied(entry) => entry.into_mut(),
+        };
+
+        let prev_merkle_root_meta = prev_merkle_root_meta_entry.as_ref();
+        let prev_shred_id = ShredId::new(
+            slot,
+            prev_merkle_root_meta.first_received_shred_index(),
+            prev_merkle_root_meta.first_received_shred_type(),
+        );
+        let prev_shred =
+            Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, prev_shred_id)
+                .expect("Shred indicated by merkle root meta must exist")
+                .into_owned();
+        let merkle_root = shred::layout::get_merkle_root(&prev_shred);
+        let chained_merkle_root = shred.chained_merkle_root().ok();
+
+        if !self.check_chaining(merkle_root, chained_merkle_root) {
+            warn!(
+                "Received conflicting chained merkle roots for slot: {slot},
+                shred {:?} type {:?} chains to merkle root {chained_merkle_root:?}, however
+                previous fec set shred {prev_erasure_set:?} type {:?} has merkle root {merkle_root:?}.
+                Reporting as duplicate",
+                shred.erasure_set(),
+                shred.shred_type(),
+                prev_merkle_root_meta.first_received_shred_type(),
+            );
+
+            if !self.has_duplicate_shreds_in_slot(shred.slot()) {
+                duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
+                    shred.clone(),
+                    prev_shred,
+                ));
+            }
+            return false;
+        }
+
+        true
+    }
+
+    /// Checks if the chained merkle root == merkle root
+    ///
+    /// Returns true if no conflict, or if chained merkle roots are not enabled
+    fn check_chaining(&self, merkle_root: Option<Hash>, chained_merkle_root: Option<Hash>) -> bool {
+        chained_merkle_root.is_none() // Chained merkle roots have not been enabled yet
+            || chained_merkle_root == merkle_root
+    }
+
     fn should_insert_data_shred(
         &self,
         shred: &Shred,
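[Review note] check_chaining() treats an absent chained root as vacuously consistent (pre-activation shreds carry none), while an absent merkle root conflicts with any present chained root. A tiny standalone rendering of its truth table, with Option<[u8; 32]> standing in for Option<Hash>:

    fn check_chaining(merkle_root: Option<[u8; 32]>, chained: Option<[u8; 32]>) -> bool {
        chained.is_none() || chained == merkle_root
    }

    fn main() {
        let (a, b) = (Some([1u8; 32]), Some([2u8; 32]));
        assert!(check_chaining(a, None)); // unchained shred: nothing to verify
        assert!(check_chaining(a, a));    // roots agree
        assert!(!check_chaining(a, b));   // conflict -> duplicate proof
        // A missing merkle_root (None) conflicts with any chained root,
        // since Some(_) != None.
        assert!(!check_chaining(None, a));
    }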
@@ -6939,7 +7195,7 @@ pub mod tests {
         let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10);
         let coding_shred = coding_shreds[index as usize].clone();
 
-        let mut erasure_metas = HashMap::new();
+        let mut erasure_metas = BTreeMap::new();
         let mut merkle_root_metas = HashMap::new();
         let mut index_working_set = HashMap::new();
         let mut just_received_shreds = HashMap::new();
@@ -7134,7 +7390,7 @@ pub mod tests {
             setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
         let data_shred = data_shreds[0].clone();
 
-        let mut erasure_metas = HashMap::new();
+        let mut erasure_metas = BTreeMap::new();
         let mut merkle_root_metas = HashMap::new();
         let mut index_working_set = HashMap::new();
         let mut just_received_shreds = HashMap::new();
@@ -7351,7 +7607,7 @@ pub mod tests {
             0, // version
         );
 
-        let mut erasure_metas = HashMap::new();
+        let mut erasure_metas = BTreeMap::new();
         let mut merkle_root_metas = HashMap::new();
         let mut index_working_set = HashMap::new();
         let mut just_received_shreds = HashMap::new();
@@ -9874,6 +10130,22 @@ pub mod tests {
         parent_slot: u64,
         num_entries: u64,
         fec_set_index: u32,
+    ) -> (Vec<Shred>, Vec<Shred>, Arc<Keypair>) {
+        setup_erasure_shreds_with_index_and_chained_merkle(
+            slot,
+            parent_slot,
+            num_entries,
+            fec_set_index,
+            Some(Hash::new_from_array(rand::thread_rng().gen())),
+        )
+    }
+
+    fn setup_erasure_shreds_with_index_and_chained_merkle(
+        slot: u64,
+        parent_slot: u64,
+        num_entries: u64,
+        fec_set_index: u32,
+        chained_merkle_root: Option<Hash>,
     ) -> (Vec<Shred>, Vec<Shred>, Arc<Keypair>) {
         let entries = make_slot_entries_with_transactions(num_entries);
         let leader_keypair = Arc::new(Keypair::new());
@@ -9882,8 +10154,7 @@ pub mod tests {
             &leader_keypair,
             &entries,
             true, // is_last_in_slot
-            // chained_merkle_root
-            Some(Hash::new_from_array(rand::thread_rng().gen())),
+            chained_merkle_root,
             fec_set_index, // next_shred_index
             fec_set_index, // next_code_index
             true, // merkle_variant
@@ -10795,4 +11066,142 @@ pub mod tests {
             assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
         }
     }
+
+    #[test]
+    fn test_previous_erasure_set() {
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+        let mut erasure_metas = BTreeMap::new();
+
+        let parent_slot = 0;
+        let prev_slot = 1;
+        let slot = 2;
+        let (data_shreds_0, coding_shreds_0, _) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, 0);
+        let erasure_set_0 = ErasureSetId::new(slot, 0);
+        let erasure_meta_0 =
+            ErasureMeta::from_coding_shred(coding_shreds_0.first().unwrap()).unwrap();
+
+        let prev_fec_set_index = data_shreds_0.len() as u32;
+        let (data_shreds_prev, coding_shreds_prev, _) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, prev_fec_set_index);
+        let erasure_set_prev = ErasureSetId::new(slot, prev_fec_set_index);
+        let erasure_meta_prev =
+            ErasureMeta::from_coding_shred(coding_shreds_prev.first().unwrap()).unwrap();
+
+        let (_, coding_shreds_prev_slot, _) =
+            setup_erasure_shreds_with_index(prev_slot, parent_slot, 10, prev_fec_set_index);
+        let erasure_set_prev_slot = ErasureSetId::new(prev_slot, prev_fec_set_index);
+        let erasure_meta_prev_slot =
+            ErasureMeta::from_coding_shred(coding_shreds_prev_slot.first().unwrap()).unwrap();
+
+        let fec_set_index = data_shreds_prev.len() as u32 + prev_fec_set_index;
+        let erasure_set = ErasureSetId::new(slot, fec_set_index);
+
+        // Blockstore is empty
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .unwrap(),
+            None
+        );
+
+        // Erasure metas does not contain the previous fec set, but only the one before that
+        erasure_metas.insert(erasure_set_0, WorkingEntry::Dirty(erasure_meta_0));
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .unwrap(),
+            None
+        );
+
+        // Both erasure metas and blockstore contain only the previous previous fec set
+        erasure_metas.insert(erasure_set_0, WorkingEntry::Clean(erasure_meta_0));
+        blockstore
+            .put_erasure_meta(erasure_set_0, &erasure_meta_0)
+            .unwrap();
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .unwrap(),
+            None
+        );
+
+        // Erasure metas contains the previous fec set, blockstore only contains the older one
+        erasure_metas.insert(erasure_set_prev, WorkingEntry::Dirty(erasure_meta_prev));
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .unwrap(),
+            Some(erasure_set_prev)
+        );
+
+        // Erasure metas only contains the older one, blockstore has the previous fec set
+        erasure_metas.remove(&erasure_set_prev);
+        blockstore
+            .put_erasure_meta(erasure_set_prev, &erasure_meta_prev)
+            .unwrap();
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .unwrap(),
+            Some(erasure_set_prev)
+        );
+
+        // Both contain the previous fec set
+        erasure_metas.insert(erasure_set_prev, WorkingEntry::Clean(erasure_meta_prev));
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .unwrap(),
+            Some(erasure_set_prev)
+        );
+
+        // Works even if the previous fec set has index 0
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set_prev, &mut erasure_metas)
+                .unwrap(),
+            Some(erasure_set_0)
+        );
+        erasure_metas.remove(&erasure_set_0);
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set_prev, &mut erasure_metas)
+                .unwrap(),
+            Some(erasure_set_0)
+        );
+
+        // Does not cross slot boundary
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+        erasure_metas.clear();
+        erasure_metas.insert(
+            erasure_set_prev_slot,
+            WorkingEntry::Dirty(erasure_meta_prev_slot),
+        );
+        assert_eq!(
+            erasure_meta_prev_slot.next_fec_set_index().unwrap(),
+            fec_set_index
+        );
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .unwrap(),
+            None,
+        );
+        erasure_metas.insert(
+            erasure_set_prev_slot,
+            WorkingEntry::Clean(erasure_meta_prev_slot),
+        );
+        blockstore
+            .put_erasure_meta(erasure_set_prev_slot, &erasure_meta_prev_slot)
+            .unwrap();
+        assert_eq!(
+            blockstore
+                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .unwrap(),
+            None,
+        );
+    }
 }
diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs
index 8b6b44edae61f6..ab7517453584a2 100644
--- a/ledger/src/blockstore_db.rs
+++ b/ledger/src/blockstore_db.rs
@@ -151,6 +151,8 @@ pub enum BlockstoreError {
     MissingTransactionMetadata,
     #[error("transaction-index overflow")]
     TransactionIndexOverflow,
+    #[error("invalid erasure config")]
+    InvalidErasureConfig,
 }
 
 pub type Result<T> = std::result::Result<T, BlockstoreError>;
diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs
index c8b5f6cb4fee99..c56453d88352bc 100644
--- a/ledger/src/blockstore_meta.rs
+++ b/ledger/src/blockstore_meta.rs
@@ -392,6 +392,14 @@ impl ErasureMeta {
         self.first_coding_index..self.first_coding_index + num_coding
     }
 
+    pub(crate) fn next_fec_set_index(&self) -> Option<u32> {
+        let num_data = u64::try_from(self.config.num_data).ok()?;
+        self.set_index
+            .checked_add(num_data)
+            .map(u32::try_from)?
+            .ok()
+    }
+
     pub(crate) fn status(&self, index: &Index) -> ErasureMetaStatus {
         use ErasureMetaStatus::*;
 
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index bb93b7628eed37..7ee66eeaf6be9c 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -292,10 +292,14 @@ impl ShredId {
 }
 
 /// Tuple which identifies erasure coding set that the shred belongs to.
-#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
 pub(crate) struct ErasureSetId(Slot, /*fec_set_index:*/ u32);
 
 impl ErasureSetId {
+    pub(crate) fn new(slot: Slot, fec_set_index: u32) -> Self {
+        Self(slot, fec_set_index)
+    }
+
     pub(crate) fn slot(&self) -> Slot {
         self.0
     }
@@ -310,7 +314,6 @@ impl ErasureSetId {
 macro_rules! dispatch {
     ($vis:vis fn $name:ident(&self $(, $arg:ident : $ty:ty)?) $(-> $out:ty)?) => {
         #[inline]
-        #[allow(dead_code)]
         $vis fn $name(&self $(, $arg:$ty)?) $(-> $out)? {
             match self {
                 Self::ShredCode(shred) => shred.$name($($arg, )?),
@@ -728,7 +731,6 @@ pub mod layout {
         }
     }
 
-    #[allow(dead_code)]
     pub(crate) fn get_chained_merkle_root(shred: &[u8]) -> Option<Hash> {
         let offset = match get_shred_variant(shred).ok()? {
             ShredVariant::LegacyCode | ShredVariant::LegacyData => None,
diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs
index 18e931efb74f9f..cc8852667e90cd 100644
--- a/sdk/src/feature_set.rs
+++ b/sdk/src/feature_set.rs
@@ -777,6 +777,10 @@ pub mod enable_gossip_duplicate_proof_ingestion {
     solana_sdk::declare_id!("FNKCMBzYUdjhHyPdsKG2LSmdzH8TCHXn3ytj8RNBS4nG");
 }
 
+pub mod chained_merkle_conflict_duplicate_proofs {
+    solana_sdk::declare_id!("mustrekeyVfuxJKNRGkyTDokLwWxx6kD2ZLsqQHaDD8");
+}
+
 pub mod enable_chained_merkle_shreds {
     solana_sdk::declare_id!("7uZBkJXJ1HkuP6R3MJfZs7mLwymBcDbKdqbF51ZWLier");
 }
@@ -976,6 +980,11 @@ lazy_static! {
         (enable_gossip_duplicate_proof_ingestion::id(), "enable gossip duplicate proof ingestion #32963"),
         (enable_chained_merkle_shreds::id(), "Enable chained Merkle shreds #34916"),
         (deprecate_unused_legacy_vote_plumbing::id(), "Deprecate unused legacy vote tx plumbing"),
+<<<<<<< HEAD
+=======
+        (enable_tower_sync_ix::id(), "Enable tower sync vote instruction"),
+        (chained_merkle_conflict_duplicate_proofs::id(), "generate duplicate proofs for chained merkle root conflicts"),
+>>>>>>> 411fdc9312 (blockstore: scaffolding for chained merkle root conflict detection (#719))
         /*************** ADD NEW FEATURES HERE ***************/
     ]
     .iter()
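[Review note] next_fec_set_index() in blockstore_meta.rs widens set_index to u64 for the addition and narrows the sum back to u32, returning None on overflow, which previous_erasure_set() then surfaces as BlockstoreError::InvalidErasureConfig. A standalone sketch of the same arithmetic; ErasureMetaSketch is a stand-in holding only the relevant fields:

    struct ErasureMetaSketch {
        set_index: u64,  // fec_set_index of this erasure set
        num_data: usize, // config.num_data: data shreds in the set
    }

    impl ErasureMetaSketch {
        fn next_fec_set_index(&self) -> Option<u32> {
            let num_data = u64::try_from(self.num_data).ok()?;
            self.set_index
                .checked_add(num_data) // None if the u64 addition overflows
                .map(u32::try_from)?   // Option<Result<u32, _>> -> Result<u32, _>
                .ok()                  // None if the sum does not fit in u32
        }
    }

    fn main() {
        let meta = ErasureMetaSketch { set_index: 32, num_data: 32 };
        assert_eq!(meta.next_fec_set_index(), Some(64));
        // A sum past u32::MAX cannot be a valid fec_set_index (shred indices
        // are u32), so the helper reports it as an invalid erasure config.
        let bad = ErasureMetaSketch { set_index: u64::from(u32::MAX), num_data: 1 };
        assert_eq!(bad.next_fec_set_index(), None);
    }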