diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 9868ddb72effac..1702ba9c6fb016 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -499,7 +499,7 @@ impl Blockstore {
     fn previous_erasure_set(
         &self,
         erasure_set: ErasureSetId,
-        erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
+        erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
     ) -> Result<Option<ErasureSetId>> {
         let (slot, fec_set_index) = erasure_set.store_key();
 
@@ -542,12 +542,6 @@ impl Blockstore {
         let candidate_erasure_set = ErasureSetId::new(slot, candidate_fec_set_index);
         let candidate_erasure_meta: ErasureMeta = deserialize(candidate_erasure_meta.as_ref())?;
-        // Add this candidate to erasure metas to avoid blockstore lookup in future
-        erasure_metas.insert(
-            candidate_erasure_set,
-            WorkingEntry::Clean(candidate_erasure_meta),
-        );
-
         // Check if this is actually the consecutive erasure set
         let Some(next_fec_set_index) = candidate_erasure_meta.next_fec_set_index() else {
             return Err(BlockstoreError::InvalidErasureConfig);
         };
@@ -1117,6 +1111,66 @@ impl Blockstore {
             &mut write_batch,
         )?;
 
+        for (erasure_set, working_erasure_meta) in erasure_metas.iter() {
+            if !working_erasure_meta.should_write() {
+                // Not a new erasure meta
+                continue;
+            }
+            let (slot, _) = erasure_set.store_key();
+            if self.has_duplicate_shreds_in_slot(slot) {
+                continue;
+            }
+            // First coding shred from this erasure batch, check the forward merkle root chaining
+            let erasure_meta = working_erasure_meta.as_ref();
+            let shred_id = ShredId::new(
+                slot,
+                erasure_meta
+                    .first_received_coding_shred_index()
+                    .expect("First received coding index must exist for all erasure metas"),
+                ShredType::Code,
+            );
+            let shred = just_inserted_shreds
+                .get(&shred_id)
+                .expect("Erasure meta was just created, initial shred must exist");
+
+            self.check_forward_chained_merkle_root_consistency(
+                shred,
+                erasure_meta,
+                &just_inserted_shreds,
+                &mut merkle_root_metas,
+                &mut duplicate_shreds,
+            );
+        }
+
+        for (erasure_set, working_merkle_root_meta) in merkle_root_metas.iter() {
+            if !working_merkle_root_meta.should_write() {
+                // Not a new merkle root meta
+                continue;
+            }
+            let (slot, _) = erasure_set.store_key();
+            if self.has_duplicate_shreds_in_slot(slot) {
+                continue;
+            }
+            // First shred from this erasure batch, check the backwards merkle root chaining
+            let merkle_root_meta = working_merkle_root_meta.as_ref();
+            let shred_id = ShredId::new(
+                slot,
+                merkle_root_meta.first_received_shred_index(),
+                merkle_root_meta.first_received_shred_type(),
+            );
+            let shred = just_inserted_shreds
+                .get(&shred_id)
+                .expect("Merkle root meta was just created, initial shred must exist");
+
+            self.check_backwards_chained_merkle_root_consistency(
+                shred,
+                &just_inserted_shreds,
+                &erasure_metas,
+                &merkle_root_metas,
+                &mut duplicate_shreds,
+            );
+        }
+
         for (erasure_set, working_erasure_meta) in erasure_metas {
             if !working_erasure_meta.should_write() {
                 // No need to rewrite the column
@@ -1270,6 +1324,26 @@ impl Blockstore {
         Ok(insert_results.completed_data_set_infos)
     }
 
+    #[cfg(test)]
+    fn insert_shred_return_duplicate(
+        &self,
+        shred: Shred,
+        leader_schedule: &LeaderScheduleCache,
+    ) -> Vec<PossibleDuplicateShred> {
+        let insert_results = self
+            .do_insert_shreds(
+                vec![shred],
+                vec![false],
+                Some(leader_schedule),
+                false,
+                None, // retransmit-sender
+                &ReedSolomonCache::default(),
+                &mut BlockstoreInsertionMetrics::default(),
+            )
+            .unwrap();
+        insert_results.duplicate_shreds
+    }
+
     #[allow(clippy::too_many_arguments)]
     fn check_insert_coding_shred(
         &self,
@@ -1701,24 +1775,18 @@ impl Blockstore {
     /// `duplicate_shreds`.
     ///
     /// This is intended to be used right after `shred`'s `erasure_meta`
-    /// has been created for the first time and loaded into `erasure_metas`.
-    #[allow(dead_code)]
+    /// has been created for the first time.
     fn check_forward_chained_merkle_root_consistency(
         &self,
         shred: &Shred,
+        erasure_meta: &ErasureMeta,
         just_inserted_shreds: &HashMap<ShredId, Shred>,
-        erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
         merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
         duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
     ) -> bool {
+        debug_assert!(erasure_meta.check_coding_shred(shred));
         let slot = shred.slot();
         let erasure_set = shred.erasure_set();
-        let erasure_meta_entry = erasure_metas.get(&erasure_set).expect(
-            "Checking chained merkle root consistency on an erasure set {erasure_set:?}
-            that is not loaded in memory, programmer error",
-        );
-        let erasure_meta = erasure_meta_entry.as_ref();
-        debug_assert!(erasure_meta.check_coding_shred(shred));
 
         // If a shred from the next fec set has already been inserted, check the chaining
         let Some(next_fec_set_index) = erasure_meta.next_fec_set_index() else {
@@ -1726,44 +1794,48 @@ impl Blockstore {
             return false;
         };
         let next_erasure_set = ErasureSetId::new(slot, next_fec_set_index);
-        let next_merkle_root_meta = match merkle_root_metas.entry(next_erasure_set) {
-            HashMapEntry::Vacant(entry) => self
-                .merkle_root_meta(next_erasure_set)
-                .unwrap()
-                .map(|meta| entry.insert(WorkingEntry::Clean(meta))),
-            HashMapEntry::Occupied(entry) => Some(entry.into_mut()),
+        let Some(next_merkle_root_meta) = merkle_root_metas
+            .get(&next_erasure_set)
+            .map(WorkingEntry::as_ref)
+            .map(Cow::Borrowed)
+            .or_else(|| {
+                self.merkle_root_meta(next_erasure_set)
+                    .unwrap()
+                    .map(Cow::Owned)
+            })
+        else {
+            // No shred from the next fec set has been received
+            return true;
         };
-        if let Some(next_merkle_root_meta) = next_merkle_root_meta.as_deref().map(AsRef::as_ref) {
-            let next_shred_id = ShredId::new(
-                slot,
-                next_merkle_root_meta.first_received_shred_index(),
+        let next_shred_id = ShredId::new(
+            slot,
+            next_merkle_root_meta.first_received_shred_index(),
+            next_merkle_root_meta.first_received_shred_type(),
+        );
+        let next_shred =
+            Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, next_shred_id)
+                .expect("Shred indicated by merkle root meta must exist")
+                .into_owned();
+        let merkle_root = shred.merkle_root().ok();
+        let chained_merkle_root = shred::layout::get_chained_merkle_root(&next_shred);
+
+        if !self.check_chaining(merkle_root, chained_merkle_root) {
+            warn!(
+                "Received conflicting chained merkle roots for slot: {slot},
+                shred {erasure_set:?} type {:?} has merkle root {merkle_root:?}, however
+                next fec set shred {next_erasure_set:?} type {:?} chains to merkle root {chained_merkle_root:?}.
+                Reporting as duplicate",
+                shred.shred_type(),
                 next_merkle_root_meta.first_received_shred_type(),
             );
-            let next_shred =
-                Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, next_shred_id)
-                    .expect("Shred indicated by merkle root meta must exist")
-                    .into_owned();
-            let merkle_root = shred.merkle_root().ok();
-            let chained_merkle_root = shred::layout::get_chained_merkle_root(&next_shred);
-            if !self.check_chaining(merkle_root, chained_merkle_root) {
-                warn!(
-                    "Received conflicting chained merkle roots for slot: {slot},
-                    shred {erasure_set:?} type {:?} has merkle root {merkle_root:?}, however
-                    next fec set shred {next_erasure_set:?} type {:?} chains to merkle root {chained_merkle_root:?}.
-                    Reporting as duplicate",
-                    shred.shred_type(),
-                    next_merkle_root_meta.first_received_shred_type(),
-                );
-
-                if !self.has_duplicate_shreds_in_slot(shred.slot()) {
-                    duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
-                        shred.clone(),
-                        next_shred,
-                    ));
-                }
-                return false;
+            if !self.has_duplicate_shreds_in_slot(shred.slot()) {
+                duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
+                    shred.clone(),
+                    next_shred,
+                ));
             }
+            return false;
         }
 
         true
@@ -1778,13 +1850,12 @@ impl Blockstore {
     ///
     /// This is intended to be used right after `shred`'s `merkle_root_meta`
     /// has been created for the first time.
-    #[allow(dead_code)]
     fn check_backwards_chained_merkle_root_consistency(
         &self,
         shred: &Shred,
         just_inserted_shreds: &HashMap<ShredId, Shred>,
-        erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
-        merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
+        erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
+        merkle_root_metas: &HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
         duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
     ) -> bool {
         let slot = shred.slot();
@@ -1811,16 +1882,17 @@ impl Blockstore {
             // we will verify this chain through the forward check above.
             return true;
         };
-        let prev_merkle_root_meta_entry = match merkle_root_metas.entry(prev_erasure_set) {
-            HashMapEntry::Vacant(entry) => entry.insert(WorkingEntry::Clean(
+
+        let prev_merkle_root_meta = merkle_root_metas
+            .get(&prev_erasure_set)
+            .map(WorkingEntry::as_ref)
+            .map(Cow::Borrowed)
+            .or_else(|| {
                 self.merkle_root_meta(prev_erasure_set)
                     .unwrap()
-                    .expect("merkle root meta must exist for erasure meta"),
-            )),
-            HashMapEntry::Occupied(entry) => entry.into_mut(),
-        };
-
-        let prev_merkle_root_meta = prev_merkle_root_meta_entry.as_ref();
+                    .map(Cow::Owned)
+            })
+            .expect("merkle root meta must exist for erasure meta");
         let prev_shred_id = ShredId::new(
             slot,
             prev_merkle_root_meta.first_received_shred_index(),
@@ -11044,7 +11116,7 @@ pub mod tests {
         // Blockstore is empty
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .previous_erasure_set(erasure_set, &erasure_metas)
                 .unwrap(),
             None
         );
@@ -11053,7 +11125,7 @@ pub mod tests {
         erasure_metas.insert(erasure_set_0, WorkingEntry::Dirty(erasure_meta_0));
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .previous_erasure_set(erasure_set, &erasure_metas)
                 .unwrap(),
             None
        );
@@ -11065,7 +11137,7 @@ pub mod tests {
             .unwrap();
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .previous_erasure_set(erasure_set, &erasure_metas)
                 .unwrap(),
             None
         );
@@ -11074,7 +11146,7 @@ pub mod tests {
         erasure_metas.insert(erasure_set_prev, WorkingEntry::Dirty(erasure_meta_prev));
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .previous_erasure_set(erasure_set, &erasure_metas)
                 .unwrap(),
             Some(erasure_set_prev)
         );
@@ -11086,7 +11158,7 @@ pub mod tests {
             .unwrap();
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .previous_erasure_set(erasure_set, &erasure_metas)
                 .unwrap(),
             Some(erasure_set_prev)
         );
@@ -11095,7 +11167,7 @@ pub mod tests {
         erasure_metas.insert(erasure_set_prev, WorkingEntry::Clean(erasure_meta_prev));
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .previous_erasure_set(erasure_set, &erasure_metas)
                 .unwrap(),
             Some(erasure_set_prev)
         );
@@ -11103,14 +11175,14 @@ pub mod tests {
         // Works even if the previous fec set has index 0
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set_prev, &mut erasure_metas)
+                .previous_erasure_set(erasure_set_prev, &erasure_metas)
                 .unwrap(),
             Some(erasure_set_0)
         );
         erasure_metas.remove(&erasure_set_0);
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set_prev, &mut erasure_metas)
+                .previous_erasure_set(erasure_set_prev, &erasure_metas)
                 .unwrap(),
             Some(erasure_set_0)
         );
@@ -11129,7 +11201,7 @@ pub mod tests {
         );
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .previous_erasure_set(erasure_set, &erasure_metas)
                 .unwrap(),
             None,
         );
@@ -11142,9 +11214,376 @@ pub mod tests {
             .unwrap();
         assert_eq!(
             blockstore
-                .previous_erasure_set(erasure_set, &mut erasure_metas)
+                .previous_erasure_set(erasure_set, &erasure_metas)
                 .unwrap(),
             None,
         );
     }
+
+    #[test]
+    fn test_chained_merkle_root_consistency_backwards() {
+        // Insert a coding shred then consistent data and coding shreds from the next FEC set
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let fec_set_index = 0;
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
+        let coding_shred = coding_shreds[0].clone();
+        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule,)
+            .is_empty());
+
+        let merkle_root = coding_shred.merkle_root().unwrap();
+
+        // Correctly chained merkle
+        let (data_shreds, coding_shreds, _) = setup_erasure_shreds_with_index_and_chained_merkle(
+            slot,
+            parent_slot,
+            10,
+            next_fec_set_index,
+            Some(merkle_root),
+        );
+        let data_shred = data_shreds[0].clone();
+        let coding_shred = coding_shreds[0].clone();
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred, &leader_schedule,)
+            .is_empty());
+        assert!(blockstore
+            .insert_shred_return_duplicate(data_shred, &leader_schedule,)
+            .is_empty());
+    }
+
+    #[test]
+    fn test_chained_merkle_root_consistency_forwards() {
+        // Insert a coding shred, then a consistent coding shred from the previous FEC set
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let fec_set_index = 0;
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
+        let coding_shred = coding_shreds[0].clone();
+        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
+
+        // Correctly chained merkle
+        let merkle_root = coding_shred.merkle_root().unwrap();
+        let (_, next_coding_shreds, _) = setup_erasure_shreds_with_index_and_chained_merkle(
+            slot,
+            parent_slot,
+            10,
+            next_fec_set_index,
+            Some(merkle_root),
+        );
+        let next_coding_shred = next_coding_shreds[0].clone();
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(next_coding_shred, &leader_schedule,)
+            .is_empty());
+
+        // Insert previous FEC set
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred, &leader_schedule,)
+            .is_empty());
+    }
+
+    #[test]
+    fn test_chained_merkle_root_across_slots_backwards() {
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let fec_set_index = 0;
+        let (data_shreds, _, leader_schedule) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
+        let data_shred = data_shreds[0].clone();
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(data_shred.clone(), &leader_schedule,)
+            .is_empty());
+
+        // Incorrectly chained merkle for next slot
+        let merkle_root = Hash::new_unique();
+        assert!(merkle_root != data_shred.merkle_root().unwrap());
+        let (next_slot_data_shreds, next_slot_coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index_and_chained_merkle(
+                slot + 1,
+                slot,
+                10,
+                fec_set_index,
+                Some(merkle_root),
+            );
+        let next_slot_data_shred = next_slot_data_shreds[0].clone();
+        let next_slot_coding_shred = next_slot_coding_shreds[0].clone();
+        assert!(blockstore
+            .insert_shred_return_duplicate(next_slot_coding_shred, &leader_schedule,)
+            .is_empty());
+        assert!(blockstore
+            .insert_shred_return_duplicate(next_slot_data_shred, &leader_schedule)
+            .is_empty());
+    }
+
+    #[test]
+    fn test_chained_merkle_root_across_slots_forwards() {
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let fec_set_index = 0;
+        let (_, coding_shreds, _) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
+        let coding_shred = coding_shreds[0].clone();
+
+        // Incorrectly chained merkle for next slot
+        let merkle_root = Hash::new_unique();
+        assert!(merkle_root != coding_shred.merkle_root().unwrap());
+        let (next_slot_data_shreds, _, leader_schedule) =
+            setup_erasure_shreds_with_index_and_chained_merkle(
+                slot + 1,
+                slot,
+                10,
+                fec_set_index,
+                Some(merkle_root),
+            );
+        let next_slot_data_shred = next_slot_data_shreds[0].clone();
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(next_slot_data_shred.clone(), &leader_schedule,)
+            .is_empty());
+
+        // Insert for previous slot
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred, &leader_schedule,)
+            .is_empty());
+    }
+
+    #[test]
+    fn test_chained_merkle_root_inconsistency_backwards_insert_code() {
+        // Insert a coding shred then inconsistent coding shred then inconsistent data shred from the next FEC set
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let fec_set_index = 0;
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
+        let coding_shred_previous = coding_shreds[0].clone();
+        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred_previous.clone(), &leader_schedule,)
+            .is_empty());
+
+        // Incorrectly chained merkle
+        let merkle_root = Hash::new_unique();
+        assert!(merkle_root != coding_shred_previous.merkle_root().unwrap());
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index_and_chained_merkle(
+                slot,
+                parent_slot,
+                10,
+                next_fec_set_index,
+                Some(merkle_root),
+            );
+        let data_shred = data_shreds[0].clone();
+        let coding_shred = coding_shreds[0].clone();
+        let duplicate_shreds =
+            blockstore.insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule);
+        assert_eq!(duplicate_shreds.len(), 1);
+        assert_eq!(
+            duplicate_shreds[0],
+            PossibleDuplicateShred::ChainedMerkleRootConflict(
+                coding_shred,
+                coding_shred_previous.into_payload()
+            )
+        );
+
+        // Should not check again, even though this shred conflicts as well
+        assert!(blockstore
+            .insert_shred_return_duplicate(data_shred.clone(), &leader_schedule,)
+            .is_empty());
+    }
+
+    #[test]
+    fn test_chained_merkle_root_inconsistency_backwards_insert_data() {
+        // Insert a coding shred then inconsistent data shred then inconsistent coding shred from the next FEC set
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let fec_set_index = 0;
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
+        let coding_shred_previous = coding_shreds[0].clone();
+        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred_previous.clone(), &leader_schedule,)
+            .is_empty());
+
+        // Incorrectly chained merkle
+        let merkle_root = Hash::new_unique();
+        assert!(merkle_root != coding_shred_previous.merkle_root().unwrap());
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index_and_chained_merkle(
+                slot,
+                parent_slot,
+                10,
+                next_fec_set_index,
+                Some(merkle_root),
+            );
+        let data_shred = data_shreds[0].clone();
+        let coding_shred = coding_shreds[0].clone();
+
+        let duplicate_shreds =
+            blockstore.insert_shred_return_duplicate(data_shred.clone(), &leader_schedule);
+        assert_eq!(duplicate_shreds.len(), 1);
+        assert_eq!(
+            duplicate_shreds[0],
+            PossibleDuplicateShred::ChainedMerkleRootConflict(
+                data_shred,
+                coding_shred_previous.into_payload(),
+            )
+        );
+        // Should not check again, even though this shred conflicts as well
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule,)
+            .is_empty());
+    }
+
+    #[test]
+    fn test_chained_merkle_root_inconsistency_forwards() {
+        // Insert a data shred, then an inconsistent coding shred from the previous FEC set
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let fec_set_index = 0;
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
+        let coding_shred = coding_shreds[0].clone();
+        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
+
+        // Incorrectly chained merkle
+        let merkle_root = Hash::new_unique();
+        assert!(merkle_root != coding_shred.merkle_root().unwrap());
+        let (next_data_shreds, _, leader_schedule_next) =
+            setup_erasure_shreds_with_index_and_chained_merkle(
+                slot,
+                parent_slot,
+                10,
+                next_fec_set_index,
+                Some(merkle_root),
+            );
+        let next_data_shred = next_data_shreds[0].clone();
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(next_data_shred.clone(), &leader_schedule_next,)
+            .is_empty());
+
+        // Insert previous FEC set
+        let duplicate_shreds =
+            blockstore.insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule);
+
+        assert_eq!(duplicate_shreds.len(), 1);
+        assert_eq!(
+            duplicate_shreds[0],
+            PossibleDuplicateShred::ChainedMerkleRootConflict(
+                coding_shred,
+                next_data_shred.into_payload(),
+            )
+        );
+    }
+
+    #[test]
+    fn test_chained_merkle_root_inconsistency_both() {
+        // Insert a coding shred from fec_set - 1, and a data shred from fec_set + 10
+        // Then insert an inconsistent data shred from fec_set, and finally an
+        // inconsistent coding shred from fec_set
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let prev_fec_set_index = 0;
+        let (prev_data_shreds, prev_coding_shreds, leader_schedule_prev) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, prev_fec_set_index);
+        let prev_coding_shred = prev_coding_shreds[0].clone();
+        let fec_set_index = prev_fec_set_index + prev_data_shreds.len() as u32;
+
+        // Incorrectly chained merkle
+        let merkle_root = Hash::new_unique();
+        assert!(merkle_root != prev_coding_shred.merkle_root().unwrap());
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index_and_chained_merkle(
+                slot,
+                parent_slot,
+                10,
+                fec_set_index,
+                Some(merkle_root),
+            );
+        let data_shred = data_shreds[0].clone();
+        let coding_shred = coding_shreds[0].clone();
+        let next_fec_set_index = fec_set_index + prev_data_shreds.len() as u32;
+
+        // Incorrectly chained merkle
+        let merkle_root = Hash::new_unique();
+        assert!(merkle_root != data_shred.merkle_root().unwrap());
+        let (next_data_shreds, _, leader_schedule_next) =
+            setup_erasure_shreds_with_index_and_chained_merkle(
+                slot,
+                parent_slot,
+                10,
+                next_fec_set_index,
+                Some(merkle_root),
+            );
+        let next_data_shred = next_data_shreds[0].clone();
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(prev_coding_shred.clone(), &leader_schedule_prev,)
+            .is_empty());
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(next_data_shred.clone(), &leader_schedule_next)
+            .is_empty());
+
+        // Insert data shred
+        let duplicate_shreds =
+            blockstore.insert_shred_return_duplicate(data_shred.clone(), &leader_schedule);
+
+        // Only the backwards check will be performed
+        assert_eq!(duplicate_shreds.len(), 1);
+        assert_eq!(
+            duplicate_shreds[0],
+            PossibleDuplicateShred::ChainedMerkleRootConflict(
+                data_shred,
+                prev_coding_shred.into_payload(),
+            )
+        );
+
+        // Insert coding shred
+        let duplicate_shreds =
+            blockstore.insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule);
+
+        // Now the forwards check will be performed
+        assert_eq!(duplicate_shreds.len(), 1);
+        assert_eq!(
+            duplicate_shreds[0],
+            PossibleDuplicateShred::ChainedMerkleRootConflict(
+                coding_shred,
+                next_data_shred.into_payload(),
+            )
+        );
+    }
 }
diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs
index 8e97b9ac801e63..a76d462c610f04 100644
--- a/ledger/src/blockstore_meta.rs
+++ b/ledger/src/blockstore_meta.rs
@@ -392,7 +392,6 @@ impl ErasureMeta {
         self.first_coding_index..self.first_coding_index + num_coding
     }
 
-    #[allow(dead_code)]
     pub(crate) fn first_received_coding_shred_index(&self) -> Option<u32> {
         u32::try_from(self.first_received_coding_index).ok()
     }
diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs
index 97ab1fa7347742..47956caa561e99 100644
--- a/sdk/src/feature_set.rs
+++ b/sdk/src/feature_set.rs
@@ -774,7 +774,7 @@ pub mod enable_gossip_duplicate_proof_ingestion {
 }
 
 pub mod chained_merkle_conflict_duplicate_proofs {
-    solana_sdk::declare_id!("mustrekeyVfuxJKNRGkyTDokLwWxx6kD2ZLsqQHaDD8");
+    solana_sdk::declare_id!("chaie9S2zVfuxJKNRGkyTDokLwWxx6kD2ZLsqQHaDD8");
 }
 
 pub mod enable_chained_merkle_shreds {