From 69ea21e947975ea04e6ac31dbe529dcb48e960a4 Mon Sep 17 00:00:00 2001
From: Ashwin Sekar
Date: Mon, 1 Jul 2024 12:05:05 -0400
Subject: [PATCH] gossip: do not allow duplicate proofs for incorrect shred
 versions (#1931)

* gossip: do not allow duplicate proofs for incorrect shred versions

* pr feedback: refactor test function to take shred_version
---
 core/src/tvu.rs                       |   1 +
 gossip/src/cluster_info.rs            |   2 +
 gossip/src/crds_gossip.rs             |   2 +
 gossip/src/duplicate_shred.rs         | 174 ++++++++++++++++++++++++--
 gossip/src/duplicate_shred_handler.rs |  25 +++-
 5 files changed, 190 insertions(+), 14 deletions(-)

diff --git a/core/src/tvu.rs b/core/src/tvu.rs
index 6ba61f578b5671..4dcd7bbfa3e589 100644
--- a/core/src/tvu.rs
+++ b/core/src/tvu.rs
@@ -352,6 +352,7 @@ impl Tvu {
                 leader_schedule_cache.clone(),
                 bank_forks.clone(),
                 duplicate_slots_sender,
+                tvu_config.shred_version,
             ),
         );
 
diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs
index 7e9327f8de169c..57a1e95babc5a5 100644
--- a/gossip/src/cluster_info.rs
+++ b/gossip/src/cluster_info.rs
@@ -1261,6 +1261,7 @@ impl ClusterInfo {
             other_payload,
             None::<fn(Slot) -> Option<Pubkey>>, // Leader schedule
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            self.my_shred_version(),
         )?;
         Ok(())
     }
@@ -3698,6 +3699,7 @@ mod tests {
             Some(leader_schedule),
             timestamp(),
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            version,
         )
         .unwrap()
         .collect();
 
diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs
index 3300f3ec4d1d79..788e9f9af87a02 100644
--- a/gossip/src/crds_gossip.rs
+++ b/gossip/src/crds_gossip.rs
@@ -90,6 +90,7 @@ impl CrdsGossip {
         leader_schedule: Option<F>,
         // Maximum serialized size of each DuplicateShred chunk payload.
         max_payload_size: usize,
+        shred_version: u16,
     ) -> Result<(), duplicate_shred::Error>
     where
         F: FnOnce(Slot) -> Option<Pubkey>,
@@ -114,6 +115,7 @@ impl CrdsGossip {
             leader_schedule,
             timestamp(),
             max_payload_size,
+            shred_version,
         )?;
         // Find the index of oldest duplicate shred.
         let mut num_dup_shreds = 0;
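
For context before the core change below: push_duplicate_shred serializes a duplicate-slot proof and splits it into gossip-sized chunks (bounded by max_payload_size / DUPLICATE_SHRED_MAX_PAYLOAD_SIZE above); the new shred_version argument is simply threaded along to the validation in duplicate_shred.rs. Here is a minimal, self-contained sketch of the chunking round trip; it is not code from this patch, the real logic lives in duplicate_shred::from_shred and into_shreds, and these helper names are invented:

    // Split a serialized proof into bounded chunks, then re-join them.
    fn split_into_chunks(payload: &[u8], max_payload_size: usize) -> Vec<Vec<u8>> {
        payload
            .chunks(max_payload_size)
            .map(|chunk| chunk.to_vec())
            .collect()
    }

    fn join_chunks(chunks: &[Vec<u8>]) -> Vec<u8> {
        chunks.concat()
    }

    fn main() {
        let payload: Vec<u8> = (0u8..=255).collect();
        let chunks = split_into_chunks(&payload, 100);
        assert_eq!(chunks.len(), 3); // 100 + 100 + 56 bytes
        assert_eq!(join_chunks(&chunks), payload);
    }
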
diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs
index 69cc0abce58811..3b4448c968414c 100644
--- a/gossip/src/duplicate_shred.rs
+++ b/gossip/src/duplicate_shred.rs
@@ -66,6 +66,8 @@ pub enum Error {
     InvalidErasureMetaConflict,
     #[error("invalid last index conflict")]
     InvalidLastIndexConflict,
+    #[error("invalid shred version: {0}")]
+    InvalidShredVersion(u16),
     #[error("invalid signature")]
     InvalidSignature,
     #[error("invalid size limit")]
@@ -90,6 +92,7 @@ pub enum Error {
 /// Check that `shred1` and `shred2` indicate a valid duplicate proof
 /// - Must be for the same slot
+/// - Must match the expected shred version
 /// - Must both sigverify for the correct leader
 /// - Must have a merkle root conflict, otherwise `shred1` and `shred2` must have the same `shred_type`
 /// - If `shred1` and `shred2` share the same index they must be not equal
 /// - If `shred1` and `shred2` do not share the same index and are data shreds
 ///   verify that they indicate an index conflict. One of them must be the
 ///   LAST_SHRED_IN_SLOT, however the other shred must have a higher index.
 /// - If `shred1` and `shred2` do not share the same index and are coding shreds
 ///   verify that they have conflicting erasure metas
-fn check_shreds<F>(leader_schedule: Option<F>, shred1: &Shred, shred2: &Shred) -> Result<(), Error>
+fn check_shreds<F>(
+    leader_schedule: Option<F>,
+    shred1: &Shred,
+    shred2: &Shred,
+    shred_version: u16,
+) -> Result<(), Error>
 where
     F: FnOnce(Slot) -> Option<Pubkey>,
 {
@@ -106,6 +114,13 @@ where
         return Err(Error::SlotMismatch);
     }
 
+    if shred1.version() != shred_version {
+        return Err(Error::InvalidShredVersion(shred1.version()));
+    }
+    if shred2.version() != shred_version {
+        return Err(Error::InvalidShredVersion(shred2.version()));
+    }
+
     if let Some(leader_schedule) = leader_schedule {
         let slot_leader =
             leader_schedule(shred1.slot()).ok_or(Error::UnknownSlotLeader(shred1.slot()))?;
@@ -165,6 +180,7 @@ pub(crate) fn from_shred<F>(
     leader_schedule: Option<F>,
     wallclock: u64,
     max_size: usize, // Maximum serialized size of each DuplicateShred.
+    shred_version: u16,
 ) -> Result<impl Iterator<Item = DuplicateShred>, Error>
 where
     F: FnOnce(Slot) -> Option<Pubkey>,
@@ -173,7 +189,7 @@ where
         return Err(Error::InvalidDuplicateShreds);
     }
     let other_shred = Shred::new_from_serialized_shred(other_payload)?;
-    check_shreds(leader_schedule, &shred, &other_shred)?;
+    check_shreds(leader_schedule, &shred, &other_shred, shred_version)?;
     let slot = shred.slot();
     let proof = DuplicateSlotProof {
         shred1: shred.into_payload(),
@@ -226,6 +242,7 @@ fn check_chunk(slot: Slot, num_chunks: u8) -> impl Fn(&DuplicateShred) -> Result<(), Error>
 pub(crate) fn into_shreds(
     slot_leader: &Pubkey,
     chunks: impl IntoIterator<Item = DuplicateShred>,
+    shred_version: u16,
 ) -> Result<(Shred, Shred), Error> {
     let mut chunks = chunks.into_iter();
     let DuplicateShred {
@@ -261,10 +278,16 @@ pub(crate) fn into_shreds(
     }
     let shred1 = Shred::new_from_serialized_shred(proof.shred1)?;
     let shred2 = Shred::new_from_serialized_shred(proof.shred2)?;
+
     if shred1.slot() != slot || shred2.slot() != slot {
         Err(Error::SlotMismatch)
     } else {
-        check_shreds(Some(|_| Some(slot_leader).copied()), &shred1, &shred2)?;
+        check_shreds(
+            Some(|_| Some(slot_leader).copied()),
+            &shred1,
+            &shred2,
+            shred_version,
+        )?;
         Ok((shred1, shred2))
     }
 }
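
Both entry points now funnel into the same gate: from_shred (proof creation) and into_shreds (proof ingestion) hand the caller's shred version to check_shreds, which rejects either shred on a mismatch before any signature verification is attempted. A standalone model of just that gate, with FakeShred as a hypothetical stand-in for the real Shred type:

    #[derive(Debug, PartialEq)]
    enum Error {
        InvalidShredVersion(u16),
    }

    struct FakeShred {
        version: u16,
    }

    impl FakeShred {
        fn version(&self) -> u16 {
            self.version
        }
    }

    // Mirrors the ordering in the patch: shred1 is checked first, and the
    // offending shred's own version is reported in the error.
    fn check_versions(shred1: &FakeShred, shred2: &FakeShred, expected: u16) -> Result<(), Error> {
        if shred1.version() != expected {
            return Err(Error::InvalidShredVersion(shred1.version()));
        }
        if shred2.version() != expected {
            return Err(Error::InvalidShredVersion(shred2.version()));
        }
        Ok(())
    }

    fn main() {
        let (good, bad) = (FakeShred { version: 0 }, FakeShred { version: 1 });
        assert_eq!(check_versions(&good, &good, 0), Ok(()));
        assert_eq!(check_versions(&good, &bad, 0), Err(Error::InvalidShredVersion(1)));
    }

Placing the version check before the leader schedule lookup keeps the cheap rejection first: a proof built for a different cluster configuration fails without touching sigverify.
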
@@ -487,11 +510,12 @@ pub(crate) mod tests {
             Some(leader_schedule),
             rng.gen(), // wallclock
             512,       // max_size
+            version,
         )
         .unwrap()
         .collect();
         assert!(chunks.len() > 4);
-        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
+        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
         assert_eq!(shred1, shred3);
         assert_eq!(shred2, shred4);
     }
@@ -542,6 +566,7 @@ pub(crate) mod tests {
                 Some(leader_schedule),
                 rng.gen(), // wallclock
                 512,       // max_size
+                version,
             )
             .err()
             .unwrap(),
@@ -560,7 +585,9 @@ pub(crate) mod tests {
         assert!(chunks.len() > 4);
         assert_matches!(
-            into_shreds(&leader.pubkey(), chunks).err().unwrap(),
+            into_shreds(&leader.pubkey(), chunks, version)
+                .err()
+                .unwrap(),
             Error::InvalidDuplicateSlotProof
         );
     }
@@ -629,11 +656,12 @@ pub(crate) mod tests {
             Some(leader_schedule),
             rng.gen(), // wallclock
             512,       // max_size
+            version,
         )
         .unwrap()
         .collect();
         assert!(chunks.len() > 4);
-        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
+        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
         assert_eq!(shred1, &shred3);
         assert_eq!(shred2, &shred4);
     }
@@ -737,6 +765,7 @@ pub(crate) mod tests {
                 Some(leader_schedule),
                 rng.gen(), // wallclock
                 512,       // max_size
+                version,
             )
             .err()
             .unwrap(),
@@ -755,7 +784,9 @@ pub(crate) mod tests {
         assert!(chunks.len() > 4);
         assert_matches!(
-            into_shreds(&leader.pubkey(), chunks).err().unwrap(),
+            into_shreds(&leader.pubkey(), chunks, version)
+                .err()
+                .unwrap(),
             Error::InvalidLastIndexConflict
         );
     }
@@ -814,11 +845,12 @@ pub(crate) mod tests {
             Some(leader_schedule),
             rng.gen(), // wallclock
             512,       // max_size
+            version,
         )
         .unwrap()
         .collect();
         assert!(chunks.len() > 4);
-        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
+        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
         assert_eq!(shred1, shred3);
         assert_eq!(shred2, shred4);
     }
@@ -895,6 +927,7 @@ pub(crate) mod tests {
                 Some(leader_schedule),
                 rng.gen(), // wallclock
                 512,       // max_size
+                version,
             )
             .err()
             .unwrap(),
@@ -913,7 +946,9 @@ pub(crate) mod tests {
         assert!(chunks.len() > 4);
         assert_matches!(
-            into_shreds(&leader.pubkey(), chunks).err().unwrap(),
+            into_shreds(&leader.pubkey(), chunks, version)
+                .err()
+                .unwrap(),
             Error::InvalidErasureMetaConflict
         );
     }
@@ -986,11 +1021,12 @@ pub(crate) mod tests {
             Some(leader_schedule),
             rng.gen(), // wallclock
             512,       // max_size
+            version,
         )
         .unwrap()
         .collect();
         assert!(chunks.len() > 4);
-        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
+        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
         assert_eq!(shred1, shred3);
         assert_eq!(shred2, shred4);
     }
@@ -1077,6 +1113,7 @@ pub(crate) mod tests {
                 Some(leader_schedule),
                 rng.gen(), // wallclock
                 512,       // max_size
+                version,
             )
             .err()
             .unwrap(),
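
The final hunk in this file adds test_shred_version, which builds shreds at version, version + 1, and version + 2 and pairs them so that at least one side of every proof carries the wrong version. The rule it exercises reduces to: a pair is acceptable only if both versions equal the expected one. A compact model of that case matrix, illustrative only and not test code from the patch:

    fn pair_is_acceptable(v1: u16, v2: u16, expected: u16) -> bool {
        v1 == expected && v2 == expected
    }

    fn main() {
        let expected = 0;
        // One correct and one wrong version, and two differing wrong versions.
        for (v1, v2) in [(0, 1), (2, 1)] {
            assert!(!pair_is_acceptable(v1, v2, expected));
        }
        assert!(pair_is_acceptable(0, 0, expected));
    }
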
@@ -1095,9 +1132,124 @@ pub(crate) mod tests {
         assert!(chunks.len() > 4);
         assert_matches!(
-            into_shreds(&leader.pubkey(), chunks).err().unwrap(),
+            into_shreds(&leader.pubkey(), chunks, version)
+                .err()
+                .unwrap(),
             Error::ShredTypeMismatch
         );
         }
     }
+
+    #[test]
+    fn test_shred_version() {
+        let mut rng = rand::thread_rng();
+        let leader = Arc::new(Keypair::new());
+        let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
+        let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
+        let next_shred_index = rng.gen_range(0..31_000);
+        let leader_schedule = |s| {
+            if s == slot {
+                Some(leader.pubkey())
+            } else {
+                None
+            }
+        };
+
+        let (data_shreds, coding_shreds) = new_rand_shreds(
+            &mut rng,
+            next_shred_index,
+            next_shred_index,
+            10,
+            true,
+            &shredder,
+            &leader,
+            true,
+        );
+
+        // Wrong shred version 1
+        let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 1).unwrap();
+        let (wrong_data_shreds_1, wrong_coding_shreds_1) = new_rand_shreds(
+            &mut rng,
+            next_shred_index,
+            next_shred_index,
+            10,
+            true,
+            &shredder,
+            &leader,
+            true,
+        );
+
+        // Wrong shred version 2
+        let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 2).unwrap();
+        let (wrong_data_shreds_2, wrong_coding_shreds_2) = new_rand_shreds(
+            &mut rng,
+            next_shred_index,
+            next_shred_index,
+            10,
+            true,
+            &shredder,
+            &leader,
+            true,
+        );
+
+        let test_cases = vec![
+            // One correct shred version, one wrong
+            (coding_shreds[0].clone(), wrong_coding_shreds_1[0].clone()),
+            (coding_shreds[0].clone(), wrong_data_shreds_1[0].clone()),
+            (data_shreds[0].clone(), wrong_coding_shreds_1[0].clone()),
+            (data_shreds[0].clone(), wrong_data_shreds_1[0].clone()),
+            // Both wrong shred version
+            (
+                wrong_coding_shreds_2[0].clone(),
+                wrong_coding_shreds_1[0].clone(),
+            ),
+            (
+                wrong_coding_shreds_2[0].clone(),
+                wrong_data_shreds_1[0].clone(),
+            ),
+            (
+                wrong_data_shreds_2[0].clone(),
+                wrong_coding_shreds_1[0].clone(),
+            ),
+            (
+                wrong_data_shreds_2[0].clone(),
+                wrong_data_shreds_1[0].clone(),
+            ),
+        ];
+
+        for (shred1, shred2) in test_cases.into_iter() {
+            assert_matches!(
+                from_shred(
+                    shred1.clone(),
+                    Pubkey::new_unique(), // self_pubkey
+                    shred2.payload().clone(),
+                    Some(leader_schedule),
+                    rng.gen(), // wallclock
+                    512,       // max_size
+                    version,
+                )
+                .err()
+                .unwrap(),
+                Error::InvalidShredVersion(_)
+            );
+
+            let chunks: Vec<_> = from_shred_bypass_checks(
+                shred1.clone(),
+                Pubkey::new_unique(), // self_pubkey
+                shred2.clone(),
+                rng.gen(), // wallclock
+                512,       // max_size
+            )
+            .unwrap()
+            .collect();
+            assert!(chunks.len() > 4);
+
+            assert_matches!(
+                into_shreds(&leader.pubkey(), chunks, version)
+                    .err()
+                    .unwrap(),
+                Error::InvalidShredVersion(_)
+            );
+        }
+    }
 }

diff --git a/gossip/src/duplicate_shred_handler.rs b/gossip/src/duplicate_shred_handler.rs
index 883d0a7da00504..edf62aaf4276fc 100644
--- a/gossip/src/duplicate_shred_handler.rs
+++ b/gossip/src/duplicate_shred_handler.rs
@@ -48,6 +48,7 @@ pub struct DuplicateShredHandler {
     cached_slots_in_epoch: u64,
     // Used to notify duplicate consensus state machine
     duplicate_slots_sender: Sender<Slot>,
+    shred_version: u16,
 }
 
 impl DuplicateShredHandlerTrait for DuplicateShredHandler {
@@ -68,6 +69,7 @@ impl DuplicateShredHandler {
         leader_schedule_cache: Arc<LeaderScheduleCache>,
         bank_forks: Arc<RwLock<BankForks>>,
         duplicate_slots_sender: Sender<Slot>,
+        shred_version: u16,
     ) -> Self {
         Self {
             buffer: HashMap::<(Slot, Pubkey), BufferEntry>::default(),
@@ -80,6 +82,7 @@ impl DuplicateShredHandler {
             leader_schedule_cache,
             bank_forks,
             duplicate_slots_sender,
+            shred_version,
         }
     }
 
@@ -130,7 +133,8 @@ impl DuplicateShredHandler {
             .leader_schedule_cache
             .slot_leader_at(slot, /*bank:*/ None)
             .ok_or(Error::UnknownSlotLeader(slot))?;
-        let (shred1, shred2) = duplicate_shred::into_shreds(&pubkey, chunks)?;
+        let (shred1, shred2) =
+            duplicate_shred::into_shreds(&pubkey, chunks, self.shred_version)?;
         if !self.blockstore.has_duplicate_shreds_in_slot(slot) {
             self.blockstore.store_duplicate_slot(
                 slot,
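
On the receive side, the handler pins the shred version it was constructed with (threaded down from Tvu in core/src/tvu.rs) and applies it whenever buffered chunks are unwrapped through into_shreds, so nothing inside an incoming proof can choose which version it is checked against. A toy model of that ownership, not the real DuplicateShredHandler, with accepts as an invented stand-in for the into_shreds call:

    struct Handler {
        shred_version: u16,
    }

    impl Handler {
        fn new(shred_version: u16) -> Self {
            Self { shred_version }
        }

        // Hypothetical stand-in: the stored version, not anything taken from
        // the incoming chunks, decides whether a proof is acceptable.
        fn accepts(&self, proof_version: u16) -> bool {
            proof_version == self.shred_version
        }
    }

    fn main() {
        let handler = Handler::new(0);
        assert!(handler.accepts(0));
        assert!(!handler.accepts(1)); // wrong-version proof is dropped
    }
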
@@ -255,16 +259,17 @@ mod tests {
         slot: u64,
         expected_error: Option<Error>,
         chunk_size: usize,
+        shred_version: u16,
     ) -> Result<impl Iterator<Item = DuplicateShred>, Error> {
         let my_keypair = match expected_error {
             Some(Error::InvalidSignature) => Arc::new(Keypair::new()),
             _ => keypair,
         };
         let mut rng = rand::thread_rng();
-        let shredder = Shredder::new(slot, slot - 1, 0, 0).unwrap();
+        let shredder = Shredder::new(slot, slot - 1, 0, shred_version).unwrap();
         let next_shred_index = 353;
         let shred1 = new_rand_shred(&mut rng, next_shred_index, &shredder, &my_keypair);
-        let shredder1 = Shredder::new(slot + 1, slot, 0, 0).unwrap();
+        let shredder1 = Shredder::new(slot + 1, slot, 0, shred_version).unwrap();
         let shred2 = match expected_error {
             Some(Error::SlotMismatch) => {
                 new_rand_shred(&mut rng, next_shred_index, &shredder1, &my_keypair)
             }
@@ -283,6 +288,7 @@ mod tests {
             None::<fn(Slot) -> Option<Pubkey>>,
             timestamp(), // wallclock
             chunk_size,  // max_size
+            shred_version,
         )?;
         Ok(chunks)
     }
@@ -295,6 +301,7 @@ mod tests {
         let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
         let my_keypair = Arc::new(Keypair::new());
         let my_pubkey = my_keypair.pubkey();
+        let shred_version = 0;
         let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000);
         let GenesisConfigInfo { genesis_config, .. } = genesis_config_info;
         let mut bank = Bank::new_for_tests(&genesis_config);
@@ -322,6 +329,7 @@ mod tests {
             leader_schedule_cache,
             bank_forks_arc,
             sender,
+            shred_version,
         );
@@ -329,6 +337,7 @@ mod tests {
         let chunks = create_duplicate_proof(
             my_keypair.clone(),
             None,
             start_slot,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
@@ -337,6 +346,7 @@ mod tests {
         let chunks1 = create_duplicate_proof(
             my_keypair.clone(),
             None,
             start_slot + 1,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
         assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot));
@@ -365,6 +375,7 @@ mod tests {
             start_slot + 2,
             Some(error),
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         ) {
             Err(_) => (),
             Ok(chunks) => {
@@ -386,6 +397,7 @@ mod tests {
         let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
         let my_keypair = Arc::new(Keypair::new());
         let my_pubkey = my_keypair.pubkey();
+        let shred_version = 0;
         let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000);
         let GenesisConfigInfo { genesis_config, .. } = genesis_config_info;
         let mut bank = Bank::new_for_tests(&genesis_config);
@@ -410,6 +422,7 @@ mod tests {
             leader_schedule_cache,
             bank_forks_arc,
             sender,
+            shred_version,
         );
         // The feature will only be activated at Epoch 1.
         let start_slot: Slot = slots_in_epoch + 1;
@@ -421,6 +434,7 @@ mod tests {
             start_slot,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE / 2,
+            shred_version,
         )
         .unwrap();
         for chunk in chunks {
@@ -438,6 +452,7 @@ mod tests {
             future_slot,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
         for chunk in chunks {
@@ -454,6 +469,7 @@ mod tests {
             start_slot,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
         // handle chunk 0 of the first proof.
@@ -474,6 +490,7 @@ mod tests {
         let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
         let my_keypair = Arc::new(Keypair::new());
         let my_pubkey = my_keypair.pubkey();
+        let shred_version = 0;
         let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000);
         let GenesisConfigInfo { genesis_config, .. } = genesis_config_info;
         let mut bank = Bank::new_for_tests(&genesis_config);
@@ -492,6 +509,7 @@ mod tests {
             leader_schedule_cache,
             bank_forks_arc,
             sender,
+            shred_version,
         );
@@ -499,6 +517,7 @@ mod tests {
         let chunks = create_duplicate_proof(
             my_keypair.clone(),
             None,
             1,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
         assert!(!blockstore.has_duplicate_shreds_in_slot(1));
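
A pattern these handler tests lean on throughout: a slot is stored as duplicate only if it is not already marked (see the has_duplicate_shreds_in_slot guard around store_duplicate_slot in the handler hunk above), so has_duplicate_shreds_in_slot flips once after the first complete, valid proof and later proofs are no-ops. A toy model of that bookkeeping, assuming nothing about the real Blockstore internals:

    use std::collections::HashSet;

    struct Store {
        duplicate_slots: HashSet<u64>,
    }

    impl Store {
        // Returns true only the first time a slot is marked.
        fn store_duplicate_slot(&mut self, slot: u64) -> bool {
            self.duplicate_slots.insert(slot)
        }

        fn has_duplicate_shreds_in_slot(&self, slot: u64) -> bool {
            self.duplicate_slots.contains(&slot)
        }
    }

    fn main() {
        let mut store = Store {
            duplicate_slots: HashSet::new(),
        };
        assert!(!store.has_duplicate_shreds_in_slot(1));
        assert!(store.store_duplicate_slot(1)); // first proof marks the slot
        assert!(!store.store_duplicate_slot(1)); // later proofs are no-ops
        assert!(store.has_duplicate_shreds_in_slot(1));
    }
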