Skip to content

Commit

Permalink
v1.18: gossip: do not allow duplicate proofs for incorrect shred versions (backport of #1931) (#1940)
Browse files Browse the repository at this point in the history

gossip: do not allow duplicate proofs for incorrect shred versions (#1931)

* gossip: do not allow duplicate proofs for incorrect shred versions

* pr feedback: refactor test function to take shred_version

(cherry picked from commit 69ea21e)

Co-authored-by: Ashwin Sekar <ashwin@anza.xyz>
  • Loading branch information
mergify[bot] and AshwinSekar authored Jul 3, 2024
1 parent af3098e commit 292093e
Show file tree
Hide file tree
Showing 5 changed files with 190 additions and 14 deletions.
1 change: 1 addition & 0 deletions core/src/tvu.rs
Original file line number Diff line number Diff line change
Expand Up @@ -342,6 +342,7 @@ impl Tvu {
leader_schedule_cache.clone(),
bank_forks.clone(),
duplicate_slots_sender,
tvu_config.shred_version,
),
);

Expand Down
2 changes: 2 additions & 0 deletions gossip/src/cluster_info.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1194,6 +1194,7 @@ impl ClusterInfo {
other_payload,
None::<fn(Slot) -> Option<Pubkey>>, // Leader schedule
DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
self.my_shred_version(),
)?;
Ok(())
}
Expand Down Expand Up @@ -3537,6 +3538,7 @@ mod tests {
Some(leader_schedule),
timestamp(),
DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
version,
)
.unwrap()
.collect();
Expand Down
2 changes: 2 additions & 0 deletions gossip/src/crds_gossip.rs
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,7 @@ impl CrdsGossip {
leader_schedule: Option<F>,
// Maximum serialized size of each DuplicateShred chunk payload.
max_payload_size: usize,
shred_version: u16,
) -> Result<(), duplicate_shred::Error>
where
F: FnOnce(Slot) -> Option<Pubkey>,
Expand All @@ -114,6 +115,7 @@ impl CrdsGossip {
leader_schedule,
timestamp(),
max_payload_size,
shred_version,
)?;
// Find the index of oldest duplicate shred.
let mut num_dup_shreds = 0;
Expand Down
174 changes: 163 additions & 11 deletions gossip/src/duplicate_shred.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,8 @@ pub enum Error {
InvalidErasureMetaConflict,
#[error("invalid last index conflict")]
InvalidLastIndexConflict,
#[error("invalid shred version: {0}")]
InvalidShredVersion(u16),
#[error("invalid signature")]
InvalidSignature,
#[error("invalid size limit")]
Expand All @@ -92,6 +94,7 @@ pub enum Error {

/// Check that `shred1` and `shred2` indicate a valid duplicate proof
/// - Must be for the same slot
/// - Must match the expected shred version
/// - Must both sigverify for the correct leader
/// - Must have a merkle root conflict, otherwise `shred1` and `shred2` must have the same `shred_type`
/// - If `shred1` and `shred2` share the same index they must be not equal
Expand All @@ -100,14 +103,26 @@ pub enum Error {
/// LAST_SHRED_IN_SLOT, however the other shred must have a higher index.
/// - If `shred1` and `shred2` do not share the same index and are coding shreds
/// verify that they have conflicting erasure metas
fn check_shreds<F>(leader_schedule: Option<F>, shred1: &Shred, shred2: &Shred) -> Result<(), Error>
fn check_shreds<F>(
leader_schedule: Option<F>,
shred1: &Shred,
shred2: &Shred,
shred_version: u16,
) -> Result<(), Error>
where
F: FnOnce(Slot) -> Option<Pubkey>,
{
if shred1.slot() != shred2.slot() {
return Err(Error::SlotMismatch);
}

if shred1.version() != shred_version {
return Err(Error::InvalidShredVersion(shred1.version()));
}
if shred2.version() != shred_version {
return Err(Error::InvalidShredVersion(shred2.version()));
}

if let Some(leader_schedule) = leader_schedule {
let slot_leader =
leader_schedule(shred1.slot()).ok_or(Error::UnknownSlotLeader(shred1.slot()))?;
Expand Down Expand Up @@ -167,6 +182,7 @@ pub(crate) fn from_shred<F>(
leader_schedule: Option<F>,
wallclock: u64,
max_size: usize, // Maximum serialized size of each DuplicateShred.
shred_version: u16,
) -> Result<impl Iterator<Item = DuplicateShred>, Error>
where
F: FnOnce(Slot) -> Option<Pubkey>,
Expand All @@ -175,7 +191,7 @@ where
return Err(Error::InvalidDuplicateShreds);
}
let other_shred = Shred::new_from_serialized_shred(other_payload)?;
check_shreds(leader_schedule, &shred, &other_shred)?;
check_shreds(leader_schedule, &shred, &other_shred, shred_version)?;
let slot = shred.slot();
let proof = DuplicateSlotProof {
shred1: shred.into_payload(),
Expand Down Expand Up @@ -228,6 +244,7 @@ fn check_chunk(slot: Slot, num_chunks: u8) -> impl Fn(&DuplicateShred) -> Result
pub(crate) fn into_shreds(
slot_leader: &Pubkey,
chunks: impl IntoIterator<Item = DuplicateShred>,
shred_version: u16,
) -> Result<(Shred, Shred), Error> {
let mut chunks = chunks.into_iter();
let DuplicateShred {
Expand Down Expand Up @@ -263,10 +280,16 @@ pub(crate) fn into_shreds(
}
let shred1 = Shred::new_from_serialized_shred(proof.shred1)?;
let shred2 = Shred::new_from_serialized_shred(proof.shred2)?;

if shred1.slot() != slot || shred2.slot() != slot {
Err(Error::SlotMismatch)
} else {
check_shreds(Some(|_| Some(slot_leader).copied()), &shred1, &shred2)?;
check_shreds(
Some(|_| Some(slot_leader).copied()),
&shred1,
&shred2,
shred_version,
)?;
Ok((shred1, shred2))
}
}
Expand Down Expand Up @@ -489,11 +512,12 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.unwrap()
.collect();
assert!(chunks.len() > 4);
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
assert_eq!(shred1, shred3);
assert_eq!(shred2, shred4);
}
Expand Down Expand Up @@ -544,6 +568,7 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.err()
.unwrap(),
Expand All @@ -562,7 +587,9 @@ pub(crate) mod tests {
assert!(chunks.len() > 4);

assert_matches!(
into_shreds(&leader.pubkey(), chunks).err().unwrap(),
into_shreds(&leader.pubkey(), chunks, version)
.err()
.unwrap(),
Error::InvalidDuplicateSlotProof
);
}
Expand Down Expand Up @@ -631,11 +658,12 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.unwrap()
.collect();
assert!(chunks.len() > 4);
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
assert_eq!(shred1, &shred3);
assert_eq!(shred2, &shred4);
}
Expand Down Expand Up @@ -739,6 +767,7 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.err()
.unwrap(),
Expand All @@ -757,7 +786,9 @@ pub(crate) mod tests {
assert!(chunks.len() > 4);

assert_matches!(
into_shreds(&leader.pubkey(), chunks).err().unwrap(),
into_shreds(&leader.pubkey(), chunks, version)
.err()
.unwrap(),
Error::InvalidLastIndexConflict
);
}
Expand Down Expand Up @@ -816,11 +847,12 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.unwrap()
.collect();
assert!(chunks.len() > 4);
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
assert_eq!(shred1, shred3);
assert_eq!(shred2, shred4);
}
Expand Down Expand Up @@ -897,6 +929,7 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.err()
.unwrap(),
Expand All @@ -915,7 +948,9 @@ pub(crate) mod tests {
assert!(chunks.len() > 4);

assert_matches!(
into_shreds(&leader.pubkey(), chunks).err().unwrap(),
into_shreds(&leader.pubkey(), chunks, version)
.err()
.unwrap(),
Error::InvalidErasureMetaConflict
);
}
Expand Down Expand Up @@ -988,11 +1023,12 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.unwrap()
.collect();
assert!(chunks.len() > 4);
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
assert_eq!(shred1, shred3);
assert_eq!(shred2, shred4);
}
Expand Down Expand Up @@ -1079,6 +1115,7 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.err()
.unwrap(),
Expand All @@ -1097,9 +1134,124 @@ pub(crate) mod tests {
assert!(chunks.len() > 4);

assert_matches!(
into_shreds(&leader.pubkey(), chunks).err().unwrap(),
into_shreds(&leader.pubkey(), chunks, version)
.err()
.unwrap(),
Error::ShredTypeMismatch
);
}
}

#[test]
fn test_shred_version() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let next_shred_index = rng.gen_range(0..31_000);
    // Leader schedule that only knows the leader for `slot`.
    let leader_schedule = |s| (s == slot).then(|| leader.pubkey());

    // Shreds carrying the expected shred version.
    let good_shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let (data_shreds, coding_shreds) = new_rand_shreds(
        &mut rng,
        next_shred_index,
        next_shred_index,
        10,
        true,
        &good_shredder,
        &leader,
        true,
    );

    // Shreds carrying a mismatched shred version (version + 1).
    let bad_shredder_1 = Shredder::new(slot, parent_slot, reference_tick, version + 1).unwrap();
    let (wrong_data_shreds_1, wrong_coding_shreds_1) = new_rand_shreds(
        &mut rng,
        next_shred_index,
        next_shred_index,
        10,
        true,
        &bad_shredder_1,
        &leader,
        true,
    );

    // Shreds carrying a second mismatched shred version (version + 2).
    let bad_shredder_2 = Shredder::new(slot, parent_slot, reference_tick, version + 2).unwrap();
    let (wrong_data_shreds_2, wrong_coding_shreds_2) = new_rand_shreds(
        &mut rng,
        next_shred_index,
        next_shred_index,
        10,
        true,
        &bad_shredder_2,
        &leader,
        true,
    );

    let cases = [
        // Exactly one side has the wrong shred version.
        (&coding_shreds[0], &wrong_coding_shreds_1[0]),
        (&coding_shreds[0], &wrong_data_shreds_1[0]),
        (&data_shreds[0], &wrong_coding_shreds_1[0]),
        (&data_shreds[0], &wrong_data_shreds_1[0]),
        // Both sides have (different) wrong shred versions.
        (&wrong_coding_shreds_2[0], &wrong_coding_shreds_1[0]),
        (&wrong_coding_shreds_2[0], &wrong_data_shreds_1[0]),
        (&wrong_data_shreds_2[0], &wrong_coding_shreds_1[0]),
        (&wrong_data_shreds_2[0], &wrong_data_shreds_1[0]),
    ];

    for (shred1, shred2) in cases {
        let (shred1, shred2) = (shred1.clone(), shred2.clone());
        // Building a proof must reject the shred-version mismatch up front.
        assert_matches!(
            from_shred(
                shred1.clone(),
                Pubkey::new_unique(), // self_pubkey
                shred2.payload().clone(),
                Some(leader_schedule),
                rng.gen(), // wallclock
                512,       // max_size
                version,
            )
            .err()
            .unwrap(),
            Error::InvalidShredVersion(_)
        );

        // Bypass the sender-side checks so the receiver-side validation
        // in `into_shreds` is exercised on the same bad proof.
        let chunks: Vec<_> = from_shred_bypass_checks(
            shred1.clone(),
            Pubkey::new_unique(), // self_pubkey
            shred2.clone(),
            rng.gen(), // wallclock
            512,       // max_size
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);

        assert_matches!(
            into_shreds(&leader.pubkey(), chunks, version)
                .err()
                .unwrap(),
            Error::InvalidShredVersion(_)
        );
    }
}
}
Loading

0 comments on commit 292093e

Please sign in to comment.