Skip to content

Commit

Permalink
move fec_set_index to shred's common header
Browse files Browse the repository at this point in the history
  • Loading branch information
pgarg66 committed Dec 12, 2019
1 parent 3ae902b commit f286312
Show file tree
Hide file tree
Showing 6 changed files with 144 additions and 21 deletions.
1 change: 1 addition & 0 deletions core/src/broadcast_stage/standard_broadcast_run.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ impl StandardBroadcastRun {
true,
max_ticks_in_slot & SHRED_TICK_REFERENCE_MASK,
self.shred_version,
last_unfinished_slot.next_shred_index,
))
} else {
None
Expand Down
2 changes: 1 addition & 1 deletion core/src/chacha.rs
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ mod tests {
hasher.hash(&buf[..size]);

// golden needs to be updated if shred structure changes....
let golden: Hash = "3z2WAkJp2dJjvpXsqsLbZ4muc39YGT7YY3eJQGtTHLfb"
let golden: Hash = "2rq8nR6rns2T5zsbQAGBDZb41NVtacneLgkCH17CVxZm"
.parse()
.unwrap();

Expand Down
28 changes: 24 additions & 4 deletions core/src/sigverify_shreds.rs
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,7 @@ pub mod tests {
true,
0,
0,
0xc0de,
);
let mut batch = [Packets::default(), Packets::default()];

Expand All @@ -110,6 +111,7 @@ pub mod tests {
true,
0,
0,
0xc0de,
);
Shredder::sign_shred(&keypair, &mut shred);
batch[1].packets.resize(1, Packet::default());
Expand All @@ -133,14 +135,32 @@ pub mod tests {
let mut batch = vec![Packets::default()];
batch[0].packets.resize(2, Packet::default());

let mut shred =
Shred::new_from_data(0, 0xc0de, 0xdead, Some(&[1, 2, 3, 4]), true, true, 0, 0);
let mut shred = Shred::new_from_data(
0,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
Shredder::sign_shred(&leader_keypair, &mut shred);
batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batch[0].packets[0].meta.size = shred.payload.len();

let mut shred =
Shred::new_from_data(0, 0xbeef, 0xc0de, Some(&[1, 2, 3, 4]), true, true, 0, 0);
let mut shred = Shred::new_from_data(
0,
0xbeef,
0xc0de,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
let wrong_keypair = Keypair::new();
Shredder::sign_shred(&wrong_keypair, &mut shred);
batch[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
Expand Down
15 changes: 13 additions & 2 deletions ledger/src/blocktree.rs
Original file line number Diff line number Diff line change
Expand Up @@ -689,7 +689,7 @@ impl Blocktree {
if is_trusted
|| Blocktree::should_insert_coding_shred(&shred, index_meta.coding(), &self.last_root)
{
let set_index = u64::from(shred.coding_header.fec_set_index);
let set_index = u64::from(shred.common_header.fec_set_index);
let erasure_config = ErasureConfig::new(
shred.coding_header.num_data_shreds as usize,
shred.coding_header.num_coding_shreds as usize,
Expand Down Expand Up @@ -3535,7 +3535,17 @@ pub mod tests {
let gap: u64 = 10;
let shreds: Vec<_> = (0..64)
.map(|i| {
Shred::new_from_data(slot, (i * gap) as u32, 0, None, false, false, i as u8, 0)
Shred::new_from_data(
slot,
(i * gap) as u32,
0,
None,
false,
false,
i as u8,
0,
(i * gap) as u32,
)
})
.collect();
blocktree.insert_shreds(shreds, None, false).unwrap();
Expand Down Expand Up @@ -4150,6 +4160,7 @@ pub mod tests {
true,
0,
0,
next_shred_index as u32,
)];

// With the corruption, nothing should be returned, even though an
Expand Down
54 changes: 50 additions & 4 deletions ledger/src/shred.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,9 @@ use thiserror::Error;
/// The following constants are computed by hand, and hardcoded.
/// `test_shred_constants` ensures that the values are correct.
/// Constants are used over lazy_static for performance reasons.
pub const SIZE_OF_COMMON_SHRED_HEADER: usize = 79;
pub const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
pub const SIZE_OF_DATA_SHRED_HEADER: usize = 3;
pub const SIZE_OF_CODING_SHRED_HEADER: usize = 10;
pub const SIZE_OF_CODING_SHRED_HEADER: usize = 6;
pub const SIZE_OF_SIGNATURE: usize = 64;
pub const SIZE_OF_DATA_SHRED_IGNORED_TAIL: usize =
SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_CODING_SHRED_HEADER;
Expand Down Expand Up @@ -86,6 +86,7 @@ pub struct ShredCommonHeader {
pub slot: Slot,
pub index: u32,
pub version: u16,
pub fec_set_index: u32,
}

/// The data shred header has parent offset and flags
Expand All @@ -98,7 +99,6 @@ pub struct DataShredHeader {
/// The coding shred header has FEC information
#[derive(Serialize, Clone, Default, Deserialize, PartialEq, Debug)]
pub struct CodingShredHeader {
pub fec_set_index: u32,
pub num_data_shreds: u16,
pub num_coding_shreds: u16,
pub position: u16,
Expand Down Expand Up @@ -147,12 +147,14 @@ impl Shred {
is_last_in_slot: bool,
reference_tick: u8,
version: u16,
fec_set_index: u32,
) -> Self {
let mut payload = vec![0; PACKET_DATA_SIZE];
let common_header = ShredCommonHeader {
slot,
index,
version,
fec_set_index,
..ShredCommonHeader::default()
};

Expand Down Expand Up @@ -454,6 +456,11 @@ impl Shredder {
.map(|(i, shred_data)| {
let shred_index = next_shred_index + i as u32;

// Each FEC block has maximum MAX_DATA_SHREDS_PER_FEC_BLOCK shreds
// "FEC set index" is the index of first data shred in that FEC block
let fec_set_index =
shred_index - (i % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize) as u32;

let (is_last_data, is_last_in_slot) = {
if shred_index == last_shred_index {
(true, is_last_in_slot)
Expand All @@ -471,6 +478,7 @@ impl Shredder {
is_last_in_slot,
self.reference_tick,
self.version,
fec_set_index,
);

Shredder::sign_shred(&self.keypair, &mut shred);
Expand Down Expand Up @@ -546,12 +554,12 @@ impl Shredder {
index,
slot,
version,
fec_set_index,
..ShredCommonHeader::default()
};
(
header,
CodingShredHeader {
fec_set_index,
num_data_shreds: num_data as u16,
num_coding_shreds: num_code as u16,
position: position as u16,
Expand Down Expand Up @@ -1431,4 +1439,42 @@ pub mod tests {
let version = Shred::version_from_hash(&Hash::new(&hash));
assert_eq!(version, 0x5a5a);
}

#[test]
fn test_shred_fec_set_index() {
    // Build a shredder with a non-zero shred version derived from a hash,
    // slot 0, parent 0, and an FEC rate of 0.5.
    let keypair = Arc::new(Keypair::new());
    let seed_hash = hash(Hash::default().as_ref());
    let shred_version = Shred::version_from_hash(&seed_hash);
    assert_ne!(shred_version, 0);
    let shredder = Shredder::new(0, 0, 0.5, keypair, 0, shred_version)
        .expect("Failed in creating shredder");

    // Enough single-transfer entries to span several FEC blocks of shreds.
    let entries: Vec<_> = (0..500)
        .map(|_| {
            let from = Keypair::new();
            let to = Keypair::new();
            let tx =
                system_transaction::transfer(&from, &to.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx])
        })
        .collect();

    let start_index = 0x12;
    let (data_shreds, coding_shreds, _next_index) =
        shredder.entries_to_shreds(&entries, true, start_index);

    let fec_block_size = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;

    // Every data shred's fec_set_index must equal the index of the first
    // data shred in its FEC block.
    for (i, shred) in data_shreds.iter().enumerate() {
        let expected_fec_set_index =
            start_index + ((i / fec_block_size) * fec_block_size) as u32;
        assert_eq!(shred.common_header.fec_set_index, expected_fec_set_index);
    }

    // With an FEC rate of 0.5 there are half as many coding shreds as data
    // shreds, so scale i by 2 to locate the FEC block a coding shred covers.
    for (i, shred) in coding_shreds.iter().enumerate() {
        let expected_fec_set_index =
            start_index + ((i * 2 / fec_block_size) * fec_block_size) as u32;
        assert_eq!(shred.common_header.fec_set_index, expected_fec_set_index);
    }
}
}
65 changes: 55 additions & 10 deletions ledger/src/sigverify_shreds.rs
Original file line number Diff line number Diff line change
Expand Up @@ -485,8 +485,17 @@ pub mod tests {
solana_logger::setup();
let mut packet = Packet::default();
let slot = 0xdeadc0de;
let mut shred =
Shred::new_from_data(slot, 0xc0de, 0xdead, Some(&[1, 2, 3, 4]), true, true, 0, 0);
let mut shred = Shred::new_from_data(
slot,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
assert_eq!(shred.slot(), slot);
let keypair = Keypair::new();
Shredder::sign_shred(&keypair, &mut shred);
Expand Down Expand Up @@ -519,8 +528,17 @@ pub mod tests {
solana_logger::setup();
let mut batch = [Packets::default()];
let slot = 0xdeadc0de;
let mut shred =
Shred::new_from_data(slot, 0xc0de, 0xdead, Some(&[1, 2, 3, 4]), true, true, 0, 0);
let mut shred = Shred::new_from_data(
slot,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
let keypair = Keypair::new();
Shredder::sign_shred(&keypair, &mut shred);
batch[0].packets.resize(1, Packet::default());
Expand Down Expand Up @@ -562,8 +580,17 @@ pub mod tests {

let mut batch = [Packets::default()];
let slot = 0xdeadc0de;
let mut shred =
Shred::new_from_data(slot, 0xc0de, 0xdead, Some(&[1, 2, 3, 4]), true, true, 0, 0);
let mut shred = Shred::new_from_data(
slot,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
let keypair = Keypair::new();
Shredder::sign_shred(&keypair, &mut shred);
batch[0].packets.resize(1, Packet::default());
Expand Down Expand Up @@ -615,8 +642,17 @@ pub mod tests {
let mut batch = [Packets::default()];
let slot = 0xdeadc0de;
let keypair = Keypair::new();
let shred =
Shred::new_from_data(slot, 0xc0de, 0xdead, Some(&[1, 2, 3, 4]), true, true, 0, 0);
let shred = Shred::new_from_data(
slot,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
batch[0].packets.resize(1, Packet::default());
batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batch[0].packets[0].meta.size = shred.payload.len();
Expand Down Expand Up @@ -653,8 +689,17 @@ pub mod tests {
let mut batch = [Packets::default()];
let slot = 0xdeadc0de;
let keypair = Keypair::new();
let shred =
Shred::new_from_data(slot, 0xc0de, 0xdead, Some(&[1, 2, 3, 4]), true, true, 0, 0);
let shred = Shred::new_from_data(
slot,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
batch[0].packets.resize(1, Packet::default());
batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batch[0].packets[0].meta.size = shred.payload.len();
Expand Down

0 comments on commit f286312

Please sign in to comment.