Move slow shred multi_fec test to integration tests folder (#7426)
sakridge authored Dec 11, 2019
1 parent 601d7a5 commit f526c42
Showing 2 changed files with 123 additions and 116 deletions.
144 changes: 28 additions & 116 deletions ledger/src/shred.rs
@@ -817,6 +817,34 @@ pub fn max_entries_per_n_shred(entry: &Entry, num_shreds: u64) -> u64 {
     (shred_data_size * num_shreds - count_size) / entry_size
 }
 
+pub fn verify_test_data_shred(
+    shred: &Shred,
+    index: u32,
+    slot: Slot,
+    parent: Slot,
+    pk: &Pubkey,
+    verify: bool,
+    is_last_in_slot: bool,
+    is_last_in_fec_set: bool,
+) {
+    assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
+    assert!(shred.is_data());
+    assert_eq!(shred.index(), index);
+    assert_eq!(shred.slot(), slot);
+    assert_eq!(shred.parent(), parent);
+    assert_eq!(verify, shred.verify(pk));
+    if is_last_in_slot {
+        assert!(shred.last_in_slot());
+    } else {
+        assert!(!shred.last_in_slot());
+    }
+    if is_last_in_fec_set {
+        assert!(shred.data_complete());
+    } else {
+        assert!(!shred.data_complete());
+    }
+}
+
 #[cfg(test)]
 pub mod tests {
     use super::*;
@@ -847,34 +875,6 @@ pub mod tests {
         );
     }
 
-    fn verify_test_data_shred(
-        shred: &Shred,
-        index: u32,
-        slot: Slot,
-        parent: Slot,
-        pk: &Pubkey,
-        verify: bool,
-        is_last_in_slot: bool,
-        is_last_in_fec_set: bool,
-    ) {
-        assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
-        assert!(shred.is_data());
-        assert_eq!(shred.index(), index);
-        assert_eq!(shred.slot(), slot);
-        assert_eq!(shred.parent(), parent);
-        assert_eq!(verify, shred.verify(pk));
-        if is_last_in_slot {
-            assert!(shred.last_in_slot());
-        } else {
-            assert!(!shred.last_in_slot());
-        }
-        if is_last_in_fec_set {
-            assert!(shred.data_complete());
-        } else {
-            assert!(!shred.data_complete());
-        }
-    }
-
     fn verify_test_code_shred(shred: &Shred, index: u32, slot: Slot, pk: &Pubkey, verify: bool) {
         assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
         assert!(!shred.is_data());
@@ -1363,94 +1363,6 @@ pub mod tests {
         );
     }
 
-    #[test]
-    fn test_multi_fec_block_coding() {
-        let keypair = Arc::new(Keypair::new());
-        let slot = 0x123456789abcdef0;
-        let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
-            .expect("Failed in creating shredder");
-
-        let num_fec_sets = 100;
-        let num_data_shreds = (MAX_DATA_SHREDS_PER_FEC_BLOCK * num_fec_sets) as usize;
-        let keypair0 = Keypair::new();
-        let keypair1 = Keypair::new();
-        let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
-        let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
-        let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
-
-        let entries: Vec<_> = (0..num_entries)
-            .map(|_| {
-                let keypair0 = Keypair::new();
-                let keypair1 = Keypair::new();
-                let tx0 =
-                    system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
-                Entry::new(&Hash::default(), 1, vec![tx0])
-            })
-            .collect();
-
-        let serialized_entries = bincode::serialize(&entries).unwrap();
-        let (data_shreds, coding_shreds, next_index) =
-            shredder.entries_to_shreds(&entries, true, 0);
-        assert_eq!(next_index as usize, num_data_shreds);
-        assert_eq!(data_shreds.len(), num_data_shreds);
-        assert_eq!(coding_shreds.len(), num_data_shreds);
-
-        for c in &coding_shreds {
-            assert!(!c.is_data());
-        }
-
-        let mut all_shreds = vec![];
-        for i in 0..num_fec_sets {
-            let shred_start_index = (MAX_DATA_SHREDS_PER_FEC_BLOCK * i) as usize;
-            let end_index = shred_start_index + MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1;
-            let fec_set_shreds = data_shreds[shred_start_index..=end_index]
-                .iter()
-                .cloned()
-                .chain(coding_shreds[shred_start_index..=end_index].iter().cloned())
-                .collect::<Vec<_>>();
-
-            let mut shred_info: Vec<Shred> = fec_set_shreds
-                .iter()
-                .enumerate()
-                .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None })
-                .collect();
-
-            let recovered_data = Shredder::try_recovery(
-                shred_info.clone(),
-                MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
-                MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
-                shred_start_index,
-                slot,
-            )
-            .unwrap();
-
-            for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
-                let index = shred_start_index + (i * 2);
-                verify_test_data_shred(
-                    &recovered_shred,
-                    index.try_into().unwrap(),
-                    slot,
-                    slot - 5,
-                    &keypair.pubkey(),
-                    true,
-                    index == end_index,
-                    index == end_index,
-                );
-
-                shred_info.insert(i * 2, recovered_shred);
-            }
-
-            all_shreds.extend(
-                shred_info
-                    .into_iter()
-                    .take(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize),
-            );
-        }
-
-        let result = Shredder::deshred(&all_shreds[..]).unwrap();
-        assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
-    }
-
     #[test]
     fn test_shred_version() {
         let keypair = Arc::new(Keypair::new());
95 changes: 95 additions & 0 deletions ledger/tests/shred.rs
@@ -0,0 +1,95 @@
+use solana_ledger::entry::Entry;
+use solana_ledger::shred::{
+    max_entries_per_n_shred, verify_test_data_shred, Shred, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
+};
+use solana_sdk::signature::{Keypair, KeypairUtil};
+use solana_sdk::{hash::Hash, system_transaction};
+use std::convert::TryInto;
+use std::sync::Arc;
+
+#[test]
+fn test_multi_fec_block_coding() {
+    let keypair = Arc::new(Keypair::new());
+    let slot = 0x123456789abcdef0;
+    let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
+        .expect("Failed in creating shredder");
+
+    let num_fec_sets = 100;
+    let num_data_shreds = (MAX_DATA_SHREDS_PER_FEC_BLOCK * num_fec_sets) as usize;
+    let keypair0 = Keypair::new();
+    let keypair1 = Keypair::new();
+    let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
+    let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
+    let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
+
+    let entries: Vec<_> = (0..num_entries)
+        .map(|_| {
+            let keypair0 = Keypair::new();
+            let keypair1 = Keypair::new();
+            let tx0 =
+                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
+            Entry::new(&Hash::default(), 1, vec![tx0])
+        })
+        .collect();
+
+    let serialized_entries = bincode::serialize(&entries).unwrap();
+    let (data_shreds, coding_shreds, next_index) = shredder.entries_to_shreds(&entries, true, 0);
+    assert_eq!(next_index as usize, num_data_shreds);
+    assert_eq!(data_shreds.len(), num_data_shreds);
+    assert_eq!(coding_shreds.len(), num_data_shreds);
+
+    for c in &coding_shreds {
+        assert!(!c.is_data());
+    }
+
+    let mut all_shreds = vec![];
+    for i in 0..num_fec_sets {
+        let shred_start_index = (MAX_DATA_SHREDS_PER_FEC_BLOCK * i) as usize;
+        let end_index = shred_start_index + MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1;
+        let fec_set_shreds = data_shreds[shred_start_index..=end_index]
+            .iter()
+            .cloned()
+            .chain(coding_shreds[shred_start_index..=end_index].iter().cloned())
+            .collect::<Vec<_>>();
+
+        let mut shred_info: Vec<Shred> = fec_set_shreds
+            .iter()
+            .enumerate()
+            .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None })
+            .collect();
+
+        let recovered_data = Shredder::try_recovery(
+            shred_info.clone(),
+            MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
+            MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
+            shred_start_index,
+            slot,
+        )
+        .unwrap();
+
+        for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
+            let index = shred_start_index + (i * 2);
+            verify_test_data_shred(
+                &recovered_shred,
+                index.try_into().unwrap(),
+                slot,
+                slot - 5,
+                &keypair.pubkey(),
+                true,
+                index == end_index,
+                index == end_index,
+            );
+
+            shred_info.insert(i * 2, recovered_shred);
+        }
+
+        all_shreds.extend(
+            shred_info
+                .into_iter()
+                .take(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize),
+        );
+    }
+
+    let result = Shredder::deshred(&all_shreds[..]).unwrap();
+    assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
+}
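
Note: verify_test_data_shred moves out of the #[cfg(test)] module and becomes pub because integration tests under ledger/tests/ compile as a separate crate and cannot see test-only items. With the move, the slow test can be run on its own; a minimal sketch of the invocation, assuming the package is named solana-ledger (matching the solana_ledger crate imported above) and that Cargo derives the test target name shred from the new file:

    # assumed package/test-target names, inferred from the crate layout above
    cargo test -p solana-ledger --test shred test_multi_fec_block_coding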
