Allow coding shred index to be different than data shred index #7438

Merged (3 commits) on Dec 13, 2019
2 changes: 1 addition & 1 deletion core/benches/shredder.rs
@@ -75,7 +75,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
fn bench_deserialize_hdr(bencher: &mut Bencher) {
let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];

let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0);
let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);

bencher.iter(|| {
let payload = shred.payload.clone();
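Every `Shred::new_from_data` call site in this diff gains one trailing argument: the FEC set index is now passed explicitly instead of being tied to the data shred's own index. A minimal, self-contained sketch of the idea, using simplified stand-in types rather than the real `ledger/src/shred.rs` definitions (everything except the `fec_set_index` field is illustrative):

```rust
// Simplified stand-ins for the real Shred/ShredCommonHeader types.
#[derive(Debug, Default)]
struct ShredCommonHeader {
    slot: u64,
    index: u32,
    version: u16,
    fec_set_index: u32, // the new value threaded through every constructor call
}

#[derive(Debug, Default)]
struct Shred {
    common_header: ShredCommonHeader,
    payload: Vec<u8>,
}

impl Shred {
    // Mirrors the new trailing `fec_set_index` argument added to Shred::new_from_data.
    fn new_from_data(slot: u64, index: u32, version: u16, data: &[u8], fec_set_index: u32) -> Self {
        Shred {
            common_header: ShredCommonHeader { slot, index, version, fec_set_index },
            payload: data.to_vec(),
        }
    }
}

fn main() {
    // The bench above now passes `1` as the final (FEC set index) argument.
    let shred = Shred::new_from_data(2, 1, 0, &[0u8; 8], 1);
    assert_eq!(shred.common_header.fec_set_index, 1);
    println!("{:?}", shred.common_header);
}
```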
1 change: 1 addition & 0 deletions core/src/broadcast_stage/standard_broadcast_run.rs
@@ -66,6 +66,7 @@ impl StandardBroadcastRun {
true,
max_ticks_in_slot & SHRED_TICK_REFERENCE_MASK,
self.shred_version,
last_unfinished_slot.next_shred_index,
))
} else {
None
2 changes: 1 addition & 1 deletion core/src/chacha.rs
@@ -166,7 +166,7 @@ mod tests {
hasher.hash(&buf[..size]);

// golden needs to be updated if shred structure changes....
let golden: Hash = "9K6NR4cazo7Jzk2CpyXmNaZMGqvfXG83JzyJipkoHare"
let golden: Hash = "2rq8nR6rns2T5zsbQAGBDZb41NVtacneLgkCH17CVxZm"
.parse()
.unwrap();

28 changes: 24 additions & 4 deletions core/src/sigverify_shreds.rs
@@ -92,6 +92,7 @@ pub mod tests {
true,
0,
0,
0xc0de,
);
let mut batch = [Packets::default(), Packets::default()];

@@ -110,6 +111,7 @@
true,
0,
0,
0xc0de,
);
Shredder::sign_shred(&keypair, &mut shred);
batch[1].packets.resize(1, Packet::default());
@@ -133,14 +135,32 @@
let mut batch = vec![Packets::default()];
batch[0].packets.resize(2, Packet::default());

let mut shred =
Shred::new_from_data(0, 0xc0de, 0xdead, Some(&[1, 2, 3, 4]), true, true, 0, 0);
let mut shred = Shred::new_from_data(
0,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
Shredder::sign_shred(&leader_keypair, &mut shred);
batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batch[0].packets[0].meta.size = shred.payload.len();

let mut shred =
Shred::new_from_data(0, 0xbeef, 0xc0de, Some(&[1, 2, 3, 4]), true, true, 0, 0);
let mut shred = Shred::new_from_data(
0,
0xbeef,
0xc0de,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
let wrong_keypair = Keypair::new();
Shredder::sign_shred(&wrong_keypair, &mut shred);
batch[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
2 changes: 1 addition & 1 deletion core/src/window_service.rs
@@ -363,7 +363,7 @@ mod test {
);

// If it's a coding shred, test that slot >= root
let (common, coding) = Shredder::new_coding_shred_header(5, 5, 6, 6, 0, 0);
let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0, 0);
let mut coding_shred =
Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
Shredder::sign_shred(&leader_keypair, &mut coding_shred);
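`Shredder::new_coding_shred_header` also takes one more argument. The updated call sites suggest the new parameter is the FEC set index of the data shreds the coding set protects, carried alongside the coding shred's own index; the sketch below is hypothetical, with parameter names and ordering assumed from the calls rather than taken from the real API:

```rust
// Hypothetical model of the header produced by Shredder::new_coding_shred_header
// after this change; names and ordering are assumptions inferred from the call sites.
struct CodingHeaderModel {
    slot: u64,
    index: u32,         // this coding shred's own index
    fec_set_index: u32, // index of the first data shred in the protected FEC set
    num_data_shreds: u16,
    num_coding_shreds: u16,
    position: u16,
    version: u16,
}

fn new_coding_shred_header_model(
    slot: u64,
    index: u32,
    fec_set_index: u32,
    num_data_shreds: u16,
    num_coding_shreds: u16,
    position: u16,
    version: u16,
) -> CodingHeaderModel {
    CodingHeaderModel { slot, index, fec_set_index, num_data_shreds, num_coding_shreds, position, version }
}

fn main() {
    // Mirrors the updated test call `new_coding_shred_header(5, 5, 5, 6, 6, 0, 0)`.
    let h = new_coding_shred_header_model(5, 5, 5, 6, 6, 0, 0);
    // The two indexes happen to coincide here, but they are no longer required to.
    assert_eq!((h.index, h.fec_set_index), (5, 5));
}
```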
40 changes: 31 additions & 9 deletions ledger/src/blocktree.rs
@@ -387,7 +387,11 @@ impl Blocktree {
"blocktree-erasure",
("slot", slot as i64, i64),
("start_index", set_index as i64, i64),
("end_index", erasure_meta.end_indexes().0 as i64, i64),
(
"end_index",
(erasure_meta.set_index + erasure_meta.config.num_data() as u64) as i64,
i64
),
("recovery_attempted", attempted, bool),
("recovery_status", status, String),
("recovered", recovered as i64, i64),
@@ -420,8 +424,10 @@
}
}
});
(set_index..set_index + erasure_meta.config.num_coding() as u64).for_each(
|i| {
(erasure_meta.first_coding_index
..erasure_meta.first_coding_index
+ erasure_meta.config.num_coding() as u64)
.for_each(|i| {
if let Some(shred) = prev_inserted_codes
.remove(&(slot, i))
.map(|s| {
@@ -450,13 +456,13 @@
{
available_shreds.push(shred);
}
},
);
});
if let Ok(mut result) = Shredder::try_recovery(
available_shreds,
erasure_meta.config.num_data(),
erasure_meta.config.num_coding(),
set_index as usize,
erasure_meta.first_coding_index as usize,
slot,
) {
submit_metrics(true, "complete".into(), result.len());
@@ -683,17 +689,21 @@ impl Blocktree {
if is_trusted
|| Blocktree::should_insert_coding_shred(&shred, index_meta.coding(), &self.last_root)
{
let set_index = shred_index - u64::from(shred.coding_header.position);
let set_index = u64::from(shred.common_header.fec_set_index);
let erasure_config = ErasureConfig::new(
shred.coding_header.num_data_shreds as usize,
shred.coding_header.num_coding_shreds as usize,
);

let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| {
let first_coding_index =
u64::from(shred.index()) - u64::from(shred.coding_header.position);
self.erasure_meta_cf
.get((slot, set_index))
.expect("Expect database get to succeed")
.unwrap_or_else(|| ErasureMeta::new(set_index, &erasure_config))
.unwrap_or_else(|| {
ErasureMeta::new(set_index, first_coding_index, &erasure_config)
})
});

if erasure_config != erasure_meta.config {
@@ -3531,7 +3541,17 @@ pub mod tests {
let gap: u64 = 10;
let shreds: Vec<_> = (0..64)
.map(|i| {
Shred::new_from_data(slot, (i * gap) as u32, 0, None, false, false, i as u8, 0)
Shred::new_from_data(
slot,
(i * gap) as u32,
0,
None,
false,
false,
i as u8,
0,
(i * gap) as u32,
)
})
.collect();
blocktree.insert_shreds(shreds, None, false).unwrap();
@@ -3722,7 +3742,8 @@ pub mod tests {
let last_root = RwLock::new(0);

let slot = 1;
let (mut shred, coding) = Shredder::new_coding_shred_header(slot, 11, 11, 11, 10, 0);
let (mut shred, coding) =
Shredder::new_coding_shred_header(slot, 11, 11, 11, 11, 10, 0);
let coding_shred = Shred::new_empty_from_header(
shred.clone(),
DataShredHeader::default(),
@@ -4145,6 +4166,7 @@ pub mod tests {
true,
0,
0,
next_shred_index as u32,
)];

// With the corruption, nothing should be returned, even though an
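The insert path above no longer assumes coding shreds share the data shreds' index space: the erasure-set key now comes from the common header's `fec_set_index`, and the first coding index is recovered from the coding shred's own index and its `position` within the set. A minimal sketch of that derivation, assuming simplified header structs in place of the real shred headers:

```rust
// Simplified stand-ins for the common and coding shred headers.
struct CommonHeader { index: u32, fec_set_index: u32 }
struct CodingHeader { position: u16 }

/// Erasure sets are keyed by the data-side FEC set index carried in the common
/// header, while the first coding index is derived from the coding shred's own
/// index minus its position within the coding set.
fn erasure_set_keys(common: &CommonHeader, coding: &CodingHeader) -> (u64, u64) {
    let set_index = u64::from(common.fec_set_index);
    let first_coding_index = u64::from(common.index) - u64::from(coding.position);
    (set_index, first_coding_index)
}

fn main() {
    // A coding shred whose index (105) is unrelated to the data shreds' indexes (64..72).
    let common = CommonHeader { index: 105, fec_set_index: 64 };
    let coding = CodingHeader { position: 5 };
    assert_eq!(erasure_set_keys(&common, &coding), (64, 100));
}
```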
35 changes: 11 additions & 24 deletions ledger/src/blocktree_meta.rs
@@ -59,6 +59,8 @@ pub struct CodingIndex {
pub struct ErasureMeta {
/// Which erasure set in the slot this is
pub set_index: u64,
/// First coding index in the FEC set
pub first_coding_index: u64,
/// Size of shards in this erasure set
pub size: usize,
/// Erasure configuration for this erasure set
@@ -200,9 +202,10 @@ impl SlotMeta {
}

impl ErasureMeta {
pub fn new(set_index: u64, config: &ErasureConfig) -> ErasureMeta {
pub fn new(set_index: u64, first_coding_index: u64, config: &ErasureConfig) -> ErasureMeta {
ErasureMeta {
set_index,
first_coding_index,
size: 0,
config: *config,
}
@@ -211,11 +214,12 @@ impl ErasureMeta {
pub fn status(&self, index: &Index) -> ErasureMetaStatus {
use ErasureMetaStatus::*;

let start_idx = self.start_index();
let (data_end_idx, coding_end_idx) = self.end_indexes();

let num_coding = index.coding().present_in_bounds(start_idx..coding_end_idx);
let num_data = index.data().present_in_bounds(start_idx..data_end_idx);
let num_coding = index.coding().present_in_bounds(
self.first_coding_index..self.first_coding_index + self.config.num_coding() as u64,
);
let num_data = index
.data()
.present_in_bounds(self.set_index..self.set_index + self.config.num_data() as u64);

let (data_missing, coding_missing) = (
self.config.num_data() - num_data,
@@ -240,23 +244,6 @@ impl ErasureMeta {
pub fn size(&self) -> usize {
self.size
}

pub fn set_index_for(index: u64, num_data: usize) -> u64 {
index / num_data as u64
}

pub fn start_index(&self) -> u64 {
self.set_index
}

/// returns a tuple of (data_end, coding_end)
pub fn end_indexes(&self) -> (u64, u64) {
let start = self.start_index();
(
start + self.config.num_data() as u64,
start + self.config.num_coding() as u64,
)
}
}

#[cfg(test)]
@@ -272,7 +259,7 @@ mod test {
let set_index = 0;
let erasure_config = ErasureConfig::default();

let mut e_meta = ErasureMeta::new(set_index, &erasure_config);
let mut e_meta = ErasureMeta::new(set_index, set_index, &erasure_config);
let mut rng = thread_rng();
let mut index = Index::new(0);
e_meta.size = 1;
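With `first_coding_index` stored on `ErasureMeta`, the shared `start_index()`/`end_indexes()` helpers are removed and `status()` counts data and coding shreds over two independent ranges. A sketch of the resulting bounds, using simplified stand-in types rather than the real `blocktree_meta` definitions:

```rust
// Simplified stand-ins for ErasureMeta and ErasureConfig.
struct ErasureConfigModel { num_data: usize, num_coding: usize }

struct ErasureMetaModel {
    set_index: u64,          // first data shred index of the FEC set
    first_coding_index: u64, // first coding shred index, now tracked separately
    config: ErasureConfigModel,
}

impl ErasureMetaModel {
    // Range of data shred indexes that status() checks for presence.
    fn data_bounds(&self) -> std::ops::Range<u64> {
        self.set_index..self.set_index + self.config.num_data as u64
    }
    // Range of coding shred indexes that status() checks for presence.
    fn coding_bounds(&self) -> std::ops::Range<u64> {
        self.first_coding_index..self.first_coding_index + self.config.num_coding as u64
    }
}

fn main() {
    let meta = ErasureMetaModel {
        set_index: 64,
        first_coding_index: 100,
        config: ErasureConfigModel { num_data: 8, num_coding: 8 },
    };
    assert_eq!(meta.data_bounds(), 64..72);    // previously both ranges started at set_index
    assert_eq!(meta.coding_bounds(), 100..108);
}
```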