Implement hardcoded sync in the light client #8075

Merged · 8 commits · Mar 27, 2018
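
In short: this PR lets a fresh light-client database be seeded from values baked into the chain spec (a trusted header, its total difficulty, and the CHT roots covering everything below it) instead of syncing headers from genesis, and adds a `read_hardcoded_sync` helper to regenerate those values. A minimal sketch of the new entry point, borrowing the helpers from the test module at the bottom of this diff (`make_db` and `Spec::new_test` are test-only):

```rust
use std::sync::Arc;
use std::time::Duration;

use cache::Cache;
use ethcore::spec::Spec;
use parking_lot::Mutex;

// Sketch: open a light-client header chain with hardcoded sync permitted.
fn open_light_chain() {
    let spec = Spec::new_test(); // a real client loads this from a spec file
    let db = make_db();          // test helper; any `KeyValueDB` will do
    let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

    // `HardcodedSync::Allow` seeds a fresh DB from the spec's hardcoded
    // header and CHT roots, when present; `Deny` always starts at genesis.
    let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();
    let _ = chain;
}
```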
235 changes: 205 additions & 30 deletions ethcore/light/src/client/header_chain.rs
@@ -31,11 +31,11 @@ use std::sync::Arc;
use cht;

use ethcore::block_status::BlockStatus;
-use ethcore::error::{BlockImportError, BlockError};
+use ethcore::error::{Error, BlockImportError, BlockError};
use ethcore::encoded;
use ethcore::header::Header;
use ethcore::ids::BlockId;
-use ethcore::spec::Spec;
+use ethcore::spec::{Spec, SpecHardcodedSync};
use ethcore::engines::epoch::{
Transition as EpochTransition,
PendingTransition as PendingEpochTransition
@@ -45,7 +45,7 @@ use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp};
use heapsize::HeapSizeOf;
use ethereum_types::{H256, H264, U256};
use plain_hasher::H256FastMap;
-use kvdb::{self, DBTransaction, KeyValueDB};
+use kvdb::{DBTransaction, KeyValueDB};

use cache::Cache;
use parking_lot::{Mutex, RwLock};
@@ -180,6 +180,12 @@ pub struct PendingChanges {
best_block: Option<BlockDescriptor>, // new best block.
}

/// Whether or not the hardcoded sync feature is allowed.
pub enum HardcodedSync {
Allow,
Deny,
}

/// Header chain. See module docs for more details.
pub struct HeaderChain {
genesis_header: encoded::Header, // special-case the genesis.
@@ -198,7 +204,8 @@ impl HeaderChain {
col: Option<u32>,
spec: &Spec,
cache: Arc<Mutex<Cache>>,
-) -> Result<Self, kvdb::Error> {
+allow_hs: HardcodedSync,
+) -> Result<Self, Error> {
let mut live_epoch_proofs = ::std::collections::HashMap::default();

let genesis = ::rlp::encode(&spec.genesis_header()).into_vec();
Expand Down Expand Up @@ -240,7 +247,7 @@ impl HeaderChain {
let best_block = {
let era = match candidates.get(&best_number) {
Some(era) => era,
None => return Err("Database corrupt: highest block referenced but no data.".into()),
None => return Err(Error::Database("Database corrupt: highest block referenced but no data.".into())),
};

let best = &era.candidates[0];
@@ -260,8 +267,9 @@
col: col,
cache: cache,
}

} else {
-HeaderChain {
+let chain = HeaderChain {
genesis_header: encoded::Header::new(genesis),
best_block: RwLock::new(BlockDescriptor {
hash: decoded_header.hash(),
@@ -270,15 +278,49 @@
}),
candidates: RwLock::new(BTreeMap::new()),
live_epoch_proofs: RwLock::new(live_epoch_proofs),
-db: db,
+db: db.clone(),
col: col,
cache: cache,
};

// insert the hardcoded sync into the database.
if let (&Some(ref hardcoded_sync), HardcodedSync::Allow) = (&spec.hardcoded_sync, allow_hs) {
let mut batch = db.transaction();

// insert the hardcoded CHT roots into the database.
for (cht_num, cht_root) in hardcoded_sync.chts.iter().enumerate() {
batch.put(col, cht_key(cht_num as u64).as_bytes(), &::rlp::encode(cht_root));
}

let decoded_header = hardcoded_sync.header.decode();
let decoded_header_num = decoded_header.number();

// write the block in the DB.
info!(target: "chain", "Inserting hardcoded block #{} in chain",
decoded_header_num);
let pending = chain.insert_with_td(&mut batch, decoded_header,
hardcoded_sync.total_difficulty, None)?;

// check that we have enough hardcoded CHT roots. avoids panicking later.
let cht_num = cht::block_to_cht_number(decoded_header_num - 1)
.expect("specs provided a hardcoded block with height 0");
if cht_num >= hardcoded_sync.chts.len() as u64 {
warn!(target: "chain", "specs didn't provide enough CHT roots for its \
hardcoded block ; falling back to non-hardcoded sync \
mode");
} else {
db.write_buffered(batch);
chain.apply_pending(pending);
}
}

chain
};

// instantiate genesis epoch data if it doesn't exist.
if let None = chain.db.get(col, LAST_CANONICAL_TRANSITION)? {
-let genesis_data = spec.genesis_epoch_data()?;
+let genesis_data = spec.genesis_epoch_data()
+.map_err(|s| Error::Database(s.into()))?;

{
let mut batch = chain.db.transaction();
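
A note on the CHT-count guard a few lines up: `block_to_cht_number(decoded_header_num - 1)` is the highest CHT that the hardcoded block's ancestry falls into, so the spec must ship that root and every earlier one, or later `cht_root` lookups would miss. A sketch of the arithmetic, assuming Parity's CHT layout of the period (`cht::SIZE` == 2048, with CHT k covering blocks k·SIZE+1 through (k+1)·SIZE); `SIZE`, `block_to_cht_number`, and `enough_chts` below are hypothetical re-statements, not code from this PR:

```rust
// Assumed definitions: cht::SIZE == 2048 and
// block_to_cht_number(n) == (n - 1) / SIZE for n > 0.
const SIZE: u64 = 2048;

fn block_to_cht_number(block_num: u64) -> Option<u64> {
    match block_num {
        0 => None, // the genesis block belongs to no CHT
        n => Some((n - 1) / SIZE),
    }
}

/// True when the spec ships every CHT the hardcoded block depends on.
/// (As in the real code, a height of 0 is not handled: specs never
/// hardcode genesis, and `height - 1` would underflow.)
fn enough_chts(hardcoded_height: u64, chts_len: u64) -> bool {
    match block_to_cht_number(hardcoded_height - 1) {
        Some(cht_num) => cht_num < chts_len,
        None => false, // a hardcoded block at height 1 has nothing to cover
    }
}

fn main() {
    // Mirrors the `hardcoded_sync_gen` test: a block at 3 * SIZE + 1 needs 3 CHTs.
    assert!(enough_chts(3 * SIZE + 1, 3));
    assert!(!enough_chts(3 * SIZE + 1, 2));
}
```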
@@ -304,6 +346,29 @@
transaction: &mut DBTransaction,
header: Header,
transition_proof: Option<Vec<u8>>,
) -> Result<PendingChanges, BlockImportError> {
self.insert_inner(transaction, header, None, transition_proof)
}

/// Insert a pre-verified header with a known total difficulty. Similar to `insert`.
///
/// This blindly trusts that the data given to it is sensible.
pub fn insert_with_td(
&self,
transaction: &mut DBTransaction,
header: Header,
total_difficulty: U256,
transition_proof: Option<Vec<u8>>,
) -> Result<PendingChanges, BlockImportError> {
self.insert_inner(transaction, header, Some(total_difficulty), transition_proof)
}

fn insert_inner(
&self,
transaction: &mut DBTransaction,
header: Header,
total_difficulty: Option<U256>,
transition_proof: Option<Vec<u8>>,
) -> Result<PendingChanges, BlockImportError> {
let hash = header.hash();
let number = header.number();
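
The three-way split keeps the trust boundary explicit: `insert` derives total difficulty from the parent's stored entry, while `insert_with_td` exists for the hardcoded block, whose ancestors are deliberately absent from the DB. A hedged sketch of the calling pattern, mirroring the constructor code above (the names are placeholders; the real values come from `spec.hardcoded_sync`):

```rust
use ethcore::error::BlockImportError;
use ethcore::header::Header;
use ethereum_types::U256;
use kvdb::KeyValueDB;

// Sketch only: how the constructor imports the hardcoded block.
fn import_hardcoded(
    chain: &HeaderChain,
    db: &KeyValueDB,
    header: Header,
    td: U256,
) -> Result<(), BlockImportError> {
    let mut batch = db.transaction();
    // No parent entry exists in a fresh DB, so the TD must be supplied.
    let pending = chain.insert_with_td(&mut batch, header, td, None)?;
    // Commit, then apply the in-memory side of the change; `apply_pending`
    // must run before the next insert.
    db.write_buffered(batch);
    chain.apply_pending(pending);
    Ok(())
}
```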
@@ -321,19 +386,24 @@
// hold candidates the whole time to guard import order.
let mut candidates = self.candidates.write();

-// find parent details.
-let parent_td =
-if number == 1 {
-self.genesis_header.difficulty()
-} else {
-candidates.get(&(number - 1))
-.and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash))
-.map(|c| c.total_difficulty)
-.ok_or_else(|| BlockError::UnknownParent(parent_hash))
-.map_err(BlockImportError::Block)?
-};
-let total_difficulty = parent_td + *header.difficulty();
+// find total difficulty.
+let total_difficulty = match total_difficulty {
+Some(td) => td,
+None => {
+let parent_td =
+if number == 1 {
+self.genesis_header.difficulty()
+} else {
+candidates.get(&(number - 1))
+.and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash))
+.map(|c| c.total_difficulty)
+.ok_or_else(|| BlockError::UnknownParent(parent_hash))
+.map_err(BlockImportError::Block)?
+};
+
+parent_td + *header.difficulty()
+},
+};

// insert headers and candidates entries and write era to disk.
{
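
For readers new to the codebase: total difficulty is the running sum of per-block difficulties from genesis, which is why the `None` branch above can only derive it when the parent's entry is present. As a minimal illustration of the definition (not code from this PR):

```rust
use ethereum_types::U256;

// TD(n) = TD(n-1) + difficulty(n), i.e. a fold over the header chain.
fn total_difficulty(genesis_td: U256, difficulties: &[U256]) -> U256 {
    difficulties.iter().fold(genesis_td, |acc, d| acc + *d)
}
```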
@@ -479,6 +549,65 @@
Ok(pending)
}

/// Generates the specifications for hardcoded sync. This is typically only called manually
/// from time to time by a Parity developer in order to update the chain specifications.
///
/// Returns `None` if we are at the genesis block, and an error if the database is inconsistent.
pub fn read_hardcoded_sync(&self) -> Result<Option<SpecHardcodedSync>, Error> {
let mut chts = Vec::new();
let mut cht_num = 0;

loop {
let cht = match self.cht_root(cht_num) {
Some(cht) => cht,
None if cht_num != 0 => {
// end of the iteration
let h_num = 1 + cht_num as u64 * cht::SIZE;
let header = if let Some(header) = self.block_header(BlockId::Number(h_num)) {
header
} else {
let msg = format!("header of block #{} not found in DB ; database in an \
inconsistent state", h_num);
return Err(Error::Database(msg.into()));
};

let decoded = header.decode();

let entry: Entry = {
let bytes = self.db.get(self.col, era_key(h_num).as_bytes())?
.ok_or_else(|| {
let msg = format!("entry for era #{} not found in DB ; database \
in an inconsistent state", h_num);
Error::Database(msg.into())
})?;
::rlp::decode(&bytes)
};

let total_difficulty = entry.candidates.iter()
.find(|c| c.hash == decoded.hash())
.ok_or_else(|| {
let msg = "no candidate matching block found in DB ; database in an \
inconsistent state";
Error::Database(msg.into())
})?
.total_difficulty;

break Ok(Some(SpecHardcodedSync {
header: header,
total_difficulty: total_difficulty,
chts: chts,
}));
},
None => {
break Ok(None);
},
};

chts.push(cht);
cht_num += 1;
}
}
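
As the doc comment says, this is developer tooling rather than a hot path. A sketch of how the output might be consumed when refreshing a spec; the printing is illustrative and not part of this PR, and `into_inner` is assumed to expose the header's raw RLP bytes:

```rust
// Illustrative helper: dump freshly read hardcoded-sync values so they can
// be transcribed into the chain spec. Field names follow `SpecHardcodedSync`.
fn dump_hardcoded_sync(chain: &HeaderChain) -> Result<(), Error> {
    match chain.read_hardcoded_sync()? {
        Some(hs) => {
            println!("header RLP bytes: {:?}", hs.header.into_inner());
            println!("total difficulty: {}", hs.total_difficulty);
            for (num, root) in hs.chts.iter().enumerate() {
                println!("CHT #{}: {:?}", num, root);
            }
        }
        None => println!("not past the first CHT yet; nothing to hardcode"),
    }
    Ok(())
}
```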

/// Apply pending changes from a previous `insert` operation.
/// Must be done before the next `insert` call.
pub fn apply_pending(&self, pending: PendingChanges) {
@@ -721,7 +850,7 @@ impl<'a> Iterator for AncestryIter<'a> {

#[cfg(test)]
mod tests {
-use super::HeaderChain;
+use super::{HeaderChain, HardcodedSync};
use std::sync::Arc;

use ethereum_types::U256;
@@ -747,7 +876,7 @@ mod tests {

let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
+let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();

let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
@@ -780,7 +909,7 @@
let db = make_db();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
+let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();

let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
@@ -862,7 +991,7 @@
let db = make_db();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
+let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();

assert!(chain.block_header(BlockId::Earliest).is_some());
assert!(chain.block_header(BlockId::Latest).is_some());
@@ -876,7 +1005,8 @@
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

{
-let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+HardcodedSync::Allow).unwrap();
let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
for i in 1..10000 {
@@ -896,7 +1026,8 @@
}
}

-let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+HardcodedSync::Allow).unwrap();
assert!(chain.block_header(BlockId::Number(10)).is_none());
assert!(chain.block_header(BlockId::Number(9000)).is_some());
assert!(chain.cht_root(2).is_some());
@@ -912,7 +1043,8 @@
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

{
-let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+HardcodedSync::Allow).unwrap();
let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();

@@ -954,7 +1086,8 @@
}

// after restoration, non-canonical eras should still be loaded.
-let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+HardcodedSync::Allow).unwrap();
assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10);
assert!(chain.candidates.read().get(&100).is_some())
}
@@ -966,7 +1099,8 @@
let db = make_db();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+HardcodedSync::Allow).unwrap();

assert!(chain.block_header(BlockId::Earliest).is_some());
assert!(chain.block_header(BlockId::Number(0)).is_some());
@@ -980,7 +1114,7 @@
let db = make_db();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
+let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();

let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
@@ -1038,4 +1172,45 @@
assert!(chain.live_epoch_proofs.read().is_empty());
assert_eq!(chain.epoch_transition_for(parent_hash).unwrap().1, vec![1, 2, 3, 4]);
}

#[test]
fn hardcoded_sync_gen() {
let spec = Spec::new_test();
let genesis_header = spec.genesis_header();
let db = make_db();

let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();

let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
let mut total_difficulty = *genesis_header.difficulty();
let h_num = 3 * ::cht::SIZE + 1;
for i in 1..10000 {
let mut header = Header::new();
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
let diff = *genesis_header.difficulty() * i as u32;
header.set_difficulty(diff);
if i <= h_num {
total_difficulty = total_difficulty + diff;
}
parent_hash = header.hash();

let mut tx = db.transaction();
let pending = chain.insert(&mut tx, header, None).unwrap();
db.write(tx).unwrap();
chain.apply_pending(pending);

rolling_timestamp += 10;
}

let hardcoded_sync = chain.read_hardcoded_sync().unwrap().unwrap();
assert_eq!(hardcoded_sync.chts.len(), 3);
assert_eq!(hardcoded_sync.total_difficulty, total_difficulty);
let decoded: Header = hardcoded_sync.header.decode();
assert_eq!(decoded.number(), h_num);
}
}
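
A quick sanity check on `hardcoded_sync_gen` above, assuming `cht::SIZE` == 2048: the chosen height h_num = 3 * 2048 + 1 = 6145 is the first block past three complete CHTs, hence exactly 3 roots, and because block i is given difficulty g * i the accumulated total difficulty has a closed form:

```rust
// Closed form for the test's expected total difficulty, with `g` the genesis
// difficulty (the test uses U256; u64 suffices for the test's magnitudes):
// TD(h_num) = g + g*(1 + 2 + ... + h_num) = g * (1 + h_num*(h_num + 1)/2)
fn expected_td(g: u64, h_num: u64) -> u64 {
    g * (1 + h_num * (h_num + 1) / 2)
}
// With h_num = 3 * 2048 + 1 = 6145, the sum term is 6145 * 6146 / 2 = 18_883_585.
```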