This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

Implement hardcoded sync in the light client #8075

Merged
8 commits merged on Mar 27, 2018
Changes from 1 commit
187 changes: 167 additions & 20 deletions ethcore/light/src/client/header_chain.rs
@@ -31,11 +31,11 @@ use std::sync::Arc;
use cht;

use ethcore::block_status::BlockStatus;
use ethcore::error::{BlockImportError, BlockError};
use ethcore::error::{Error, BlockImportError, BlockError};
use ethcore::encoded;
use ethcore::header::Header;
use ethcore::ids::BlockId;
use ethcore::spec::Spec;
use ethcore::spec::{Spec, SpecHardcodedSync};
use ethcore::engines::epoch::{
Transition as EpochTransition,
PendingTransition as PendingEpochTransition
@@ -45,7 +45,7 @@ use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp};
use heapsize::HeapSizeOf;
use ethereum_types::{H256, H264, U256};
use plain_hasher::H256FastMap;
use kvdb::{self, DBTransaction, KeyValueDB};
use kvdb::{DBTransaction, KeyValueDB};

use cache::Cache;
use parking_lot::{Mutex, RwLock};
@@ -198,7 +198,7 @@ impl HeaderChain {
col: Option<u32>,
spec: &Spec,
cache: Arc<Mutex<Cache>>,
) -> Result<Self, kvdb::Error> {
) -> Result<Self, Error> {
let mut live_epoch_proofs = ::std::collections::HashMap::default();

let genesis = ::rlp::encode(&spec.genesis_header()).into_vec();
@@ -240,7 +240,7 @@
let best_block = {
let era = match candidates.get(&best_number) {
Some(era) => era,
None => return Err("Database corrupt: highest block referenced but no data.".into()),
None => return Err(Error::Database("Database corrupt: highest block referenced but no data.".into())),
};

let best = &era.candidates[0];
@@ -260,8 +260,9 @@
col: col,
cache: cache,
}

} else {
HeaderChain {
let chain = HeaderChain {
genesis_header: encoded::Header::new(genesis),
best_block: RwLock::new(BlockDescriptor {
hash: decoded_header.hash(),
@@ -270,15 +271,47 @@
}),
candidates: RwLock::new(BTreeMap::new()),
live_epoch_proofs: RwLock::new(live_epoch_proofs),
db: db,
db: db.clone(),
col: col,
cache: cache,
};

// insert the hardcoded sync into the database.
if let Some(ref hardcoded_sync) = spec.hardcoded_sync {
let mut batch = db.transaction();

// insert the hardcoded CHT roots into the database.
for (cht_num, cht_root) in hardcoded_sync.chts.iter().enumerate() {
batch.put(col, cht_key(cht_num as u64).as_bytes(), &::rlp::encode(cht_root));
}

let decoded_header = hardcoded_sync.header.decode();

// check that we have enough hardcoded CHT roots. avoids panicking later.
{
let cht_num = cht::block_to_cht_number(decoded_header.number() - 1)
.expect("specs provided a hardcoded block with height 0");
if cht_num >= hardcoded_sync.chts.len() as u64 {
panic!("specs didn't provide enough CHT roots for its hardcoded block");
Review comment (Contributor): rule for panics of any kind is "prove or remove" -- We may rather want to print a warning and fall back to hardcoded-less sync. Or move these checks to the deserialization phase where we can exit with an error more gracefully.
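
A minimal sketch of the fallback the reviewer suggests, reusing the `decoded_header` and `hardcoded_sync` bindings from this hunk; this is an illustration of the suggestion, not code from the PR:

	// Sketch: instead of panicking when the spec ships too few CHT roots,
	// log a warning and skip hardcoded sync, so the node falls back to a
	// regular sync from genesis.
	if let Some(cht_num) = cht::block_to_cht_number(decoded_header.number() - 1) {
		if cht_num >= hardcoded_sync.chts.len() as u64 {
			warn!(target: "chain",
				"Spec did not provide enough CHT roots for its hardcoded block; \
				falling back to syncing from genesis");
		} else {
			// ... insert the CHT roots and the hardcoded block as in the code above ...
		}
	}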

}
}

// write the block in the DB.
info!(target: "chain", "Inserting hardcoded block #{} in chain",
decoded_header.number());
let pending = chain.insert_with_td(&mut batch, decoded_header,
hardcoded_sync.total_difficulty, None)?;
db.write_buffered(batch);
chain.apply_pending(pending);
}

chain
};

// instantiate genesis epoch data if it doesn't exist.
if let None = chain.db.get(col, LAST_CANONICAL_TRANSITION)? {
let genesis_data = spec.genesis_epoch_data()?;
let genesis_data = spec.genesis_epoch_data()
.map_err(|s| Error::Database(s.into()))?;

{
let mut batch = chain.db.transaction();
@@ -299,11 +332,36 @@
///
/// If the block is an epoch transition, provide the transition along with
/// the header.
#[inline]
Review comment (Contributor): does that actually help? generally I figure LLVM will figure it out.

pub fn insert(
&self,
transaction: &mut DBTransaction,
header: Header,
transition_proof: Option<Vec<u8>>,
) -> Result<PendingChanges, BlockImportError> {
self.insert_inner(transaction, header, None, transition_proof)
}

/// Insert a pre-verified header, with a known total difficulty. Similar to `insert`.
///
/// This blindly trusts that the data given to it is sensible.
#[inline]
pub fn insert_with_td(
&self,
transaction: &mut DBTransaction,
header: Header,
total_difficulty: U256,
transition_proof: Option<Vec<u8>>,
) -> Result<PendingChanges, BlockImportError> {
self.insert_inner(transaction, header, Some(total_difficulty), transition_proof)
}

fn insert_inner(
&self,
transaction: &mut DBTransaction,
header: Header,
total_difficulty: Option<U256>,
transition_proof: Option<Vec<u8>>,
) -> Result<PendingChanges, BlockImportError> {
let hash = header.hash();
let number = header.number();
@@ -321,19 +379,24 @@
// hold candidates the whole time to guard import order.
let mut candidates = self.candidates.write();

// find parent details.
let parent_td =
if number == 1 {
self.genesis_header.difficulty()
} else {
candidates.get(&(number - 1))
.and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash))
.map(|c| c.total_difficulty)
.ok_or_else(|| BlockError::UnknownParent(parent_hash))
.map_err(BlockImportError::Block)?
};
// find total difficulty.
let total_difficulty = match total_difficulty {
Some(td) => td,
None => {
let parent_td =
if number == 1 {
self.genesis_header.difficulty()
} else {
candidates.get(&(number - 1))
.and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash))
.map(|c| c.total_difficulty)
.ok_or_else(|| BlockError::UnknownParent(parent_hash))
.map_err(BlockImportError::Block)?
};

let total_difficulty = parent_td + *header.difficulty();
parent_td + *header.difficulty()
},
};

// insert headers and candidates entries and write era to disk.
{
@@ -479,6 +542,49 @@
Ok(pending)
}

/// Generates the specifications for hardcoded sync. This is typically only called manually
/// from time to time by a Parity developer in order to update the chain specifications.
///
/// Returns `None` if we are at the genesis block.
pub fn read_hardcoded_sync(&self) -> Option<SpecHardcodedSync> {
let mut chts = Vec::new();

for cht_num in 0.. {
let cht = match self.cht_root(cht_num) {
Some(cht) => cht,
_ if cht_num != 0 => {
let h_num = 1 + cht_num as u64 * cht::SIZE;
let header = if let Some(header) = self.block_header(BlockId::Number(h_num)) {
header
} else {
panic!("Header of block #{} not found in DB", h_num);
Review comment (Contributor): same: do not panic.

};

let decoded = header.decode();

let entry: Entry = ::rlp::decode(&self.db.get(self.col, era_key(h_num).as_bytes()).unwrap().unwrap());
Review comment (Contributor): and we really never use unwrap.

let total_difficulty = entry.candidates.iter()
.find(|c| c.hash == decoded.hash())
.expect("no candidate matching block found in DB")
Review comment (Contributor): should rather prove based on the fact that an inconsistent but correctly-loaded database shouldn't be possible.

.total_difficulty;

return Some(SpecHardcodedSync {
header: header,
total_difficulty: total_difficulty,
chts: chts,
});
},
_ => {
return None;
},
};

chts.push(cht);
}

unreachable!("we are after an infinite loop")
Review comment (@rphmeier, Contributor, Mar 8, 2018): could be a loop with a break-value, either way it could follow the codebase style of proposition; proof; qed

}
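
For illustration, one way the review suggestions above might combine: a `loop` with a break value, with `None` (via `?`) replacing the panics and unwraps. This is a hypothetical rewrite sketched against the surrounding types, not code from this PR; keeping the `expect`s with proposition-proof-qed messages, as the reviewer mentions, would be an equally valid direction.

	/// Hypothetical non-panicking variant of `read_hardcoded_sync`, combining
	/// the review suggestions: a `loop` with a break value, and graceful
	/// `None`s instead of `panic!`/`unwrap`.
	pub fn read_hardcoded_sync(&self) -> Option<SpecHardcodedSync> {
		let mut chts = Vec::new();
		let mut cht_num = 0;

		loop {
			match self.cht_root(cht_num) {
				Some(cht) => chts.push(cht),
				// still at genesis: nothing to hard-code.
				None if cht_num == 0 => break None,
				None => {
					// the hardcoded block sits just past the last generated CHT.
					let h_num = 1 + cht_num as u64 * cht::SIZE;
					let header = self.block_header(BlockId::Number(h_num))?;
					let decoded = header.decode();

					// a DB error or a missing era entry yields `None` instead of panicking.
					let era_data = self.db.get(self.col, era_key(h_num).as_bytes()).ok()??;
					let entry: Entry = ::rlp::decode(&era_data);

					let total_difficulty = entry.candidates.iter()
						.find(|c| c.hash == decoded.hash())
						.map(|c| c.total_difficulty)?;

					break Some(SpecHardcodedSync {
						header: header,
						total_difficulty: total_difficulty,
						chts: chts,
					});
				},
			}

			cht_num += 1;
		}
	}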

/// Apply pending changes from a previous `insert` operation.
/// Must be done before the next `insert` call.
pub fn apply_pending(&self, pending: PendingChanges) {
@@ -1038,4 +1144,45 @@
assert!(chain.live_epoch_proofs.read().is_empty());
assert_eq!(chain.epoch_transition_for(parent_hash).unwrap().1, vec![1, 2, 3, 4]);
}

#[test]
fn hardcoded_sync_gen() {
let spec = Spec::new_test();
let genesis_header = spec.genesis_header();
let db = make_db();

let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));

let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();

let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
let mut total_difficulty = *genesis_header.difficulty();
let h_num = 3 * ::cht::SIZE + 1;
for i in 1..10000 {
let mut header = Header::new();
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
let diff = *genesis_header.difficulty() * i as u32;
header.set_difficulty(diff);
if i <= h_num {
total_difficulty = total_difficulty + diff;
}
parent_hash = header.hash();

let mut tx = db.transaction();
let pending = chain.insert(&mut tx, header, None).unwrap();
db.write(tx).unwrap();
chain.apply_pending(pending);

rolling_timestamp += 10;
}

let hardcoded_sync = chain.read_hardcoded_sync().unwrap();
assert_eq!(hardcoded_sync.chts.len(), 3);
assert_eq!(hardcoded_sync.total_difficulty, total_difficulty);
let decoded: Header = hardcoded_sync.header.decode();
assert_eq!(decoded.number(), h_num);
}
}
16 changes: 12 additions & 4 deletions ethcore/light/src/client/mod.rs
@@ -22,20 +22,20 @@ use ethcore::block_status::BlockStatus;
use ethcore::client::{ClientReport, EnvInfo};
use ethcore::engines::{epoch, EthEngine, EpochChange, EpochTransition, Proof};
use ethcore::machine::EthereumMachine;
use ethcore::error::BlockImportError;
use ethcore::error::{Error, BlockImportError};
use ethcore::ids::BlockId;
use ethcore::header::{BlockNumber, Header};
use ethcore::verification::queue::{self, HeaderQueue};
use ethcore::blockchain_info::BlockChainInfo;
use ethcore::spec::Spec;
use ethcore::spec::{Spec, SpecHardcodedSync};
use ethcore::service::ClientIoMessage;
use ethcore::encoded;
use io::IoChannel;
use parking_lot::{Mutex, RwLock};
use ethereum_types::{H256, U256};
use futures::{IntoFuture, Future};

use kvdb::{self, KeyValueDB};
use kvdb::KeyValueDB;

use self::fetch::ChainDataFetcher;
use self::header_chain::{AncestryIter, HeaderChain};
@@ -176,7 +176,7 @@ impl<T: ChainDataFetcher> Client<T> {
fetcher: T,
io_channel: IoChannel<ClientIoMessage>,
cache: Arc<Mutex<Cache>>
) -> Result<Self, kvdb::Error> {
) -> Result<Self, Error> {
Ok(Client {
queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, config.check_seal),
engine: spec.engine.clone(),
@@ -190,6 +190,14 @@
})
}

/// Generates the specifications for hardcoded sync. This is typically only called manually
/// from time to time by a Parity developer in order to update the chain specifications.
///
/// Returns `None` if we are at the genesis block.
pub fn read_hardcoded_sync(&self) -> Option<SpecHardcodedSync> {
self.chain.read_hardcoded_sync()
}
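
For illustration, a hypothetical snippet showing how this accessor might be driven from a maintenance tool when regenerating the spec's hardcoded-sync section (the helper, its name, and the output format are assumptions, not part of this PR):

	// Hypothetical helper: dump the hardcoded-sync parameters of a synced light
	// client so they can be copied into the chain spec.
	fn print_hardcoded_sync<T: ChainDataFetcher>(client: &Client<T>) {
		match client.read_hardcoded_sync() {
			Some(sync) => {
				println!("hardcoded block: #{}", sync.header.decode().number());
				println!("total difficulty: {}", sync.total_difficulty);
				println!("CHT roots ({}):", sync.chts.len());
				for (i, root) in sync.chts.iter().enumerate() {
					println!("  #{}: {:?}", i, root);
				}
			}
			None => println!("chain is still at genesis; nothing to hard-code"),
		}
	}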

/// Adds a new `LightChainNotify` listener.
pub fn add_listener(&self, listener: Weak<LightChainNotify>) {
self.listeners.write().push(listener);
18 changes: 13 additions & 5 deletions ethcore/light/src/client/service.rs
@@ -21,10 +21,11 @@ use std::fmt;
use std::sync::Arc;

use ethcore::db;
use ethcore::error::Error as CoreError;
use ethcore::service::ClientIoMessage;
use ethcore::spec::Spec;
use io::{IoContext, IoError, IoHandler, IoService};
use kvdb::{self, KeyValueDB};
use kvdb::KeyValueDB;

use cache::Cache;
use parking_lot::Mutex;
@@ -34,16 +35,23 @@ use super::{ChainDataFetcher, Client, Config as ClientConfig};
/// Errors on service initialization.
#[derive(Debug)]
pub enum Error {
/// Database error.
Database(kvdb::Error),
/// Core error.
Core(CoreError),
/// I/O service error.
Io(IoError),
}

impl From<CoreError> for Error {
#[inline]
fn from(err: CoreError) -> Error {
Error::Core(err)
}
}

impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Database(ref msg) => write!(f, "Database error: {}", msg),
Error::Core(ref msg) => write!(f, "Core error: {}", msg),
Error::Io(ref err) => write!(f, "I/O service error: {}", err),
}
}
@@ -67,7 +75,7 @@ impl<T: ChainDataFetcher> Service<T> {
fetcher,
io_service.channel(),
cache,
).map_err(Error::Database)?);
)?);

io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?;
spec.engine.register_client(Arc::downgrade(&client) as _);