diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1f4b4b8807e..33cfbd9d9fb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -153,7 +153,7 @@ build-android: build-linux: <<: *build-on-linux - # only: *releaseable_branches + only: *releaseable_branches build-linux-i386: <<: *build-on-linux diff --git a/Cargo.lock b/Cargo.lock index 2e6fefa5b90..3bf2f113f45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -502,7 +502,7 @@ dependencies = [ "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -867,7 +867,6 @@ dependencies = [ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "blooms-db 0.1.0", "bn 0.4.4 (git+https://github.com/paritytech/bn)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", "criterion 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -924,6 +923,7 @@ dependencies = [ "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "stats 0.1.0", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "time-utils 0.1.0", @@ -1062,7 +1062,7 @@ dependencies = [ "rlp_derive 0.1.0", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "stats 0.1.0", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "trie-db 0.12.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1360,14 +1360,14 @@ dependencies = [ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-crypto 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-util-mem 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-wordlist 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-wordlist 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "zeroize 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1378,7 +1378,7 @@ dependencies = [ "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", "ethkey 0.3.0", "panic_hook 0.1.0", - "parity-wordlist 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-wordlist 1.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1397,14 +1397,14 @@ dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-crypto 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-wordlist 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-wordlist 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3040,19 +3040,19 @@ dependencies = [ "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "time-utils 0.1.0", "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "zeroize 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "parity-wordlist" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3098,7 +3098,7 @@ dependencies = [ "libc 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3112,7 +3112,7 @@ dependencies = [ "petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "thread-id 3.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3278,7 +3278,7 @@ dependencies = [ "hamming 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "primal-bit 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "primal-estimate 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3874,11 +3874,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "smallvec" -version = "0.6.5" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "socket2" @@ -4332,7 +4329,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "trace-time 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4715,6 +4712,25 @@ dependencies = [ "xml-rs 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "zeroize" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "zeroize_derive 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "zeroize_derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.26 (registry+https://github.com/rust-lang/crates.io-index)", + "synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [metadata] "checksum aes 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "54eb1d8fe354e5fc611daf4f2ea97dd45a765f4f1e4512306ec183ae2e8f20c9" "checksum aes-ctr 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d2e5b0458ea3beae0d1d8c0f3946564f8e10f90646cf78c06b4351052058d1ee" @@ -4934,7 +4950,7 @@ dependencies = [ "checksum parity-tokio-ipc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eb002c2d3539ccd3b82bd915ec060028d4ab350ad203dbffa20028c1e483af5b" "checksum parity-util-mem 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "89e80f22052161e0cb55cb5a8a75890420c525031f95c9d262dbb0434aa85dc1" "checksum parity-wasm 0.31.3 (registry+https://github.com/rust-lang/crates.io-index)" = "511379a8194230c2395d2f5fa627a5a7e108a9f976656ce723ae68fca4097bfc" -"checksum parity-wordlist 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf13102febd98f4ad416a526b42deb82daf482626ba6ab10d0ebf8f45327514c" +"checksum parity-wordlist 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "573d08f0d3bc8a6ffcdac1de2725b5daeed8db26345a9c12d91648e2d6457f3e" "checksum parity-ws 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2fec5048fba72a2e01baeb0d08089db79aead4b57e2443df172fb1840075a233" "checksum parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5" "checksum parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" @@ -5022,7 +5038,7 @@ dependencies = [ "checksum slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6dbdd334bd28d328dad1c41b0ea662517883d8880d8533895ef96c8003dec9c4" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" "checksum slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f9776d6b986f77b35c6cf846c11ad986ff128fe0b2b63a3628e3755e8d3102d" -"checksum smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "153ffa32fd170e9944f7e0838edf824a754ec4c1fc64746fcc9fe1f8fa602e5d" +"checksum smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7" "checksum socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "c4d11a52082057d87cb5caa31ad812f4504b97ab44732cd8359df2e9ff9f48e7" "checksum spin 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44363f6f51401c34e7be73db0db371c04705d35efbe9f7d6082e03a921a32c55" "checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" @@ -5113,3 +5129,5 @@ dependencies = [ "checksum xdg 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a66b7c2281ebde13cf4391d70d4c7e5946c3c25e72a7b859ca8f677dcd0b0c61" "checksum xml-rs 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c1cb601d29fe2c2ac60a2b2e5e293994d87a1f6fa9687a31a15270f909be9c2" "checksum xmltree 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff8eaee9d17062850f1e6163b509947969242990ee59a35801af437abe041e70" +"checksum zeroize 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e2ea4afc22e9497e26b42bf047083c30f7e3ca566f3bcd7187f83d18b327043" +"checksum zeroize_derive 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "afd1469e4bbca3b96606d26ba6e9bd6d3aed3b1299c82b92ec94377d22d78dbc" diff --git a/README.md b/README.md index a8489013a3c..e4aec89927a 100644 --- a/README.md +++ b/README.md @@ -16,11 +16,12 @@ 3.2 [Building from Source Code](#chapter-0032)
3.3 [Simple One-Line Installer for Mac and Linux](#chapter-0033)
3.4 [Starting Parity Ethereum](#chapter-0034) -4. [Documentation](#chapter-004) -5. [Toolchain](#chapter-005) -6. [Community](#chapter-006) -7. [Contributing](#chapter-007) -8. [License](#chapter-008) +4. [Testing](#chapter-004) +5. [Documentation](#chapter-005) +6. [Toolchain](#chapter-006) +7. [Community](#chapter-007) +8. [Contributing](#chapter-008) +9. [License](#chapter-009) ## 1. Description @@ -65,7 +66,7 @@ We recommend installing Rust through [rustup](https://www.rustup.rs/). If you do `clang` is required. It comes with Xcode command line tools or can be installed with homebrew. -- Windows +- Windows: Make sure you have Visual Studio 2015 with C++ support installed. Next, download and run the `rustup` installer from https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe, start "VS2015 x64 Native Tools Command Prompt", and use the following command to install and set up the `msvc` toolchain: ```bash @@ -148,7 +149,25 @@ To start Parity Ethereum as a regular user using `systemd` init: 2. Copy release to bin folder, write `sudo install ./target/release/parity /usr/bin/parity` 3. To configure Parity Ethereum, write a `/etc/parity/config.toml` config file, see [Configuring Parity Ethereum](https://paritytech.github.io/wiki/Configuring-Parity) for details. -## 4. Documentation +## 4. Testing + +You can run tests with the following commands: + +* **All** packages + ``` + cargo test --all + ``` + +* Specific package + ``` + cargo test --package <package> + ``` + +Replace `<package>` with one of the packages from the [package list](#package-list) (e.g. `cargo test --package evmbin`). + +You can show your logs in the test output by passing `--nocapture` (e.g. `cargo test --package evmbin -- --nocapture`). + +## 5. Documentation Official website: https://parity.io @@ -160,16 +179,20 @@ You can generate documentation for Parity Ethereum Rust packages that automatica * **All** packages ``` - cargo doc --open + cargo doc --document-private-items --open ``` * Specific package ``` - cargo doc --package <package> --open + cargo doc --package <package> --document-private-items --open ``` +Use `--document-private-items` to also view private documentation and `--no-deps` to exclude building documentation for dependencies. + Replacing `<package>` with one of the following from the details section below (e.g. `cargo doc --package parity-ethereum --open`): + +**Package List**

* Parity Ethereum (EthCore) Client Application @@ -330,7 +353,7 @@ Example (generic documentation comment): /// ``` -## 5. Toolchain +## 6. Toolchain In addition to the Parity Ethereum client, there are additional tools in this repository available: @@ -342,7 +365,7 @@ In addition to the Parity Ethereum client, there are additional tools in this re The following tool is available in a separate repository: - [ethabi](https://github.com/paritytech/ethabi) - Parity Ethereum Encoding of Function Calls. [Docs here](https://crates.io/crates/ethabi) -## 6. Community +## 7. Community ### Join the chat! @@ -355,7 +378,7 @@ Questions? Get in touch with us on Gitter: Alternatively, join our community on Matrix: [![Riot: +Parity](https://img.shields.io/badge/riot-%2Bparity%3Amatrix.parity.io-orange.svg)](https://riot.im/app/#/group/+parity:matrix.parity.io) -## 7. Contributing +## 8. Contributing An introduction has been provided in the ["So You Want to be a Core Developer" presentation slides by Hernando Castano](http://tiny.cc/contrib-to-parity-eth). Additional guidelines are provided in [CONTRIBUTING](./.github/CONTRIBUTING.md). @@ -363,6 +386,6 @@ An introduction has been provided in the ["So You Want to be a Core Developer" p [CODE_OF_CONDUCT](./.github/CODE_OF_CONDUCT.md) -## 8. License +## 9. License [LICENSE](./LICENSE) diff --git a/accounts/ethkey/Cargo.toml b/accounts/ethkey/Cargo.toml index a66a74de05a..3806b0c3baa 100644 --- a/accounts/ethkey/Cargo.toml +++ b/accounts/ethkey/Cargo.toml @@ -11,11 +11,11 @@ eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } ethereum-types = "0.6.0" lazy_static = "1.0" log = "0.4" -parity-util-mem = "0.1" -parity-wordlist = "1.2" +parity-wordlist = "1.3" quick-error = "1.2.2" rand = "0.6" rustc-hex = "1.0" serde = "1.0" serde_derive = "1.0" tiny-keccak = "1.4" +zeroize = "0.9.1" diff --git a/accounts/ethkey/src/lib.rs b/accounts/ethkey/src/lib.rs index 2312f2bcd31..4f55f056d07 100644 --- a/accounts/ethkey/src/lib.rs +++ b/accounts/ethkey/src/lib.rs @@ -19,7 +19,6 @@ extern crate edit_distance; extern crate parity_crypto; extern crate ethereum_types; -extern crate parity_util_mem; extern crate parity_wordlist; #[macro_use] extern crate quick_error; @@ -28,6 +27,7 @@ extern crate rustc_hex; extern crate secp256k1; extern crate serde; extern crate tiny_keccak; +extern crate zeroize; #[macro_use] extern crate lazy_static; diff --git a/accounts/ethkey/src/secret.rs b/accounts/ethkey/src/secret.rs index 25136ee8330..c850fa70ee8 100644 --- a/accounts/ethkey/src/secret.rs +++ b/accounts/ethkey/src/secret.rs @@ -21,17 +21,23 @@ use rustc_hex::ToHex; use secp256k1::constants::{SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE}; use secp256k1::key; use ethereum_types::H256; -use parity_util_mem::Memzero; +use zeroize::Zeroize; use {Error, SECP256K1}; #[derive(Clone, PartialEq, Eq)] pub struct Secret { - inner: Memzero, + inner: H256, +} + +impl Drop for Secret { + fn drop(&mut self) { + self.inner.0.zeroize() + } } impl ToHex for Secret { fn to_hex(&self) -> String { - format!("{:x}", *self.inner) + format!("{:x}", self.inner) } } @@ -61,12 +67,12 @@ impl Secret { } let mut h = H256::zero(); h.as_bytes_mut().copy_from_slice(&key[0..32]); - Some(Secret { inner: Memzero::from(h) }) + Some(Secret { inner: h }) } /// Creates zero key, which is invalid for crypto operations, but valid for math operation. pub fn zero() -> Self { - Secret { inner: Memzero::from(H256::zero()) } + Secret { inner: H256::zero() } } /// Imports and validates the key. 
@@ -214,7 +220,7 @@ impl FromStr for Secret { impl From<[u8; 32]> for Secret { fn from(k: [u8; 32]) -> Self { - Secret { inner: Memzero::from(H256(k)) } + Secret { inner: H256(k) } } } diff --git a/accounts/ethstore/src/json/crypto.rs b/accounts/ethstore/src/json/crypto.rs index 34664f98b0e..a7315d7e5c9 100644 --- a/accounts/ethstore/src/json/crypto.rs +++ b/accounts/ethstore/src/json/crypto.rs @@ -41,7 +41,7 @@ impl str::FromStr for Crypto { impl From for String { fn from(c: Crypto) -> Self { - serde_json::to_string(&c).expect("serialization cannot fail, cause all crypto keys are strings") + serde_json::to_string(&c).expect("Serialization cannot fail, because all crypto keys are strings") } } diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index b35bad357ff..9c315fffbbc 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -10,7 +10,6 @@ authors = ["Parity Technologies "] ansi_term = "0.11" blooms-db = { path = "../util/blooms-db", optional = true } bn = { git = "https://github.com/paritytech/bn", default-features = false } -byteorder = "1.0" common-types = { path = "types" } crossbeam = "0.4" derive_more = "0.14.0" @@ -82,6 +81,7 @@ fetch = { path = "../util/fetch" } kvdb-rocksdb = "0.1.3" parity-runtime = { path = "../util/runtime" } rlp_compress = { path = "../util/rlp-compress" } +serde_json = "1.0" tempdir = "0.3" trie-standardmap = "0.12.4" diff --git a/ethcore/blockchain/src/best_block.rs b/ethcore/blockchain/src/best_block.rs index 20f247391dc..cddb798358d 100644 --- a/ethcore/blockchain/src/best_block.rs +++ b/ethcore/blockchain/src/best_block.rs @@ -24,7 +24,8 @@ use common_types::header::Header; /// For GHOST fork-choice rule it would typically describe the block with highest /// combined difficulty (usually the block with the highest block number). /// -/// Sometimes refered as 'latest block'. +/// Sometimes referred to as 'latest block'. +#[derive(Debug)] pub struct BestBlock { /// Best block decoded header. pub header: Header, @@ -35,7 +36,7 @@ pub struct BestBlock { } /// Best ancient block info. If the blockchain has a gap this keeps track of where it starts. -#[derive(Default)] +#[derive(Debug, Default)] pub struct BestAncientBlock { /// Best block hash. pub hash: H256, diff --git a/ethcore/blockchain/src/blockchain.rs b/ethcore/blockchain/src/blockchain.rs index dbe18f25310..f1350f957f0 100644 --- a/ethcore/blockchain/src/blockchain.rs +++ b/ethcore/blockchain/src/blockchain.rs @@ -652,10 +652,7 @@ impl BlockChain { // and write them if let (Some(hash), Some(number)) = (best_ancient, best_ancient_number) { let mut best_ancient_block = bc.best_ancient_block.write(); - *best_ancient_block = Some(BestAncientBlock { - hash: hash, - number: number, - }); + *best_ancient_block = Some(BestAncientBlock { hash, number }); } } diff --git a/ethcore/db/src/keys.rs b/ethcore/db/src/keys.rs index ceab94211ec..d7db42bf6c1 100644 --- a/ethcore/db/src/keys.rs +++ b/ethcore/db/src/keys.rs @@ -205,7 +205,7 @@ pub struct TransactionAddress { } /// Contains all block receipts. -#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper, MallocSizeOf)] +#[derive(Debug, Clone, RlpEncodableWrapper, RlpDecodableWrapper, MallocSizeOf)] pub struct BlockReceipts { /// Block receipts pub receipts: Vec, @@ -214,9 +214,7 @@ impl BlockReceipts { /// Create new block receipts wrapper. 
pub fn new(receipts: Vec) -> Self { - BlockReceipts { - receipts: receipts - } + BlockReceipts { receipts } } } diff --git a/ethcore/light/src/client/fetch.rs b/ethcore/light/src/client/fetch.rs index dd052917fc5..373f6f0bd0b 100644 --- a/ethcore/light/src/client/fetch.rs +++ b/ethcore/light/src/client/fetch.rs @@ -21,7 +21,7 @@ use std::sync::Arc; use common_types::encoded; use common_types::header::Header; use common_types::receipt::Receipt; -use ethcore::engines::{EthEngine, StateDependentProof}; +use ethcore::engines::{Engine, StateDependentProof}; use ethereum_types::H256; use futures::future::IntoFuture; @@ -47,7 +47,7 @@ pub trait ChainDataFetcher: Send + Sync + 'static { fn epoch_transition( &self, _hash: H256, - _engine: Arc, + _engine: Arc, _checker: Arc ) -> Self::Transition; } @@ -76,7 +76,7 @@ impl ChainDataFetcher for Unavailable { fn epoch_transition( &self, _hash: H256, - _engine: Arc, + _engine: Arc, _checker: Arc ) -> Self::Transition { Err("fetching epoch transition proofs unavailable") diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index c6e0925dbac..bcabd809c5c 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -19,7 +19,7 @@ use std::sync::{Weak, Arc}; use ethcore::client::{ClientReport, EnvInfo, ClientIoMessage}; -use ethcore::engines::{epoch, EthEngine, EpochChange, EpochTransition, Proof}; +use ethcore::engines::{epoch, Engine, EpochChange, EpochTransition, Proof}; use ethcore::error::{Error, EthcoreResult}; use ethcore::verification::queue::{self, HeaderQueue}; use ethcore::spec::{Spec, SpecHardcodedSync}; @@ -114,7 +114,7 @@ pub trait LightChainClient: Send + Sync { fn env_info(&self, id: BlockId) -> Option; /// Get a handle to the consensus engine. - fn engine(&self) -> &Arc; + fn engine(&self) -> &Arc; /// Query whether a block is known. fn is_known(&self, hash: &H256) -> bool; @@ -159,7 +159,7 @@ impl AsLightClient for T { /// Light client implementation. pub struct Client { queue: HeaderQueue, - engine: Arc, + engine: Arc, chain: HeaderChain, report: RwLock, import_lock: Mutex<()>, @@ -375,7 +375,7 @@ impl Client { } /// Get a handle to the verification engine. - pub fn engine(&self) -> &Arc { + pub fn engine(&self) -> &Arc { &self.engine } @@ -578,7 +578,7 @@ impl LightChainClient for Client { Client::env_info(self, id) } - fn engine(&self) -> &Arc { + fn engine(&self) -> &Arc { Client::engine(self) } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 380a7c93335..b2bf39951f3 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -24,7 +24,7 @@ use common_types::basic_account::BasicAccount; use common_types::encoded; use common_types::receipt::Receipt; use common_types::transaction::SignedTransaction; -use ethcore::engines::{EthEngine, StateDependentProof}; +use ethcore::engines::{Engine, StateDependentProof}; use ethcore::state::{self, ProvedExecution}; use ethereum_types::{H256, U256, Address}; use ethtrie::{TrieError, TrieDB}; @@ -1037,7 +1037,7 @@ pub struct TransactionProof { // TODO: it's not really possible to provide this if the header is unknown. pub env_info: EnvInfo, /// Consensus engine. - pub engine: Arc, + pub engine: Arc, } impl TransactionProof { @@ -1080,7 +1080,7 @@ pub struct Signal { /// Block hash and number to fetch proof for. pub hash: H256, /// Consensus engine, used to check the proof. - pub engine: Arc, + pub engine: Arc, /// Special checker for the proof. 
pub proof_check: Arc, } diff --git a/ethcore/res/instant_seal.json b/ethcore/res/instant_seal.json index 3ec998f0e5c..0a46f07ff67 100644 --- a/ethcore/res/instant_seal.json +++ b/ethcore/res/instant_seal.json @@ -24,6 +24,9 @@ "eip211Transition": "0x0", "eip214Transition": "0x0", "eip658Transition": "0x0", + "eip145Transition": "0x0", + "eip1014Transition": "0x0", + "eip1052Transition": "0x0", "wasmActivationTransition": "0x0" }, "genesis": { diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index d672668f733..7eb81f4238d 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -22,13 +22,13 @@ //! and can be appended to with transactions and uncles. //! //! When ready, `OpenBlock` can be closed and turned into a `ClosedBlock`. A `ClosedBlock` can -//! be reopend again by a miner under certain circumstances. On block close, state commit is +//! be re-opened again by a miner under certain circumstances. On block close, state commit is //! performed. //! //! `LockedBlock` is a version of a `ClosedBlock` that cannot be reopened. It can be sealed //! using an engine. //! -//! `ExecutedBlock` is an underlaying data structure used by all structs above to store block +//! `ExecutedBlock` is an underlying data structure used by all structs above to store block //! related info. use std::{cmp, ops}; @@ -38,7 +38,7 @@ use std::sync::Arc; use bytes::Bytes; use ethereum_types::{H256, U256, Address, Bloom}; -use engines::EthEngine; +use engines::Engine; use error::{Error, BlockError}; use factory::Factories; use state_db::StateDB; @@ -52,7 +52,7 @@ use vm::{EnvInfo, LastHashes}; use hash::keccak; use rlp::{RlpStream, Encodable, encode_list}; use types::transaction::{SignedTransaction, Error as TransactionError}; -use types::header::{Header, ExtendedHeader}; +use types::header::Header; use types::receipt::{Receipt, TransactionOutcome}; /// Block that is ready for transactions to be added. @@ -61,7 +61,8 @@ use types::receipt::{Receipt, TransactionOutcome}; /// maintain the system `state()`. We also archive execution receipts in preparation for later block creation. pub struct OpenBlock<'x> { block: ExecutedBlock, - engine: &'x dyn EthEngine, + engine: &'x dyn Engine, + parent: Header, } /// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields, @@ -72,6 +73,7 @@ pub struct OpenBlock<'x> { pub struct ClosedBlock { block: ExecutedBlock, unclosed_state: State, + parent: Header, } /// Just like `ClosedBlock` except that we can't reopen it and it's faster. @@ -102,7 +104,7 @@ pub struct ExecutedBlock { pub receipts: Vec, /// Hashes of already executed transactions. pub transactions_set: HashSet, - /// Underlaying state. + /// Underlying state. pub state: State, /// Transaction traces. pub traces: Tracing, @@ -119,13 +121,13 @@ impl ExecutedBlock { uncles: Default::default(), receipts: Default::default(), transactions_set: Default::default(), - state: state, + state, traces: if tracing { Tracing::enabled() } else { Tracing::Disabled }, - last_hashes: last_hashes, + last_hashes, } } @@ -162,8 +164,8 @@ pub trait Drain { impl<'x> OpenBlock<'x> { /// Create a new `OpenBlock` ready for transaction pushing. 
- pub fn new<'a, I: IntoIterator>( - engine: &'x dyn EthEngine, + pub fn new<'a>( + engine: &'x dyn Engine, factories: Factories, tracing: bool, db: StateDB, @@ -173,14 +175,11 @@ impl<'x> OpenBlock<'x> { gas_range_target: (U256, U256), extra_data: Bytes, is_epoch_begin: bool, - ancestry: I, ) -> Result { let number = parent.number() + 1; let state = State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce(number), factories)?; - let mut r = OpenBlock { - block: ExecutedBlock::new(state, last_hashes, tracing), - engine: engine, - }; + + let mut r = OpenBlock { block: ExecutedBlock::new(state, last_hashes, tracing), engine, parent: parent.clone() }; r.block.header.set_parent_hash(parent.hash()); r.block.header.set_number(number); @@ -195,7 +194,7 @@ impl<'x> OpenBlock<'x> { engine.populate_from_parent(&mut r.block.header, parent); engine.machine().on_new_block(&mut r.block)?; - engine.on_new_block(&mut r.block, is_epoch_begin, &mut ancestry.into_iter())?; + engine.on_new_block(&mut r.block, is_epoch_begin)?; Ok(r) } @@ -297,19 +296,20 @@ impl<'x> OpenBlock<'x> { /// Turn this into a `ClosedBlock`. pub fn close(self) -> Result { let unclosed_state = self.block.state.clone(); + let parent = self.parent.clone(); let locked = self.close_and_lock()?; Ok(ClosedBlock { block: locked.block, unclosed_state, + parent, }) } /// Turn this into a `LockedBlock`. pub fn close_and_lock(self) -> Result { let mut s = self; - - s.engine.on_close_block(&mut s.block)?; + s.engine.on_close_block(&mut s.block, &s.parent)?; s.block.state.commit()?; s.block.header.set_transactions_root(ordered_trie_root(s.block.transactions.iter().map(|e| e.rlp_bytes()))); @@ -374,14 +374,12 @@ impl ClosedBlock { } /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. - pub fn reopen(self, engine: &dyn EthEngine) -> OpenBlock { + pub fn reopen(self, engine: &dyn Engine) -> OpenBlock { // revert rewards (i.e. set state back at last transaction's state). let mut block = self.block; block.state = self.unclosed_state; - OpenBlock { - block: block, - engine: engine, - } + let parent = self.parent; + OpenBlock { block, engine, parent } } } @@ -404,7 +402,7 @@ impl LockedBlock { /// Provide a valid seal in order to turn this into a `SealedBlock`. /// /// NOTE: This does not check the validity of `seal` with the engine. - pub fn seal(self, engine: &dyn EthEngine, seal: Vec) -> Result { + pub fn seal(self, engine: &dyn Engine, seal: Vec) -> Result { let expected_seal_fields = engine.seal_fields(&self.header); let mut s = self; if seal.len() != expected_seal_fields { @@ -429,7 +427,7 @@ impl LockedBlock { /// TODO(https://github.com/paritytech/parity-ethereum/issues/10407): This is currently only used in POW chain call paths, we should really merge it with seal() above. pub fn try_seal( self, - engine: &dyn EthEngine, + engine: &dyn Engine, seal: Vec, ) -> Result { let mut s = self; @@ -472,14 +470,13 @@ pub(crate) fn enact( header: Header, transactions: Vec, uncles: Vec

, - engine: &dyn EthEngine, + engine: &dyn Engine, tracing: bool, db: StateDB, parent: &Header, last_hashes: Arc, factories: Factories, is_epoch_begin: bool, - ancestry: &mut dyn Iterator, ) -> Result { // For trace log let trace_state = if log_enabled!(target: "enact", ::log::Level::Trace) { @@ -501,7 +498,6 @@ pub(crate) fn enact( (3141562.into(), 31415620.into()), vec![], is_epoch_begin, - ancestry, )?; if let Some(ref s) = trace_state { @@ -522,17 +518,16 @@ pub(crate) fn enact( b.close_and_lock() } -/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header +/// Enact the block given by `block_bytes` using `engine` on the database `db` with the given `parent` block header pub fn enact_verified( block: PreverifiedBlock, - engine: &dyn EthEngine, + engine: &dyn Engine, tracing: bool, db: StateDB, parent: &Header, last_hashes: Arc, factories: Factories, is_epoch_begin: bool, - ancestry: &mut dyn Iterator, ) -> Result { enact( @@ -546,7 +541,6 @@ pub fn enact_verified( last_hashes, factories, is_epoch_begin, - ancestry, ) } @@ -554,7 +548,7 @@ pub fn enact_verified( mod tests { use test_helpers::get_temp_state_db; use super::*; - use engines::EthEngine; + use engines::Engine; use vm::LastHashes; use error::Error; use factory::Factories; @@ -571,7 +565,7 @@ mod tests { /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header fn enact_bytes( block_bytes: Vec, - engine: &dyn EthEngine, + engine: &dyn Engine, tracing: bool, db: StateDB, parent: &Header, @@ -608,7 +602,6 @@ mod tests { (3141562.into(), 31415620.into()), vec![], false, - None, )?; b.populate_from(&header); @@ -624,7 +617,7 @@ mod tests { /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. 
Seal the block aferwards fn enact_and_seal( block_bytes: Vec, - engine: &dyn EthEngine, + engine: &dyn Engine, tracing: bool, db: StateDB, parent: &Header, @@ -643,7 +636,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close_and_lock().unwrap(); let _ = b.seal(&*spec.engine, vec![]); } @@ -657,7 +650,7 @@ mod tests { let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap() + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap() .close_and_lock().unwrap().seal(engine, vec![]).unwrap(); let orig_bytes = b.rlp_bytes(); let orig_db = b.drain().state.drop().1; @@ -682,7 +675,7 @@ mod tests { let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let mut uncle1_header = Header::new(); uncle1_header.set_extra_data(b"uncle1".to_vec()); let mut uncle2_header = Header::new(); diff --git a/ethcore/src/builtin.rs b/ethcore/src/builtin.rs index bae1c75da36..6a52c9cdd42 100644 --- a/ethcore/src/builtin.rs +++ b/ethcore/src/builtin.rs @@ -19,7 +19,6 @@ use std::cmp::{max, min}; use std::io::{self, Read}; -use byteorder::{ByteOrder, BigEndian}; use parity_crypto::digest; use num::{BigUint, Zero, One}; @@ -369,7 +368,9 @@ impl Impl for ModexpImpl { // but so would running out of addressable memory! let mut read_len = |reader: &mut io::Chain<&[u8], io::Repeat>| { reader.read_exact(&mut buf[..]).expect("reading from zero-extended memory cannot fail; qed"); - BigEndian::read_u64(&buf[24..]) as usize + let mut len_bytes = [0u8; 8]; + len_bytes.copy_from_slice(&buf[24..]); + u64::from_be_bytes(len_bytes) as usize }; let base_len = read_len(&mut reader); diff --git a/ethcore/src/client/ancient_import.rs b/ethcore/src/client/ancient_import.rs index 9dc5e97b9f8..f89afa13a3f 100644 --- a/ethcore/src/client/ancient_import.rs +++ b/ethcore/src/client/ancient_import.rs @@ -18,7 +18,7 @@ use std::sync::Arc; -use engines::{EthEngine, EpochVerifier}; +use engines::{Engine, EpochVerifier}; use blockchain::BlockChain; use parking_lot::RwLock; @@ -32,12 +32,12 @@ const HEAVY_VERIFY_RATE: f32 = 0.02; /// epoch. 
pub struct AncientVerifier { cur_verifier: RwLock>>, - engine: Arc, + engine: Arc, } impl AncientVerifier { /// Create a new ancient block verifier with the given engine. - pub fn new(engine: Arc) -> Self { + pub fn new(engine: Arc) -> Self { AncientVerifier { cur_verifier: RwLock::new(None), engine, diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 26a58fc867e..e9847437c9d 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -16,7 +16,6 @@ use std::cmp; use std::collections::{HashSet, BTreeMap, VecDeque}; -use std::str::FromStr; use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; use std::sync::{Arc, Weak}; use std::time::{Instant, Duration}; @@ -60,7 +59,7 @@ use client::{ IoClient, BadBlocks, }; use client::bad_blocks; -use engines::{MAX_UNCLE_AGE, EthEngine, EpochTransition, ForkChoice, EngineError, SealingState}; +use engines::{MAX_UNCLE_AGE, Engine, EpochTransition, ForkChoice, EngineError, SealingState}; use engines::epoch::PendingTransition; use error::{ ImportError, ExecutionError, CallError, BlockError, @@ -165,7 +164,7 @@ struct Importer { pub ancient_verifier: AncientVerifier, /// Ethereum engine to be used during import - pub engine: Arc, + pub engine: Arc, /// A lru cache of recently detected bad blocks pub bad_blocks: bad_blocks::BadBlocks, @@ -187,7 +186,7 @@ pub struct Client { chain: RwLock>, tracedb: RwLock>, - engine: Arc, + engine: Arc, /// Client configuration config: ClientConfig, @@ -245,7 +244,7 @@ pub struct Client { impl Importer { pub fn new( config: &ClientConfig, - engine: Arc, + engine: Arc, message_channel: IoChannel, miner: Arc, ) -> Result { @@ -408,7 +407,6 @@ impl Importer { last_hashes, client.factories.clone(), is_epoch_begin, - &mut chain.ancestry_with_metadata_iter(*header.parent_hash()), ); let mut locked_block = match enact_result { @@ -753,7 +751,7 @@ impl Client { let importer = Importer::new(&config, engine.clone(), message_channel.clone(), miner)?; - let registrar_address = engine.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()); + let registrar_address = engine.machine().params().registrar; if let Some(ref addr) = registrar_address { trace!(target: "client", "Found registrar at {}", addr); } @@ -857,7 +855,7 @@ impl Client { } /// Returns engine reference. - pub fn engine(&self) -> &dyn EthEngine { + pub fn engine(&self) -> &dyn Engine { &*self.engine } @@ -1109,7 +1107,7 @@ impl Client { let mut ss = self.sleep_state.lock(); if let Some(t) = ss.last_activity { if Instant::now() > t + timeout { - self.sleep(); + self.sleep(false); ss.last_activity = None; } } @@ -1119,7 +1117,7 @@ impl Client { let now = Instant::now(); if let Some(t) = ss.last_activity { if now > t + timeout { - self.sleep(); + self.sleep(false); ss.last_activity = None; ss.last_autosleep = Some(now); } @@ -1218,10 +1216,10 @@ impl Client { } } - fn sleep(&self) { + fn sleep(&self, force: bool) { if self.liveness.load(AtomicOrdering::Relaxed) { // only sleep if the import queue is mostly empty. 
- if self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON { + if force || (self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON) { self.liveness.store(false, AtomicOrdering::Relaxed); self.notify(|n| n.stop()); info!(target: "mode", "sleep: Sleeping."); @@ -1661,7 +1659,7 @@ impl Call for Client { } impl EngineInfo for Client { - fn engine(&self) -> &dyn EthEngine { + fn engine(&self) -> &dyn Engine { Client::engine(self) } } @@ -1734,7 +1732,7 @@ impl BlockChainClient for Client { } match new_mode { Mode::Active => self.wake_up(), - Mode::Off => self.sleep(), + Mode::Off => self.sleep(true), _ => {(*self.sleep_state.lock()).last_activity = Some(Instant::now()); } } } @@ -1982,10 +1980,6 @@ impl BlockChainClient for Client { self.importer.block_queue.clear(); } - fn additional_params(&self) -> BTreeMap { - self.engine.additional_params().into_iter().collect() - } - fn logs(&self, filter: Filter) -> Result, BlockId> { let chain = self.chain.read(); @@ -2366,7 +2360,6 @@ impl PrepareOpenBlock for Client { gas_range_target, extra_data, is_epoch_begin, - chain.ancestry_with_metadata_iter(best_header.hash()), )?; // Add uncles diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 1b28ca2cdbd..7894a665de3 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -59,7 +59,7 @@ use client::{ Call, StateClient, EngineInfo, AccountData, BlockChain, BlockProducer, SealedBlockImporter, IoClient, BadBlocks }; -use engines::EthEngine; +use engines::Engine; use error::{Error, EthcoreResult}; use executed::CallError; use executive::Executed; @@ -417,7 +417,6 @@ impl PrepareOpenBlock for TestBlockChainClient { gas_range_target, extra_data, false, - None, )?; // TODO [todr] Override timestamp for predictability open_block.set_timestamp(*self.latest_block_timestamp.read()); @@ -627,7 +626,7 @@ impl StateClient for TestBlockChainClient { } impl EngineInfo for TestBlockChainClient { - fn engine(&self) -> &dyn EthEngine { + fn engine(&self) -> &dyn Engine { unimplemented!() } } @@ -841,10 +840,6 @@ impl BlockChainClient for TestBlockChainClient { fn clear_queue(&self) { } - fn additional_params(&self) -> BTreeMap { - Default::default() - } - fn filter_traces(&self, _filter: TraceFilter) -> Option> { self.traces.read().clone() } diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 9d0b27fc709..ff4b778f4a8 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -43,7 +43,7 @@ use vm::LastHashes; use block::{OpenBlock, SealedBlock, ClosedBlock}; use client::Mode; -use engines::EthEngine; +use engines::Engine; use error::{Error, EthcoreResult}; use executed::CallError; use executive::Executed; @@ -184,7 +184,7 @@ pub trait Call { /// Provides `engine` method pub trait EngineInfo { /// Get underlying engine object - fn engine(&self) -> &dyn EthEngine; + fn engine(&self) -> &dyn Engine; } /// IO operations that should off-load heavy work to another thread. @@ -296,9 +296,6 @@ pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContra /// Clear block queue and abort all import activity. fn clear_queue(&self); - /// Get the registrar address, if it exists. - fn additional_params(&self) -> BTreeMap; - /// Returns logs matching given filter. If one of the filtering block cannot be found, returns the block id that caused the error. 
fn logs(&self, filter: Filter) -> Result, BlockId>; diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs index a62d744a464..cb9b8a49954 100644 --- a/ethcore/src/engines/authority_round/mod.rs +++ b/ethcore/src/engines/authority_round/mod.rs @@ -1211,7 +1211,6 @@ impl Engine for AuthorityRound { &self, block: &mut ExecutedBlock, epoch_begin: bool, - _ancestry: &mut dyn Iterator, ) -> Result<(), Error> { // with immediate transitions, we don't use the epoch mechanism anyway. // the genesis is always considered an epoch, but we ignore it intentionally. @@ -1236,26 +1235,18 @@ impl Engine for AuthorityRound { } /// Apply the block reward on finalisation of the block. - fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block( + &self, + block: &mut ExecutedBlock, + parent: &Header, + ) -> Result<(), Error> { let mut beneficiaries = Vec::new(); if block.header.number() >= self.empty_steps_transition { let empty_steps = if block.header.seal().is_empty() { // this is a new block, calculate rewards based on the empty steps messages we have accumulated - let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { - Some(client) => client, - None => { - debug!(target: "engine", "Unable to close block: missing client ref."); - return Err(EngineError::RequiresClient.into()) - }, - }; - - let parent = client.block_header(::client::BlockId::Hash(*block.header.parent_hash())) - .expect("hash is from parent; parent header must exist; qed") - .decode()?; - - let parent_step = header_step(&parent, self.empty_steps_transition)?; + let parent_step = header_step(parent, self.empty_steps_transition)?; let current_step = self.step.inner.load(); - self.empty_steps(parent_step.into(), current_step.into(), parent.hash()) + self.empty_steps(parent_step, current_step, parent.hash()) } else { // we're verifying a block, extract empty steps from the seal header_empty_steps(&block.header)? 
@@ -1636,7 +1627,7 @@ mod tests { }; use spec::Spec; use types::transaction::{Action, Transaction}; - use engines::{Seal, Engine, EngineError, EthEngine}; + use engines::{Seal, Engine, EngineError}; use engines::validator_set::{TestSet, SimpleList}; use error::Error; use super::{AuthorityRoundParams, AuthorityRound, EmptyStep, SealedEmptyStep, calculate_score}; @@ -1707,9 +1698,9 @@ mod tests { let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b2 = b2.close_and_lock().unwrap(); engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); @@ -1741,9 +1732,9 @@ mod tests { let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b2 = b2.close_and_lock().unwrap(); engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); @@ -1942,14 +1933,14 @@ mod tests { (spec, tap, accounts) } - fn empty_step(engine: &dyn EthEngine, step: u64, parent_hash: &H256) -> EmptyStep { + fn empty_step(engine: &dyn Engine, step: u64, parent_hash: &H256) -> EmptyStep { let empty_step_rlp = super::empty_step_rlp(step, parent_hash); let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into(); let parent_hash = parent_hash.clone(); EmptyStep { step, signature, parent_hash } } - fn sealed_empty_step(engine: &dyn EthEngine, step: u64, parent_hash: &H256) -> SealedEmptyStep { + fn sealed_empty_step(engine: &dyn Engine, step: u64, parent_hash: &H256) -> SealedEmptyStep { let empty_step_rlp = super::empty_step_rlp(step, parent_hash); let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into(); SealedEmptyStep { signature, step } @@ -1991,7 +1982,7 @@ mod tests { engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, 
Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); // the block is empty so we don't seal and instead broadcast an empty step message @@ -2029,7 +2020,7 @@ mod tests { engine.register_client(Arc::downgrade(&client) as _); // step 2 - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); // since the block is empty it isn't sealed and we generate empty steps @@ -2038,7 +2029,7 @@ mod tests { engine.step(); // step 3 - let mut b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let mut b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false).unwrap(); b2.push_transaction(Transaction { action: Action::Create, nonce: U256::from(0), @@ -2082,7 +2073,7 @@ mod tests { engine.register_client(Arc::downgrade(&client) as _); // step 2 - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); // since the block is empty it isn't sealed and we generate empty steps @@ -2091,7 +2082,7 @@ mod tests { engine.step(); // step 3 - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b2 = b2.close_and_lock().unwrap(); engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); assert_eq!(engine.generate_seal(&b2, &genesis_header), Seal::None); @@ -2099,7 +2090,7 @@ mod tests { // step 4 // the spec sets the maximum_empty_steps to 2 so we will now seal an empty block and include the empty step messages - let b3 = OpenBlock::new(engine, Default::default(), false, db3, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b3 = OpenBlock::new(engine, Default::default(), false, db3, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b3 = b3.close_and_lock().unwrap(); engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); @@ -2132,7 +2123,7 @@ mod tests { engine.register_client(Arc::downgrade(&client) as _); // step 2 - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); // since the 
block is empty it isn't sealed and we generate empty steps @@ -2142,7 +2133,7 @@ mod tests { // step 3 // the signer of the accumulated empty step message should be rewarded - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let addr1_balance = b2.state.balance(&addr1).unwrap(); // after closing the block `addr1` should be reward twice, one for the included empty step message and another for block creation @@ -2242,7 +2233,6 @@ mod tests { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); let b1 = b1.close_and_lock().unwrap(); @@ -2264,7 +2254,6 @@ mod tests { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); let addr1_balance = b2.state.balance(&addr1).unwrap(); diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 18afc51ce68..b08512b79b5 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -268,7 +268,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close_and_lock().unwrap(); if let Seal::Regular(seal) = engine.generate_seal(&b, &genesis_header) { assert!(b.try_seal(engine, seal).is_ok()); diff --git a/ethcore/src/engines/clique/mod.rs b/ethcore/src/engines/clique/mod.rs index 9c8e1823feb..89bcef94c2b 100644 --- a/ethcore/src/engines/clique/mod.rs +++ b/ethcore/src/engines/clique/mod.rs @@ -368,13 +368,16 @@ impl Engine for Clique { &self, _block: &mut ExecutedBlock, _epoch_begin: bool, - _ancestry: &mut dyn Iterator, ) -> Result<(), Error> { Ok(()) } // Clique has no block reward. 
- fn on_close_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block( + &self, + _block: &mut ExecutedBlock, + _parent_header: &Header + ) -> Result<(), Error> { Ok(()) } diff --git a/ethcore/src/engines/clique/tests.rs b/ethcore/src/engines/clique/tests.rs index 76140284f6d..97e218a05ce 100644 --- a/ethcore/src/engines/clique/tests.rs +++ b/ethcore/src/engines/clique/tests.rs @@ -111,7 +111,7 @@ impl CliqueTester { /// Get signers after a certain state // This is generally used to fetch the state after a test has been executed and checked against - // the intial list of signers provided in the test + // the initial list of signers provided in the test pub fn clique_signers(&self, hash: &H256) -> impl Iterator { self.get_state_at_block(hash).signers().clone().into_iter() } @@ -171,7 +171,6 @@ impl CliqueTester { (3141562.into(), 31415620.into()), extra_data, false, - None, ).unwrap(); { diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 27f133424d5..58fbfeed1d4 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -110,7 +110,7 @@ mod tests { let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let genesis_header = spec.genesis_header(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close_and_lock().unwrap(); if let Seal::Regular(seal) = engine.generate_seal(&b, &genesis_header) { assert!(b.try_seal(engine, seal).is_ok()); diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index 8dcc36de158..b0207b075eb 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -38,7 +38,7 @@ pub use types::engines::ForkChoice; pub use types::engines::epoch::{self, Transition as EpochTransition}; use std::sync::{Weak, Arc}; -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::{fmt, error}; use builtin::Builtin; @@ -304,13 +304,16 @@ pub trait Engine: Sync + Send { &self, _block: &mut ExecutedBlock, _epoch_begin: bool, - _ancestry: &mut dyn Iterator, ) -> Result<(), Error> { Ok(()) } /// Block transformation functions, after the transactions. - fn on_close_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block( + &self, + _block: &mut ExecutedBlock, + _parent_header: &Header, + ) -> Result<(), Error> { Ok(()) } @@ -475,22 +478,7 @@ pub trait Engine: Sync + Send { fn executive_author(&self, header: &Header) -> Result { Ok(*header.author()) } -} -/// Check whether a given block is the best block based on the default total difficulty rule. -pub fn total_difficulty_fork_choice(new: &ExtendedHeader, best: &ExtendedHeader) -> ForkChoice { - if new.total_score() > best.total_score() { - ForkChoice::New - } else { - ForkChoice::Old - } -} - -/// Common type alias for an engine coupled with an Ethereum-like state machine. -// TODO: make this a _trait_ alias when those exist. -// fortunately the effect is largely the same since engines are mostly used -// via trait objects. -pub trait EthEngine: Engine { /// Get the general parameters of the chain. 
fn params(&self) -> &CommonParams { self.machine().params() @@ -558,19 +546,20 @@ pub trait EthEngine: Engine { self.machine().verify_transaction_basic(t, header) } - /// Additional information. - fn additional_params(&self) -> HashMap { - self.machine().additional_params() - } - /// Performs pre-validation of RLP decoded transaction before other processing fn decode_transaction(&self, transaction: &[u8]) -> Result { self.machine().decode_transaction(transaction) } } -// convenience wrappers for existing functions. -impl EthEngine for T where T: Engine { } +/// Check whether a given block is the best block based on the default total difficulty rule. +pub fn total_difficulty_fork_choice(new: &ExtendedHeader, best: &ExtendedHeader) -> ForkChoice { + if new.total_score() > best.total_score() { + ForkChoice::New + } else { + ForkChoice::Old + } +} /// Verifier for all blocks within an epoch with self-contained state. pub trait EpochVerifier: Send + Sync { diff --git a/ethcore/src/engines/null_engine.rs b/ethcore/src/engines/null_engine.rs index a291969772c..71cb9d459b5 100644 --- a/ethcore/src/engines/null_engine.rs +++ b/ethcore/src/engines/null_engine.rs @@ -61,7 +61,11 @@ impl Engine for NullEngine { fn machine(&self) -> &Machine { &self.machine } - fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block( + &self, + block: &mut ExecutedBlock, + _parent_header: &Header + ) -> Result<(), Error> { use std::ops::Shr; let author = *block.header.author(); diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index c29f960e573..a6540ab7908 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -239,7 +239,7 @@ impl Engine for Arc { /// Apply the block reward on finalisation of the block. /// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current). 
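
Taken together, the hunks above fold the old EthEngine methods into Engine itself and thread the parent header through on_close_block. A minimal, hedged sketch of what an implementer looks like under the new shape (MyEngine is a hypothetical type introduced only for illustration; the other required Engine items are omitted):

    // Illustrative sketch only; assumes the crate-local Engine, ExecutedBlock,
    // Header, Machine and Error types are in scope.
    impl Engine for MyEngine {
        fn machine(&self) -> &Machine { &self.machine }

        // The closing hook now also receives the parent header; engines with no
        // close-time logic simply ignore it and return Ok(()), as Clique does above.
        fn on_close_block(
            &self,
            _block: &mut ExecutedBlock,
            _parent_header: &Header,
        ) -> Result<(), Error> {
            Ok(())
        }
    }
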
- fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block(&self, block: &mut ExecutedBlock, _parent_header: &Header) -> Result<(), Error> { use std::ops::Shr; let author = *block.header.author(); @@ -540,7 +540,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close().unwrap(); assert_eq!(b.state.balance(&Address::zero()).unwrap(), U256::from_str("4563918244f40000").unwrap()); } @@ -589,7 +589,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let mut uncle = Header::new(); let uncle_author = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap(); uncle.set_author(uncle_author); @@ -607,7 +607,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close().unwrap(); let ubi_contract = Address::from_str("00efdd5883ec628983e9063c7d969fe268bbf310").unwrap(); diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index a8f7f002e53..a0ee0c2d701 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -55,7 +55,6 @@ extern crate ansi_term; extern crate bn; -extern crate byteorder; extern crate common_types as types; extern crate crossbeam; extern crate ethabi; @@ -122,6 +121,8 @@ extern crate blooms_db; extern crate env_logger; #[cfg(test)] extern crate rlp_compress; +#[cfg(test)] +extern crate serde_json; #[macro_use] extern crate ethabi_derive; diff --git a/ethcore/src/machine.rs b/ethcore/src/machine.rs index 10f3078c6b8..51d4e3f8278 100644 --- a/ethcore/src/machine.rs +++ b/ethcore/src/machine.rs @@ -16,7 +16,7 @@ //! Ethereum-like state machine definition. -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::cmp; use std::sync::Arc; @@ -385,13 +385,6 @@ impl Machine { Ok(()) } - /// Additional params. 
- pub fn additional_params(&self) -> HashMap { - hash_map![ - "registrar".to_owned() => format!("{:x}", self.params.registrar) - ] - } - /// Performs pre-validation of RLP decoded transaction before other processing pub fn decode_transaction(&self, transaction: &[u8]) -> Result { let rlp = Rlp::new(&transaction); diff --git a/ethcore/src/miner/filter_options.rs b/ethcore/src/miner/filter_options.rs new file mode 100644 index 00000000000..db0e67378ad --- /dev/null +++ b/ethcore/src/miner/filter_options.rs @@ -0,0 +1,869 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +use ethereum_types::{Address, U256}; +use serde::de::{Deserialize, Deserializer, Error, MapAccess, Visitor}; +use std::fmt; +use std::marker::PhantomData; + +/// This structure provides filtering options for the pending transactions RPC API call +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct FilterOptions { + /// Contains the operator to filter the from value of the transaction + pub from: FilterOperator
, + /// Contains the operator to filter the to value of the transaction + pub to: FilterOperator>, + /// Contains the operator to filter the gas value of the transaction + pub gas: FilterOperator, + /// Contains the operator to filter the gas price value of the transaction + pub gas_price: FilterOperator, + /// Contains the operator to filter the transaction value + pub value: FilterOperator, + /// Contains the operator to filter the nonce value of the transaction + pub nonce: FilterOperator, +} + +impl Default for FilterOptions { + fn default() -> Self { + FilterOptions { + from: FilterOperator::Any, + to: FilterOperator::Any, + gas: FilterOperator::Any, + gas_price: FilterOperator::Any, + value: FilterOperator::Any, + nonce: FilterOperator::Any, + } + } +} + +/// The highly generic use of implementing Deserialize for FilterOperator +/// will result in a compiler error if the type FilterOperator::Eq(None) +/// gets returned explicitly. Therefore this Wrapper will be used for +/// deserialization, directly identifying the contract creation. +enum Wrapper { + O(FilterOperator), + CC, // Contract Creation +} + +/// Available operators for filtering options. +/// The `from` filter only accepts Any and Eq(Address) +/// The `to` filter only accepts Any, Eq(Address) and Eq(None) for contract creation. +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum FilterOperator { + Any, + Eq(T), + GreaterThan(T), + LessThan(T), +} + +/// Since there are multiple operators which are not supported equally by all filters, +/// this trait will validate each of those operators. The corresponding method is called +/// inside the `Deserialize` -> `Visitor` implementation for FilterOperator. In case new +/// operators get introduced, a whitelist instead of a blacklist is used. 
+/// +/// The `from` filter validates with `validate_from` +/// The `to` filter validates with `validate_from` +/// All other filters such as gas and price validate with `validate_value` +trait Validate<'de, T, M: MapAccess<'de>> { + fn validate_from(&mut self) -> Result, M::Error>; + fn validate_to(&mut self) -> Result>, M::Error>; + fn validate_value(&mut self) -> Result, M::Error>; +} + +impl<'de, T, M> Validate<'de, T, M> for M + where T: Deserialize<'de>, M: MapAccess<'de> +{ + fn validate_from(&mut self) -> Result, M::Error> { + use self::Wrapper as W; + use self::FilterOperator::*; + let wrapper = self.next_value()?; + match wrapper { + W::O(val) => { + match val { + Any | Eq(_) => Ok(val), + _ => { + Err(M::Error::custom( + "the `from` filter only supports the `eq` operator", + )) + } + } + }, + W::CC => { + Err(M::Error::custom( + "the `from` filter only supports the `eq` operator", + )) + } + } + } + fn validate_to(&mut self) -> Result>, M::Error> { + use self::Wrapper as W; + use self::FilterOperator::*; + let wrapper = self.next_value()?; + match wrapper { + W::O(val) => { + match val { + Any => Ok(Any), + Eq(address) => Ok(Eq(Some(address))), + _ => { + Err(M::Error::custom( + "the `to` filter only supports the `eq` or `action` operator", + )) + } + } + }, + W::CC => Ok(FilterOperator::Eq(None)), + } + } + fn validate_value(&mut self) -> Result, M::Error> { + use self::Wrapper as W; + let wrapper = self.next_value()?; + match wrapper { + W::O(val) => Ok(val), + W::CC => { + Err(M::Error::custom( + "the operator `action` is only supported by the `to` filter", + )) + } + } + } +} + +impl<'de> Deserialize<'de> for FilterOptions { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct FilterOptionsVisitor; + impl<'de> Visitor<'de> for FilterOptionsVisitor { + type Value = FilterOptions; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + // "This Visitor expects to receive ..." + formatter.write_str("a map with one valid filter such as `from`, `to`, `gas`, `gas_price`, `value` or `nonce`") + } + + fn visit_map(self, mut map: M) -> Result + where + M: MapAccess<'de>, + { + let mut filter = FilterOptions::default(); + while let Some(key) = map.next_key()? { + match key { + "from" => { + filter.from = map.validate_from()?; + }, + "to" => { + // Compiler cannot infer type, so set one (nothing specific for this method) + filter.to = Validate::<(), _>::validate_to(&mut map)?; + }, + "gas" => { + filter.gas = map.validate_value()?; + }, + "gas_price" => { + filter.gas_price = map.validate_value()?; + }, + "value" => { + filter.value = map.validate_value()?; + }, + "nonce" => { + filter.nonce = map.validate_value()?; + }, + unknown => { + return Err(M::Error::unknown_field( + unknown, + &["from", "to", "gas", "gas_price", "value", "nonce"], + )) + } + } + } + + Ok(filter) + } + } + + impl<'de, T: Deserialize<'de>> Deserialize<'de> for Wrapper { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct WrapperVisitor { + data: PhantomData, + }; + impl<'de, T: Deserialize<'de>> Visitor<'de> for WrapperVisitor { + type Value = Wrapper; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + // "This Visitor expects to receive ..." + formatter.write_str( + "a map with one valid operator such as `eq`, `gt` or `lt`. 
\ + The to filter can also contain `action`", + ) + } + + fn visit_map(self, mut map: M) -> Result + where + M: MapAccess<'de>, + { + use self::Wrapper as W; + let mut counter = 0; + let mut f_op = Wrapper::O(FilterOperator::Any); + + while let Some(key) = map.next_key()? { + match key { + "eq" => f_op = W::O(FilterOperator::Eq(map.next_value()?)), + "gt" => f_op = W::O(FilterOperator::GreaterThan(map.next_value()?)), + "lt" => f_op = W::O(FilterOperator::LessThan(map.next_value()?)), + "action" => { + match map.next_value()? { + "contract_creation" => { + f_op = W::CC; + }, + _ => { + return Err(M::Error::custom( + "`action` only supports the value `contract_creation`", + )) + } + } + } + unknown => { + // skip mentioning `action` since it's a special/rare + // case and might confuse the usage with other filters. + return Err(M::Error::unknown_field(unknown, &["eq", "gt", "lt"])); + } + } + + counter += 1; + } + + // Good practices ensured: only one operator per filter field is allowed. + // In case there is more than just one operator, this method must still process + // all of them, otherwise serde returns an error mentioning a trailing comma issue + // (even on valid JSON), which is misleading to the user of this software. + if counter > 1 { + return Err(M::Error::custom( + "only one operator per filter type allowed", + )); + } + + Ok(f_op) + } + } + + deserializer.deserialize_map(WrapperVisitor { data: PhantomData }) + } + } + + deserializer.deserialize_map(FilterOptionsVisitor) + } +} + +#[cfg(test)] +mod tests { + use ethereum_types::{Address, U256}; + use serde_json; + use super::*; + use std::str::FromStr; + + #[test] + fn valid_defaults() { + let default = FilterOptions::default(); + assert_eq!(default.from, FilterOperator::Any); + assert_eq!(default.to, FilterOperator::Any); + assert_eq!(default.gas, FilterOperator::Any); + assert_eq!(default.gas_price, FilterOperator::Any); + assert_eq!(default.value, FilterOperator::Any); + assert_eq!(default.nonce, FilterOperator::Any); + + let json = r#"{}"#; + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, default); + } + + #[test] + fn valid_full_deserialization() { + let json = r#" + { + "from": { + "eq": "0x5f3dffcf347944d3739b0805c934d86c8621997f" + }, + "to": { + "eq": "0xe8b2d01ffa0a15736b2370b6e5064f9702c891b6" + }, + "gas": { + "eq": "0x493e0" + }, + "gas_price": { + "eq": "0x12a05f200" + }, + "value": { + "eq": "0x0" + }, + "nonce": { + "eq": "0x577" + } + } + "#; + + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + from: FilterOperator::Eq(Address::from_str("5f3dffcf347944d3739b0805c934d86c8621997f").unwrap()), + to: FilterOperator::Eq(Some(Address::from_str("e8b2d01ffa0a15736b2370b6e5064f9702c891b6").unwrap())), + gas: FilterOperator::Eq(U256::from(300_000)), + gas_price: FilterOperator::Eq(U256::from(5_000_000_000 as i64)), + value: FilterOperator::Eq(U256::from(0)), + nonce: FilterOperator::Eq(U256::from(1399)), + }) + } + + #[test] + fn invalid_full_deserialization() { + // Invalid filter type `zyx` + let json = r#" + { + "from": { + "eq": "0x5f3dffcf347944d3739b0805c934d86c8621997f" + }, + "to": { + "eq": "0xe8b2d01ffa0a15736b2370b6e5064f9702c891b6" + }, + "zyx": { + "eq": "0x493e0" + }, + "gas_price": { + "eq": "0x12a05f200" + }, + "value": { + "eq": "0x0" + }, + "nonce": { + "eq": "0x577" + } + } + "#; + + let res = serde_json::from_str::(json); + assert!(res.is_err()) + } + + #[test] + fn valid_from_operators() { + // Only one valid operator for from + let json = r#" 
+ { + "from": { + "eq": "0x5f3dffcf347944d3739b0805c934d86c8621997f" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + from: FilterOperator::Eq(Address::from_str("5f3dffcf347944d3739b0805c934d86c8621997f").unwrap()), + ..default + }); + } + + #[test] + fn invalid_from_operators() { + // Multiple operators are invalid + let json = r#" + { + "from": { + "eq": "0x5f3dffcf347944d3739b0805c934d86c8621997f", + "lt": "0x407d73d8a49eeb85d32cf465507dd71d507100c1" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Gt + let json = r#" + { + "from": { + "gt": "0x5f3dffcf347944d3739b0805c934d86c8621997f" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Lt + let json = r#" + { + "from": { + "lt": "0x5f3dffcf347944d3739b0805c934d86c8621997f" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Action + let json = r#" + { + "from": { + "action": "contract_creation" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Unknown operator + let json = r#" + { + "from": { + "abc": "0x0" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + } + + #[test] + fn valid_to_operators() { + // Only two valid operator for to + // Eq + let json = r#" + { + "to": { + "eq": "0xe8b2d01ffa0a15736b2370b6e5064f9702c891b6" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + to: FilterOperator::Eq(Some(Address::from_str("e8b2d01ffa0a15736b2370b6e5064f9702c891b6").unwrap())), + ..default.clone() + }); + + // Action + let json = r#" + { + "to": { + "action": "contract_creation" + } + } + "#; + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + to: FilterOperator::Eq(None), + ..default + }); + } + + #[test] + fn invalid_to_operators() { + // Multiple operators are invalid + let json = r#" + { + "to": { + "eq": "0xe8b2d01ffa0a15736b2370b6e5064f9702c891b6", + "action": "contract_creation" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Gt + let json = r#" + { + "to": { + "gt": "0xe8b2d01ffa0a15736b2370b6e5064f9702c891b6" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Lt + let json = r#" + { + "to": { + "lt": "0xe8b2d01ffa0a15736b2370b6e5064f9702c891b6" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Action (invalid value, must be "contract_creation") + let json = r#" + { + "to": { + "action": "some_invalid_value" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Unknown operator + let json = r#" + { + "to": { + "abc": "0x0" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + } + + #[test] + fn valid_gas_operators() { + // Eq + let json = r#" + { + "gas": { + "eq": "0x493e0" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + gas: FilterOperator::Eq(U256::from(300_000)), + ..default.clone() + }); + + // Gt + let json = r#" + { + "gas": { + "gt": "0x493e0" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + gas: FilterOperator::GreaterThan(U256::from(300_000)), + ..default.clone() + 
}); + + // Lt + let json = r#" + { + "gas": { + "lt": "0x493e0" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + gas: FilterOperator::LessThan(U256::from(300_000)), + ..default + }); + } + + #[test] + fn invalid_gas_operators() { + // Multiple operators are invalid + let json = r#" + { + "gas": { + "eq": "0x493e0", + "lt": "0x493e0" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Action + let json = r#" + { + "gas": { + "action": "contract_creation" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Unknown operator + let json = r#" + { + "gas": { + "abc": "0x0" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + } + + #[test] + fn valid_gas_price_operators() { + // Eq + let json = r#" + { + "gas_price": { + "eq": "0x12a05f200" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + gas_price: FilterOperator::Eq(U256::from(5_000_000_000 as i64)), + ..default.clone() + }); + + // Gt + let json = r#" + { + "gas_price": { + "gt": "0x12a05f200" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + gas_price: FilterOperator::GreaterThan(U256::from(5_000_000_000 as i64)), + ..default.clone() + }); + + // Lt + let json = r#" + { + "gas_price": { + "lt": "0x12a05f200" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + gas_price: FilterOperator::LessThan(U256::from(5_000_000_000 as i64)), + ..default + }); + } + + #[test] + fn invalid_gas_price_operators() { + // Multiple operators are invalid + let json = r#" + { + "gas_price": { + "eq": "0x12a05f200", + "lt": "0x12a05f200" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Action + let json = r#" + { + "gas_price": { + "action": "contract_creation" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Unknown operator + let json = r#" + { + "gas_price": { + "abc": "0x0" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + } + + #[test] + fn valid_value_operators() { + // Eq + let json = r#" + { + "value": { + "eq": "0x0" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + value: FilterOperator::Eq(U256::from(0)), + ..default.clone() + }); + + // Gt + let json = r#" + { + "value": { + "gt": "0x0" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + value: FilterOperator::GreaterThan(U256::from(0)), + ..default.clone() + }); + + // Lt + let json = r#" + { + "value": { + "lt": "0x0" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + value: FilterOperator::LessThan(U256::from(0)), + ..default + }); + } + + #[test] + fn invalid_value_operators() { + // Multiple operators are invalid + let json = r#" + { + "value": { + "eq": "0x0", + "lt": "0x0" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Action + let json = r#" + { + "value": { + "action": "contract_creation" + } + } + "#; + let 
res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Unknown operator + let json = r#" + { + "value": { + "abc": "0x0" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + } + + #[test] + fn valid_nonce_operators() { + // Eq + let json = r#" + { + "nonce": { + "eq": "0x577" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + nonce: FilterOperator::Eq(U256::from(1399)), + ..default.clone() + }); + + // Gt + let json = r#" + { + "nonce": { + "gt": "0x577" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + nonce: FilterOperator::GreaterThan(U256::from(1399)), + ..default.clone() + }); + + // Lt + let json = r#" + { + "nonce": { + "lt": "0x577" + } + } + "#; + let default = FilterOptions::default(); + let res = serde_json::from_str::(json).unwrap(); + assert_eq!(res, FilterOptions { + nonce: FilterOperator::LessThan(U256::from(1399)), + ..default + }); + } + + #[test] + fn invalid_nonce_operators() { + // Multiple operators are invalid + let json = r#" + { + "nonce": { + "eq": "0x577", + "lt": "0x577" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Action + let json = r#" + { + "nonce": { + "action": "contract_creation" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + + // Unknown operator + let json = r#" + { + "nonce": { + "abc": "0x0" + } + } + "#; + let res = serde_json::from_str::(json); + assert!(res.is_err()); + } +} diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 0bd40d1c8d1..e933d7d58b6 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -31,6 +31,7 @@ use ethcore_miner::work_notify::NotifyWork; use ethereum_types::{H256, U256, Address}; use futures::sync::mpsc; use io::IoChannel; +use miner::filter_options::{FilterOptions, FilterOperator}; use miner::pool_client::{PoolClient, CachedNonceClient, NonceCache}; use miner; use parking_lot::{Mutex, RwLock}; @@ -53,7 +54,7 @@ use client::{ BlockChain, ChainInfo, BlockProducer, SealedBlockImporter, Nonce, TransactionInfo, TransactionId }; use client::{BlockId, ClientIoMessage}; -use engines::{EthEngine, Seal, SealingState, EngineSigner}; +use engines::{Engine, Seal, SealingState, EngineSigner}; use error::Error; use executed::ExecutionError; use executive::contract_address; @@ -245,7 +246,7 @@ pub struct Miner { options: MinerOptions, // TODO [ToDr] Arc is only required because of price updater transaction_queue: Arc, - engine: Arc, + engine: Arc, accounts: Arc, io_channel: RwLock>>, service_transaction_checker: Option, @@ -547,7 +548,7 @@ impl Miner { } }, // Invalid nonce error can happen only if previous transaction is skipped because of gas limit. - // If there is errornous state of transaction queue it will be fixed when next block is imported. + // If there is erroneous state of transaction queue it will be fixed when next block is imported. 
Err(Error::Execution(ExecutionError::InvalidNonce { expected, got })) => { debug!(target: "miner", "Skipping adding transaction to block because of invalid nonce: {:?} (expected: {:?}, got: {:?})", hash, expected, got); }, @@ -1050,6 +1051,19 @@ impl miner::MinerService for Miner { -> Vec> where C: ChainInfo + Nonce + Sync, + { + // No special filtering options applied (neither tx_hash, receiver or sender) + self.ready_transactions_filtered(chain, max_len, None, ordering) + } + + fn ready_transactions_filtered( + &self, + chain: &C, + max_len: usize, + filter: Option, + ordering: miner::PendingOrdering, + ) -> Vec> where + C: ChainInfo + Nonce + Sync, { let chain_info = chain.chain_info(); @@ -1071,12 +1085,76 @@ impl miner::MinerService for Miner { ) }; + use miner::filter_options::FilterOperator::*; let from_pending = || { self.map_existing_pending_block(|sealing| { + // This filter is used for gas, gas price, value and nonce. + // Sender and receiver have their custom matches, since those + // allow/disallow different operators. + fn match_common_filter(operator: &FilterOperator, tx_value: &U256) -> bool { + match operator { + Eq(value) => tx_value == value, + GreaterThan(value) => tx_value > value, + LessThan(value) => tx_value < value, + // Will always occure on `Any`, other operators + // get handled during deserialization + _ => true, + } + } + sealing.transactions .iter() .map(|signed| pool::VerifiedTransaction::from_pending_block_transaction(signed.clone())) .map(Arc::new) + // Filter by sender + .filter(|tx| { + filter.as_ref().map_or(true, |filter| { + let sender = tx.signed().sender(); + match filter.from { + Eq(value) => sender == value, + // Will always occure on `Any`, other operators + // get handled during deserialization + _ => true, + } + }) + }) + // Filter by receiver + .filter(|tx| { + filter.as_ref().map_or(true, |filter| { + let receiver = (*tx.signed()).receiver(); + match filter.to { + // Could apply to `Some(Address)` or `None` (for contract creation) + Eq(value) => receiver == value, + // Will always occure on `Any`, other operators + // get handled during deserialization + _ => true, + } + }) + }) + // Filter by gas + .filter(|tx| { + filter.as_ref().map_or(true, |filter| { + match_common_filter(&filter.gas, &(*tx.signed()).gas) + }) + }) + // Filter by gas price + .filter(|tx| { + filter.as_ref().map_or(true, |filter| { + match_common_filter(&filter.gas_price, &(*tx.signed()).gas_price) + }) + }) + // Filter by tx value + .filter(|tx| { + filter.as_ref().map_or(true, |filter| { + match_common_filter(&filter.value, &(*tx.signed()).value) + }) + }) + // Filter by nonce + .filter(|tx| { + filter.as_ref().map_or(true, |filter| { + match_common_filter(&filter.nonce, &(*tx.signed()).nonce) + }) + }) .take(max_len) .collect() }, chain_info.best_block_number) diff --git a/ethcore/src/miner/mod.rs b/ethcore/src/miner/mod.rs index fd7ab96513c..c6397fccc12 100644 --- a/ethcore/src/miner/mod.rs +++ b/ethcore/src/miner/mod.rs @@ -20,12 +20,13 @@ //! Keeps track of transactions and currently sealed pending block. 
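
To make the new API concrete, here is a hedged usage sketch of ready_transactions_filtered: a FilterOptions is typically built by deserializing the RPC-supplied JSON map (exactly as the filter_options tests above do) and passing None keeps the old unfiltered behaviour. The names miner and client stand in for whatever instances the caller already holds; client must satisfy the ChainInfo + Nonce + Sync bound, and PendingOrdering is assumed to be the re-exported pool ordering enum.

    // Sketch only: keep pending transactions paying more than 5 Gwei.
    // Assumes `use miner::{FilterOptions, MinerService, PendingOrdering};` or equivalents.
    let json = r#"{ "gas_price": { "gt": "0x12a05f200" } }"#;
    let filter: FilterOptions = serde_json::from_str(json).expect("valid filter JSON");

    let pending = miner.ready_transactions_filtered(
        &client,
        100,              // max_len: cap the size of the returned set
        Some(filter),     // None behaves exactly like ready_transactions
        PendingOrdering::Priority,
    );
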
mod miner; - +mod filter_options; pub mod pool_client; #[cfg(feature = "stratum")] pub mod stratum; pub use self::miner::{Miner, MinerOptions, Penalization, PendingSet, AuthoringParams, Author}; +pub use self::filter_options::FilterOptions; pub use ethcore_miner::local_accounts::LocalAccounts; pub use ethcore_miner::pool::PendingOrdering; @@ -184,6 +185,14 @@ pub trait MinerService : Send + Sync { fn ready_transactions(&self, chain: &C, max_len: usize, ordering: PendingOrdering) -> Vec> where C: ChainInfo + Nonce + Sync; + /// Get a list of all ready transactions either ordered by priority or unordered (cheaper), optionally filtered by hash, sender or receiver. + /// + /// Depending on the settings may look in transaction pool or only in pending block. + /// If you don't need a full set of transactions, you can add `max_len` and create only a limited set of + /// transactions. + fn ready_transactions_filtered(&self, chain: &C, max_len: usize, filter: Option, ordering: PendingOrdering) -> Vec> + where C: ChainInfo + Nonce + Sync; + /// Get a list of all transactions in the pool (some of them might not be ready for inclusion yet). fn queued_transactions(&self) -> Vec>; diff --git a/ethcore/src/miner/pool_client.rs b/ethcore/src/miner/pool_client.rs index 98431b45852..b197e56f6c4 100644 --- a/ethcore/src/miner/pool_client.rs +++ b/ethcore/src/miner/pool_client.rs @@ -37,7 +37,7 @@ use parking_lot::RwLock; use call_contract::CallContract; use client::{TransactionId, BlockInfo, Nonce}; -use engines::EthEngine; +use engines::Engine; use miner; use transaction_ext::Transaction; @@ -72,7 +72,7 @@ impl NonceCache { pub struct PoolClient<'a, C: 'a> { chain: &'a C, cached_nonces: CachedNonceClient<'a, C>, - engine: &'a dyn EthEngine, + engine: &'a dyn Engine, accounts: &'a dyn LocalAccounts, best_block_header: Header, service_transaction_checker: Option<&'a ServiceTransactionChecker>, @@ -98,7 +98,7 @@ impl<'a, C: 'a> PoolClient<'a, C> where pub fn new( chain: &'a C, cache: &'a NonceCache, - engine: &'a dyn EthEngine, + engine: &'a dyn Engine, accounts: &'a dyn LocalAccounts, service_transaction_checker: Option<&'a ServiceTransactionChecker>, ) -> Self { diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index 087577b02c6..196d627b124 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -16,7 +16,6 @@ //! Account system expressed in Plain Old Data. -use std::fmt; use std::collections::BTreeMap; use itertools::Itertools; use hash::{keccak}; @@ -53,7 +52,8 @@ pub struct PodAccount { fn opt_bytes_to_hex(opt_bytes: &Option, serializer: S) -> Result where S: Serializer { - serializer.collect_str(&format_args!("0x{}",opt_bytes.as_ref().map_or("".to_string(), |b|b.to_hex()))) + let readable = opt_bytes.as_ref().map(|b| b.to_hex()).unwrap_or_default(); + serializer.collect_str(&format_args!("0x{}", readable)) } impl PodAccount { @@ -124,18 +124,6 @@ impl From for PodAccount { } } -impl fmt::Display for PodAccount { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)", - self.balance, - self.nonce, - self.code.as_ref().map_or(0, |c| c.len()), - self.code.as_ref().map_or_else(H256::zero, |c| keccak(c)), - self.storage.len(), - ) - } -} - /// Determine difference between two optionally existant `Account`s. Returns None /// if they are the same. 
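
The opt_bytes_to_hex rewrite above is purely cosmetic: both the old and the new version serialize None (and empty code) as "0x" and otherwise prefix the lowercase hex of the bytes. A small, self-contained sketch of the behaviour being preserved, with plain format!-based hex standing in for the crate's to_hex():

    // Sketch of the serializer helper's output, independent of serde plumbing.
    fn opt_bytes_to_hex_string(opt_bytes: &Option<Vec<u8>>) -> String {
        let readable = opt_bytes
            .as_ref()
            .map(|b| b.iter().map(|byte| format!("{:02x}", byte)).collect::<String>())
            .unwrap_or_default();
        format!("0x{}", readable)
    }

    // opt_bytes_to_hex_string(&None)                   == "0x"
    // opt_bytes_to_hex_string(&Some(vec![0xde, 0xad])) == "0xdead"
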
pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option { diff --git a/ethcore/src/pod_state.rs b/ethcore/src/pod_state.rs index 1c5ecc8400e..406bcc05dc4 100644 --- a/ethcore/src/pod_state.rs +++ b/ethcore/src/pod_state.rs @@ -16,9 +16,7 @@ //! State of all accounts in the system expressed in Plain Old Data. -use std::fmt; use std::collections::BTreeMap; -use itertools::Itertools; use ethereum_types::{H256, Address}; use triehash::sec_trie_root; use pod_account::{self, PodAccount}; @@ -30,12 +28,6 @@ use ethjson; pub struct PodState(BTreeMap); impl PodState { - /// Contruct a new object from the `m`. - pub fn new() -> PodState { Default::default() } - - /// Contruct a new object from the `m`. - pub fn from(m: BTreeMap) -> PodState { PodState(m) } - /// Get the underlying map. pub fn get(&self) -> &BTreeMap { &self.0 } @@ -65,21 +57,18 @@ impl From for PodState { } } -impl fmt::Display for PodState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (add, acc) in &self.0 { - writeln!(f, "{} => {}", add, acc)?; - } - Ok(()) +impl From> for PodState { + fn from(s: BTreeMap) -> Self { + PodState(s) } } /// Calculate and return diff between `pre` state and `post` state. pub fn diff_pod(pre: &PodState, post: &PodState) -> StateDiff { StateDiff { - raw: pre.get().keys() - .merge(post.get().keys()) - .filter_map(|acc| pod_account::diff_pod(pre.get().get(acc), post.get().get(acc)).map(|d| (acc.clone(), d))) + raw: pre.0.keys() + .chain(post.0.keys()) + .filter_map(|acc| pod_account::diff_pod(pre.0.get(acc), post.0.get(acc)).map(|d| (*acc, d))) .collect() } } @@ -87,9 +76,9 @@ pub fn diff_pod(pre: &PodState, post: &PodState) -> StateDiff { #[cfg(test)] mod test { use std::collections::BTreeMap; - use types::state_diff::*; - use types::account_diff::*; use pod_account::PodAccount; + use types::account_diff::{AccountDiff, Diff}; + use types::state_diff::StateDiff; use super::{PodState, Address}; #[test] @@ -102,7 +91,7 @@ mod test { storage: map![], } ]); - assert_eq!(super::diff_pod(&a, &PodState::new()), StateDiff { raw: map![ + assert_eq!(super::diff_pod(&a, &PodState::default()), StateDiff { raw: map![ Address::from_low_u64_be(1) => AccountDiff{ balance: Diff::Died(69.into()), nonce: Diff::Died(0.into()), @@ -110,7 +99,7 @@ mod test { storage: map![], } ]}); - assert_eq!(super::diff_pod(&PodState::new(), &a), StateDiff{ raw: map![ + assert_eq!(super::diff_pod(&PodState::default(), &a), StateDiff{ raw: map![ Address::from_low_u64_be(1) => AccountDiff{ balance: Diff::Born(69.into()), nonce: Diff::Born(0.into()), diff --git a/ethcore/src/snapshot/consensus/authority.rs b/ethcore/src/snapshot/consensus/authority.rs index 621d9845121..4f65d42909e 100644 --- a/ethcore/src/snapshot/consensus/authority.rs +++ b/ethcore/src/snapshot/consensus/authority.rs @@ -24,7 +24,7 @@ use super::{SnapshotComponents, Rebuilder, ChunkSink}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; -use engines::{EthEngine, EpochVerifier, EpochTransition}; +use engines::{Engine, EpochVerifier, EpochTransition}; use snapshot::{Error, ManifestData, Progress}; use blockchain::{BlockChain, BlockChainDB, BlockProvider}; @@ -184,7 +184,7 @@ impl ChunkRebuilder { &mut self, last_verifier: &mut Option>, transition_rlp: Rlp, - engine: &dyn EthEngine, + engine: &dyn Engine, ) -> Result { use engines::ConstructedVerifier; @@ -240,7 +240,7 @@ impl Rebuilder for ChunkRebuilder { fn feed( &mut self, chunk: &[u8], - engine: &dyn EthEngine, + engine: &dyn Engine, abort_flag: &AtomicBool, ) -> 
Result<(), ::error::Error> { let rlp = Rlp::new(chunk); @@ -348,7 +348,7 @@ impl Rebuilder for ChunkRebuilder { Ok(()) } - fn finalize(&mut self, _engine: &dyn EthEngine) -> Result<(), ::error::Error> { + fn finalize(&mut self) -> Result<(), ::error::Error> { if !self.had_genesis { return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into()); } @@ -358,6 +358,7 @@ impl Rebuilder for ChunkRebuilder { None => return Err(Error::WrongChunkFormat("Warp target block not included.".into()).into()), }; + trace!(target: "snapshot", "rebuilder, finalize: verifying {} unverified first blocks", self.unverified_firsts.len()); // verify the first entries of chunks we couldn't before. // we store all last verifiers, but not all firsts. // match each unverified first epoch with a last epoch verifier. diff --git a/ethcore/src/snapshot/consensus/mod.rs b/ethcore/src/snapshot/consensus/mod.rs index 670700c10cf..d6f317538ba 100644 --- a/ethcore/src/snapshot/consensus/mod.rs +++ b/ethcore/src/snapshot/consensus/mod.rs @@ -21,7 +21,7 @@ use std::sync::atomic::AtomicBool; use std::sync::Arc; use blockchain::{BlockChain, BlockChainDB}; -use engines::EthEngine; +use engines::Engine; use snapshot::{Error, ManifestData, Progress}; use ethereum_types::H256; @@ -83,7 +83,7 @@ pub trait Rebuilder: Send { fn feed( &mut self, chunk: &[u8], - engine: &dyn EthEngine, + engine: &dyn Engine, abort_flag: &AtomicBool, ) -> Result<(), ::error::Error>; @@ -92,5 +92,5 @@ pub trait Rebuilder: Send { /// /// This should apply the necessary "glue" between chunks, /// and verify against the restored state. - fn finalize(&mut self, engine: &dyn EthEngine) -> Result<(), ::error::Error>; + fn finalize(&mut self) -> Result<(), ::error::Error>; } diff --git a/ethcore/src/snapshot/consensus/work.rs b/ethcore/src/snapshot/consensus/work.rs index 201d528d140..eda200d6671 100644 --- a/ethcore/src/snapshot/consensus/work.rs +++ b/ethcore/src/snapshot/consensus/work.rs @@ -27,7 +27,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use blockchain::{BlockChain, BlockChainDB, BlockProvider}; -use engines::EthEngine; +use engines::Engine; use snapshot::{Error, ManifestData, Progress}; use snapshot::block::AbridgedBlock; use ethereum_types::H256; @@ -208,15 +208,15 @@ impl PowRebuilder { /// Create a new PowRebuilder. fn new(chain: BlockChain, db: Arc, manifest: &ManifestData, snapshot_blocks: u64) -> Result { Ok(PowRebuilder { - chain: chain, - db: db, + chain, + db, rng: OsRng::new().map_err(|e| format!("{}", e))?, disconnected: Vec::new(), best_number: manifest.block_number, best_hash: manifest.block_hash, best_root: manifest.state_root, fed_blocks: 0, - snapshot_blocks: snapshot_blocks, + snapshot_blocks, }) } } @@ -224,7 +224,7 @@ impl PowRebuilder { impl Rebuilder for PowRebuilder { /// Feed the rebuilder an uncompressed block chunk. /// Returns the number of blocks fed or any errors. - fn feed(&mut self, chunk: &[u8], engine: &dyn EthEngine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> { + fn feed(&mut self, chunk: &[u8], engine: &dyn Engine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> { use snapshot::verify_old_block; use ethereum_types::U256; use triehash::ordered_trie_root; @@ -298,9 +298,9 @@ impl Rebuilder for PowRebuilder { } /// Glue together any disconnected chunks and check that the chain is complete. 
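
The Rebuilder call pattern is otherwise unchanged: the engine is still required while feeding chunks, but no longer when finalizing. A hedged sketch of a driver loop (a mutable rebuilder, plus engine, abort_flag and already-decompressed chunks, are assumed from the caller, matching the snapshot tests later in this diff):

    // Sketch of driving a Rebuilder under the new trait shape.
    for chunk in &chunks {
        // Block verification during feeding still needs the engine.
        rebuilder.feed(chunk, engine.as_ref(), &abort_flag)?;
    }
    // Gluing disconnected chunks and checking chain integrity no longer takes the engine.
    rebuilder.finalize()?;
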
- fn finalize(&mut self, _: &dyn EthEngine) -> Result<(), ::error::Error> { + fn finalize(&mut self) -> Result<(), ::error::Error> { let mut batch = self.db.transaction(); - + trace!(target: "snapshot", "rebuilder, finalize: inserting {} disconnected chunks", self.disconnected.len()); for (first_num, first_hash) in self.disconnected.drain(..) { let parent_num = first_num - 1; diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index 8381bd4cb9a..68742e2e178 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -68,8 +68,8 @@ pub enum Error { BadEpochProof(u64), /// Wrong chunk format. WrongChunkFormat(String), - /// Unlinked ancient block chain - UnlinkedAncientBlockChain, + /// Unlinked ancient block chain; includes the parent hash where linkage failed + UnlinkedAncientBlockChain(H256), } impl error::Error for Error { @@ -108,7 +108,7 @@ impl fmt::Display for Error { Error::SnapshotAborted => write!(f, "Snapshot was aborted."), Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i), Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg), - Error::UnlinkedAncientBlockChain => write!(f, "Unlinked ancient blocks chain"), + Error::UnlinkedAncientBlockChain(parent_hash) => write!(f, "Unlinked ancient blocks chain at parent_hash={:#x}", parent_hash), } } } diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 96136b042fd..d5ce8c2b179 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -27,7 +27,7 @@ use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY}; use account_db::{AccountDB, AccountDBMut}; use blockchain::{BlockChain, BlockProvider}; -use engines::EthEngine; +use engines::Engine; use types::header::Header; use types::ids::BlockId; @@ -578,7 +578,7 @@ const POW_VERIFY_RATE: f32 = 0.02; /// Verify an old block with the given header, engine, blockchain, body. If `always` is set, it will perform /// the fullest verification possible. If not, it will take a random sample to determine whether it will /// do heavy or light verification. -pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &dyn EthEngine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> { +pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &dyn Engine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> { engine.verify_block_basic(header)?; if always || rng.gen::() <= POW_VERIFY_RATE { diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index a9ff866ebed..5e1efe13824 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -29,7 +29,7 @@ use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; use blockchain::{BlockChain, BlockChainDB, BlockChainDBHandler}; use client::{BlockInfo, BlockChainClient, Client, ChainInfo, ClientIoMessage}; -use engines::EthEngine; +use engines::Engine; use error::Error; use snapshot::{Error as SnapshotError}; use hash::keccak; @@ -43,6 +43,7 @@ use bytes::Bytes; use journaldb::Algorithm; use kvdb::DBTransaction; use snappy; +use snapshot::error::Error::UnlinkedAncientBlockChain; /// Helper for removing directories in case of error. struct Guard(bool, PathBuf); @@ -91,7 +92,7 @@ struct RestorationParams<'a> { writer: Option, // writer for recovered snapshot. genesis: &'a [u8], // genesis block of the chain. guard: Guard, // guard for the restoration directory. 
- engine: &'a dyn EthEngine, + engine: &'a dyn Engine, } impl Restoration { @@ -110,17 +111,17 @@ impl Restoration { let secondary = components.rebuilder(chain, raw_db.clone(), &manifest)?; - let root = manifest.state_root.clone(); + let final_state_root = manifest.state_root.clone(); Ok(Restoration { - manifest: manifest, + manifest, state_chunks_left: state_chunks, block_chunks_left: block_chunks, state: StateRebuilder::new(raw_db.key_value().clone(), params.pruning), - secondary: secondary, + secondary, writer: params.writer, snappy_buffer: Vec::new(), - final_state_root: root, + final_state_root, guard: params.guard, db: raw_db, }) @@ -149,7 +150,7 @@ impl Restoration { } // feeds a block chunk - fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &dyn EthEngine, flag: &AtomicBool) -> Result<(), Error> { + fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &dyn Engine, flag: &AtomicBool) -> Result<(), Error> { if self.block_chunks_left.contains(&hash) { let expected_len = snappy::decompressed_len(chunk)?; if expected_len > MAX_CHUNK_SIZE { @@ -170,7 +171,7 @@ impl Restoration { } // finish up restoration. - fn finalize(mut self, engine: &dyn EthEngine) -> Result<(), Error> { + fn finalize(mut self) -> Result<(), Error> { use trie::TrieError; if !self.is_done() { return Ok(()) } @@ -186,13 +187,14 @@ impl Restoration { self.state.finalize(self.manifest.block_number, self.manifest.block_hash)?; // connect out-of-order chunks and verify chain integrity. - self.secondary.finalize(engine)?; + self.secondary.finalize()?; if let Some(writer) = self.writer { writer.finish(self.manifest)?; } self.guard.disarm(); + trace!(target: "snapshot", "restoration finalised correctly"); Ok(()) } @@ -211,7 +213,7 @@ pub trait SnapshotClient: BlockChainClient + BlockInfo + DatabaseRestore {} /// Snapshot service parameters. pub struct ServiceParams { /// The consensus engine this is built on. - pub engine: Arc, + pub engine: Arc, /// The chain's genesis block. pub genesis_block: Bytes, /// State pruning algorithm. @@ -237,7 +239,7 @@ pub struct Service { pruning: Algorithm, status: Mutex, reader: RwLock>, - engine: Arc, + engine: Arc, genesis_block: Bytes, state_chunks: AtomicUsize, block_chunks: AtomicUsize, @@ -337,16 +339,6 @@ impl Service { dir } - // replace one the client's database with our own. - fn replace_client_db(&self) -> Result<(), Error> { - let migrated_blocks = self.migrate_blocks()?; - info!(target: "snapshot", "Migrated {} ancient blocks", migrated_blocks); - - let rest_db = self.restoration_db(); - self.client.restore_db(&*rest_db.to_string_lossy())?; - Ok(()) - } - // Migrate the blocks in the current DB into the new chain fn migrate_blocks(&self) -> Result { // Count the number of migrated blocks @@ -361,11 +353,27 @@ impl Service { // The old database looks like this: // [genesis, best_ancient_block] ... [first_block, best_block] - // If we are fully synced neither `best_ancient_block` nor `first_block` is set, and we can assume that the whole range from [genesis, best_block] is imported. - // The new database only contains the tip of the chain ([first_block, best_block]), + // If we are fully synced neither `best_ancient_block` nor `first_block` is set, and we can + // assume that the whole range from [genesis, best_block] is imported. + // The new database only contains the tip of the chain ([new_first_block, new_best_block]), // so the useful set of blocks is defined as: // [0 ... 
min(new.first_block, best_ancient_block or best_block)] + // + // If, for whatever reason, the old db does not have ancient blocks (i.e. + // `best_ancient_block` is `None` AND a non-zero `first_block`), such that the old db looks + // like [old_first_block..old_best_block] (which may or may not partially overlap with + // [new_first_block..new_best_block]) we do the conservative thing and do not migrate the + // old blocks. let find_range = || -> Option<(H256, H256)> { + // In theory, if the current best_block is > new first_block (i.e. ranges overlap) + // we could salvage them but what if there's been a re-org at the boundary and the two + // chains do not match anymore? We'd have to check the existing blocks carefully. + if cur_chain_info.ancient_block_number.is_none() && cur_chain_info.first_block_number.unwrap_or(0) > 0 { + info!(target: "blockchain", "blocks in the current DB do not stretch back to genesis; can't salvage them into the new DB. In current DB, first block: #{:?}/{:?}, best block: #{:?}/{:?}", + cur_chain_info.first_block_number, cur_chain_info.first_block_hash, + cur_chain_info.best_block_number, cur_chain_info.best_block_hash); + return None; + } let next_available_from = next_chain_info.first_block_number?; let cur_available_to = cur_chain_info.ancient_block_number.unwrap_or(cur_chain_info.best_block_number); @@ -375,10 +383,11 @@ impl Service { return None; } - trace!(target: "snapshot", "Trying to import ancient blocks until {}", highest_block_num); + trace!(target: "snapshot", "Trying to import ancient blocks until {}. First block in new chain=#{}, first block in old chain=#{:?}, best block in old chain=#{}", + highest_block_num, next_available_from, cur_chain_info.first_block_number, cur_chain_info.best_block_number); // Here we start from the highest block number and go backward to 0, - // thus starting at `highest_block_num` and targetting `0`. + // thus starting at `highest_block_num` and targeting `0`. let target_hash = self.client.block_hash(BlockId::Number(0))?; let start_hash = self.client.block_hash(BlockId::Number(highest_block_num))?; @@ -398,7 +407,10 @@ impl Service { return Ok(count); } - let block = self.client.block(BlockId::Hash(parent_hash)).ok_or(::snapshot::error::Error::UnlinkedAncientBlockChain)?; + let block = self.client.block(BlockId::Hash(parent_hash)).ok_or_else(|| { + error!(target: "snapshot", "migrate_blocks: did not find block from parent_hash={:#x} (start_hash={:#x})", parent_hash, start_hash); + UnlinkedAncientBlockChain(parent_hash) + })?; parent_hash = block.parent_hash(); let block_number = block.number(); @@ -412,7 +424,14 @@ impl Service { next_chain.insert_unordered_block(&mut batch, block, block_receipts, Some(parent_total_difficulty), false, true); count += 1; }, - _ => break, + _ => { + // We couldn't reach the targeted hash + error!(target: "snapshot", "migrate_blocks: failed to find receipts and parent total difficulty; cannot reach the target_hash ({:#x}). 
Block #{}, parent_hash={:#x}, parent_total_difficulty={:?}, start_hash={:#x}, ancient_block_number={:?}, best_block_number={:?}", + target_hash, block_number, parent_hash, parent_total_difficulty, + start_hash, cur_chain_info.ancient_block_number, cur_chain_info.best_block_number, + ); + return Err(UnlinkedAncientBlockChain(parent_hash).into()); + }, } // Writing changes to DB and logging every now and then @@ -433,11 +452,6 @@ impl Service { next_chain.commit(); next_db.key_value().flush().expect("DB flush failed."); - // We couldn't reach the targeted hash - if parent_hash != target_hash { - return Err(::snapshot::error::Error::UnlinkedAncientBlockChain.into()); - } - // Update best ancient block in the Next Chain next_chain.update_best_ancient_block(&start_hash); Ok(count) @@ -549,6 +563,8 @@ impl Service { *self.status.lock() = RestorationStatus::Initializing { chunks_done: 0, + state_chunks: manifest.state_hashes.len() as u32, + block_chunks: manifest.block_hashes.len() as u32, }; fs::create_dir_all(&rest_dir)?; @@ -563,7 +579,7 @@ impl Service { manifest: manifest.clone(), pruning: self.pruning, db: self.restoration_db_handler.open(&rest_db)?, - writer: writer, + writer, genesis: &self.genesis_block, guard: Guard::new(rest_db), engine: &*self.engine, @@ -654,15 +670,20 @@ impl Service { // lead to deadlock. fn finalize_restoration(&self, rest: &mut Option) -> Result<(), Error> { trace!(target: "snapshot", "finalizing restoration"); + *self.status.lock() = RestorationStatus::Finalizing; let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some()); // destroy the restoration before replacing databases and snapshot. rest.take() - .map(|r| r.finalize(&*self.engine)) + .map(|r| r.finalize()) .unwrap_or(Ok(()))?; - self.replace_client_db()?; + let migrated_blocks = self.migrate_blocks()?; + info!(target: "snapshot", "Migrated {} ancient blocks", migrated_blocks); + + // replace the Client's database with the new one (restart the Client). + self.client.restore_db(&*self.restoration_db().to_string_lossy())?; if recover { let mut reader = self.reader.write(); @@ -690,14 +711,20 @@ impl Service { /// Feed a chunk of either kind (block or state). no-op if no restoration or status is wrong. fn feed_chunk(&self, hash: H256, chunk: &[u8], is_state: bool) { // TODO: be able to process block chunks and state chunks at same time? 
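
The ancient-block migration above is essentially a backward walk by parent hash. A simplified, hedged sketch of its control flow, with batching, receipts and total-difficulty bookkeeping elided, and client, start_hash and target_hash assumed from the surrounding function:

    // Simplified sketch of the backward walk in migrate_blocks: start from the
    // newest block worth salvaging and follow parent hashes down to block #0.
    let mut parent_hash = start_hash;
    let mut count = 0u64;
    while parent_hash != target_hash {
        let block = match client.block(BlockId::Hash(parent_hash)) {
            Some(block) => block,
            // A gap in the old chain now reports the exact hash where linkage failed.
            None => return Err(UnlinkedAncientBlockChain(parent_hash).into()),
        };
        parent_hash = block.parent_hash();
        // (receipts lookup and the batched insert_unordered_block call are elided here)
        count += 1;
    }
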
- let mut restoration = self.restoration.lock(); - match self.feed_chunk_with_restoration(&mut restoration, hash, chunk, is_state) { + let r = { + let mut restoration = self.restoration.lock(); + self.feed_chunk_with_restoration(&mut restoration, hash, chunk, is_state) + }; + match r { Ok(()) | Err(Error::Snapshot(SnapshotError::RestorationAborted)) => (), Err(e) => { + // TODO: after this we're sometimes deadlocked warn!("Encountered error during snapshot restoration: {}", e); - *self.restoration.lock() = None; - *self.status.lock() = RestorationStatus::Failed; + self.abort_restore(); + if let Some(mut status) = self.status.try_lock_for(std::time::Duration::from_millis(10)) { + *status = RestorationStatus::Failed; + } let _ = fs::remove_dir_all(self.restoration_dir()); } } @@ -707,8 +734,8 @@ impl Service { fn feed_chunk_with_restoration(&self, restoration: &mut Option, hash: H256, chunk: &[u8], is_state: bool) -> Result<(), Error> { let (result, db) = { match self.status() { - RestorationStatus::Inactive | RestorationStatus::Failed => { - trace!(target: "snapshot", "Tried to restore chunk {:x} while inactive or failed", hash); + RestorationStatus::Inactive | RestorationStatus::Failed | RestorationStatus::Finalizing => { + trace!(target: "snapshot", "Tried to restore chunk {:x} while inactive, failed or finalizing", hash); return Ok(()); }, RestorationStatus::Ongoing { .. } | RestorationStatus::Initializing { .. } => { @@ -803,7 +830,7 @@ impl SnapshotService for Service { let mut cur_status = self.status.lock(); match *cur_status { - RestorationStatus::Initializing { ref mut chunks_done } => { + RestorationStatus::Initializing { ref mut chunks_done, .. } => { *chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32 + self.block_chunks.load(Ordering::SeqCst) as u32; } diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index 96c5c51b023..a6e516b1b1a 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -26,7 +26,7 @@ use account_db::AccountDBMut; use types::basic_account::BasicAccount; use blockchain::{BlockChain, BlockChainDB}; use client::{Client, ChainInfo}; -use engines::EthEngine; +use engines::Engine; use snapshot::{StateRebuilder}; use snapshot::io::{SnapshotReader, PackedWriter, PackedReader}; @@ -152,7 +152,7 @@ pub fn snap(client: &Client) -> (Box, TempDir) { /// write into the given database. pub fn restore( db: Arc, - engine: &dyn EthEngine, + engine: &dyn Engine, reader: &dyn SnapshotReader, genesis: &[u8], ) -> Result<(), ::error::Error> { @@ -187,5 +187,5 @@ pub fn restore( trace!(target: "snapshot", "finalizing"); state.finalize(manifest.block_number, manifest.block_hash)?; - secondary.finalize(engine) + secondary.finalize() } diff --git a/ethcore/src/snapshot/tests/proof_of_work.rs b/ethcore/src/snapshot/tests/proof_of_work.rs index fb714e667f5..4aa444229a2 100644 --- a/ethcore/src/snapshot/tests/proof_of_work.rs +++ b/ethcore/src/snapshot/tests/proof_of_work.rs @@ -93,7 +93,7 @@ fn chunk_and_restore(amount: u64) { rebuilder.feed(&chunk, engine.as_ref(), &flag).unwrap(); } - rebuilder.finalize(engine.as_ref()).unwrap(); + rebuilder.finalize().unwrap(); drop(rebuilder); // and test it. 
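
Downstream code that matches on RestorationStatus has to account for the richer Initializing variant and the new Finalizing state. A hedged sketch of a status consumer (for example a progress printer), assuming the variant shapes used in service.rs above and a status value obtained from the snapshot service:

    // Sketch: reporting progress for the extended status enum. Field names follow
    // the usage in service.rs above; remaining variants are elided with `_`.
    match status {
        RestorationStatus::Initializing { chunks_done, state_chunks, block_chunks } => {
            println!(
                "initializing restoration: {}/{} chunks",
                chunks_done,
                state_chunks + block_chunks,
            );
        }
        RestorationStatus::Finalizing => {
            // Once finalization starts, further chunks are ignored (see feed_chunk above).
            println!("finalizing restoration");
        }
        _ => {}
    }
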
diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 140bd8264f6..7a409f52188 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -35,7 +35,7 @@ use vm::{EnvInfo, CallType, ActionValue, ActionParams, ParamsType}; use builtin::Builtin; use engines::{ - EthEngine, NullEngine, InstantSeal, InstantSealParams, BasicAuthority, Clique, + Engine, NullEngine, InstantSeal, InstantSealParams, BasicAuthority, Clique, AuthorityRound, DEFAULT_BLOCKHASH_CONTRACT }; use error::Error; @@ -138,7 +138,7 @@ pub struct CommonParams { /// Gas limit bound divisor (how much gas limit can change per block) pub gas_limit_bound_divisor: U256, /// Registrar contract address. - pub registrar: Address, + pub registrar: Option
<Address>, /// Node permission managing contract address. pub node_permission_contract: Option<Address>
, /// Maximum contract code size that can be deployed. @@ -315,7 +315,7 @@ impl From for CommonParams { nonce_cap_increment: p.nonce_cap_increment.map_or(64, Into::into), remove_dust_contracts: p.remove_dust_contracts.unwrap_or(false), gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(), - registrar: p.registrar.map_or_else(Address::zero, Into::into), + registrar: p.registrar.map(Into::into), node_permission_contract: p.node_permission_contract.map(Into::into), max_code_size: p.max_code_size.map_or(u64::max_value(), Into::into), max_transaction_size: p.max_transaction_size.map_or(MAX_TRANSACTION_SIZE, Into::into), @@ -382,7 +382,7 @@ pub struct Spec { /// User friendly spec name pub name: String, /// What engine are we using for this? - pub engine: Arc, + pub engine: Arc, /// Name of the subdir inside the main data dir to use for chain data and settings. pub data_dir: String, @@ -601,7 +601,7 @@ impl Spec { engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap, - ) -> Arc { + ) -> Arc { let machine = Self::machine(&engine_spec, params, builtins); match engine_spec { diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 62e2465d4ac..4f26823aea6 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -2678,7 +2678,7 @@ mod tests { state.kill_account(&a); let diff = state.diff_from(original).unwrap(); - let diff_map = diff.get(); + let diff_map = diff.raw; assert_eq!(diff_map.len(), 1); assert!(diff_map.get(&a).is_some()); assert_eq!(diff_map.get(&a), @@ -2709,7 +2709,7 @@ mod tests { state.set_storage(&a, BigEndianHash::from_uint(&U256::from(1u64)), BigEndianHash::from_uint(&U256::from(100u64))).unwrap(); let diff = state.diff_from(original).unwrap(); - let diff_map = diff.get(); + let diff_map = diff.raw; assert_eq!(diff_map.len(), 1); assert!(diff_map.get(&a).is_some()); assert_eq!(diff_map.get(&a), diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs index 132677ab5cf..cb00d2132e8 100644 --- a/ethcore/src/state_db.rs +++ b/ethcore/src/state_db.rs @@ -21,7 +21,6 @@ use std::io; use std::sync::Arc; use bloom_journal::{Bloom, BloomJournal}; -use byteorder::{LittleEndian, ByteOrder}; use db::COL_ACCOUNT_BLOOM; use ethereum_types::{H256, Address}; use hash::keccak; @@ -169,11 +168,15 @@ impl StateDB { let hash_count = hash_count_bytes[0]; let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8]; - let mut key = [0u8; 8]; for i in 0..ACCOUNT_BLOOM_SPACE / 8 { - LittleEndian::write_u64(&mut key, i as u64); + let key: [u8; 8] = (i as u64).to_le_bytes(); bloom_parts[i] = db.get(COL_ACCOUNT_BLOOM, &key).expect("low-level database error") - .and_then(|val| Some(LittleEndian::read_u64(&val[..]))) + .map(|val| { + assert!(val.len() == 8, "low-level database error"); + let mut buff = [0u8; 8]; + buff.copy_from_slice(&*val); + u64::from_le_bytes(buff) + }) .unwrap_or(0u64); } @@ -186,12 +189,10 @@ impl StateDB { pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> io::Result<()> { assert!(journal.hash_functions <= 255); batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &[journal.hash_functions as u8]); - let mut key = [0u8; 8]; - let mut val = [0u8; 8]; for (bloom_part_index, bloom_part_value) in journal.entries { - LittleEndian::write_u64(&mut key, bloom_part_index as u64); - LittleEndian::write_u64(&mut val, bloom_part_value); + let key: [u8; 8] = (bloom_part_index as u64).to_le_bytes(); + let val: [u8; 8] = bloom_part_value.to_le_bytes(); batch.put(COL_ACCOUNT_BLOOM, &key, &val); } Ok(()) diff --git 
a/ethcore/src/test_helpers.rs b/ethcore/src/test_helpers.rs index 6bdaeadf9bd..5137aade025 100644 --- a/ethcore/src/test_helpers.rs +++ b/ethcore/src/test_helpers.rs @@ -155,7 +155,6 @@ pub fn generate_dummy_client_with_spec_and_data(test_spec: F, block_number: u (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); rolling_timestamp += 10; b.set_timestamp(rolling_timestamp); diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 6edd16dba9c..343751b0481 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -69,11 +69,8 @@ fn should_return_registrar() { Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), ).unwrap(); - let params = client.additional_params(); - let address = ¶ms["registrar"]; - - assert_eq!(address.len(), 40); - assert!(U256::from_str(address).is_ok()); + let address = client.registrar_address(); + assert_eq!(address, Some("52dff57a8a1532e6afb3dc07e2af58bb9eb05b3d".parse().unwrap())); } #[test] @@ -120,7 +117,7 @@ fn query_none_block() { Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), ).unwrap(); - let non_existant = client.block_header(BlockId::Number(188)); + let non_existant = client.block_header(BlockId::Number(188)); assert!(non_existant.is_none()); } diff --git a/ethcore/src/tests/trace.rs b/ethcore/src/tests/trace.rs index 882fed4436a..b80c7485128 100644 --- a/ethcore/src/tests/trace.rs +++ b/ethcore/src/tests/trace.rs @@ -87,7 +87,6 @@ fn can_trace_block_and_uncle_reward() { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); rolling_timestamp += 10; root_block.set_timestamp(rolling_timestamp); @@ -116,7 +115,6 @@ fn can_trace_block_and_uncle_reward() { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); rolling_timestamp += 10; parent_block.set_timestamp(rolling_timestamp); @@ -144,7 +142,6 @@ fn can_trace_block_and_uncle_reward() { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); rolling_timestamp += 10; block.set_timestamp(rolling_timestamp); diff --git a/ethcore/src/verification/canon_verifier.rs b/ethcore/src/verification/canon_verifier.rs index 76f37c19df0..230521c305b 100644 --- a/ethcore/src/verification/canon_verifier.rs +++ b/ethcore/src/verification/canon_verifier.rs @@ -18,7 +18,7 @@ use call_contract::CallContract; use client::BlockInfo; -use engines::EthEngine; +use engines::Engine; use error::Error; use types::header::Header; use super::Verifier; @@ -32,7 +32,7 @@ impl Verifier for CanonVerifier { &self, header: &Header, parent: &Header, - engine: &dyn EthEngine, + engine: &dyn Engine, do_full: Option>, ) -> Result<(), Error> { verification::verify_block_family(header, parent, engine, do_full) @@ -42,7 +42,7 @@ impl Verifier for CanonVerifier { verification::verify_block_final(expected, got) } - fn verify_block_external(&self, header: &Header, engine: &dyn EthEngine) -> Result<(), Error> { + fn verify_block_external(&self, header: &Header, engine: &dyn Engine) -> Result<(), Error> { engine.verify_block_external(header) } } diff --git a/ethcore/src/verification/noop_verifier.rs b/ethcore/src/verification/noop_verifier.rs index 3b646ed9e40..f503caa2f16 100644 --- a/ethcore/src/verification/noop_verifier.rs +++ b/ethcore/src/verification/noop_verifier.rs @@ -18,7 +18,7 @@ use call_contract::CallContract; use client::BlockInfo; -use engines::EthEngine; +use engines::Engine; use error::Error; use types::header::Header; use super::{verification, Verifier}; @@ -32,7 +32,7 @@ impl Verifier for 
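The reworked registrar test compares against a parsed `Address` instead of inspecting `additional_params` strings. A small sketch, assuming the fixed-hash `FromStr` impl that accepts the bare 40-character hex form (as the test itself does):

```rust
use ethereum_types::Address; // H160 alias used throughout the codebase

fn main() {
    // The bare 40-character hex form is what the updated test parses.
    let registrar: Address = "52dff57a8a1532e6afb3dc07e2af58bb9eb05b3d"
        .parse()
        .expect("valid 20-byte hex address");
    assert_eq!(
        format!("{:x}", registrar),
        "52dff57a8a1532e6afb3dc07e2af58bb9eb05b3d"
    );
}
```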
NoopVerifier { &self, _: &Header, _t: &Header, - _: &dyn EthEngine, + _: &dyn Engine, _: Option> ) -> Result<(), Error> { Ok(()) @@ -42,7 +42,7 @@ impl Verifier for NoopVerifier { Ok(()) } - fn verify_block_external(&self, _header: &Header, _engine: &dyn EthEngine) -> Result<(), Error> { + fn verify_block_external(&self, _header: &Header, _engine: &dyn Engine) -> Result<(), Error> { Ok(()) } } diff --git a/ethcore/src/verification/queue/kind.rs b/ethcore/src/verification/queue/kind.rs index 508503a04b0..0ac0b4f34fc 100644 --- a/ethcore/src/verification/queue/kind.rs +++ b/ethcore/src/verification/queue/kind.rs @@ -16,7 +16,7 @@ //! Definition of valid items for the verification queue. -use engines::EthEngine; +use engines::Engine; use error::Error; use parity_util_mem::MallocSizeOf; @@ -58,17 +58,17 @@ pub trait Kind: 'static + Sized + Send + Sync { type Verified: Sized + Send + BlockLike + MallocSizeOf; /// Attempt to create the `Unverified` item from the input. - fn create(input: Self::Input, engine: &dyn EthEngine, check_seal: bool) -> Result; + fn create(input: Self::Input, engine: &dyn Engine, check_seal: bool) -> Result; /// Attempt to verify the `Unverified` item using the given engine. - fn verify(unverified: Self::Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result; + fn verify(unverified: Self::Unverified, engine: &dyn Engine, check_seal: bool) -> Result; } /// The blocks verification module. pub mod blocks { use super::{Kind, BlockLike}; - use engines::EthEngine; + use engines::Engine; use error::{Error, BlockError}; use types::header::Header; use verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered}; @@ -86,7 +86,7 @@ pub mod blocks { type Unverified = Unverified; type Verified = PreverifiedBlock; - fn create(input: Self::Input, engine: &dyn EthEngine, check_seal: bool) -> Result { + fn create(input: Self::Input, engine: &dyn Engine, check_seal: bool) -> Result { match verify_block_basic(&input, engine, check_seal) { Ok(()) => Ok(input), Err(Error::Block(BlockError::TemporarilyInvalid(oob))) => { @@ -100,7 +100,7 @@ pub mod blocks { } } - fn verify(un: Self::Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result { + fn verify(un: Self::Unverified, engine: &dyn Engine, check_seal: bool) -> Result { let hash = un.hash(); match verify_block_unordered(un, engine, check_seal) { Ok(verified) => Ok(verified), @@ -179,7 +179,7 @@ pub mod blocks { pub mod headers { use super::{Kind, BlockLike}; - use engines::EthEngine; + use engines::Engine; use error::Error; use types::header::Header; use verification::verify_header_params; @@ -200,14 +200,14 @@ pub mod headers { type Unverified = Header; type Verified = Header; - fn create(input: Self::Input, engine: &dyn EthEngine, check_seal: bool) -> Result { + fn create(input: Self::Input, engine: &dyn Engine, check_seal: bool) -> Result { match verify_header_params(&input, engine, true, check_seal) { Ok(_) => Ok(input), Err(err) => Err((input, err)) } } - fn verify(unverified: Self::Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result { + fn verify(unverified: Self::Unverified, engine: &dyn Engine, check_seal: bool) -> Result { match check_seal { true => engine.verify_block_unordered(&unverified,).map(|_| unverified), false => Ok(unverified), diff --git a/ethcore/src/verification/queue/mod.rs b/ethcore/src/verification/queue/mod.rs index 7acc496ebb8..21e9f92ef90 100644 --- a/ethcore/src/verification/queue/mod.rs +++ b/ethcore/src/verification/queue/mod.rs @@ -27,7 +27,7 @@ use 
ethereum_types::{H256, U256}; use parking_lot::{Condvar, Mutex, RwLock}; use io::*; use error::{BlockError, ImportError, Error}; -use engines::EthEngine; +use engines::Engine; use client::ClientIoMessage; use len_caching_lock::LenCachingMutex; @@ -133,7 +133,7 @@ struct Sizes { /// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`. /// Keeps them in the same order as inserted, minus invalid items. pub struct VerificationQueue { - engine: Arc, + engine: Arc, more_to_verify: Arc, verification: Arc>, deleting: Arc, @@ -201,7 +201,7 @@ struct Verification { impl VerificationQueue { /// Creates a new queue instance. - pub fn new(config: Config, engine: Arc, message_channel: IoChannel, check_seal: bool) -> Self { + pub fn new(config: Config, engine: Arc, message_channel: IoChannel, check_seal: bool) -> Self { let verification = Arc::new(Verification { unverified: LenCachingMutex::new(VecDeque::new()), verifying: LenCachingMutex::new(VecDeque::new()), @@ -288,7 +288,7 @@ impl VerificationQueue { fn verify( verification: Arc>, - engine: Arc, + engine: Arc, wait: Arc, ready: Arc, empty: Arc, diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index 87a19697c45..664926374f4 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -34,7 +34,7 @@ use unexpected::{Mismatch, OutOfBounds}; use blockchain::*; use call_contract::CallContract; use client::BlockInfo; -use engines::{EthEngine, MAX_UNCLE_AGE}; +use engines::{Engine, MAX_UNCLE_AGE}; use error::{BlockError, Error}; use types::{BlockNumber, header::Header}; use types::transaction::SignedTransaction; @@ -56,7 +56,7 @@ pub struct PreverifiedBlock { } /// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block -pub fn verify_block_basic(block: &Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result<(), Error> { +pub fn verify_block_basic(block: &Unverified, engine: &dyn Engine, check_seal: bool) -> Result<(), Error> { verify_header_params(&block.header, engine, true, check_seal)?; verify_block_integrity(block)?; @@ -81,7 +81,7 @@ pub fn verify_block_basic(block: &Unverified, engine: &dyn EthEngine, check_seal /// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash. /// Still operates on a individual block /// Returns a `PreverifiedBlock` structure populated with transactions -pub fn verify_block_unordered(block: Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result { +pub fn verify_block_unordered(block: Unverified, engine: &dyn Engine, check_seal: bool) -> Result { let header = block.header; if check_seal { engine.verify_block_unordered(&header)?; @@ -130,7 +130,7 @@ pub struct FullFamilyParams<'a, C: BlockInfo + CallContract + 'a> { } /// Phase 3 verification. Check block information against parent and uncles. 
-pub fn verify_block_family(header: &Header, parent: &Header, engine: &dyn EthEngine, do_full: Option>) -> Result<(), Error> { +pub fn verify_block_family(header: &Header, parent: &Header, engine: &dyn Engine, do_full: Option>) -> Result<(), Error> { // TODO: verify timestamp verify_parent(&header, &parent, engine)?; engine.verify_block_family(&header, &parent)?; @@ -151,7 +151,7 @@ pub fn verify_block_family(header: &Header, parent: Ok(()) } -fn verify_uncles(block: &PreverifiedBlock, bc: &dyn BlockProvider, engine: &dyn EthEngine) -> Result<(), Error> { +fn verify_uncles(block: &PreverifiedBlock, bc: &dyn BlockProvider, engine: &dyn Engine) -> Result<(), Error> { let header = &block.header; let num_uncles = block.uncles.len(); let max_uncles = engine.maximum_uncle_count(header.number()); @@ -259,7 +259,7 @@ pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error> } /// Check basic header parameters. -pub fn verify_header_params(header: &Header, engine: &dyn EthEngine, is_full: bool, check_seal: bool) -> Result<(), Error> { +pub fn verify_header_params(header: &Header, engine: &dyn Engine, is_full: bool, check_seal: bool) -> Result<(), Error> { if check_seal { let expected_seal_fields = engine.seal_fields(header); if header.seal().len() != expected_seal_fields { @@ -318,7 +318,7 @@ pub fn verify_header_params(header: &Header, engine: &dyn EthEngine, is_full: bo } /// Check header parameters agains parent header. -fn verify_parent(header: &Header, parent: &Header, engine: &dyn EthEngine) -> Result<(), Error> { +fn verify_parent(header: &Header, parent: &Header, engine: &dyn Engine) -> Result<(), Error> { assert!(header.parent_hash().is_zero() || &parent.hash() == header.parent_hash(), "Parent hash should already have been verified; qed"); @@ -381,7 +381,7 @@ mod tests { use blockchain::{BlockDetails, TransactionAddress, BlockReceipts}; use types::encoded; use hash::keccak; - use engines::EthEngine; + use engines::Engine; use error::BlockError::*; use ethkey::{Random, Generator}; use spec::{CommonParams, Spec}; @@ -508,12 +508,12 @@ mod tests { } } - fn basic_test(bytes: &[u8], engine: &dyn EthEngine) -> Result<(), Error> { + fn basic_test(bytes: &[u8], engine: &dyn Engine) -> Result<(), Error> { let unverified = Unverified::from_rlp(bytes.to_vec())?; verify_block_basic(&unverified, engine, true) } - fn family_test(bytes: &[u8], engine: &dyn EthEngine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { + fn family_test(bytes: &[u8], engine: &dyn Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { let block = Unverified::from_rlp(bytes.to_vec()).unwrap(); let header = block.header; let transactions: Vec<_> = block.transactions @@ -545,7 +545,7 @@ mod tests { verify_block_family(&block.header, &parent, engine, Some(full_params)) } - fn unordered_test(bytes: &[u8], engine: &dyn EthEngine) -> Result<(), Error> { + fn unordered_test(bytes: &[u8], engine: &dyn Engine) -> Result<(), Error> { let un = Unverified::from_rlp(bytes.to_vec())?; verify_block_unordered(un, engine, false)?; Ok(()) diff --git a/ethcore/src/verification/verifier.rs b/ethcore/src/verification/verifier.rs index 309e596aa78..b5c2782a6f4 100644 --- a/ethcore/src/verification/verifier.rs +++ b/ethcore/src/verification/verifier.rs @@ -18,7 +18,7 @@ use call_contract::CallContract; use client::BlockInfo; -use engines::EthEngine; +use engines::Engine; use error::Error; use types::header::Header; use super::verification; @@ -32,12 +32,12 @@ pub trait Verifier: Send + Sync &self, header: 
&Header, parent: &Header, - engine: &dyn EthEngine, + engine: &dyn Engine, do_full: Option> ) -> Result<(), Error>; /// Do a final verification check for an enacted header vs its expected counterpart. fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>; /// Verify a block, inspecting external state. - fn verify_block_external(&self, header: &Header, engine: &dyn EthEngine) -> Result<(), Error>; + fn verify_block_external(&self, header: &Header, engine: &dyn Engine) -> Result<(), Error>; } diff --git a/ethcore/sync/src/chain/handler.rs b/ethcore/sync/src/chain/handler.rs index afd0b4ff227..ada5058dc9c 100644 --- a/ethcore/sync/src/chain/handler.rs +++ b/ethcore/sync/src/chain/handler.rs @@ -256,7 +256,7 @@ impl SyncHandler { return Err(DownloaderImportError::Invalid); } match io.chain().block_status(BlockId::Hash(hash.clone())) { - BlockStatus::InChain => { + BlockStatus::InChain => { trace!(target: "sync", "New block hash already in chain {:?}", hash); }, BlockStatus::Queued => { @@ -529,10 +529,14 @@ impl SyncHandler { sync.snapshot.clear(); return Ok(()); }, - RestorationStatus::Initializing { .. } => { + RestorationStatus::Initializing { .. } => { trace!(target: "warp", "{}: Snapshot restoration is initializing", peer_id); return Ok(()); } + RestorationStatus::Finalizing => { + trace!(target: "warp", "{}: Snapshot finalizing restoration", peer_id); + return Ok(()); + } RestorationStatus::Ongoing { .. } => { trace!(target: "sync", "{}: Snapshot restoration is ongoing", peer_id); }, diff --git a/ethcore/sync/src/chain/mod.rs b/ethcore/sync/src/chain/mod.rs index 0c2b03d1bd0..fb655308d98 100644 --- a/ethcore/sync/src/chain/mod.rs +++ b/ethcore/sync/src/chain/mod.rs @@ -1210,7 +1210,7 @@ impl ChainSync { RestorationStatus::Inactive | RestorationStatus::Failed => { self.set_state(SyncState::SnapshotWaiting); }, - RestorationStatus::Initializing { .. } | RestorationStatus::Ongoing { .. } => (), + RestorationStatus::Initializing { .. } | RestorationStatus::Ongoing { .. } | RestorationStatus::Finalizing => (), }, SyncState::SnapshotWaiting => { match io.snapshot_service().status() { @@ -1221,6 +1221,9 @@ impl ChainSync { RestorationStatus::Initializing { .. } => { trace!(target:"sync", "Snapshot restoration is initializing"); }, + RestorationStatus::Finalizing { .. } => { + trace!(target:"sync", "Snapshot finalizing restoration"); + }, RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } => { if !self.snapshot.is_complete() && self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize <= MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD { trace!(target:"sync", "Resuming snapshot sync"); diff --git a/ethcore/types/src/account_diff.rs b/ethcore/types/src/account_diff.rs index 90ee73305d2..b9ff991565d 100644 --- a/ethcore/types/src/account_diff.rs +++ b/ethcore/types/src/account_diff.rs @@ -16,12 +16,9 @@ //! Diff between two accounts. -use std::cmp::*; -use std::fmt; use std::collections::BTreeMap; -use std::convert::TryFrom; -use ethereum_types::{BigEndianHash as _, H256, U256}; use bytes::Bytes; +use ethereum_types::{H256, U256}; #[derive(Debug, PartialEq, Eq, Clone)] /// Diff type for specifying a change (or not). @@ -46,14 +43,13 @@ impl Diff { } } - /// Get the before value, if there is one. - pub fn pre(&self) -> Option<&T> { match *self { Diff::Died(ref x) | Diff::Changed(ref x, _) => Some(x), _ => None } } - - /// Get the after value, if there is one. 
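The sync handler above gains an arm for the new `Finalizing` restoration state. A minimal sketch of the exhaustive match callers now need, using a simplified stand-in for `RestorationStatus` (field names follow the variants added later in this diff):

```rust
// Simplified stand-in for types::restoration_status::RestorationStatus, only
// to show the extra arm every exhaustive match needs once Finalizing exists.
#[allow(dead_code)]
enum RestorationStatus {
    Inactive,
    Initializing { state_chunks: u32, block_chunks: u32, chunks_done: u32 },
    Ongoing { state_chunks_done: u32, block_chunks_done: u32 },
    Finalizing,
    Failed,
}

fn describe(status: &RestorationStatus) -> String {
    match *status {
        RestorationStatus::Inactive => "inactive".into(),
        RestorationStatus::Initializing { state_chunks, block_chunks, chunks_done } =>
            format!("initializing, {}/{} chunks restored", chunks_done, state_chunks + block_chunks),
        RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } =>
            format!("ongoing, {} chunks done", state_chunks_done + block_chunks_done),
        // New state: every chunk is imported, final bookkeeping is in progress.
        RestorationStatus::Finalizing => "finalizing".into(),
        RestorationStatus::Failed => "failed".into(),
    }
}

fn main() {
    assert_eq!(describe(&RestorationStatus::Finalizing), "finalizing");
    let init = RestorationStatus::Initializing { state_chunks: 12, block_chunks: 8, chunks_done: 5 };
    assert_eq!(describe(&init), "initializing, 5/20 chunks restored");
}
```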
- pub fn post(&self) -> Option<&T> { match *self { Diff::Born(ref x) | Diff::Changed(_, ref x) => Some(x), _ => None } } - /// Determine whether there was a change or not. - pub fn is_same(&self) -> bool { match *self { Diff::Same => true, _ => false }} + pub fn is_same(&self) -> bool { + match *self { + Diff::Same => true, + _ => false + } + } } #[derive(Debug, PartialEq, Eq, Clone)] @@ -62,83 +58,9 @@ pub struct AccountDiff { /// Change in balance, allowed to be `Diff::Same`. pub balance: Diff, /// Change in nonce, allowed to be `Diff::Same`. - pub nonce: Diff, // Allowed to be Same + pub nonce: Diff, /// Change in code, allowed to be `Diff::Same`. - pub code: Diff, // Allowed to be Same + pub code: Diff, /// Change in storage, values are not allowed to be `Diff::Same`. pub storage: BTreeMap>, } - -#[derive(Debug, PartialEq, Eq, Clone)] -/// Change in existance type. -// TODO: include other types of change. -pub enum Existance { - /// Item came into existance. - Born, - /// Item stayed in existance. - Alive, - /// Item went out of existance. - Died, -} - -impl fmt::Display for Existance { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Existance::Born => write!(f, "+++")?, - Existance::Alive => write!(f, "***")?, - Existance::Died => write!(f, "XXX")?, - } - Ok(()) - } -} - -impl AccountDiff { - /// Get `Existance` projection. - pub fn existance(&self) -> Existance { - match self.balance { - Diff::Born(_) => Existance::Born, - Diff::Died(_) => Existance::Died, - _ => Existance::Alive, - } - } -} - -// TODO: refactor into something nicer. -fn interpreted_hash(u: &H256) -> String { - let uint = u.into_uint(); - if let Ok(n) = u64::try_from(uint) { - format!("{} = {:#x}", n, n) - } else { - format!("#{}", u) - } -} - -impl fmt::Display for AccountDiff { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use bytes::ToPretty; - - match self.nonce { - Diff::Born(ref x) => write!(f, " non {}", x)?, - Diff::Changed(ref pre, ref post) => write!(f, "#{} ({} {} {})", post, pre, if pre > post {"-"} else {"+"}, *max(pre, post) - * min(pre, post))?, - _ => {}, - } - match self.balance { - Diff::Born(ref x) => write!(f, " bal {}", x)?, - Diff::Changed(ref pre, ref post) => write!(f, "${} ({} {} {})", post, pre, if pre > post {"-"} else {"+"}, *max(pre, post) - *min(pre, post))?, - _ => {}, - } - if let Diff::Born(ref x) = self.code { - write!(f, " code {}", x.pretty())?; - } - write!(f, "\n")?; - for (k, dv) in &self.storage { - match *dv { - Diff::Born(ref v) => write!(f, " + {} => {}\n", interpreted_hash(k), interpreted_hash(v))?, - Diff::Changed(ref pre, ref post) => write!(f, " * {} => {} (was {})\n", interpreted_hash(k), interpreted_hash(post), interpreted_hash(pre))?, - Diff::Died(_) => write!(f, " X {}\n", interpreted_hash(k))?, - _ => {}, - } - } - Ok(()) - } -} diff --git a/ethcore/types/src/restoration_status.rs b/ethcore/types/src/restoration_status.rs index b36ec7ef4a9..f02aa118c80 100644 --- a/ethcore/types/src/restoration_status.rs +++ b/ethcore/types/src/restoration_status.rs @@ -23,6 +23,10 @@ pub enum RestorationStatus { Inactive, /// Restoration is initializing Initializing { + /// Total number of state chunks. + state_chunks: u32, + /// Total number of block chunks. + block_chunks: u32, /// Number of chunks done/imported chunks_done: u32, }, @@ -37,6 +41,7 @@ pub enum RestorationStatus { /// Number of block chunks completed. block_chunks_done: u32, }, + Finalizing, /// Failed restoration. 
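account_diff.rs keeps only the data-bearing parts of `Diff` and drops the `Display`/`Existance` helpers. A stand-in sketch of the shape that survives the trimming, plus the retained `is_same` predicate:

```rust
// Simplified stand-in for types::account_diff::Diff.
#[allow(dead_code)]
#[derive(Debug, PartialEq, Eq, Clone)]
enum Diff<T> {
    Same,
    Born(T),
    Changed(T, T),
    Died(T),
}

impl<T> Diff<T> {
    // The only helper kept after the pre/post accessors were removed.
    fn is_same(&self) -> bool {
        match *self {
            Diff::Same => true,
            _ => false,
        }
    }
}

fn main() {
    assert!(Diff::<u32>::Same.is_same());
    assert!(!Diff::Born(1u32).is_same());
}
```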
Failed, } diff --git a/ethcore/types/src/state_diff.rs b/ethcore/types/src/state_diff.rs index 5605719f015..21a10027f69 100644 --- a/ethcore/types/src/state_diff.rs +++ b/ethcore/types/src/state_diff.rs @@ -16,11 +16,9 @@ //! State diff module. -use std::fmt; -use std::ops::*; use std::collections::BTreeMap; +use account_diff::AccountDiff; use ethereum_types::Address; -use account_diff::*; /// Expression for the delta between two system states. Encoded the /// delta of every altered account. @@ -29,27 +27,3 @@ pub struct StateDiff { /// Raw diff key-value pub raw: BTreeMap } - -impl StateDiff { - /// Get the actual data. - pub fn get(&self) -> &BTreeMap { - &self.raw - } -} - -impl fmt::Display for StateDiff { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (add, acc) in &self.raw { - write!(f, "{} {}: {}", acc.existance(), add, acc)?; - } - Ok(()) - } -} - -impl Deref for StateDiff { - type Target = BTreeMap; - - fn deref(&self) -> &Self::Target { - &self.raw - } -} diff --git a/ethcore/types/src/transaction/transaction.rs b/ethcore/types/src/transaction/transaction.rs index 12e0d712563..dbd84b0fd95 100644 --- a/ethcore/types/src/transaction/transaction.rs +++ b/ethcore/types/src/transaction/transaction.rs @@ -313,6 +313,14 @@ impl UnverifiedTransaction { self.r.is_zero() && self.s.is_zero() } + /// Returns transaction receiver, if any + pub fn receiver(&self) -> Option
{ + match self.unsigned.action { + Action::Create => None, + Action::Call(receiver) => Some(receiver), + } + } + /// Append object with a signature into RLP stream fn rlp_append_sealed_transaction(&self, s: &mut RlpStream) { s.begin_list(9); diff --git a/ethcore/types/src/verification_queue_info.rs b/ethcore/types/src/verification_queue_info.rs index a855fee6a17..b89cfe20c0d 100644 --- a/ethcore/types/src/verification_queue_info.rs +++ b/ethcore/types/src/verification_queue_info.rs @@ -37,9 +37,6 @@ impl VerificationQueueInfo { /// The total size of the queues. pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size } - /// The size of the unverified and verifying queues. - pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size } - /// Indicates that queue is full pub fn is_full(&self) -> bool { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > self.max_queue_size || diff --git a/evmbin/src/display/json.rs b/evmbin/src/display/json.rs index 449938eab56..195e00c7379 100644 --- a/evmbin/src/display/json.rs +++ b/evmbin/src/display/json.rs @@ -47,6 +47,42 @@ pub struct Informant { unmatched: bool, } +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct TraceData<'a> { + pc: usize, + op: u8, + op_name: &'a str, + gas: &'a str, + gas_cost: &'a str, + memory: &'a str, + stack: &'a [U256], + storage: &'a HashMap, + depth: usize, +} + +#[derive(Serialize, Debug)] +pub struct MessageInitial<'a> { + action: &'a str, + test: &'a str, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct MessageSuccess<'a> { + output: &'a str, + gas_used: &'a str, + time: &'a u64, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct MessageFailure<'a> { + error: &'a str, + gas_used: &'a str, + time: &'a u64, +} + impl Informant { fn with_informant_in_depth(informant: &mut Informant, depth: usize, f: F) { if depth == 0 { @@ -59,17 +95,21 @@ impl Informant { fn informant_trace(informant: &Informant, gas_used: U256) -> String { let info = ::evm::Instruction::from_u8(informant.instruction).map(|i| i.info()); - json!({ - "pc": informant.pc, - "op": informant.instruction, - "opName": info.map(|i| i.name).unwrap_or(""), - "gas": format!("{:#x}", gas_used.saturating_add(informant.gas_cost)), - "gasCost": format!("{:#x}", informant.gas_cost), - "memory": format!("0x{}", informant.memory.to_hex()), - "stack": informant.stack, - "storage": informant.storage, - "depth": informant.depth, - }).to_string() + let trace_data = + TraceData { + pc: informant.pc, + op: informant.instruction, + op_name: info.map(|i| i.name).unwrap_or(""), + gas: &format!("{:#x}", gas_used.saturating_add(informant.gas_cost)), + gas_cost: &format!("{:#x}", informant.gas_cost), + memory: &format!("0x{}", informant.memory.to_hex()), + stack: &informant.stack, + storage: &informant.storage, + depth: informant.depth, + } + ; + + serde_json::to_string(&trace_data).expect("Serialization cannot fail; qed") } } @@ -77,7 +117,15 @@ impl vm::Informant for Informant { type Sink = (); fn before_test(&mut self, name: &str, action: &str) { - println!("{}", json!({"action": action, "test": name})); + let message_init = + MessageInitial { + action, + test: &name, + } + ; + + let s = serde_json::to_string(&message_init).expect("Serialization cannot fail; qed"); + println!("{}", s); } fn set_gas(&mut self, gas: U256) { @@ -93,26 +141,32 @@ impl 
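The new `UnverifiedTransaction::receiver` helper is simply a match over the transaction's `Action`. A minimal sketch with a stand-in `Action` enum (the real type lives in `types::transaction`):

```rust
use ethereum_types::Address;

// Stand-in for types::transaction::Action: contract creations have no
// receiver, calls name one.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Action {
    Create,
    Call(Address),
}

fn receiver(action: Action) -> Option<Address> {
    match action {
        Action::Create => None,
        Action::Call(to) => Some(to),
    }
}

fn main() {
    let to = Address::zero();
    assert_eq!(receiver(Action::Create), None);
    assert_eq!(receiver(Action::Call(to)), Some(to));
}
```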
vm::Informant for Informant { println!("{}", trace); } - let success_msg = json!({ - "output": format!("0x{}", success.output.to_hex()), - "gasUsed": format!("{:#x}", success.gas_used), - "time": display::as_micros(&success.time), - }); + let message_success = + MessageSuccess { + output: &format!("0x{}", success.output.to_hex()), + gas_used: &format!("{:#x}", success.gas_used), + time: &display::as_micros(&success.time), + } + ; - println!("{}", success_msg) + let s = serde_json::to_string(&message_success).expect("Serialization cannot fail; qed"); + println!("{}", s); }, Err(failure) => { for trace in failure.traces.unwrap_or_else(Vec::new) { println!("{}", trace); } - let failure_msg = json!({ - "error": &failure.error.to_string(), - "gasUsed": format!("{:#x}", failure.gas_used), - "time": display::as_micros(&failure.time), - }); + let message_failure = + MessageFailure { + error: &failure.error.to_string(), + gas_used: &format!("{:#x}", failure.gas_used), + time: &display::as_micros(&failure.time), + } + ; - println!("{}", failure_msg) + let s = serde_json::to_string(&message_failure).expect("Serialization cannot fail; qed"); + println!("{}", s); }, } } diff --git a/evmbin/src/display/std_json.rs b/evmbin/src/display/std_json.rs index 74d9e0040a2..1734c305e24 100644 --- a/evmbin/src/display/std_json.rs +++ b/evmbin/src/display/std_json.rs @@ -64,6 +64,52 @@ pub struct Informant { out_sink: Out, } +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct TraceData<'a> { + pc: usize, + op: u8, + op_name: &'a str, + gas: &'a str, + stack: &'a [U256], + storage: &'a HashMap, + depth: usize, +} + +#[derive(Serialize, Debug)] +pub struct MessageInitial<'a> { + action: &'a str, + test: &'a str, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct MessageSuccess<'a> { + output: &'a str, + gas_used: &'a str, + time: &'a u64, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct MessageFailure<'a> { + error: &'a str, + gas_used: &'a str, + time: &'a u64, +} + +#[derive(Serialize, Debug)] +pub struct DumpData<'a> { + root: &'a H256, + accounts: &'a pod_state::PodState, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct TraceDataStateRoot<'a> { + state_root: &'a H256, +} + impl Default for Informant { fn default() -> Self { Self::new(io::stderr(), io::stdout()) @@ -95,7 +141,8 @@ impl Informant { storage: Default::default(), subinfos: Default::default(), subdepth: 0, - trace_sink, out_sink + trace_sink, + out_sink, } } @@ -108,12 +155,16 @@ impl Informant { } fn dump_state_into(trace_sink: &mut Trace, root: H256, end_state: &Option) { - if let Some(ref end_state) = end_state { - let dump_data = json!({ - "root": root, - "accounts": end_state, - }); - writeln!(trace_sink, "{}", dump_data).expect("The sink must be writeable."); + if let Some(ref end_state) = end_state { + let dump_data = + DumpData { + root: &root, + accounts: end_state, + } + ; + + let s = serde_json::to_string(&dump_data).expect("Serialization cannot fail; qed"); + writeln!(trace_sink, "{}", s).expect("The sink must be writeable."); } } @@ -124,12 +175,15 @@ impl vm::Informant for Informant { type Sink = (Trace, Out); fn before_test(&mut self, name: &str, action: &str) { - let out_data = json!({ - "action": action, - "test": name, - }); + let message_init = + MessageInitial { + action, + test: &name, + } + ; - writeln!(&mut self.out_sink, "{}", out_data).expect("The sink must be writeable."); + let s = 
serde_json::to_string(&message_init).expect("Serialization cannot fail; qed"); + writeln!(&mut self.out_sink, "{}", s).expect("The sink must be writeable."); } fn set_gas(&mut self, _gas: U256) {} @@ -137,34 +191,46 @@ impl vm::Informant for Informant { fn clone_sink(&self) -> Self::Sink { (self.trace_sink.clone(), self.out_sink.clone()) } + fn finish(result: vm::RunResult<::Output>, (ref mut trace_sink, ref mut out_sink): &mut Self::Sink) { match result { Ok(success) => { - let trace_data = json!({"stateRoot": success.state_root}); - writeln!(trace_sink, "{}", trace_data) - .expect("The sink must be writeable."); + let state_root_data = + TraceDataStateRoot { + state_root: &success.state_root, + } + ; + + let s = serde_json::to_string(&state_root_data).expect("Serialization cannot fail; qed"); + writeln!(trace_sink, "{}", s).expect("The sink must be writeable."); Self::dump_state_into(trace_sink, success.state_root, &success.end_state); - let out_data = json!({ - "output": format!("0x{}", success.output.to_hex()), - "gasUsed": format!("{:#x}", success.gas_used), - "time": display::as_micros(&success.time), - }); + let message_success = + MessageSuccess { + output: &format!("0x{}", success.output.to_hex()), + gas_used: &format!("{:#x}", success.gas_used), + time: &display::as_micros(&success.time), + } + ; - writeln!(out_sink, "{}", out_data).expect("The sink must be writeable."); + let s = serde_json::to_string(&message_success).expect("Serialization cannot fail; qed"); + writeln!(out_sink, "{}", s).expect("The sink must be writeable."); }, Err(failure) => { - let out_data = json!({ - "error": &failure.error.to_string(), - "gasUsed": format!("{:#x}", failure.gas_used), - "time": display::as_micros(&failure.time), - }); + let message_failure = + MessageFailure { + error: &failure.error.to_string(), + gas_used: &format!("{:#x}", failure.gas_used), + time: &display::as_micros(&failure.time), + } + ; Self::dump_state_into(trace_sink, failure.state_root, &failure.end_state); - writeln!(out_sink, "{}", out_data).expect("The sink must be writeable."); + let s = serde_json::to_string(&message_failure).expect("Serialization cannot fail; qed"); + writeln!(out_sink, "{}", s).expect("The sink must be writeable."); }, } } @@ -178,17 +244,22 @@ impl trace::VMTracer for Informant { Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { let info = ::evm::Instruction::from_u8(instruction).map(|i| i.info()); informant.instruction = instruction; - let trace_data = json!({ - "pc": pc, - "op": instruction, - "opName": info.map(|i| i.name).unwrap_or(""), - "gas": format!("{:#x}", current_gas), - "stack": informant.stack, - "storage": informant.storage, - "depth": informant.depth, - }); - - writeln!(&mut informant.trace_sink, "{}", trace_data).expect("The sink must be writeable."); + + let trace_data = + TraceData { + pc: pc, + op: instruction, + op_name: info.map(|i| i.name).unwrap_or(""), + gas: &format!("{:#x}", current_gas), + stack: &informant.stack, + storage: &informant.storage, + depth: informant.depth, + } + ; + + let s = serde_json::to_string(&trace_data).expect("Serialization cannot fail; qed"); + + writeln!(&mut informant.trace_sink, "{}", s).expect("The sink must be writeable."); }); true } @@ -279,8 +350,8 @@ pub mod tests { }, "60F8d6", 0xffff, - r#"{"depth":1,"gas":"0xffff","op":96,"opName":"PUSH1","pc":0,"stack":[],"storage":{}} -{"depth":1,"gas":"0xfffc","op":214,"opName":"","pc":2,"stack":["0xf8"],"storage":{}} + 
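Both evmbin informants move from ad-hoc `json!` maps to typed, borrowed structs serialized with serde_json. A self-contained sketch, assuming serde's derive feature and the serialize feature of ethereum-types; the struct's field order yields the camelCase key order seen in the updated expected traces:

```rust
use std::collections::HashMap;

use ethereum_types::{H256, U256};
use serde::Serialize;

#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
struct TraceData<'a> {
    pc: usize,
    op: u8,
    op_name: &'a str,
    gas: &'a str,
    stack: &'a [U256],
    storage: &'a HashMap<H256, H256>,
    depth: usize,
}

fn main() {
    let storage: HashMap<H256, H256> = HashMap::new();
    let stack = vec![U256::from(0xf8u64)];
    let record = TraceData {
        pc: 2,
        op: 214,
        op_name: "",
        gas: "0xfffc",
        stack: &stack,
        storage: &storage,
        depth: 1,
    };
    // With these crates the output matches the updated expected trace line:
    // {"pc":2,"op":214,"opName":"","gas":"0xfffc","stack":["0xf8"],"storage":{},"depth":1}
    let line = serde_json::to_string(&record).expect("plain data serializes; qed");
    println!("{}", line);
}
```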
r#"{"pc":0,"op":96,"opName":"PUSH1","gas":"0xffff","stack":[],"storage":{},"depth":1} +{"pc":2,"op":214,"opName":"","gas":"0xfffc","stack":["0xf8"],"storage":{},"depth":1} "#, ); @@ -293,7 +364,7 @@ pub mod tests { }, "F8d6", 0xffff, - r#"{"depth":1,"gas":"0xffff","op":248,"opName":"","pc":0,"stack":[],"storage":{}} + r#"{"pc":0,"op":248,"opName":"","gas":"0xffff","stack":[],"storage":{},"depth":1} "#, ); } @@ -309,30 +380,30 @@ pub mod tests { }, "32343434345830f138343438323439f0", 0xffff, - r#"{"depth":1,"gas":"0xffff","op":50,"opName":"ORIGIN","pc":0,"stack":[],"storage":{}} -{"depth":1,"gas":"0xfffd","op":52,"opName":"CALLVALUE","pc":1,"stack":["0x0"],"storage":{}} -{"depth":1,"gas":"0xfffb","op":52,"opName":"CALLVALUE","pc":2,"stack":["0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0xfff9","op":52,"opName":"CALLVALUE","pc":3,"stack":["0x0","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0xfff7","op":52,"opName":"CALLVALUE","pc":4,"stack":["0x0","0x0","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0xfff5","op":88,"opName":"PC","pc":5,"stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0xfff3","op":48,"opName":"ADDRESS","pc":6,"stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{}} -{"depth":1,"gas":"0xfff1","op":241,"opName":"CALL","pc":7,"stack":["0x0","0x0","0x0","0x0","0x0","0x5","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e21","op":56,"opName":"CODESIZE","pc":8,"stack":["0x1"],"storage":{}} -{"depth":1,"gas":"0x9e1f","op":52,"opName":"CALLVALUE","pc":9,"stack":["0x1","0x10"],"storage":{}} -{"depth":1,"gas":"0x9e1d","op":52,"opName":"CALLVALUE","pc":10,"stack":["0x1","0x10","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e1b","op":56,"opName":"CODESIZE","pc":11,"stack":["0x1","0x10","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e19","op":50,"opName":"ORIGIN","pc":12,"stack":["0x1","0x10","0x0","0x0","0x10"],"storage":{}} -{"depth":1,"gas":"0x9e17","op":52,"opName":"CALLVALUE","pc":13,"stack":["0x1","0x10","0x0","0x0","0x10","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e15","op":57,"opName":"CODECOPY","pc":14,"stack":["0x1","0x10","0x0","0x0","0x10","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e0c","op":240,"opName":"CREATE","pc":15,"stack":["0x1","0x10","0x0","0x0"],"storage":{}} -{"depth":2,"gas":"0x210c","op":50,"opName":"ORIGIN","pc":0,"stack":[],"storage":{}} -{"depth":2,"gas":"0x210a","op":52,"opName":"CALLVALUE","pc":1,"stack":["0x0"],"storage":{}} -{"depth":2,"gas":"0x2108","op":52,"opName":"CALLVALUE","pc":2,"stack":["0x0","0x0"],"storage":{}} -{"depth":2,"gas":"0x2106","op":52,"opName":"CALLVALUE","pc":3,"stack":["0x0","0x0","0x0"],"storage":{}} -{"depth":2,"gas":"0x2104","op":52,"opName":"CALLVALUE","pc":4,"stack":["0x0","0x0","0x0","0x0"],"storage":{}} -{"depth":2,"gas":"0x2102","op":88,"opName":"PC","pc":5,"stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{}} -{"depth":2,"gas":"0x2100","op":48,"opName":"ADDRESS","pc":6,"stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{}} -{"depth":2,"gas":"0x20fe","op":241,"opName":"CALL","pc":7,"stack":["0x0","0x0","0x0","0x0","0x0","0x5","0xbd770416a3345f91e4b34576cb804a576fa48eb1"],"storage":{}} + r#"{"pc":0,"op":50,"opName":"ORIGIN","gas":"0xffff","stack":[],"storage":{},"depth":1} +{"pc":1,"op":52,"opName":"CALLVALUE","gas":"0xfffd","stack":["0x0"],"storage":{},"depth":1} +{"pc":2,"op":52,"opName":"CALLVALUE","gas":"0xfffb","stack":["0x0","0x0"],"storage":{},"depth":1} +{"pc":3,"op":52,"opName":"CALLVALUE","gas":"0xfff9","stack":["0x0","0x0","0x0"],"storage":{},"depth":1} 
+{"pc":4,"op":52,"opName":"CALLVALUE","gas":"0xfff7","stack":["0x0","0x0","0x0","0x0"],"storage":{},"depth":1} +{"pc":5,"op":88,"opName":"PC","gas":"0xfff5","stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{},"depth":1} +{"pc":6,"op":48,"opName":"ADDRESS","gas":"0xfff3","stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{},"depth":1} +{"pc":7,"op":241,"opName":"CALL","gas":"0xfff1","stack":["0x0","0x0","0x0","0x0","0x0","0x5","0x0"],"storage":{},"depth":1} +{"pc":8,"op":56,"opName":"CODESIZE","gas":"0x9e21","stack":["0x1"],"storage":{},"depth":1} +{"pc":9,"op":52,"opName":"CALLVALUE","gas":"0x9e1f","stack":["0x1","0x10"],"storage":{},"depth":1} +{"pc":10,"op":52,"opName":"CALLVALUE","gas":"0x9e1d","stack":["0x1","0x10","0x0"],"storage":{},"depth":1} +{"pc":11,"op":56,"opName":"CODESIZE","gas":"0x9e1b","stack":["0x1","0x10","0x0","0x0"],"storage":{},"depth":1} +{"pc":12,"op":50,"opName":"ORIGIN","gas":"0x9e19","stack":["0x1","0x10","0x0","0x0","0x10"],"storage":{},"depth":1} +{"pc":13,"op":52,"opName":"CALLVALUE","gas":"0x9e17","stack":["0x1","0x10","0x0","0x0","0x10","0x0"],"storage":{},"depth":1} +{"pc":14,"op":57,"opName":"CODECOPY","gas":"0x9e15","stack":["0x1","0x10","0x0","0x0","0x10","0x0","0x0"],"storage":{},"depth":1} +{"pc":15,"op":240,"opName":"CREATE","gas":"0x9e0c","stack":["0x1","0x10","0x0","0x0"],"storage":{},"depth":1} +{"pc":0,"op":50,"opName":"ORIGIN","gas":"0x210c","stack":[],"storage":{},"depth":2} +{"pc":1,"op":52,"opName":"CALLVALUE","gas":"0x210a","stack":["0x0"],"storage":{},"depth":2} +{"pc":2,"op":52,"opName":"CALLVALUE","gas":"0x2108","stack":["0x0","0x0"],"storage":{},"depth":2} +{"pc":3,"op":52,"opName":"CALLVALUE","gas":"0x2106","stack":["0x0","0x0","0x0"],"storage":{},"depth":2} +{"pc":4,"op":52,"opName":"CALLVALUE","gas":"0x2104","stack":["0x0","0x0","0x0","0x0"],"storage":{},"depth":2} +{"pc":5,"op":88,"opName":"PC","gas":"0x2102","stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{},"depth":2} +{"pc":6,"op":48,"opName":"ADDRESS","gas":"0x2100","stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{},"depth":2} +{"pc":7,"op":241,"opName":"CALL","gas":"0x20fe","stack":["0x0","0x0","0x0","0x0","0x0","0x5","0xbd770416a3345f91e4b34576cb804a576fa48eb1"],"storage":{},"depth":2} "#, ) } diff --git a/evmbin/src/info.rs b/evmbin/src/info.rs index 9090c5377e6..ca068c36b0d 100644 --- a/evmbin/src/info.rs +++ b/evmbin/src/info.rs @@ -256,16 +256,16 @@ pub mod tests { assert_eq!( &String::from_utf8_lossy(&**res.lock().unwrap()), -r#"{"depth":1,"gas":"0xffff","op":98,"opName":"PUSH3","pc":0,"stack":[],"storage":{}} -{"depth":1,"gas":"0xfffc","op":96,"opName":"PUSH1","pc":4,"stack":["0xaaaaaa"],"storage":{}} -{"depth":1,"gas":"0xfff9","op":96,"opName":"PUSH1","pc":6,"stack":["0xaaaaaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xfff6","op":80,"opName":"POP","pc":8,"stack":["0xaaaaaa","0xaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xfff4","op":96,"opName":"PUSH1","pc":9,"stack":["0xaaaaaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xfff1","op":96,"opName":"PUSH1","pc":11,"stack":["0xaaaaaa","0xaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xffee","op":96,"opName":"PUSH1","pc":13,"stack":["0xaaaaaa","0xaa","0xaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xffeb","op":96,"opName":"PUSH1","pc":15,"stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xffe8","op":96,"opName":"PUSH1","pc":17,"stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{}} 
-{"depth":1,"gas":"0xffe5","op":96,"opName":"PUSH1","pc":19,"stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{}} +r#"{"pc":0,"op":98,"opName":"PUSH3","gas":"0xffff","stack":[],"storage":{},"depth":1} +{"pc":4,"op":96,"opName":"PUSH1","gas":"0xfffc","stack":["0xaaaaaa"],"storage":{},"depth":1} +{"pc":6,"op":96,"opName":"PUSH1","gas":"0xfff9","stack":["0xaaaaaa","0xaa"],"storage":{},"depth":1} +{"pc":8,"op":80,"opName":"POP","gas":"0xfff6","stack":["0xaaaaaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":9,"op":96,"opName":"PUSH1","gas":"0xfff4","stack":["0xaaaaaa","0xaa"],"storage":{},"depth":1} +{"pc":11,"op":96,"opName":"PUSH1","gas":"0xfff1","stack":["0xaaaaaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":13,"op":96,"opName":"PUSH1","gas":"0xffee","stack":["0xaaaaaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":15,"op":96,"opName":"PUSH1","gas":"0xffeb","stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":17,"op":96,"opName":"PUSH1","gas":"0xffe8","stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":19,"op":96,"opName":"PUSH1","gas":"0xffe5","stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} "#); } } diff --git a/parity/configuration.rs b/parity/configuration.rs index 2a807e99b07..e864eabade1 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -16,7 +16,7 @@ use std::time::Duration; use std::io::Read; -use std::net::SocketAddr; +use std::net::{SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::collections::{HashSet, BTreeMap}; use std::iter::FromIterator; @@ -725,9 +725,18 @@ impl Configuration { let port = self.args.arg_ports_shift + self.args.arg_port; let listen_address = SocketAddr::new(self.interface(&self.args.arg_interface).parse().unwrap(), port); let public_address = if self.args.arg_nat.starts_with("extip:") { - let host = &self.args.arg_nat[6..]; - let host = host.parse().map_err(|_| format!("Invalid host given with `--nat extip:{}`", host))?; - Some(SocketAddr::new(host, port)) + let host = self.args.arg_nat[6..].split(':').next().expect("split has at least one part; qed"); + let host = format!("{}:{}", host, port); + match host.to_socket_addrs() { + Ok(mut addr_iter) => { + if let Some(addr) = addr_iter.next() { + Some(addr) + } else { + return Err(format!("Invalid host given with `--nat extip:{}`", &self.args.arg_nat[6..])) + } + }, + Err(_) => return Err(format!("Invalid host given with `--nat extip:{}`", &self.args.arg_nat[6..])) + } } else { None }; @@ -933,13 +942,13 @@ impl Configuration { } fn snapshot_config(&self) -> Result { - let conf = SnapshotConfiguration { - no_periodic: self.args.flag_no_periodic_snapshot, - processing_threads: match self.args.arg_snapshot_threads { - Some(threads) if threads > 0 => threads, - _ => ::std::cmp::max(1, num_cpus::get_physical() / 2), - }, - }; + let mut conf = SnapshotConfiguration::default(); + conf.no_periodic = self.args.flag_no_periodic_snapshot; + if let Some(threads) = self.args.arg_snapshot_threads { + if threads > 0 { + conf.processing_threads = threads; + } + } Ok(conf) } @@ -1844,6 +1853,33 @@ mod tests { assert_eq!(conf1.ipfs_config().port, 5002); } + #[test] + fn should_resolve_external_nat_hosts() { + // Ip works + let conf = parse(&["parity", "--nat", "extip:1.1.1.1"]); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().ip().to_string(), "1.1.1.1"); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Ip with port works, port is 
discarded + let conf = parse(&["parity", "--nat", "extip:192.168.1.1:123"]); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().ip().to_string(), "192.168.1.1"); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Hostname works + let conf = parse(&["parity", "--nat", "extip:ethereum.org"]); + assert!(conf.net_addresses().unwrap().1.is_some()); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Hostname works, garbage at the end is discarded + let conf = parse(&["parity", "--nat", "extip:ethereum.org:whatever bla bla 123"]); + assert!(conf.net_addresses().unwrap().1.is_some()); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Garbage is error + let conf = parse(&["parity", "--nat", "extip:blabla"]); + assert!(conf.net_addresses().is_err()); + } + #[test] fn should_expose_all_servers() { // given diff --git a/parity/informant.rs b/parity/informant.rs index 2855579762c..14400d58f08 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -319,9 +319,16 @@ impl Informant { RestorationStatus::Ongoing { state_chunks, block_chunks, state_chunks_done, block_chunks_done } => { format!("Syncing snapshot {}/{}", state_chunks_done + block_chunks_done, state_chunks + block_chunks) }, - RestorationStatus::Initializing { chunks_done } => { - format!("Snapshot initializing ({} chunks restored)", chunks_done) + RestorationStatus::Initializing { chunks_done, state_chunks, block_chunks } => { + let total_chunks = state_chunks + block_chunks; + // Note that the percentage here can be slightly misleading when + // they have chunks already on disk: we'll import the local + // chunks first and then download the rest. + format!("Snapshot initializing ({}/{} chunks restored, {:.0}%)", chunks_done, total_chunks, (chunks_done as f32 / total_chunks as f32) * 100.0) }, + RestorationStatus::Finalizing => { + format!("Snapshot finalization under way") + } _ => String::new(), } ) diff --git a/parity/light_helpers/epoch_fetch.rs b/parity/light_helpers/epoch_fetch.rs index 38c277cb61e..95eea58586d 100644 --- a/parity/light_helpers/epoch_fetch.rs +++ b/parity/light_helpers/epoch_fetch.rs @@ -16,7 +16,7 @@ use std::sync::{Arc, Weak}; -use ethcore::engines::{EthEngine, StateDependentProof}; +use ethcore::engines::{Engine, StateDependentProof}; use sync::{LightSync, LightNetworkDispatcher}; use types::encoded; use types::header::Header; @@ -81,7 +81,7 @@ impl ChainDataFetcher for EpochFetch { } /// Fetch epoch transition proof at given header. - fn epoch_transition(&self, hash: H256, engine: Arc, checker: Arc) + fn epoch_transition(&self, hash: H256, engine: Arc, checker: Arc) -> Self::Transition { self.request(request::Signal { diff --git a/parity/snapshot.rs b/parity/snapshot.rs index 269965c3355..c1d2a77e3fd 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -123,6 +123,7 @@ fn restore_using(snapshot: Arc, reader: &R, match snapshot.status() { RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()), RestorationStatus::Initializing { .. 
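The `--nat extip:` handling above now strips any port the user supplied and resolves the remaining host (IP literal or DNS name) through `ToSocketAddrs`, pairing it with the node's own listen port. A minimal sketch of that strategy (`resolve_extip` is a hypothetical helper; the real logic is inline in the configuration code):

```rust
use std::net::{SocketAddr, ToSocketAddrs};

fn resolve_extip(extip: &str, port: u16) -> Result<SocketAddr, String> {
    // Keep only the host part; any port the user supplied is discarded in
    // favour of the node's own listen port.
    let host = extip.split(':').next().expect("split yields at least one part; qed");
    format!("{}:{}", host, port)
        .as_str()
        .to_socket_addrs()
        .ok()
        .and_then(|mut addrs| addrs.next())
        .ok_or_else(|| format!("Invalid host given with `--nat extip:{}`", extip))
}

fn main() {
    // IP literals resolve without touching DNS; the supplied port is ignored.
    let addr = resolve_extip("1.1.1.1:123", 30303).unwrap();
    assert_eq!(addr.ip().to_string(), "1.1.1.1");
    assert_eq!(addr.port(), 30303);

    // Unresolvable garbage is reported as an error.
    assert!(resolve_extip("blabla", 30303).is_err());
}
```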
} => Err("Snapshot restoration is still initializing.".into()), + RestorationStatus::Finalizing => Err("Snapshot restoration is still finalizing.".into()), RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), RestorationStatus::Inactive => { info!("Restoration complete."); diff --git a/rpc/src/v1/helpers/engine_signer.rs b/rpc/src/v1/helpers/engine_signer.rs index f993d15f230..56cead696cb 100644 --- a/rpc/src/v1/helpers/engine_signer.rs +++ b/rpc/src/v1/helpers/engine_signer.rs @@ -37,10 +37,7 @@ impl ethcore::engines::EngineSigner for EngineSigner { fn sign(&self, message: ethkey::Message) -> Result { match self.accounts.sign(self.address, Some(self.password.clone()), message) { Ok(ok) => Ok(ok), - Err(e) => { - warn!("Unable to sign consensus message: {:?}", e); - Err(ethkey::Error::InvalidSecret) - }, + Err(e) => Err(ethkey::Error::InvalidSecret), } } diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 92b7f2ac0bc..5c6696e4328 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -737,7 +737,7 @@ where tx: EthTransaction, hdr: encoded::Header, env_info: ::vm::EnvInfo, - engine: Arc<::ethcore::engines::EthEngine>, + engine: Arc<::ethcore::engines::Engine>, on_demand: Arc, sync: Arc, } diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs index f3dea5485db..21fe836b1fd 100644 --- a/rpc/src/v1/impls/light/parity.rs +++ b/rpc/src/v1/impls/light/parity.rs @@ -26,6 +26,7 @@ use ethstore::random_phrase; use sync::{LightSyncInfo, LightSyncProvider, LightNetworkDispatcher, ManageNetwork}; use updater::VersionInfo as UpdaterVersionInfo; use ethereum_types::{H64, H160, H256, H512, U64, U256}; +use ethcore::miner::FilterOptions; use ethcore_logger::RotatingLogger; use jsonrpc_core::{Result, BoxFuture}; @@ -162,12 +163,7 @@ where } fn registry_address(&self) -> Result> { - let reg = self.light_dispatch.client.engine().params().registrar; - if reg == Default::default() { - Ok(None) - } else { - Ok(Some(reg)) - } + Ok(self.light_dispatch.client.engine().params().registrar) } fn rpc_settings(&self) -> Result { @@ -217,7 +213,7 @@ where .map(Into::into) } - fn pending_transactions(&self, limit: Option) -> Result> { + fn pending_transactions(&self, limit: Option, _filter: Option) -> Result> { let txq = self.light_dispatch.transaction_queue.read(); let chain_info = self.light_dispatch.client.chain_info(); Ok( diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 0430acb7568..e040ed52b92 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -16,13 +16,12 @@ //! Parity-specific rpc implementation. 
use std::sync::Arc; -use std::str::FromStr; use std::collections::BTreeMap; use crypto::DEFAULT_MAC; -use ethereum_types::{Address, H64, H160, H256, H512, U64, U256}; +use ethereum_types::{H64, H160, H256, H512, U64, U256}; use ethcore::client::{BlockChainClient, StateClient, Call}; -use ethcore::miner::{self, MinerService}; +use ethcore::miner::{self, MinerService, FilterOptions}; use ethcore::snapshot::{SnapshotService, RestorationStatus}; use ethcore::state::StateInfo; use ethcore_logger::RotatingLogger; @@ -165,12 +164,7 @@ impl Parity for ParityClient where } fn registry_address(&self) -> Result> { - Ok( - self.client - .additional_params() - .get("registrar") - .and_then(|s| Address::from_str(s).ok()) - ) + Ok(self.client.registrar_address()) } fn rpc_settings(&self) -> Result { @@ -245,10 +239,11 @@ impl Parity for ParityClient where .map(Into::into) } - fn pending_transactions(&self, limit: Option) -> Result> { - let ready_transactions = self.miner.ready_transactions( + fn pending_transactions(&self, limit: Option, filter: Option) -> Result> { + let ready_transactions = self.miner.ready_transactions_filtered( &*self.client, limit.unwrap_or_else(usize::max_value), + filter, miner::PendingOrdering::Priority, ); diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs index b16651bd9c5..694dd96626b 100644 --- a/rpc/src/v1/tests/helpers/miner_service.rs +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -22,9 +22,9 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use bytes::Bytes; use ethcore::block::SealedBlock; use ethcore::client::{Nonce, PrepareOpenBlock, StateClient, EngineInfo}; -use ethcore::engines::{EthEngine, signer::EngineSigner}; +use ethcore::engines::{Engine, signer::EngineSigner}; use ethcore::error::Error; -use ethcore::miner::{self, MinerService, AuthoringParams}; +use ethcore::miner::{self, MinerService, AuthoringParams, FilterOptions}; use ethereum_types::{H256, U256, Address}; use miner::pool::local_transactions::Status as LocalTransactionStatus; use miner::pool::{verifier, VerifiedTransaction, QueueStatus}; @@ -99,7 +99,7 @@ impl StateClient for TestMinerService { } impl EngineInfo for TestMinerService { - fn engine(&self) -> &EthEngine { + fn engine(&self) -> &Engine { unimplemented!() } } @@ -222,6 +222,10 @@ impl MinerService for TestMinerService { self.queued_transactions() } + fn ready_transactions_filtered(&self, _chain: &C, _max_len: usize, _filter: Option, _ordering: miner::PendingOrdering) -> Vec> { + self.queued_transactions() + } + fn pending_transaction_hashes(&self, _chain: &C) -> BTreeSet { self.queued_transactions().into_iter().map(|tx| tx.signed().hash()).collect() } diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs index a89e1317191..050c9bd07fa 100644 --- a/rpc/src/v1/traits/parity.rs +++ b/rpc/src/v1/traits/parity.rs @@ -19,6 +19,7 @@ use std::collections::BTreeMap; use ethereum_types::{H64, H160, H256, H512, U64, U256}; +use ethcore::miner::FilterOptions; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; use v1::types::{ @@ -125,7 +126,7 @@ pub trait Parity { /// Returns all pending transactions from transaction queue. #[rpc(name = "parity_pendingTransactions")] - fn pending_transactions(&self, Option) -> Result>; + fn pending_transactions(&self, Option, Option) -> Result>; /// Returns all transactions from transaction queue. 
/// diff --git a/whisper/Cargo.toml b/whisper/Cargo.toml index 46351acc58a..e373b740f8c 100644 --- a/whisper/Cargo.toml +++ b/whisper/Cargo.toml @@ -30,3 +30,4 @@ time-utils = { path = "../util/time-utils" } jsonrpc-core = "10.0.1" jsonrpc-derive = "10.0.2" jsonrpc-pubsub = "10.0.1" +zeroize = "0.9.1" diff --git a/whisper/src/lib.rs b/whisper/src/lib.rs index 7ff7794b77a..550124d13cf 100644 --- a/whisper/src/lib.rs +++ b/whisper/src/lib.rs @@ -35,6 +35,7 @@ extern crate serde; extern crate slab; extern crate smallvec; extern crate tiny_keccak; +extern crate zeroize; extern crate jsonrpc_core; extern crate jsonrpc_derive; diff --git a/whisper/src/message.rs b/whisper/src/message.rs index ad50e3d9bf0..cf12d65b3a8 100644 --- a/whisper/src/message.rs +++ b/whisper/src/message.rs @@ -27,11 +27,38 @@ use tiny_keccak::{keccak256, Keccak}; #[cfg(not(time_checked_add))] use time_utils::CheckedSystemTime; +/// Bloom of topics. +type Bloom = H512; +/// Topic data index within a bloom. +type BloomTopicIndex = usize; +/// List of envelope topics. +type EnvelopeTopics = SmallVec<[EnvelopeTopic; 4]>; +/// Envelope topic data. +type EnvelopeTopicData = u8; +/// List of envelope topics data. +type EnvelopeTopicsData = [EnvelopeTopicData; 4]; +/// Expiry timestamp of an envelope. +type EnvelopeExpiryTimestamp = u64; +/// Message contained within an envelope +type EnvelopeMessage = Vec; +/// Arbitrary value used to target lower PoW hash. +type EnvelopeNonce = u64; +/// Envelope nonce in bytes. +type EnvelopeNonceBytes = [u8; 8]; +/// Envelope proving work duration in milliseconds. +type EnvelopeProvingWorkDuration = u64; +/// Envelope message uniquely identifying proving hash. +type EnvelopeProvingHash = H256; +/// Envelope work that has been proved by the proving hash. +type EnvelopeProvenWork = f64; +/// Time-to-live of an envelope in seconds. +type EnvelopeTTLDuration = u64; + /// Work-factor proved. Takes 3 parameters: size of message, time to live, /// and hash. /// /// Panics if size or TTL is zero. -pub fn work_factor_proved(size: u64, ttl: u64, hash: H256) -> f64 { +pub fn work_factor_proved(size: u64, ttl: EnvelopeTTLDuration, hash: EnvelopeProvingHash) -> EnvelopeProvenWork { assert!(size != 0 && ttl != 0); let leading_zeros = { @@ -44,51 +71,51 @@ pub fn work_factor_proved(size: u64, ttl: u64, hash: H256) -> f64 { 2.0_f64.powi(leading_zeros as i32) / spacetime } -/// A topic of a message. +/// A topic of a message. The topic is an abridged version of the first four bytes of the original topic's hash. #[derive(Debug, Default, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct Topic(pub [u8; 4]); +pub struct EnvelopeTopic(pub EnvelopeTopicsData); -impl From<[u8; 4]> for Topic { - fn from(x: [u8; 4]) -> Self { - Topic(x) +impl From for EnvelopeTopic { + fn from(x: EnvelopeTopicsData) -> Self { + EnvelopeTopic(x) } } -impl Topic { - /// set up to three bits in the 64-byte bloom passed. +impl EnvelopeTopic { + /// Set up to three bits in the 64-byte bloom passed. /// - /// this takes 3 sets of 9 bits, treating each as an index in the range + /// This takes 3 sets of 9 bits, treating each as an index in the range /// 0..512 into the bloom and setting the corresponding bit in the bloom to 1. 
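`work_factor_proved` keeps the Whisper proof-of-work formula: two to the power of the proving hash's leading zero bits, spread over the envelope's size-times-TTL "spacetime". A sketch under that assumption, with a u64 prefix standing in for the leading-zero scan over the full 32-byte hash:

```rust
// Hedged sketch: `spacetime` is assumed to be message size multiplied by TTL,
// and a u64 prefix of the proving hash stands in for the full leading-zero scan.
fn work_factor_proved(size: u64, ttl: u64, hash_prefix: u64) -> f64 {
    assert!(size != 0 && ttl != 0);
    let leading_zeros = hash_prefix.leading_zeros();
    let spacetime = size as f64 * ttl as f64;
    2.0_f64.powi(leading_zeros as i32) / spacetime
}

fn main() {
    // More leading zeros in the proving hash means more work proved for the
    // same envelope size and TTL.
    let weak = work_factor_proved(256, 60, u64::max_value());
    let strong = work_factor_proved(256, 60, u64::max_value() >> 16);
    assert!(strong > weak);
}
```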
- pub fn bloom_into(&self, bloom: &mut H512) { + pub fn bloom_into(&self, bloom: &mut Bloom) { - let data = &self.0; + let topics_data = &self.0; for i in 0..3 { - let mut idx = data[i] as usize; + let mut topic_idx = topics_data[i] as BloomTopicIndex; - if data[3] & (1 << i) != 0 { - idx += 256; + if topics_data[3] & (1 << i) != 0 { + topic_idx += 256; } - debug_assert!(idx <= 511); - bloom.as_bytes_mut()[idx / 8] |= 1 << (7 - idx % 8); + debug_assert!(topic_idx <= 511); + bloom.as_bytes_mut()[topic_idx / 8] |= 1 << (7 - topic_idx % 8); } } /// Get bloom for single topic. - pub fn bloom(&self) -> H512 { + pub fn bloom(&self) -> Bloom { let mut bloom = Default::default(); self.bloom_into(&mut bloom); bloom } } -impl rlp::Encodable for Topic { +impl rlp::Encodable for EnvelopeTopic { fn rlp_append(&self, s: &mut RlpStream) { s.encoder().encode_value(&self.0); } } -impl rlp::Decodable for Topic { +impl rlp::Decodable for EnvelopeTopic { fn decode(rlp: &Rlp) -> Result { use std::cmp; @@ -96,16 +123,16 @@ impl rlp::Decodable for Topic { cmp::Ordering::Less => Err(DecoderError::RlpIsTooShort), cmp::Ordering::Greater => Err(DecoderError::RlpIsTooBig), cmp::Ordering::Equal => { - let mut t = [0u8; 4]; + let mut t: EnvelopeTopicsData = [0u8; 4]; t.copy_from_slice(bytes); - Ok(Topic(t)) + Ok(EnvelopeTopic(t)) } }) } } /// Calculate union of blooms for given topics. -pub fn bloom_topics(topics: &[Topic]) -> H512 { +pub fn bloom_topics(topics: &[EnvelopeTopic]) -> Bloom { let mut bloom = H512::default(); for topic in topics { topic.bloom_into(&mut bloom); @@ -143,7 +170,8 @@ impl fmt::Display for Error { } } -fn append_topics<'a>(s: &'a mut RlpStream, topics: &[Topic]) -> &'a mut RlpStream { +/// Append given topic(s) to RLP stream. +fn append_topics<'a>(s: &'a mut RlpStream, topics: &[EnvelopeTopic]) -> &'a mut RlpStream { if topics.len() == 1 { s.append(&topics[0]) } else { @@ -151,27 +179,27 @@ fn append_topics<'a>(s: &'a mut RlpStream, topics: &[Topic]) -> &'a mut RlpStrea } } -fn decode_topics(rlp: Rlp) -> Result, DecoderError> { +fn decode_topics(rlp: Rlp) -> Result { if rlp.is_list() { - rlp.iter().map(|r| r.as_val::()).collect() + rlp.iter().map(|r| r.as_val::()).collect() } else { rlp.as_val().map(|t| SmallVec::from_slice(&[t])) } } -// Raw envelope struct. +/// An `Envelope` instance is contained in each `Message`. #[derive(Clone, Debug, PartialEq, Eq)] pub struct Envelope { - /// Expiry timestamp - pub expiry: u64, - /// Time-to-live in seconds - pub ttl: u64, - /// series of 4-byte topics. - pub topics: SmallVec<[Topic; 4]>, - /// The message contained within. - pub data: Vec, + /// Expiry timestamp. + pub expiry: EnvelopeExpiryTimestamp, + /// Time-to-live in seconds. + pub ttl: EnvelopeTTLDuration, + /// Series of 4-byte topics. + pub topics: EnvelopeTopics, + /// The message contained within an envelope. + pub message_data: EnvelopeMessage, /// Arbitrary value used to target lower PoW hash. - pub nonce: u64, + pub nonce: EnvelopeNonce, } impl Envelope { @@ -180,7 +208,8 @@ impl Envelope { self.topics.len() != 1 } - fn proving_hash(&self) -> H256 { + // Generate the uniquely identifying proving hash for the message. 
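`bloom_into` derives three 9-bit indices from a topic: bytes 0..2 give the low 8 bits and the low three bits of byte 3 each supply the ninth bit, choosing which half of the 512-bit bloom the index lands in. A standalone sketch of that indexing:

```rust
// Mirrors EnvelopeTopic::bloom_into on a plain 64-byte array.
fn bloom_into(topic: [u8; 4], bloom: &mut [u8; 64]) {
    for i in 0..3 {
        let mut idx = topic[i] as usize;
        // The i-th low bit of byte 3 lifts the index into the range 256..512.
        if topic[3] & (1 << i) != 0 {
            idx += 256;
        }
        debug_assert!(idx <= 511);
        bloom[idx / 8] |= 1 << (7 - idx % 8);
    }
}

fn main() {
    let mut bloom = [0u8; 64];
    // Byte 3 = 0b0000_0001 lifts only the first index into the upper half.
    bloom_into([1, 2, 3, 1], &mut bloom);
    // Index 257 -> byte 32, bit 6; indices 2 and 3 both land in byte 0.
    assert_eq!(bloom[32], 1 << 6);
    assert_ne!(bloom[0], 0);
}
```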
+	fn proving_hash(&self) -> EnvelopeProvingHash {
 		use byteorder::{BigEndian, ByteOrder};
 
 		let mut buf = [0; 32];
@@ -189,7 +218,7 @@ impl Envelope {
 		stream.append(&self.expiry).append(&self.ttl);
 
 		append_topics(&mut stream, &self.topics)
-			.append(&self.data);
+			.append(&self.message_data);
 
 		let mut digest = Keccak::new_keccak256();
 		digest.update(&*stream.drain());
@@ -212,7 +241,7 @@ impl rlp::Encodable for Envelope {
 			.append(&self.ttl);
 
 		append_topics(s, &self.topics)
-			.append(&self.data)
+			.append(&self.message_data)
 			.append(&self.nonce);
 	}
 }
@@ -225,7 +254,7 @@ impl rlp::Decodable for Envelope {
 			expiry: rlp.val_at(0)?,
 			ttl: rlp.val_at(1)?,
 			topics: decode_topics(rlp.at(2)?)?,
-			data: rlp.val_at(3)?,
+			message_data: rlp.val_at(3)?,
 			nonce: rlp.val_at(4)?,
 		})
 	}
@@ -234,22 +263,22 @@ impl rlp::Decodable for Envelope {
 /// Message creation parameters.
 /// Pass this to `Message::create` to make a message.
 pub struct CreateParams {
-	/// time-to-live in seconds.
-	pub ttl: u64,
-	/// payload data.
-	pub payload: Vec<u8>,
-	/// Topics. May not be empty.
-	pub topics: Vec<Topic>,
+	/// Envelope time-to-live in seconds.
+	pub ttl: EnvelopeTTLDuration,
+	/// Envelope payload of message data.
+	pub payload: EnvelopeMessage,
+	/// Envelope topics. Must not be empty.
+	pub topics: Vec<EnvelopeTopic>,
 	/// How many milliseconds to spend proving work.
-	pub work: u64,
+	pub work: EnvelopeProvingWorkDuration,
 }
 
 /// A whisper message. This is a checked message carrying around metadata.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Message {
 	envelope: Envelope,
-	bloom: H512,
-	hash: H256,
+	bloom: Bloom,
+	hash: EnvelopeProvingHash,
 	encoded_size: usize,
 }
 
@@ -270,6 +299,7 @@ impl Message {
 		assert!(params.ttl > 0);
 
+		// Expiry period since the last epoch rounded up to the nearest second.
 		let expiry = {
 			let since_epoch = SystemTime::now()
 				.checked_add(Duration::from_secs(params.ttl))
@@ -277,10 +307,13 @@ impl Message {
 				.ok_or(Error::TimestampOverflow)?
 				.duration_since(time::UNIX_EPOCH).expect("time after now is after unix epoch; qed");
 
-			// round up the sub-second to next whole second.
+			// Round up the sub-second to next whole second.
 			since_epoch.as_secs() + if since_epoch.subsec_nanos() == 0 { 0 } else { 1 }
 		};
 
+		// Encrypt an RLP stream into a digest. Create the RLP stream by appending
+		// to it the envelope topics, envelope payload of message data,
+		// envelope ttl, and the expiry period since the last epoch.
 		let start_digest = {
 			let mut stream = RlpStream::new_list(4);
 			stream.append(&expiry).append(&params.ttl);
@@ -291,8 +324,10 @@ impl Message {
 			digest
 		};
 
+		// Find the best nonce based on using updating the digest with
+		// randomly generated envelope nonce bytes
 		let mut buf = [0; 32];
-		let mut try_nonce = move |nonce: &[u8; 8]| {
+		let mut try_nonce = move |nonce: &EnvelopeNonceBytes| {
 			let mut digest = start_digest.clone();
 			digest.update(&nonce[..]);
 			digest.finalize(&mut buf[..]);
@@ -300,9 +335,12 @@ impl Message {
 			buf.clone()
 		};
 
-		let mut nonce: [u8; 8] = rng.gen();
+		let mut nonce: EnvelopeNonceBytes = rng.gen();
 		let mut best_found = try_nonce(&nonce);
 
+		// Start proving work, which involves repeatedly trying to create another
+		// nonce hash that is better (lower PoW hash) than the latest best nonce,
+		// to replace it.
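The comments added above describe the proof-of-work search that follows in the next hunk. A simplified, self-contained sketch of the idea; the patch itself reuses a partially built Keccak digest and the crate's own RLP encoding rather than rehashing the whole body, and the names here are made up:

```rust
use std::time::{Duration, Instant};

use rand::{Rng, rngs::OsRng};
use tiny_keccak::keccak256;

// Repeatedly hash `body || nonce` and keep whichever nonce yields the
// smallest digest (i.e. the most leading zero bits) until the proving-work
// budget in milliseconds is spent.
fn search_nonce_sketch(body: &[u8], work_ms: u64) -> ([u8; 8], [u8; 32]) {
	let mut rng = OsRng::new().expect("secure RNG available");

	let hash_with = |nonce: &[u8; 8]| {
		let mut preimage = body.to_vec();
		preimage.extend_from_slice(nonce);
		keccak256(&preimage)
	};

	let mut best_nonce: [u8; 8] = rng.gen();
	let mut best_hash = hash_with(&best_nonce);

	let start = Instant::now();
	while start.elapsed() <= Duration::from_millis(work_ms) {
		let candidate: [u8; 8] = rng.gen();
		let candidate_hash = hash_with(&candidate);
		// A smaller hash has more leading zeros, so it proves more work.
		if candidate_hash < best_hash {
			best_nonce = candidate;
			best_hash = candidate_hash;
		}
	}

	(best_nonce, best_hash)
}
```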
 		let start = Instant::now();
 		while start.elapsed() <= Duration::from_millis(params.work) {
@@ -316,10 +354,10 @@ impl Message {
 		}
 
 		let envelope = Envelope {
-			expiry: expiry,
+			expiry,
 			ttl: params.ttl,
 			topics: params.topics.into_iter().collect(),
-			data: params.payload,
+			message_data: params.payload,
 			nonce: BigEndian::read_u64(&nonce[..]),
 		};
@@ -344,9 +382,9 @@ impl Message {
 		Message::from_components(envelope, encoded_size, hash, now)
 	}
 
-	// create message from envelope, hash, and encoded size.
-	// does checks for validity.
-	fn from_components(envelope: Envelope, size: usize, hash: H256, now: SystemTime)
+	// Create message from envelope, hash, and encoded size.
+	// Does checks for validity.
+	fn from_components(envelope: Envelope, size: usize, hash: EnvelopeProvingHash, now: SystemTime)
 		-> Result<Self, Error> {
 		const LEEWAY_SECONDS: u64 = 2;
@@ -371,9 +409,9 @@ impl Message {
 		let bloom = bloom_topics(&envelope.topics);
 
 		Ok(Message {
-			envelope: envelope,
-			bloom: bloom,
-			hash: hash,
+			envelope,
+			bloom,
+			hash,
 			encoded_size: size,
 		})
 	}
@@ -388,18 +426,18 @@ impl Message {
 		self.encoded_size
 	}
 
-	/// Get a uniquely identifying hash for the message.
-	pub fn hash(&self) -> &H256 {
+	/// Get a uniquely identifying proving hash for the message.
+	pub fn hash(&self) -> &EnvelopeProvingHash {
 		&self.hash
 	}
 
-	/// Get the bloom filter of the topics
+	/// Get the bloom filter of the topics.
 	pub fn bloom(&self) -> &H512 {
 		&self.bloom
 	}
 
 	/// Get the work proved by the hash.
-	pub fn work_proved(&self) -> f64 {
+	pub fn work_proved(&self) -> EnvelopeProvenWork {
 		let proving_hash = self.envelope.proving_hash();
 		work_factor_proved(self.encoded_size as _, self.envelope.ttl, proving_hash)
@@ -411,13 +449,13 @@ impl Message {
 	}
 
 	/// Get the topics.
-	pub fn topics(&self) -> &[Topic] {
+	pub fn topics(&self) -> &[EnvelopeTopic] {
 		&self.envelope.topics
 	}
 
 	/// Get the message data.
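`from_components` validates the envelope's timestamps before accepting a message, allowing two seconds of clock-skew leeway (`LEEWAY_SECONDS` above). The exact checks and error variants are elided from this hunk; the following is only a hedged sketch of the shape of such a check:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

const LEEWAY_SECONDS: u64 = 2;

// The envelope must not already be expired (modulo leeway), and its implied
// creation time (expiry - ttl) must not lie in the future.
fn expiry_still_valid(expiry: u64, ttl: u64, now: SystemTime) -> bool {
	let now_secs = now
		.duration_since(UNIX_EPOCH)
		.expect("system time is after the unix epoch")
		.as_secs();

	expiry + LEEWAY_SECONDS > now_secs && expiry.saturating_sub(ttl) <= now_secs
}

fn main() {
	let now = SystemTime::now();
	let now_secs = now.duration_since(UNIX_EPOCH).unwrap().as_secs();
	assert!(expiry_still_valid(now_secs + 30, 30, now));
	assert!(!expiry_still_valid(now_secs.saturating_sub(10), 30, now));
}
```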
-	pub fn data(&self) -> &[u8] {
-		&self.envelope.data
+	pub fn message_data(&self) -> &EnvelopeMessage {
+		&self.envelope.message_data
 	}
 }
 
@@ -438,7 +476,7 @@ mod tests {
 		assert!(Message::create(CreateParams {
 			ttl: 100,
 			payload: vec![1, 2, 3, 4],
-			topics: vec![Topic([1, 2, 1, 2])],
+			topics: vec![EnvelopeTopic([1, 2, 1, 2])],
 			work: 50,
 		}).is_ok());
 	}
@@ -448,7 +486,7 @@ mod tests {
 		let envelope = Envelope {
 			expiry: 100_000,
 			ttl: 30,
-			data: vec![9; 256],
+			message_data: vec![9; 256],
 			topics: SmallVec::from_slice(&[Default::default()]),
 			nonce: 1010101,
 		};
@@ -464,8 +502,8 @@ mod tests {
 		let envelope = Envelope {
 			expiry: 100_000,
 			ttl: 30,
-			data: vec![9; 256],
-			topics: SmallVec::from_slice(&[Default::default(), Topic([1, 2, 3, 4])]),
+			message_data: vec![9; 256],
+			topics: SmallVec::from_slice(&[Default::default(), EnvelopeTopic([1, 2, 3, 4])]),
 			nonce: 1010101,
 		};
@@ -480,7 +518,7 @@ mod tests {
 		let envelope = Envelope {
 			expiry: 100_000,
 			ttl: 30,
-			data: vec![9; 256],
+			message_data: vec![9; 256],
 			topics: SmallVec::from_slice(&[Default::default()]),
 			nonce: 1010101,
 		};
@@ -499,7 +537,7 @@ mod tests {
 		let envelope = Envelope {
 			expiry: 100_000,
 			ttl: 30,
-			data: vec![9; 256],
+			message_data: vec![9; 256],
 			topics: SmallVec::from_slice(&[Default::default()]),
 			nonce: 1010101,
 		};
@@ -516,7 +554,7 @@ mod tests {
 		let envelope = Envelope {
 			expiry: 100_000,
 			ttl: 200_000,
-			data: vec![9; 256],
+			message_data: vec![9; 256],
 			topics: SmallVec::from_slice(&[Default::default()]),
 			nonce: 1010101,
 		};
@@ -530,10 +568,10 @@ mod tests {
 	#[test]
 	fn work_factor() {
 		// 256 leading zeros -> 2^256 / 1
-		assert_eq!(work_factor_proved(1, 1, H256::zero()), 115792089237316200000000000000000000000000000000000000000000000000000000000000.0);
+		assert_eq!(work_factor_proved(1, 1, EnvelopeProvingHash::zero()), 115792089237316200000000000000000000000000000000000000000000000000000000000000.0);
 		// 255 leading zeros -> 2^255 / 1
-		assert_eq!(work_factor_proved(1, 1, H256::from_low_u64_be(1)), 57896044618658100000000000000000000000000000000000000000000000000000000000000.0);
+		assert_eq!(work_factor_proved(1, 1, EnvelopeProvingHash::from_low_u64_be(1)), 57896044618658100000000000000000000000000000000000000000000000000000000000000.0);
 		// 0 leading zeros -> 2^0 / 1
-		assert_eq!(work_factor_proved(1, 1, serde_json::from_str::<H256>("\"0xff00000000000000000000000000000000000000000000000000000000000000\"").unwrap()), 1.0);
+		assert_eq!(work_factor_proved(1, 1, serde_json::from_str::<EnvelopeProvingHash>("\"0xff00000000000000000000000000000000000000000000000000000000000000\"").unwrap()), 1.0);
 	}
 }
diff --git a/whisper/src/rpc/crypto.rs b/whisper/src/rpc/crypto.rs
index 3f26e0350a0..ad2b59e2183 100644
--- a/whisper/src/rpc/crypto.rs
+++ b/whisper/src/rpc/crypto.rs
@@ -20,7 +20,7 @@ use aes_gcm::{Encryptor, Decryptor};
 use ethkey::crypto::ecies;
 use ethereum_types::H256;
 use ethkey::{self, Public, Secret};
-use parity_util_mem::Memzero;
+use zeroize::Zeroizing;
 
 /// Length of AES key
 pub const AES_KEY_LEN: usize = 32;
@@ -37,7 +37,7 @@ enum AesEncode {
 }
 
 enum EncryptionInner {
-	AES(Memzero<[u8; AES_KEY_LEN]>, [u8; AES_NONCE_LEN], AesEncode),
+	AES(Zeroizing<[u8; AES_KEY_LEN]>, [u8; AES_NONCE_LEN], AesEncode),
 	ECIES(Public),
 }
 
@@ -59,7 +59,7 @@ impl EncryptionInstance {
 	///
 	/// If generating nonces with a secure RNG, limit uses such that
 	/// the chance of collision is negligible.
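From here on the patch swaps `parity_util_mem::Memzero` for `zeroize::Zeroizing` wherever AES key material is held, so the key bytes are wiped when the wrapper is dropped. A minimal sketch of the pattern, assuming zeroize 0.9 as pinned in Cargo.toml above:

```rust
use zeroize::Zeroizing;

const AES_KEY_LEN: usize = 32;

fn main() {
	// Wrap raw key bytes so they are overwritten with zeroes on drop.
	let key: Zeroizing<[u8; AES_KEY_LEN]> = Zeroizing::new([0x42u8; AES_KEY_LEN]);

	// `Zeroizing` derefs to the inner array, so call sites that expect a
	// byte slice keep working unchanged.
	let as_slice: &[u8] = &key[..];
	assert_eq!(as_slice.len(), AES_KEY_LEN);
	// `key` is zeroed here, when it goes out of scope.
}
```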
-	pub fn aes(key: Memzero<[u8; AES_KEY_LEN]>, nonce: [u8; AES_NONCE_LEN]) -> Self {
+	pub fn aes(key: Zeroizing<[u8; AES_KEY_LEN]>, nonce: [u8; AES_NONCE_LEN]) -> Self {
 		EncryptionInstance(EncryptionInner::AES(key, nonce, AesEncode::AppendedNonce))
 	}
@@ -67,7 +67,7 @@ impl EncryptionInstance {
 	///
 	/// Key reuse here is extremely dangerous. It should be randomly generated
 	/// with a secure RNG.
-	pub fn broadcast(key: Memzero<[u8; AES_KEY_LEN]>, topics: Vec<H256>) -> Self {
+	pub fn broadcast(key: Zeroizing<[u8; AES_KEY_LEN]>, topics: Vec<H256>) -> Self {
 		EncryptionInstance(EncryptionInner::AES(key, BROADCAST_IV, AesEncode::OnTopics(topics)))
 	}
@@ -111,7 +111,7 @@ fn xor(a: &mut [u8; 32], b: &[u8; 32]) {
 }
 
 enum AesExtract {
-	AppendedNonce(Memzero<[u8; AES_KEY_LEN]>), // extract appended nonce.
+	AppendedNonce(Zeroizing<[u8; AES_KEY_LEN]>), // extract appended nonce.
 	OnTopics(usize, usize, H256), // number of topics, index we know, topic we know.
 }
 
@@ -132,7 +132,7 @@ impl DecryptionInstance {
 	}
 
 	/// 256-bit AES GCM decryption with appended nonce.
-	pub fn aes(key: Memzero<[u8; AES_KEY_LEN]>) -> Self {
+	pub fn aes(key: Zeroizing<[u8; AES_KEY_LEN]>) -> Self {
 		DecryptionInstance(DecryptionInner::AES(AesExtract::AppendedNonce(key)))
 	}
@@ -167,7 +167,7 @@ impl DecryptionInstance {
 				}
 				let mut salted_topic = H256::zero();
 				salted_topic.as_bytes_mut().copy_from_slice(&ciphertext[(known_index * 32)..][..32]);
-				let key = Memzero::from((salted_topic ^ known_topic).0);
+				let key = Zeroizing::new((salted_topic ^ known_topic).0);
 				let offset = num_topics * 32;
 				Decryptor::aes_256_gcm(&*key).ok()?
 					.decrypt(&BROADCAST_IV, Vec::from(&ciphertext[offset..]))
@@ -187,6 +187,7 @@ impl DecryptionInstance {
 mod tests {
 	use super::*;
 	use rand::{Rng, rngs::OsRng};
+	use std::ops::Deref;
 
 	#[test]
@@ -217,9 +218,9 @@ mod tests {
 	fn encrypt_symmetric() {
 		let mut rng = OsRng::new().unwrap();
 		let mut test_message = move |message: &[u8]| {
-			let key = Memzero::from(rng.gen::<[u8; 32]>());
+			let key = Zeroizing::new(rng.gen::<[u8; 32]>());
-			let instance = EncryptionInstance::aes(key.clone(), rng.gen());
+			let instance = EncryptionInstance::aes(Zeroizing::new(*key.deref()), rng.gen());
 			let ciphertext = instance.encrypt(message).unwrap();
 
 			if !message.is_empty() {
@@ -245,7 +246,7 @@ mod tests {
 		let all_topics = (0..5).map(|_| H256::random_using(&mut rng)).collect::<Vec<_>>();
 		let known_idx = 2;
 		let known_topic = all_topics[2];
-		let key = Memzero::from(rng.gen::<[u8; 32]>());
+		let key = Zeroizing::new(rng.gen::<[u8; 32]>());
 
 		let instance = EncryptionInstance::broadcast(key, all_topics);
 		let ciphertext = instance.encrypt(message).unwrap();
diff --git a/whisper/src/rpc/filter.rs b/whisper/src/rpc/filter.rs
index a58f16f9c85..84a6445ca51 100644
--- a/whisper/src/rpc/filter.rs
+++ b/whisper/src/rpc/filter.rs
@@ -24,7 +24,7 @@ use ethkey::Public;
 use jsonrpc_pubsub::typed::{Subscriber, Sink};
 use parking_lot::{Mutex, RwLock};
 
-use message::{Message, Topic};
+use message::{Message, EnvelopeTopic};
 use super::{key_store::KeyStore, types::{self, FilterItem, HexEncode}};
 
 /// Kinds of filters,
@@ -198,7 +198,7 @@ impl Drop for Manager {
 /// Filter incoming messages by critera.
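The `OnTopics` decryption path above recovers the broadcast key by XOR-ing the salted topic word carried in the ciphertext with a topic the recipient already knows. An illustrative round trip; the values are made up and only the XOR relationship is taken from the patch:

```rust
use ethereum_types::H256;
use zeroize::Zeroizing;

fn recover_broadcast_key(salted_topic: H256, known_topic: H256) -> Zeroizing<[u8; 32]> {
	// Mirrors `Zeroizing::new((salted_topic ^ known_topic).0)` above.
	Zeroizing::new((salted_topic ^ known_topic).0)
}

fn main() {
	let key = H256::from_low_u64_be(0x1234_5678);
	let known_topic = H256::from_low_u64_be(0x9abc_def0);

	// The sender effectively stores key ^ topic; the receiver undoes the XOR.
	let salted = key ^ known_topic;
	let recovered = recover_broadcast_key(salted, known_topic);
	assert_eq!(&recovered[..], key.as_bytes());
}
```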
 pub struct Filter {
-	topics: Vec<(Vec<u8>, H512, Topic)>,
+	topics: Vec<(Vec<u8>, H512, EnvelopeTopic)>,
 	from: Option<Public>,
 	decrypt_with: Option<H256>,
 }
@@ -278,7 +278,7 @@ impl Filter {
 			}
 		};
 
-		let decrypted = match decrypt.decrypt(message.data()) {
+		let decrypted = match decrypt.decrypt(message.message_data()) {
 			Some(d) => d,
 			None => {
 				trace!(target: "whisper", "Failed to decrypt message with {} matching topics",
@@ -317,7 +317,7 @@ impl Filter {
 #[cfg(test)]
 mod tests {
-	use message::{CreateParams, Message, Topic};
+	use message::{CreateParams, Message, EnvelopeTopic};
 	use rpc::types::{FilterRequest, HexEncode};
 	use rpc::abridge_topic;
 	use super::*;
@@ -366,7 +366,7 @@ mod tests {
 		let message = Message::create(CreateParams {
 			ttl: 100,
 			payload: vec![1, 3, 5, 7, 9],
-			topics: vec![Topic([1, 8, 3, 99])],
+			topics: vec![EnvelopeTopic([1, 8, 3, 99])],
 			work: 0,
 		}).unwrap();
diff --git a/whisper/src/rpc/key_store.rs b/whisper/src/rpc/key_store.rs
index 3f84e59ad3a..4d18157880e 100644
--- a/whisper/src/rpc/key_store.rs
+++ b/whisper/src/rpc/key_store.rs
@@ -23,7 +23,8 @@ use std::collections::HashMap;
 
 use ethereum_types::H256;
 use ethkey::{KeyPair, Public, Secret};
-use parity_util_mem::Memzero;
+use zeroize::Zeroizing;
+use std::ops::Deref;
 use rand::{Rng, rngs::OsRng};
 
 use rpc::crypto::{AES_KEY_LEN, EncryptionInstance, DecryptionInstance};
@@ -35,7 +36,7 @@ pub enum Key {
 	/// and signing.
 	Asymmetric(KeyPair),
 	/// AES-256 GCM mode. Suitable for encryption, decryption, but not signing.
-	Symmetric(Memzero<[u8; AES_KEY_LEN]>),
+	Symmetric(Zeroizing<[u8; AES_KEY_LEN]>),
 }
 
 impl Key {
@@ -49,7 +50,7 @@ impl Key {
 	/// Generate a random symmetric key with the given cryptographic RNG.
 	pub fn new_symmetric(rng: &mut OsRng) -> Self {
-		Key::Symmetric(Memzero::from(rng.gen::<[u8; 32]>()))
+		Key::Symmetric(Zeroizing::new(rng.gen::<[u8; 32]>()))
 	}
 
 	/// From secret asymmetric key. Fails if secret is invalid.
@@ -59,7 +60,7 @@ impl Key {
 
 	/// From raw symmetric key.
 	pub fn from_raw_symmetric(key: [u8; AES_KEY_LEN]) -> Self {
-		Key::Symmetric(Memzero::from(key))
+		Key::Symmetric(Zeroizing::new(key))
 	}
 
 	/// Get a handle to the public key if this is an asymmetric key.
@@ -138,7 +139,7 @@ impl KeyStore {
 				.map_err(|_| "could not create encryption instance for id"),
 			Key::Symmetric(ref key) => OsRng::new()
-				.map(|mut rng| EncryptionInstance::aes(key.clone(), rng.gen()))
+				.map(|mut rng| EncryptionInstance::aes(Zeroizing::new(*key.deref()), rng.gen()))
 				.map_err(|_| "unable to get secure randomness")
 		})
 	}
@@ -149,7 +150,7 @@ impl KeyStore {
 		self.get(id).map(|key| match *key {
 			Key::Asymmetric(ref pair) => DecryptionInstance::ecies(pair.secret().clone())
 				.expect("all keys stored are valid; qed"),
-			Key::Symmetric(ref key) => DecryptionInstance::aes(key.clone()),
+			Key::Symmetric(ref key) => DecryptionInstance::aes(Zeroizing::new(*key.deref())),
 		})
 	}
diff --git a/whisper/src/rpc/mod.rs b/whisper/src/rpc/mod.rs
index 61cd14d4b9b..0612fb5ac33 100644
--- a/whisper/src/rpc/mod.rs
+++ b/whisper/src/rpc/mod.rs
@@ -19,7 +19,7 @@
 //! Manages standard message format decoding, ephemeral identities, signing,
 //! encryption, and decryption.
 //!
-//! Provides an interface for using whisper to transmit data securely.
+//! Provides an interface for using Whisper to transmit data securely.
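In the key store, call sites that previously cloned the `Memzero` wrapper now dereference the stored key to its inner `[u8; 32]` (which is `Copy`) and re-wrap it, so every encryption or decryption instance owns its own zeroed-on-drop copy. In isolation the pattern looks like this (sketch only):

```rust
use std::ops::Deref;

use zeroize::Zeroizing;

fn fresh_copy(stored: &Zeroizing<[u8; 32]>) -> Zeroizing<[u8; 32]> {
	// Copy the inner array out and wrap the copy, as in
	// `EncryptionInstance::aes(Zeroizing::new(*key.deref()), ...)` above.
	Zeroizing::new(*stored.deref())
}

fn main() {
	let stored = Zeroizing::new([7u8; 32]);
	let copy = fresh_copy(&stored);
	assert_eq!(&copy[..], &stored[..]);
}
```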
 use std::sync::Arc;
@@ -28,14 +28,14 @@ use jsonrpc_derive::rpc;
 use jsonrpc_pubsub::{Session, PubSubMetadata, SubscriptionId, typed::Subscriber};
 
 use ethereum_types::H256;
-use parity_util_mem::Memzero;
+use zeroize::Zeroizing;
 use parking_lot::RwLock;
 
 use self::filter::Filter;
 use self::key_store::{Key, KeyStore};
 use self::types::HexEncode;
 
-use message::{CreateParams, Message, Topic};
+use message::{CreateParams, Message, EnvelopeTopic};
 
 mod crypto;
 mod filter;
@@ -61,7 +61,7 @@ fn topic_hash(topic: &[u8]) -> H256 {
 }
 
 // abridge topic using first four bytes of hash.
-fn abridge_topic(topic: &[u8]) -> Topic {
+fn abridge_topic(topic: &[u8]) -> EnvelopeTopic {
 	let mut abridged = [0; 4];
 	let hash = topic_hash(topic).0;
 	abridged.copy_from_slice(&hash[..4]);
@@ -99,6 +99,7 @@ pub trait Whisper {
 	#[rpc(name = "shh_getPrivateKey")]
 	fn get_private(&self, types::Identity) -> Result<types::Private>;
 
+	/// Get symmetric key. Succeeds if identity has been stored.
 	#[rpc(name = "shh_getSymKey")]
 	fn get_symmetric(&self, types::Identity) -> Result<types::Symmetric>;
 
@@ -284,7 +285,7 @@ impl Whisper for WhisperClien
 		let mut rng = OsRng::new()
 			.map_err(|_| whisper_error("unable to acquire secure randomness"))?;
 
-		let key = Memzero::from(rng.gen::<[u8; 32]>());
+		let key = Zeroizing::new(rng.gen::<[u8; 32]>());
 		if req.topics.is_empty() {
 			return Err(whisper_error("must supply at least one topic for broadcast message"));
 		}
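For context, `abridge_topic` above keeps only the first four bytes of the topic's hash as the wire-level `EnvelopeTopic`. A standalone sketch, assuming `topic_hash` is the keccak-256 digest used elsewhere in this crate and using a plain `[u8; 4]` in place of the topic type:

```rust
use tiny_keccak::keccak256;

fn abridge_topic_sketch(topic: &[u8]) -> [u8; 4] {
	// Hash the full topic and keep the first four bytes of the digest.
	let hash = keccak256(topic);
	let mut abridged = [0u8; 4];
	abridged.copy_from_slice(&hash[..4]);
	abridged
}

fn main() {
	let abridged = abridge_topic_sketch(b"example-topic");
	assert_eq!(abridged.len(), 4);
}
```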