Skip to content
This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

Commit

Permalink
Backports for stable 2.1.7 (#9975)
Browse files Browse the repository at this point in the history
* version: bump stable to 2.1.7

* Adjust requests costs for light client (#9925)

* PIP Table Cost relative to average peers instead of max peers

* Add tracing in PIP new_cost_table

* Update stat peer_count

* Use number of leeching peers for Light serve costs

* Fix test::light_params_load_share_depends_on_max_peers (wrong type)

* Remove (now) useless test

* Remove `load_share` from LightParams.Config
Prevent div. by 0

* Add LEECHER_COUNT_FACTOR

* PR Grumble: u64 to u32 for f64 casting

* Prevent u32 overflow for avg_peer_count

* Add tests for LightSync::Statistics

* Fix empty steps (#9939)

* Don't send empty step twice or empty step then block.

* Perform basic validation of locally sealed blocks.

* Don't include empty step twice.

* prevent silent errors in daemon mode, closes #9367 (#9946)

* Fix light client informant while syncing (#9932)

* Add `is_idle` to LightSync to check importing status

* Use SyncStateWrapper to make sure is_idle gets updates

* Update is_major_import to use verified queue size as well

* Add comment for `is_idle`

* Add Debug to `SyncStateWrapper`

* `fn get` -> `fn into_inner`

*  ci: rearrange pipeline by logic (#9970)

* ci: rearrange pipeline by logic

* ci: rename docs script

* Add readiness check for docker container (#9804)

* Update Dockerfile

Since parity is built for "mission critical use", I thought other operators may see the need for this.

Adding the `curl` and `jq` commands allows for an extremely simple health check to be usable in container orchestrators.

For example. Here is a health check for a parity docker container running in Kubernetes.

This can be setup as a readiness Probe that would prevent clustered nodes that aren't ready from serving traffic.

```bash
#!/bin/bash

ETH_SYNCING=$(curl -X POST --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' http://localhost:8545 -H 'Content-Type: application/json')
RESULT=$(echo "$ETH_SYNCING" | jq -r .result)

if [ "$RESULT" == "false" ]; then
  echo "Parity is ready to start accepting traffic"
  exit 0
else
  echo "Parity is still syncing the blockchain"
  exit 1
fi
```

* add sync check script

* Fix docker script (#9854)


* Dockerfile: change source path of the newly added check_sync.sh (#9869)

* Do not use the home directory as the working dir in docker (#9834)

* Do not create a home directory.

* Re-add -m flag

* fix docker build (#9971)

* bump smallvec to 0.6 in ethcore-light, ethstore and whisper (#9588)

* bump smallvec to 0.6 in ethcore-light, ethstore and whisper

* bump transaction-pool

* Fix test.

* patch cargo to use tokio-proto from git repo

this makes sure we no longer depend on smallvec 0.2.1 which is
affected by servo/rust-smallvec#96

* use patched version of untrusted 0.5.1

* ci: allow audit to fail
  • Loading branch information
5chdn authored Nov 28, 2018
1 parent 491f17f commit 126208c
Show file tree
Hide file tree
Showing 22 changed files with 432 additions and 220 deletions.
24 changes: 14 additions & 10 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,14 @@ test-linux:
tags:
- rust-stable

test-audit:
stage: test
script:
- scripts/gitlab/cargo-audit.sh
tags:
- rust-stable
allow_failure: true

build-linux:
stage: build
only: *releaseable_branches
Expand Down Expand Up @@ -103,25 +111,18 @@ publish-awss3:
tags:
- shell

docs-jsonrpc:
stage: optional
publish-docs:
stage: publish
only:
- tags
except:
- nightly
cache: {}
script:
- scripts/gitlab/docs-jsonrpc.sh
- scripts/gitlab/publish-docs.sh
tags:
- shell

cargo-audit:
stage: optional
script:
- scripts/gitlab/cargo-audit.sh
tags:
- rust-stable

build-android:
stage: optional
image: parity/rust-android:gitlab-ci
Expand All @@ -131,6 +132,7 @@ build-android:
- scripts/gitlab/build-unix.sh
tags:
- rust-arm
allow_failure: true

test-beta:
stage: optional
Expand All @@ -140,6 +142,7 @@ test-beta:
- scripts/gitlab/test-all.sh beta
tags:
- rust-beta
allow_failure: true

test-nightly:
stage: optional
Expand All @@ -149,3 +152,4 @@ test-nightly:
- scripts/gitlab/test-all.sh nightly
tags:
- rust-nightly
allow_failure: true
77 changes: 29 additions & 48 deletions Cargo.lock

Large diffs are not rendered by default.

4 changes: 3 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
description = "Parity Ethereum client"
name = "parity-ethereum"
# NOTE Make sure to update util/version/Cargo.toml as well
version = "2.1.6"
version = "2.1.7"
license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]

Expand Down Expand Up @@ -140,3 +140,5 @@ members = [

[patch.crates-io]
ring = { git = "https://github.com/paritytech/ring" }
tokio-proto = { git = "https://github.com/tokio-rs/tokio-proto.git", rev = "56c720e" }
untrusted = { git = "https://github.com/paritytech/untrusted", branch = "0.5" }
2 changes: 1 addition & 1 deletion ethcore/light/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ vm = { path = "../vm" }
fastmap = { path = "../../util/fastmap" }
rlp = { version = "0.2.4", features = ["ethereum"] }
rlp_derive = { path = "../../util/rlp_derive" }
smallvec = "0.4"
smallvec = "0.6"
futures = "0.1"
rand = "0.4"
itertools = "0.5"
Expand Down
88 changes: 80 additions & 8 deletions ethcore/light/src/net/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ use parking_lot::{Mutex, RwLock};
use provider::Provider;
use request::{Request, NetworkRequests as Requests, Response};
use rlp::{RlpStream, Rlp};
use std::collections::{HashMap, HashSet};
use std::collections::{HashMap, HashSet, VecDeque};
use std::fmt;
use std::ops::{BitOr, BitAnd, Not};
use std::sync::Arc;
Expand All @@ -38,7 +38,7 @@ use std::time::{Duration, Instant};
use self::request_credits::{Credits, FlowParams};
use self::context::{Ctx, TickCtx};
use self::error::Punishment;
use self::load_timer::{LoadDistribution, NullStore};
use self::load_timer::{LoadDistribution, NullStore, MOVING_SAMPLE_SIZE};
use self::request_set::RequestSet;
use self::id_guard::IdGuard;

Expand Down Expand Up @@ -70,6 +70,16 @@ const PROPAGATE_TIMEOUT_INTERVAL: Duration = Duration::from_secs(5);
// Token/interval for the hourly cost-table recalculation timer
// (dispatched to `begin_new_cost_period`).
const RECALCULATE_COSTS_TIMEOUT: TimerToken = 3;
const RECALCULATE_COSTS_INTERVAL: Duration = Duration::from_secs(60 * 60);

// Token/interval for the statistics sampling timer (dispatched to
// `tick_statistics`, which records the current leecher count).
const STATISTICS_TIMEOUT: TimerToken = 4;
const STATISTICS_INTERVAL: Duration = Duration::from_secs(15);

/// Maximum load share for the light server
/// (fraction of total capacity split among serving peers).
pub const MAX_LIGHTSERV_LOAD: f64 = 0.5;

/// Factor to multiply leecher count to cater for
/// extra sudden connections (should be >= 1.0)
pub const LEECHER_COUNT_FACTOR: f64 = 1.25;

// minimum interval between updates.
const UPDATE_INTERVAL: Duration = Duration::from_millis(5000);

Expand Down Expand Up @@ -256,18 +266,18 @@ pub trait Handler: Send + Sync {
pub struct Config {
	/// Number of seconds' worth of request credits a peer is allowed
	/// to accumulate before further accrual is capped.
	pub max_stored_seconds: u64,
	/// Expected (median) peer count for the network configuration;
	/// used as the default peer count when deriving per-peer load share.
	pub median_peers: f64,
}

impl Default for Config {
	fn default() -> Self {
		Config {
			// Only charge for five minutes of accumulated credits.
			max_stored_seconds: 60 * 5,
			// Assume a median of 25 peers.
			median_peers: 25.0,
		}
	}
}
Expand Down Expand Up @@ -335,6 +345,42 @@ mod id_guard {
}
}

/// Provides various runtime statistics (currently a rolling window of
/// peer-count samples) that can be used to compute request costs.
pub struct Statistics {
	/// Rolling window of peer-count samples; newest sample at the back.
	peer_counts: VecDeque<usize>,
}

impl Statistics {
	/// Create an empty `Statistics` instance.
	pub fn new() -> Self {
		Statistics {
			peer_counts: VecDeque::with_capacity(MOVING_SAMPLE_SIZE),
		}
	}

	/// Record a new peer-count sample, evicting the oldest entries so
	/// the window never holds more than `MOVING_SAMPLE_SIZE` samples.
	pub fn add_peer_count(&mut self, peer_count: usize) {
		while self.peer_counts.len() >= MOVING_SAMPLE_SIZE {
			let _ = self.peer_counts.pop_front();
		}
		self.peer_counts.push_back(peer_count);
	}

	/// Average of the recorded peer-count samples, clamped to be
	/// at least 1.0 so callers may safely use it as a divisor.
	pub fn avg_peer_count(&self) -> f64 {
		let samples = self.peer_counts.len();
		if samples == 0 {
			return 1.0;
		}
		// Saturating u32 accumulation prevents overflow on
		// pathologically large sample values.
		let mut total: u32 = 0;
		for &count in &self.peer_counts {
			total = total.saturating_add(count as u32);
		}
		let mean = total as f64 / samples as f64;
		mean.max(1.0)
	}
}

/// This is an implementation of the light ethereum network protocol, abstracted
/// over a `Provider` of data and a p2p network.
///
Expand All @@ -359,6 +405,7 @@ pub struct LightProtocol {
req_id: AtomicUsize,
sample_store: Box<SampleStore>,
load_distribution: LoadDistribution,
statistics: RwLock<Statistics>,
}

impl LightProtocol {
Expand All @@ -369,9 +416,11 @@ impl LightProtocol {
let genesis_hash = provider.chain_info().genesis_hash;
let sample_store = params.sample_store.unwrap_or_else(|| Box::new(NullStore));
let load_distribution = LoadDistribution::load(&*sample_store);
// Default load share relative to median peers
let load_share = MAX_LIGHTSERV_LOAD / params.config.median_peers;
let flow_params = FlowParams::from_request_times(
|kind| load_distribution.expected_time(kind),
params.config.load_share,
load_share,
Duration::from_secs(params.config.max_stored_seconds),
);

Expand All @@ -389,6 +438,7 @@ impl LightProtocol {
req_id: AtomicUsize::new(0),
sample_store,
load_distribution,
statistics: RwLock::new(Statistics::new()),
}
}

Expand All @@ -408,6 +458,16 @@ impl LightProtocol {
)
}

/// Get the number of active light peers downloading from the
/// node.
///
/// A peer is counted as a leecher once it has spent any of its
/// request credits (its current balance is below the limit).
pub fn leecher_count(&self) -> usize {
	let limit = *self.flow_params.read().limit();
	let peers = self.peers.read();
	peers.iter()
		.filter(|&(_, peer)| peer.lock().local_credits.current() < limit)
		.count()
}

/// Make a request to a peer.
///
/// Fails on: nonexistent peer, network error, peer not server,
Expand Down Expand Up @@ -772,12 +832,16 @@ impl LightProtocol {
fn begin_new_cost_period(&self, io: &IoContext) {
self.load_distribution.end_period(&*self.sample_store);

let avg_peer_count = self.statistics.read().avg_peer_count();
// Load share relative to average peer count +LEECHER_COUNT_FACTOR%
let load_share = MAX_LIGHTSERV_LOAD / (avg_peer_count * LEECHER_COUNT_FACTOR);
let new_params = Arc::new(FlowParams::from_request_times(
|kind| self.load_distribution.expected_time(kind),
self.config.load_share,
load_share,
Duration::from_secs(self.config.max_stored_seconds),
));
*self.flow_params.write() = new_params.clone();
trace!(target: "pip", "New cost period: avg_peers={} ; cost_table:{:?}", avg_peer_count, new_params.cost_table());

let peers = self.peers.read();
let now = Instant::now();
Expand All @@ -797,6 +861,11 @@ impl LightProtocol {
peer_info.awaiting_acknowledge = Some((now, new_params.clone()));
}
}

/// Take a sample of the current leecher count and record it in the
/// moving statistics window; driven by the `STATISTICS_TIMEOUT` timer.
fn tick_statistics(&self) {
let leecher_count = self.leecher_count();
self.statistics.write().add_peer_count(leecher_count);
}
}

impl LightProtocol {
Expand Down Expand Up @@ -1099,6 +1168,8 @@ impl NetworkProtocolHandler for LightProtocol {
.expect("Error registering sync timer.");
io.register_timer(RECALCULATE_COSTS_TIMEOUT, RECALCULATE_COSTS_INTERVAL)
.expect("Error registering request timer interval token.");
io.register_timer(STATISTICS_TIMEOUT, STATISTICS_INTERVAL)
.expect("Error registering statistics timer.");
}

fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
Expand All @@ -1119,6 +1190,7 @@ impl NetworkProtocolHandler for LightProtocol {
TICK_TIMEOUT => self.tick_handlers(&io),
PROPAGATE_TIMEOUT => self.propagate_transactions(&io),
RECALCULATE_COSTS_TIMEOUT => self.begin_new_cost_period(&io),
STATISTICS_TIMEOUT => self.tick_statistics(),
_ => warn!(target: "pip", "received timeout on unknown token {}", timer),
}
}
Expand Down
34 changes: 33 additions & 1 deletion ethcore/light/src/net/tests/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,10 @@ use ethcore::client::{EachBlockWith, TestBlockChainClient};
use ethcore::encoded;
use ethcore::ids::BlockId;
use ethereum_types::{H256, U256, Address};
use net::{LightProtocol, Params, packet, Peer};
use net::{LightProtocol, Params, packet, Peer, Statistics};
use net::context::IoContext;
use net::status::{Capabilities, Status};
use net::load_timer::MOVING_SAMPLE_SIZE;
use network::{PeerId, NodeId};
use provider::Provider;
use request;
Expand Down Expand Up @@ -780,3 +781,34 @@ fn get_transaction_index() {
let expected = Expect::Respond(packet::RESPONSE, response);
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
}

#[test]
fn sync_statistics() {
	// With no samples recorded, the average defaults to 1.0.
	let mut stats = Statistics::new();
	assert_eq!(stats.avg_peer_count(), 1.0);

	// Averages below 1.0 are clamped up to 1.0.
	stats.add_peer_count(0);
	assert_eq!(stats.avg_peer_count(), 1.0);

	// On a fresh instance, the mean of the samples 1..=N is reported.
	let mut stats = Statistics::new();
	const N: f64 = 50.0;
	for sample in 1..=(N as usize) {
		stats.add_peer_count(sample);
	}
	assert_eq!(stats.avg_peer_count(), N * (N + 1.0) / 2.0 / N);

	// Older samples are evicted: only the most recent
	// `MOVING_SAMPLE_SIZE` values contribute to the average.
	for _ in 0..MOVING_SAMPLE_SIZE {
		stats.add_peer_count(40);
	}
	assert_eq!(stats.avg_peer_count(), 40.0);
}
21 changes: 15 additions & 6 deletions ethcore/src/client/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2252,35 +2252,44 @@ impl ImportSealedBlock for Client {
fn import_sealed_block(&self, block: SealedBlock) -> ImportResult {
let h = block.header().hash();
let start = Instant::now();
let header = block.header().clone();
let route = {
// Do a super duper basic verification to detect potential bugs
if let Err(e) = self.engine.verify_block_basic(&header) {
self.importer.bad_blocks.report(
block.rlp_bytes(),
format!("Detected an issue with locally sealed block: {}", e),
);
return Err(e.into());
}

// scope for self.import_lock
let _import_lock = self.importer.import_lock.lock();
trace_time!("import_sealed_block");

let number = block.header().number();
let block_data = block.rlp_bytes();
let header = block.header().clone();

let route = self.importer.commit_block(block, &header, encoded::Block::new(block_data), self);
trace!(target: "client", "Imported sealed block #{} ({})", number, h);
trace!(target: "client", "Imported sealed block #{} ({})", header.number(), header.hash());
self.state_db.write().sync_cache(&route.enacted, &route.retracted, false);
route
};
let h = header.hash();
let route = ChainRoute::from([route].as_ref());
self.importer.miner.chain_new_blocks(
self,
&[h.clone()],
&[h],
&[],
route.enacted(),
route.retracted(),
self.engine.seals_internally().is_some(),
);
self.notify(|notify| {
notify.new_blocks(
vec![h.clone()],
vec![h],
vec![],
route.clone(),
vec![h.clone()],
vec![h],
vec![],
start.elapsed(),
);
Expand Down
Loading

0 comments on commit 126208c

Please sign in to comment.