
# This is a combination of 11 commits.
# This is the 1st commit message:

Update amazon_linux-2-pinned.dockerfile

# This is the commit message #2:

Update centos-7.7-pinned.dockerfile

# This is the commit message #3:

Update ubuntu-18.04-pinned.dockerfile

# This is the commit message #4:

Update ubuntu-20.04-pinned.dockerfile

# This is the commit message #5:

Update amazon_linux-2-unpinned.dockerfile

# This is the commit message #6:

Update centos-7.7-unpinned.dockerfile

# This is the commit message #7:

Update ubuntu-18.04-unpinned.dockerfile

# This is the commit message #8:

Update ubuntu-20.04-unpinned.dockerfile

# This is the commit message #9:

Update protocol.hpp

# This is the commit message #10:

Update net_plugin.cpp

# This is the commit message #11:

Update CMakeLists.txt
Farhad Shahabi committed Mar 11, 2022
1 parent b910fd9 commit 6d324e2
Showing 11 changed files with 52 additions and 41 deletions.
3 changes: 2 additions & 1 deletion .cicd/platforms/pinned/amazon_linux-2-pinned.dockerfile
@@ -1,12 +1,13 @@
FROM amazonlinux:2.0.20190508
ENV VERSION 1
# install dependencies.
# iproute-tc configures traffic control for p2p_high_latency_test.py test
RUN yum update -y && \
yum install -y which git sudo procps-ng util-linux autoconf automake \
libtool make bzip2 bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel \
libusbx-devel python3 python3-devel python-devel libedit-devel doxygen \
graphviz patch gcc gcc-c++ vim-common jq net-tools \
libuuid-devel libtasn1-devel expect socat libseccomp-devel && \
libuuid-devel libtasn1-devel expect socat libseccomp-devel iproute-tc && \
yum clean all && rm -rf /var/cache/yum
# install erlang and rabbitmq
RUN curl -s https://packagecloud.io/install/repositories/rabbitmq/erlang/script.rpm.sh | bash && \
1 change: 1 addition & 0 deletions .cicd/platforms/pinned/centos-7.7-pinned.dockerfile
@@ -1,6 +1,7 @@
FROM centos:7.7.1908
ENV VERSION 1
# install dependencies.
# iproute configures traffic control for p2p_high_latency_test.py test
RUN yum update -y && \
yum install -y epel-release && \
yum --enablerepo=extras install -y centos-release-scl && \
3 changes: 2 additions & 1 deletion .cicd/platforms/pinned/ubuntu-18.04-pinned.dockerfile
@@ -1,6 +1,7 @@
FROM ubuntu:18.04
ENV VERSION 1
# install dependencies.
# iproute2 configures traffic control for p2p_high_latency_test.py test
RUN apt-get update && \
apt-get upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y git make \
@@ -9,7 +10,7 @@ RUN apt-get update && \
python3-dev python-configparser python-requests python-pip \
autoconf libtool g++ gcc curl zlib1g-dev sudo ruby libusb-1.0-0-dev \
libcurl4-gnutls-dev pkg-config patch vim-common jq rabbitmq-server \
libtasn1-dev libnss3-dev iproute2 expect gawk socat python3-pip libseccomp-dev uuid-dev && \
libtasn1-dev libnss3-dev iproute2 expect gawk socat python3-pip libseccomp-dev uuid-dev iproute2 && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# install request and requests_unixsocket module
3 changes: 2 additions & 1 deletion .cicd/platforms/pinned/ubuntu-20.04-pinned.dockerfile
@@ -1,14 +1,15 @@
FROM ubuntu:20.04
ENV VERSION 1
# install dependencies.
# iproute2 configures traffic control for p2p_high_latency_test.py test
RUN apt-get update && \
apt-get upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y git make \
bzip2 automake libbz2-dev libssl-dev doxygen graphviz libgmp3-dev \
autotools-dev python2.7 python2.7-dev python3 \
python3-dev python-configparser python3-pip \
autoconf libtool g++ gcc curl zlib1g-dev sudo ruby libusb-1.0-0-dev \
libcurl4-gnutls-dev pkg-config patch vim-common jq gnupg rabbitmq-server && \
libcurl4-gnutls-dev pkg-config patch vim-common jq gnupg rabbitmq-server iproute2 && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# install request and requests_unixsocket module
3 changes: 2 additions & 1 deletion .cicd/platforms/unpinned/amazon_linux-2-unpinned.dockerfile
@@ -1,11 +1,12 @@
FROM amazonlinux:2.0.20190508
ENV VERSION 1
# install dependencies.
# iproute-tc configures traffic control for p2p_high_latency_test.py test
RUN yum update -y && \
yum install -y which git sudo procps-ng util-linux autoconf automake \
libtool make bzip2 bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel \
libusbx-devel python3 python3-devel python-devel python3-pip libedit-devel doxygen \
graphviz clang patch llvm-devel llvm-static vim-common jq && \
graphviz clang patch llvm-devel llvm-static vim-common jq iproute-tc && \
yum clean all && rm -rf /var/cache/yum
# install erlang and rabbitmq
RUN curl -s https://packagecloud.io/install/repositories/rabbitmq/erlang/script.rpm.sh | bash && \
3 changes: 2 additions & 1 deletion .cicd/platforms/unpinned/centos-7.7-unpinned.dockerfile
@@ -1,14 +1,15 @@
FROM centos:7.7.1908
ENV VERSION 1
# install dependencies.
# iproute configures traffic control for p2p_high_latency_test.py test
RUN yum update -y && \
yum install -y epel-release && \
yum --enablerepo=extras install -y centos-release-scl && \
yum --enablerepo=extras install -y devtoolset-8 && \
yum --enablerepo=extras install -y which git autoconf automake libtool make bzip2 doxygen \
graphviz bzip2-devel openssl-devel gmp-devel ocaml \
python python-devel rh-python36 file libusbx-devel \
libcurl-devel patch vim-common jq llvm-toolset-7.0-llvm-devel llvm-toolset-7.0-llvm-static && \
libcurl-devel patch vim-common jq llvm-toolset-7.0-llvm-devel llvm-toolset-7.0-llvm-static iproute && \
yum clean all && rm -rf /var/cache/yum
# install erlang and rabbitmq
RUN curl -s https://packagecloud.io/install/repositories/rabbitmq/erlang/script.rpm.sh | bash && \
3 changes: 2 additions & 1 deletion .cicd/platforms/unpinned/ubuntu-18.04-unpinned.dockerfile
@@ -1,13 +1,14 @@
FROM ubuntu:18.04
ENV VERSION 1
# install dependencies.
# iproute2 configures traffic control for p2p_high_latency_test.py test
RUN apt-get update && \
apt-get upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y git make \
bzip2 automake libbz2-dev libssl-dev doxygen graphviz libgmp3-dev \
autotools-dev python2.7 python2.7-dev python3 python3-dev python3-pip \
autoconf libtool curl zlib1g-dev sudo ruby libusb-1.0-0-dev \
libcurl4-gnutls-dev pkg-config patch llvm-7-dev clang-7 vim-common jq rabbitmq-server && \
libcurl4-gnutls-dev pkg-config patch llvm-7-dev clang-7 vim-common jq rabbitmq-server iproute2 && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# install request and requests_unixsocket module
3 changes: 2 additions & 1 deletion .cicd/platforms/unpinned/ubuntu-20.04-unpinned.dockerfile
@@ -1,13 +1,14 @@
FROM ubuntu:20.04
ENV VERSION 1
# install dependencies.
# iproute2 configures traffic control for p2p_high_latency_test.py test
RUN apt-get update && \
apt-get upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y git make \
bzip2 automake libbz2-dev libssl-dev doxygen graphviz libgmp3-dev \
autotools-dev python2.7 python2.7-dev python3 python3-dev python3-pip \
autoconf libtool curl zlib1g-dev sudo ruby libusb-1.0-0-dev \
libcurl4-gnutls-dev pkg-config patch llvm-7-dev clang-7 vim-common jq g++ gnupg rabbitmq-server && \
libcurl4-gnutls-dev pkg-config patch llvm-7-dev clang-7 vim-common jq g++ gnupg rabbitmq-server iproute2 && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# install request and requests_unixsocket module
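
The platform images above add the distro's iproute package (iproute-tc on Amazon Linux, iproute on CentOS, iproute2 on Ubuntu), which the new p2p_high_latency_test.py test uses for traffic control. As a rough, hypothetical sketch of how such a test could inject latency with tc netem (the interface, delay value, and helper names are assumptions, not taken from this commit):

import subprocess

def set_loopback_delay(delay_ms: int) -> None:
    # Hypothetical helper: delay every packet on the loopback interface.
    # Needs root and the tc binary provided by iproute / iproute2 / iproute-tc.
    subprocess.run(["tc", "qdisc", "add", "dev", "lo", "root", "netem",
                    "delay", f"{delay_ms}ms"], check=True)

def clear_loopback_delay() -> None:
    # Remove the netem qdisc once the high-latency scenario is done.
    subprocess.run(["tc", "qdisc", "del", "dev", "lo", "root", "netem"], check=True)
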
2 changes: 1 addition & 1 deletion plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
@@ -29,7 +29,7 @@ namespace eosio {
chain_id_type chain_id; ///< used to identify chain
fc::sha256 node_id; ///< used to identify peers and prevent self-connect
chain::public_key_type key; ///< authentication key; may be a producer or peer key, or empty
tstamp time{0};
long long time{0}; ///< handshake timestamp in nanoseconds since epoch
fc::sha256 token; ///< digest of time to prove we own the private key of the key above
chain::signature_type sig; ///< signature for the digest
string p2p_address;
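
With this change the handshake's time field is declared as a plain long long holding nanoseconds since the epoch, which is the representation the latency arithmetic in net_plugin.cpp below expects. A minimal illustration of that representation in Python (the helper names are illustrative only, not part of the code base):

import time

def handshake_timestamp_ns() -> int:
    # Same unit as the handshake field: nanoseconds since the epoch.
    return time.time_ns()

def one_way_latency_ns(msg_time_ns: int) -> int:
    # Receiver-side estimate: local time now minus the sender's timestamp.
    return time.time_ns() - msg_time_ns
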
65 changes: 32 additions & 33 deletions plugins/net_plugin/net_plugin.cpp
@@ -49,6 +49,7 @@ namespace eosio {

using connection_ptr = std::shared_ptr<connection>;
using connection_wptr = std::weak_ptr<connection>;
namespace sc = std::chrono;

template <typename Strand>
void verify_strand_in_this_thread(const Strand& strand, const char* func, int line) {
@@ -138,6 +139,7 @@ namespace eosio {
uint32_t sync_req_span{0};
connection_ptr sync_source;
std::atomic<stages> sync_state{in_sync};
static const uint32_t block_interval_ns = sc::duration_cast<sc::nanoseconds>(sc::milliseconds(config::block_interval_ms)).count();

private:
constexpr static auto stage_str( stages s );
@@ -242,9 +244,6 @@ namespace eosio {
bool p2p_accept_transactions = true;
bool p2p_reject_incomplete_blocks = true;

/// Peer clock may be no more than 1 second skewed from our clock, including network latency.
const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}};

chain_id_type chain_id;
fc::sha256 node_id;
string user_agent_name;
@@ -1806,6 +1805,7 @@ namespace eosio {
}
sync_next_expected_num = std::max( lib_num + 1, sync_next_expected_num );

// p2p_high_latency_test.py test depends on this exact log statement.
peer_ilog( c, "Catching up with chain, our last req is ${cc}, theirs is ${t}",
("cc", sync_last_requested_num)("t", target) );

@@ -1839,36 +1839,37 @@

sync_reset_lib_num(c, false);

long long current_time_ns = sc::duration_cast<sc::nanoseconds>(sc::system_clock::now().time_since_epoch()).count();
auto network_latency_ns = current_time_ns - msg.time; // net latency in nanoseconds
// number of blocks the syncing node is behind the peer node
uint32_t nblk_behind_by_net_latency = static_cast<uint32_t>(network_latency_ns / block_interval_ns);
// Multiplied by 2 to account for the time it takes the message to reach the peer node, plus 1 to compensate for integer-division truncation
uint32_t nblk_combined_latency = 2 * nblk_behind_by_net_latency + 1;
// the log message below is used by the p2p_high_latency_test.py test
peer_dlog(c, "Network latency is ${lat}ms, ${num} blocks discrepancy by network latency, ${tot_num} blocks discrepancy expected once message received",
("lat", network_latency_ns/1000000)("num", nblk_behind_by_net_latency)("tot_num", nblk_combined_latency));
//--------------------------------
// sync need checks; (lib == last irreversible block)
//
// 0. my head block id == peer head id means we are all caught up block wise
// 1. my head block num < peer lib - send handshake (if not sent in handle_message) and wait for receipt of notice message to start syncing
// 2. my lib > peer head num - send an last_irr_catch_up notice if not the first generation
// 2. my lib > peer head num + nblk_combined_latency - send last_irr_catch_up notice if not the first generation
//
// 3 my head block num < peer head block num - update sync state and send a catchup request
// 4 my head block num >= peer block num send a notice catchup if this is not the first generation
// 3 my head block num + nblk_combined_latency < peer head block num - update sync state and send a catchup request
// 4 my head block num >= peer block num + nblk_combined_latency send a notice catchup if this is not the first generation
// 4.1 if peer appears to be on a different fork ( our_id_for( msg.head_num ) != msg.head_id )
// then request peer's blocks
//
//-----------------------------

uint32_t network_latency = (std::chrono::system_clock::now().time_since_epoch().count() - msg.time); //calculating network latency ==> current_time - handshake message timestamp
uint32_t num_block_behind_by_latency = 1 + (std::chrono::milliseconds(network_latency) / std::chrono::milliseconds(config::block_interval_ms)); // Number of blocks produced during networl latency period
if (head_id == msg.head_id || ( lib_num < msg.last_irreversible_block_num && msg.last_irreversible_block_num - lib_num <= num_block_behind_by_latency)) {
if (head_id == msg.head_id) {
peer_ilog( c, "handshake lib ${lib}, head ${head}, head id ${id}.. sync 0",
("lib", msg.last_irreversible_block_num)("head", msg.head_num)("id", msg.head_id.str().substr(8,16)) );
c->syncing = false;
notice_message note;
note.known_blocks.mode = none; //block mode none
if (head_id != msg.head_id){
note.known_trx.mode = none; // transaction mode none, since it is a few blocks behind
note.known_trx.pending = head;
}
else{
note.known_trx.mode = catch_up;
note.known_trx.pending = 0;
}
notice_message note;
note.known_blocks.mode = none;
note.known_trx.mode = catch_up;
note.known_trx.pending = 0;
c->enqueue( note );
return;
}
@@ -1881,7 +1882,7 @@ namespace eosio {
}
return;
}
if (lib_num > msg.head_num ) {
if (lib_num > msg.head_num + nblk_combined_latency ) {
peer_ilog( c, "handshake lib ${lib}, head ${head}, head id ${id}.. sync 2",
("lib", msg.last_irreversible_block_num)("head", msg.head_num)("id", msg.head_id.str().substr(8,16)) );
if (msg.generation > 1 || c->protocol_version > proto_base) {
@@ -1896,13 +1897,13 @@ namespace eosio {
return;
}

if (head < msg.head_num ) {
if (head + nblk_combined_latency < msg.head_num ) {
peer_ilog( c, "handshake lib ${lib}, head ${head}, head id ${id}.. sync 3",
("lib", msg.last_irreversible_block_num)("head", msg.head_num)("id", msg.head_id.str().substr(8,16)) );
c->syncing = false;
verify_catchup(c, msg.head_num, msg.head_id);
return;
} else {
} else if(head >= msg.head_num + nblk_combined_latency) {
peer_ilog( c, "handshake lib ${lib}, head ${head}, head id ${id}.. sync 4",
("lib", msg.last_irreversible_block_num)("head", msg.head_num)("id", msg.head_id.str().substr(8,16)) );
if (msg.generation > 1 || c->protocol_version > proto_base) {
@@ -1931,6 +1932,8 @@ namespace eosio {
}
} );
return;
} else {
peer_dlog( c, "Block discrepancy is within network latency range.");
}
}

@@ -2724,9 +2727,11 @@ namespace eosio {
pending_message_buffer.advance_read_ptr( message_length );
return true;
}

peer_dlog( this, "received block ${num}, id ${id}..., latency: ${latency}",
("num", bh.block_num())("id", blk_id.str().substr(8,16))
("latency", (fc::time_point::now() - bh.timestamp).count()/1000) );

if( !my_impl->sync_master->syncing_with_peer() ) { // guard against peer thinking it needs to send us old blocks
uint32_t lib = 0;
std::tie( lib, std::ignore, std::ignore, std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info();
@@ -2737,7 +2742,11 @@ namespace eosio {
if( blk_num < last_sent_lib ) {
peer_ilog( this, "received block ${n} less than sent lib ${lib}", ("n", blk_num)("lib", last_sent_lib) );
close();
} else {
}
/*else if(blk_num + num_block_behind_by_latency >= lib){
peer_ilog( this, "received block ${n} less than lib ${lib} which is beacuse net latency ${net}", ("n", blk_num)("lib", lib)("net", network_latency) );
} */
else {
peer_ilog( this, "received block ${n} less than lib ${lib}", ("n", blk_num)("lib", lib) );
my_impl->sync_master->reset_last_requested_num(my_impl->sync_master->locked_sync_mutex());
enqueue( (sync_request_message) {0, 0} );
@@ -3598,15 +3607,6 @@ namespace eosio {
}
}

namespace sc = std::chrono;
sc::system_clock::duration msg_time(msg.time);
auto time = sc::system_clock::now().time_since_epoch();
if(time - msg_time > peer_authentication_interval) {
fc_elog( logger, "Peer ${peer} sent a handshake with a timestamp skewed by more than ${time}.",
("peer", msg.p2p_address)("time", "1 second")); // TODO Add to_variant for std::chrono::system_clock::duration
return false;
}

if(msg.sig != chain::signature_type() && msg.token != sha256()) {
sha256 hash = fc::sha256::hash(msg.time);
if(hash != msg.token) {
@@ -3654,7 +3654,6 @@

// call from connection strand
bool connection::populate_handshake( handshake_message& hello ) {
namespace sc = std::chrono;
hello.network_version = net_version_base + net_version;
uint32_t lib, head;
std::tie( lib, std::ignore, head,
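
The new discrepancy logic above works entirely in nanoseconds: the one-way latency is divided by the block interval, doubled for the round trip, and padded by one block to absorb integer-division truncation. A small recalculation of nblk_combined_latency in Python, assuming the usual 500 ms block interval (the constant here is an assumption standing in for config::block_interval_ms):

BLOCK_INTERVAL_NS = 500 * 1_000_000  # assumed 500 ms block interval

def combined_block_latency(network_latency_ns: int) -> int:
    # Mirrors nblk_combined_latency in net_plugin.cpp: blocks produced during
    # the one-way latency, times two for the round trip, plus one to absorb
    # integer-division truncation.
    nblk_behind = network_latency_ns // BLOCK_INTERVAL_NS
    return 2 * nblk_behind + 1

# Example: with 1.1 s of one-way latency, two full blocks fit inside the delay,
# so a peer's head may legitimately be up to 2 * 2 + 1 = 5 blocks away.
assert combined_block_latency(1_100_000_000) == 5
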
4 changes: 4 additions & 0 deletions tests/CMakeLists.txt
@@ -69,6 +69,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/rodeos_multi_ship_test.py ${CMAKE_CUR
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/rodeos_multi_ship_kill_restart.py ${CMAKE_CURRENT_BINARY_DIR}/rodeos_multi_ship_kill_restart.py COPYONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/rodeos_wasmQL_http_timeout.py ${CMAKE_CURRENT_BINARY_DIR}/rodeos_wasmQL_http_timeout.py COPYONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/read_only_query_tests.py ${CMAKE_CURRENT_BINARY_DIR}/read_only_query_tests.py COPYONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py COPYONLY)

#To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. plugin_test -- --verbose
add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output)
Expand Down Expand Up @@ -221,6 +222,9 @@ set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests)
add_test(NAME nodeos_remote_lr_test COMMAND tests/nodeos_run_remote_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST nodeos_remote_lr_test PROPERTY LABELS long_running_tests)

add_test(NAME p2p_high_latency_test COMMAND tests/p2p_high_latency_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST p2p_high_latency_test PROPERTY LABELS nonparallelizable_tests)

#add_test(NAME distributed_transactions_lr_test COMMAND tests/distributed-transactions-test.py -d 2 -p 21 -n 21 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
#set_property(TEST distributed_transactions_lr_test PROPERTY LABELS long_running_tests)

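
With the script copied into the build tree and registered under the nonparallelizable_tests label, the new test should be runnable on its own from the build directory, presumably with something like ctest -R p2p_high_latency_test (the exact invocation is an assumption; the test name and label come from the CMake changes above).
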
