diff --git a/.cicd/platforms/pinned/amazon_linux-2-pinned.dockerfile b/.cicd/platforms/pinned/amazon_linux-2-pinned.dockerfile index e01a9b1c6a1..18628da1930 100644 --- a/.cicd/platforms/pinned/amazon_linux-2-pinned.dockerfile +++ b/.cicd/platforms/pinned/amazon_linux-2-pinned.dockerfile @@ -3,7 +3,7 @@ ENV VERSION 1 # install dependencies. RUN yum update -y && \ yum install -y which git sudo procps-ng util-linux autoconf automake \ - libtool make bzip2 bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel \ + libtool make bzip2 bzip2-devel openssl openssl-devel gmp-devel libstdc++ libcurl-devel \ libusbx-devel python3 python3-devel python-devel libedit-devel doxygen \ graphviz patch gcc gcc-c++ vim-common jq && \ yum clean all && rm -rf /var/cache/yum diff --git a/.cicd/platforms/pinned/centos-7.7-pinned.dockerfile b/.cicd/platforms/pinned/centos-7.7-pinned.dockerfile index 219a08da5d1..3ad169134fa 100644 --- a/.cicd/platforms/pinned/centos-7.7-pinned.dockerfile +++ b/.cicd/platforms/pinned/centos-7.7-pinned.dockerfile @@ -6,7 +6,7 @@ RUN yum update -y && \ yum --enablerepo=extras install -y centos-release-scl && \ yum --enablerepo=extras install -y devtoolset-8 && \ yum --enablerepo=extras install -y which git autoconf automake libtool make bzip2 doxygen \ - graphviz bzip2-devel openssl-devel gmp-devel ocaml \ + graphviz bzip2-devel openssl openssl-devel gmp-devel ocaml \ python python-devel rh-python36 file libusbx-devel \ libcurl-devel patch vim-common jq glibc-locale-source glibc-langpack-en && \ yum clean all && rm -rf /var/cache/yum diff --git a/.cicd/platforms/unpinned/amazon_linux-2-unpinned.dockerfile b/.cicd/platforms/unpinned/amazon_linux-2-unpinned.dockerfile index 00a9004932e..3987704d858 100644 --- a/.cicd/platforms/unpinned/amazon_linux-2-unpinned.dockerfile +++ b/.cicd/platforms/unpinned/amazon_linux-2-unpinned.dockerfile @@ -3,7 +3,7 @@ ENV VERSION 1 # install dependencies. 
RUN yum update -y && \ yum install -y which git sudo procps-ng util-linux autoconf automake \ - libtool make bzip2 bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel \ + libtool make bzip2 bzip2-devel openssl openssl-devel gmp-devel libstdc++ libcurl-devel \ libusbx-devel python3 python3-devel python-devel libedit-devel doxygen \ graphviz clang patch llvm-devel llvm-static vim-common jq && \ yum clean all && rm -rf /var/cache/yum diff --git a/.cicd/platforms/unpinned/centos-7.7-unpinned.dockerfile b/.cicd/platforms/unpinned/centos-7.7-unpinned.dockerfile index 42f557efdf2..2893a9546ae 100644 --- a/.cicd/platforms/unpinned/centos-7.7-unpinned.dockerfile +++ b/.cicd/platforms/unpinned/centos-7.7-unpinned.dockerfile @@ -6,7 +6,7 @@ RUN yum update -y && \ yum --enablerepo=extras install -y centos-release-scl && \ yum --enablerepo=extras install -y devtoolset-8 && \ yum --enablerepo=extras install -y which git autoconf automake libtool make bzip2 doxygen \ - graphviz bzip2-devel openssl-devel gmp-devel ocaml \ + graphviz bzip2-devel openssl openssl-devel gmp-devel ocaml \ python python-devel rh-python36 file libusbx-devel \ libcurl-devel patch vim-common jq llvm-toolset-7.0-llvm-devel llvm-toolset-7.0-llvm-static \ glibc-locale-source glibc-langpack-en && \ diff --git a/.gitignore b/.gitignore index 8cd0213e1d7..db9cde05559 100644 --- a/.gitignore +++ b/.gitignore @@ -87,4 +87,4 @@ var/lib/node_* *.iws .DS_Store -!*.swagger.* +!*.swagger.* \ No newline at end of file diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index d4c1bbb6e43..7613d66422b 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -71,6 +71,7 @@ set(CHAIN_WEBASSEMBLY_SOURCES webassembly/softfloat.cpp webassembly/system.cpp webassembly/transaction.cpp + webassembly/security_group.cpp ) ## SORT .cpp by most likely to change / break compile diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 
b7cd3036ac3..ace383921a5 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -167,6 +167,8 @@ namespace eosio { namespace chain { result.producer_to_last_implied_irb[proauth.producer_name] = dpos_proposed_irreversible_blocknum; } + result.security_group = get_security_group_info(); + return result; } @@ -314,6 +316,8 @@ namespace eosio { namespace chain { result.activated_protocol_features = std::move( new_activated_protocol_features ); + result.set_security_group_info(std::move(security_group)); + return result; } diff --git a/libraries/chain/combined_database.cpp b/libraries/chain/combined_database.cpp index 4795ac15edd..dbb89b775e2 100644 --- a/libraries/chain/combined_database.cpp +++ b/libraries/chain/combined_database.cpp @@ -389,6 +389,8 @@ namespace eosio { namespace chain { header.validate(); }); + snapshot->chain_snapshot_version = header.version; + db.create([](auto&) {}); check_backing_store_setting(true); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 89951daa385..c5d55772145 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -252,6 +252,7 @@ struct controller_impl { set_activation_handler(); set_activation_handler(); set_activation_handler(); + set_activation_handler(); self.irreversible_block.connect([this](const block_state_ptr& bsp) { wasmif.current_lib(bsp->block_num); @@ -1432,6 +1433,29 @@ struct controller_impl { }); } + if (gpo.proposed_security_group_block_num) { + if (gpo.proposed_security_group_block_num <= pbhs.dpos_irreversible_blocknum) { + + // Promote proposed security group to pending. 
+ if( !replay_head_time ) { + ilog( "promoting proposed security group (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} participants: ${participants} ", + ("proposed_num", gpo.proposed_security_group_block_num)("n", pbhs.block_num) + ("lib", pbhs.dpos_irreversible_blocknum) + ("participants", gpo.proposed_security_group_participants ) ); + } + + ++bb._pending_block_header_state.security_group.version; + bb._pending_block_header_state.security_group.participants = { + gpo.proposed_security_group_participants.begin(), + gpo.proposed_security_group_participants.end()}; + + db.modify(gpo, [&](auto& gp) { + gp.proposed_security_group_block_num = 0; + gp.proposed_security_group_participants.clear(); + }); + } + } + try { transaction_metadata_ptr onbtrx = transaction_metadata::create_no_recover_keys( std::make_shared( get_on_block_transaction(), true ), @@ -1736,6 +1760,9 @@ struct controller_impl { if( !use_bsp_cached ) { bsp->set_trxs_metas( std::move( ab._trx_metas ), !skip_auth_checks ); } + + auto& pbsh = ab._pending_block_header_state; + bsp->set_security_group_info(std::move(pbsh.security_group)); // create completed_block with the existing block_state as we just verified it is the same as assembled_block pending->_block_stage = completed_block{ bsp }; @@ -2232,6 +2259,37 @@ struct controller_impl { return deep_mind_logger; } + int64_t propose_security_group(std::function&)> && modify_participants) { + const auto& gpo = self.get_global_properties(); + auto cur_block_num = head->block_num + 1; + + if (!self.is_builtin_activated(builtin_protocol_feature_t::security_group)) { + return -1; + } + + flat_set proposed_participants = gpo.proposed_security_group_block_num == 0 + ? 
self.active_security_group().participants + : flat_set{gpo.proposed_security_group_participants.begin(), + gpo.proposed_security_group_participants.end()}; + + auto orig_participants_size = proposed_participants.size(); + + modify_participants(proposed_participants); + + if (orig_participants_size == proposed_participants.size()) { + // no changes in the participants + return -1; + } + + db.modify(gpo, [&proposed_participants, cur_block_num](auto& gp) { + gp.proposed_security_group_block_num = cur_block_num; + gp.set_proposed_security_group_participants(proposed_participants.begin(), + proposed_participants.end()); + }); + + return 0; + } + }; /// controller_impl const resource_limits_manager& controller::get_resource_limits_manager()const @@ -2817,6 +2875,44 @@ int64_t controller::set_proposed_producers( vector producers return version; } +const security_group_info_t& controller::active_security_group() const { + if( !(my->pending) ) + return my->head->get_security_group_info(); + + return std::visit( + overloaded{ + [](const building_block& bb) -> const security_group_info_t& { return bb._pending_block_header_state.security_group; }, + [](const assembled_block& ab) -> const security_group_info_t& { return ab._pending_block_header_state.security_group; }, + [](const completed_block& cb) -> const security_group_info_t& { return cb._block_state->get_security_group_info(); }}, + my->pending->_block_stage); +} + +flat_set controller::proposed_security_group_participants() const { + return {get_global_properties().proposed_security_group_participants.begin(), + get_global_properties().proposed_security_group_participants.end()}; +} + +int64_t controller::add_security_group_participants(const flat_set& participants) { + return participants.size() == 0 ? 
-1 : my->propose_security_group([&participants](auto& pending_participants) { + pending_participants.insert(participants.begin(), participants.end()); + }); +} + +int64_t controller::remove_security_group_participants(const flat_set& participants) { + return participants.size() == 0 ? -1 : my->propose_security_group([&participants](auto& pending_participants) { + flat_set::sequence_type tmp; + tmp.reserve(pending_participants.size()); + std::set_difference(pending_participants.begin(), pending_participants.end(), participants.begin(), + participants.end(), std::back_inserter(tmp)); + pending_participants.adopt_sequence(std::move(tmp)); + }); +} + +bool controller::in_active_security_group(const flat_set& participants) const { + const auto& active = active_security_group().participants; + return std::includes(active.begin(), active.end(), participants.begin(), participants.end()); +} + const producer_authority_schedule& controller::active_producers()const { if( !(my->pending) ) return my->head->active_schedule; @@ -3324,6 +3420,17 @@ void controller_impl::on_activation +void controller_impl::on_activation() { + db.modify( db.get(), [&]( auto& ps ) { + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "add_security_group_participants" ); + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "remove_security_group_participants" ); + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "in_active_security_group" ); + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "get_active_security_group" ); + } ); +} + /// End of protocol feature activation handlers } } /// eosio::chain diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index e14975cb37c..b74414d2a12 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -9,6 +9,7 @@ #include #include #include +#include namespace eosio { namespace chain { using boost::multi_index_container; @@ -17,7 +18,7 @@ namespace eosio { namespace chain { const 
uint32_t fork_database::magic_number = 0x30510FDB; const uint32_t fork_database::min_supported_version = 1; - const uint32_t fork_database::max_supported_version = 1; + const uint32_t fork_database::max_supported_version = 2; // work around block_state::is_valid being private inline bool block_state_is_valid( const block_state& bs ) { @@ -122,14 +123,23 @@ namespace eosio { namespace chain { ("max", max_supported_version) ); + // The unpack_strm here is used only to unpack `block_header_state` and `block_state`. However, those two + // classes are written to unpack based on the snapshot version; therefore, we orient it to the snapshot version. + + const bool has_block_header_state_extension = version > min_supported_version; + versioned_unpack_stream unpack_strm( + ds, has_block_header_state_extension + ? block_header_state::minimum_snapshot_version_with_state_extension + : block_header_state::minimum_snapshot_version_with_state_extension - 1); + block_header_state bhs; - fc::raw::unpack( ds, bhs ); + fc::raw::unpack( unpack_strm, bhs ); reset( bhs ); unsigned_int size; fc::raw::unpack( ds, size ); for( uint32_t i = 0, n = size.value; i < n; ++i ) { block_state s; - fc::raw::unpack( ds, s ); + fc::raw::unpack( unpack_strm, s ); // do not populate transaction_metadatas, they will be created as needed in apply_block with appropriate key recovery s.header_exts = s.block->validate_and_extract_header_extensions(); my->add( std::make_shared( move( s ) ), false, true, validator ); diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index 259a593fa21..5426b251ff2 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #include namespace eosio { namespace chain { @@ -47,6 +48,11 @@ using signer_callback_type = std::function(const dig struct block_header_state; 
+struct security_group_info_t { + uint32_t version = 0; + boost::container::flat_set participants; +}; + namespace detail { struct block_header_state_common { uint32_t block_num = 0; @@ -80,6 +86,7 @@ struct pending_block_header_state : public detail::block_header_state_common { block_timestamp_type timestamp; uint32_t active_schedule_version = 0; uint16_t confirmed = 1; + security_group_info_t security_group; signed_block_header make_block_header( const checksum256_type& transaction_mroot, const checksum256_type& action_mroot, @@ -110,11 +117,17 @@ struct pending_block_header_state : public detail::block_header_state_common { const vector& )>& validator )&&; }; + + /** * @struct block_header_state * @brief defines the minimum state necessary to validate transaction headers */ struct block_header_state : public detail::block_header_state_common { + + /// this version is coming from chain_snapshot_header.version + static constexpr uint32_t minimum_snapshot_version_with_state_extension = 6; + block_id_type id; signed_block_header header; detail::schedule_info pending_schedule; @@ -125,11 +138,22 @@ struct block_header_state : public detail::block_header_state_common { /// duplication of work flat_multimap header_exts; + struct state_extension_v0 { + security_group_info_t security_group_info; + }; + + // For future extension, one should use + // + // struct state_extension_v1 : state_extension_v0 { new_field_t new_field }; + // using state_extension_t = std::variant state_extension; + + using state_extension_t = std::variant; + state_extension_t state_extension; + block_header_state() = default; - explicit block_header_state( detail::block_header_state_common&& base ) - :detail::block_header_state_common( std::move(base) ) - {} + explicit block_header_state(detail::block_header_state_common&& base) + : detail::block_header_state_common(std::move(base)) {} explicit block_header_state( legacy::snapshot_block_header_state_v2&& snapshot ); @@ -152,7 +176,16 @@ struct 
block_header_state : public detail::block_header_state_common { void sign( const signer_callback_type& signer ); void verify_signee()const; - const vector& get_new_protocol_feature_activations()const; + const vector& get_new_protocol_feature_activations() const; + + void set_security_group_info(security_group_info_t&& new_info) { + std::visit([&new_info](auto& v) { v.security_group_info = std::move(new_info); }, state_extension); + } + + const security_group_info_t& get_security_group_info() const { + return std::visit([](const auto& v) -> const security_group_info_t& { return v.security_group_info; }, + state_extension); + } }; using block_header_state_ptr = std::shared_ptr; @@ -177,6 +210,12 @@ FC_REFLECT( eosio::chain::detail::schedule_info, (schedule) ) +FC_REFLECT(eosio::chain::security_group_info_t, (version)(participants)) + +FC_REFLECT( eosio::chain::block_header_state::state_extension_v0, + (security_group_info) +) + // @ignore header_exts FC_REFLECT_DERIVED( eosio::chain::block_header_state, (eosio::chain::detail::block_header_state_common), (id) @@ -184,9 +223,9 @@ FC_REFLECT_DERIVED( eosio::chain::block_header_state, (eosio::chain::detail::bl (pending_schedule) (activated_protocol_features) (additional_signatures) + (state_extension) ) - FC_REFLECT( eosio::chain::legacy::snapshot_block_header_state_v2::schedule_info, ( schedule_lib_num ) ( schedule_hash ) @@ -209,3 +248,50 @@ FC_REFLECT( eosio::chain::legacy::snapshot_block_header_state_v2, ( pending_schedule ) ( activated_protocol_features ) ) + +namespace fc { +namespace raw { +namespace detail { + +// C++20 Concept +// +// template +// concept VersionedStream = requires (T t) { +// t.version; +// } +// + +template +struct unpack_block_header_state_derived_visitor : fc::reflector_init_visitor { + + unpack_block_header_state_derived_visitor(Class& _c, VersionedStream& _s) + : fc::reflector_init_visitor(_c) + , s(_s) {} + + template + inline void operator()(const char* name) const { + try { + if 
constexpr (std::is_same_vobj.*p)>>) + if (s.version < eosio::chain::block_header_state::minimum_snapshot_version_with_state_extension) + return; + + fc::raw::unpack(s, this->obj.*p); + } + FC_RETHROW_EXCEPTIONS(warn, "Error unpacking field ${field}", ("field", name)) + } + + private: + VersionedStream& s; +}; + +template +struct unpack_object_visitor + : unpack_block_header_state_derived_visitor { + using Base = unpack_block_header_state_derived_visitor; + using Base::Base; +}; + +} // namespace detail +} // namespace raw +} // namespace fc diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index a507c373585..dc8f9b286b7 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -7,7 +7,7 @@ namespace eosio { namespace chain { - struct block_state : public block_header_state { + struct block_state final : block_header_state { block_state( const block_header_state& prev, signed_block_ptr b, const protocol_feature_set& pfs, @@ -71,3 +71,19 @@ namespace eosio { namespace chain { // @ignore _pub_keys_recovered _cached_trxs FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated) ) + +namespace fc { +namespace raw { +namespace detail { + +template +struct unpack_object_visitor + : unpack_block_header_state_derived_visitor { + using Base = unpack_block_header_state_derived_visitor; + using Base::Base; +}; + +} // namespace detail +} // namespace raw +} // namespace fc + diff --git a/libraries/chain/include/eosio/chain/chain_snapshot.hpp b/libraries/chain/include/eosio/chain/chain_snapshot.hpp index d2bd01492f1..957c091db45 100644 --- a/libraries/chain/include/eosio/chain/chain_snapshot.hpp +++ b/libraries/chain/include/eosio/chain/chain_snapshot.hpp @@ -16,16 +16,19 @@ struct chain_snapshot_header { * - WebAuthn keys * - wtmsig block siganatures: the block header state changed to include 
producer authorities and additional signatures - removed genesis_state and added chain ID to global_property_object - 4: Updated for v3.0.0 protocol features: + 4: Updated for v2.1.0 protocol features: * - forwards compatible with versions 2 and 3 * - kv database * - Configurable wasm limits - 5: Updated for v3.0.0 eos features: + 5: Updated for v2.1.0 eos features: * - chain_config update + * 6: Updated for v2.2.0 eos features: + * - block_header_state::state_extension + * - snapshot_global_property_object::extension */ static constexpr uint32_t minimum_compatible_version = 2; - static constexpr uint32_t current_version = 5; + static constexpr uint32_t current_version = 6; uint32_t version = current_version; diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index f0eccd31d7f..f65756a9289 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -294,6 +294,14 @@ namespace eosio { namespace chain { int64_t set_proposed_producers( vector producers ); + const security_group_info_t& active_security_group() const; + flat_set proposed_security_group_participants() const; + + int64_t add_security_group_participants(const flat_set& participants); + int64_t remove_security_group_participants(const flat_set& participants); + + bool in_active_security_group(const flat_set& participants) const; + bool light_validation_allowed() const; bool skip_auth_check()const; bool skip_trx_checks()const; diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 21356b19dd5..34159e2861e 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -681,4 +681,11 @@ namespace eosio { namespace chain { FC_DECLARE_DERIVED_EXCEPTION( state_history_exception, chain_exception, 3280000, "State history exception" ) + 
FC_DECLARE_DERIVED_EXCEPTION( ssl_exception, chain_exception, + 3290000, "SSL exception") + + FC_DECLARE_DERIVED_EXCEPTION( ssl_incomplete_configuration, ssl_exception, + 3290001, "Incomplete SSL configuration") + FC_DECLARE_DERIVED_EXCEPTION( ssl_configuration_error, ssl_exception, + 3290002, "SSL configuration error") } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index 7b7497e34ec..524a60ecbf9 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -62,7 +62,7 @@ namespace eosio { namespace chain { */ class global_property_object : public chainbase::object { - OBJECT_CTOR(global_property_object, (proposed_schedule)) + OBJECT_CTOR(global_property_object, (proposed_schedule)(proposed_security_group_participants)) public: id_type id; @@ -72,8 +72,12 @@ namespace eosio { namespace chain { chain_id_type chain_id; kv_database_config kv_configuration; wasm_config wasm_configuration; + block_num_type proposed_security_group_block_num = 0; + // members that are containers need to be shared_* containers, since this object is stored in a multi-index container + shared_set proposed_security_group_participants; - void initalize_from( const legacy::snapshot_global_property_object_v2& legacy, const chain_id_type& chain_id_val, const kv_database_config& kv_config_val, const wasm_config& wasm_config_val ) { + void initalize_from(const legacy::snapshot_global_property_object_v2& legacy, const chain_id_type& chain_id_val, + const kv_database_config& kv_config_val, const wasm_config& wasm_config_val) { proposed_schedule_block_num = legacy.proposed_schedule_block_num; proposed_schedule = producer_authority_schedule(legacy.proposed_schedule).to_shared(proposed_schedule.producers.get_allocator()); configuration = legacy.configuration; @@ -99,6 +103,13 @@ namespace eosio { namespace 
chain { kv_configuration = legacy.kv_configuration; wasm_configuration = legacy.wasm_configuration; } + + template + void set_proposed_security_group_participants(Iter begin, Iter end) { + proposed_security_group_participants = {begin, end, + proposed_security_group_participants.key_comp(), + proposed_security_group_participants.get_allocator()}; + } }; @@ -118,25 +129,77 @@ namespace eosio { namespace chain { chain_id_type chain_id; kv_database_config kv_configuration; wasm_config wasm_configuration; + + static constexpr uint32_t minimum_version_with_extension = 6; + + struct extension_v0 { + // libstdc++ requires the following two constructors to work. + extension_v0(){}; + extension_v0(block_num_type num, flat_set participants) + : proposed_security_group_block_num(num) + , proposed_security_group_participants(std::move(participants)) {} + + block_num_type proposed_security_group_block_num = 0; + flat_set proposed_security_group_participants; + }; + + // for future extensions, please use the following pattern: + // + // struct extension_v1 : extension_v0 { new_field_t new_field; }; + // using extension_t = std::variant; + // + // In addition, get_gpo_extension(), set_gpo_extension() and + // eosio::ship_protocol::global_property from ship_protocol.hpp + // in abieos has to be changed accordingly. 
+ + using extension_t = std::variant; + extension_t extension; }; + inline snapshot_global_property_object::extension_t get_gpo_extension(const global_property_object& gpo) { + return snapshot_global_property_object::extension_v0{ + gpo.proposed_security_group_block_num, + {gpo.proposed_security_group_participants.begin(), gpo.proposed_security_group_participants.end()}}; + } + + inline void set_gpo_extension(global_property_object& gpo, + const snapshot_global_property_object::extension_t& extension) { + std::visit( + [&gpo](auto& ext) { + gpo.proposed_security_group_block_num = ext.proposed_security_group_block_num; + gpo.set_proposed_security_group_participants(ext.proposed_security_group_participants.begin(), + ext.proposed_security_group_participants.end()); + }, + extension); + } + namespace detail { template<> struct snapshot_row_traits { using value_type = global_property_object; using snapshot_type = snapshot_global_property_object; - static snapshot_global_property_object to_snapshot_row( const global_property_object& value, const chainbase::database& ) { - return {value.proposed_schedule_block_num, producer_authority_schedule::from_shared(value.proposed_schedule), value.configuration, value.chain_id, value.kv_configuration, value.wasm_configuration}; + static snapshot_global_property_object to_snapshot_row(const global_property_object& value, + const chainbase::database&) { + return {value.proposed_schedule_block_num, + producer_authority_schedule::from_shared(value.proposed_schedule), + value.configuration, + value.chain_id, + value.kv_configuration, + value.wasm_configuration, + get_gpo_extension(value)}; } - static void from_snapshot_row( snapshot_global_property_object&& row, global_property_object& value, chainbase::database& ) { + static void from_snapshot_row(snapshot_global_property_object&& row, global_property_object& value, + chainbase::database&) { value.proposed_schedule_block_num = row.proposed_schedule_block_num; - value.proposed_schedule 
= row.proposed_schedule.to_shared(value.proposed_schedule.producers.get_allocator()); - value.configuration = row.configuration; - value.chain_id = row.chain_id; - value.kv_configuration = row.kv_configuration; + value.proposed_schedule = + row.proposed_schedule.to_shared(value.proposed_schedule.producers.get_allocator()); + value.configuration = row.configuration; + value.chain_id = row.chain_id; + value.kv_configuration = row.kv_configuration; value.wasm_configuration = row.wasm_configuration; + set_gpo_extension(value, row.extension); } }; } @@ -172,6 +235,7 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::dynamic_global_property_object, FC_REFLECT(eosio::chain::global_property_object, (proposed_schedule_block_num)(proposed_schedule)(configuration)(chain_id)(kv_configuration)(wasm_configuration) + (proposed_security_group_block_num)(proposed_security_group_participants) ) FC_REFLECT(eosio::chain::legacy::snapshot_global_property_object_v2, @@ -186,10 +250,45 @@ FC_REFLECT(eosio::chain::legacy::snapshot_global_property_object_v4, (proposed_schedule_block_num)(proposed_schedule)(configuration)(chain_id)(kv_configuration)(wasm_configuration) ) +FC_REFLECT(eosio::chain::snapshot_global_property_object::extension_v0, + (proposed_security_group_block_num)(proposed_security_group_participants) + ) + FC_REFLECT(eosio::chain::snapshot_global_property_object, - (proposed_schedule_block_num)(proposed_schedule)(configuration)(chain_id)(kv_configuration)(wasm_configuration) + (proposed_schedule_block_num)(proposed_schedule)(configuration)(chain_id)(kv_configuration)(wasm_configuration)(extension) ) FC_REFLECT(eosio::chain::dynamic_global_property_object, (global_action_sequence) ) + +namespace fc { +namespace raw { +namespace detail { + +template +struct unpack_object_visitor + : fc::reflector_init_visitor { + unpack_object_visitor(eosio::chain::snapshot_global_property_object& _c, VersionedStream& _s) + : fc::reflector_init_visitor(_c) + , s(_s) {} + + template + inline void 
operator()(const char* name) const { + try { + if constexpr (std::is_same_vobj.*p)>>) + if (s.version < eosio::chain::snapshot_global_property_object::minimum_version_with_extension) + return; + + fc::raw::unpack(s, this->obj.*p); + } + FC_RETHROW_EXCEPTIONS(warn, "Error unpacking field ${field}", ("field", name)) + } + + private: + VersionedStream& s; +}; +} // namespace detail +} // namespace raw +} // namespace fc diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 22b0df48905..bf3ecb7cfa8 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -26,7 +26,8 @@ enum class builtin_protocol_feature_t : uint32_t { action_return_value, kv_database, configurable_wasm_limits, - blockchain_parameters + blockchain_parameters, + security_group }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index e5bfd262439..2b0f2fc6663 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -10,8 +11,10 @@ namespace eosio { namespace chain { /** * History: * Version 1: initial version with string identified sections and rows + * Version 2: support block_header_state with state extensions */ - static const uint32_t current_snapshot_version = 1; + static const uint32_t minimum_snapshot_version = 1; + static const uint32_t current_snapshot_version = 2; namespace detail { template @@ -156,7 +159,7 @@ namespace eosio { namespace chain { namespace detail { struct abstract_snapshot_row_reader { - virtual void provide(std::istream& in) const = 0; + virtual void provide(versioned_unpack_stream& in) const = 0; virtual void provide(const fc::variant&) const = 
0; virtual std::string row_type_name() const = 0; }; @@ -195,8 +198,7 @@ namespace eosio { namespace chain { explicit snapshot_row_reader( T& data ) :data(data) {} - - void provide(std::istream& in) const override { + void provide(versioned_unpack_stream& in) const override { row_validation_helper::apply(data, [&in,this](){ fc::raw::unpack(in, data); }); @@ -283,6 +285,8 @@ namespace eosio { namespace chain { virtual ~snapshot_reader(){}; + uint32_t chain_snapshot_version = 0; + protected: virtual bool has_section( const std::string& section_name ) = 0; virtual void set_section( const std::string& section_name ) = 0; @@ -359,8 +363,7 @@ namespace eosio { namespace chain { private: bool validate_section() const; - - std::istream& snapshot; + std::istream& snapshot; std::streampos header_pos; uint64_t num_rows; uint64_t cur_row; diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index d8184035a29..a161853e02c 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -330,7 +330,6 @@ namespace eosio { namespace chain { } }; - /** * Extentions are prefixed with type and are a buffer that can be * interpreted by code that is aware and ignored by unaware code. diff --git a/libraries/chain/include/eosio/chain/versioned_unpack_stream.hpp b/libraries/chain/include/eosio/chain/versioned_unpack_stream.hpp new file mode 100644 index 00000000000..c4c0ea19787 --- /dev/null +++ b/libraries/chain/include/eosio/chain/versioned_unpack_stream.hpp @@ -0,0 +1,29 @@ +#pragma once + +namespace eosio { namespace chain { + +/// +/// Provide an extra `version` context to a stream to unpack eosio::chain::block_header_state and +/// eosio::chain::global_property_object +/// +/// eosio::chain::block_header_state was not designed to be extensible by itself. 
In order to +/// add a new field to eosio::chain::block_header_state which provides backward compatibility, we +/// need to add version support for eosio::chain::block_header_state. The version is not embedded +/// to eosio::chain::block_header_state, it is derived from the version of snapshot and fork +/// database. This class provides the version information so that eosio::chain::block_header_state +/// can be correctly unpacked. +/// +/// +template +struct versioned_unpack_stream { + + versioned_unpack_stream(Stream& stream, uint32_t ver) + : strm(stream) + , version(ver) {} + Stream& strm; + uint32_t version; + inline void read(char* data, std::size_t len) { strm.read(data, len); } + inline auto get(char& c) ->decltype(strm.get(c)) { return strm.get(c); } +}; +}} + diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic_mapping.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic_mapping.hpp index fdf5d162ce8..db050ca5c50 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic_mapping.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic_mapping.hpp @@ -277,7 +277,11 @@ inline constexpr auto get_intrinsic_table() { "env.get_wasm_parameters_packed", "env.set_wasm_parameters_packed", "env.get_parameters_packed", - "env.set_parameters_packed" + "env.set_parameters_packed", + "env.add_security_group_participants", + "env.remove_security_group_participants", + "env.in_active_security_group", + "env.get_active_security_group" ); } inline constexpr std::size_t find_intrinsic_index(std::string_view hf) { diff --git a/libraries/chain/include/eosio/chain/webassembly/interface.hpp b/libraries/chain/include/eosio/chain/webassembly/interface.hpp index bc53b9ee72d..2f1d9abd36e 100644 --- a/libraries/chain/include/eosio/chain/webassembly/interface.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/interface.hpp @@ -1959,6 +1959,48 @@ namespace webassembly { int32_t 
__lttf2(uint64_t, uint64_t, uint64_t, uint64_t) const; int32_t __unordtf2(uint64_t, uint64_t, uint64_t, uint64_t) const; + // security group api + /** + * Propose new participants to the security group. + * + * @ingroup security-group + * @param packed_participants - the buffer containing the packed participants. + * + * @return -1 if proposing a new security group was unsuccessful, otherwise returns 0. + */ + int64_t add_security_group_participants(span packed_participants); + + /** + * Propose to remove participants from the security group. + * + * @ingroup security-group + * @param packed_participants - the buffer containing the packed participants. + * + * @return -1 if proposing a new security group was unsuccessful, otherwise returns 0. + */ + int64_t remove_security_group_participants(span packed_participants); + + /** + * Check if the specified accounts are all in the active security group. + * + * @ingroup security-group + * @param packed_participants - the buffer containing the packed participants. + * + * @return Returns true if the specified accounts are all in the active security group. + */ + bool in_active_security_group(span packed_participants) const; + + /** + * Gets the active security group + * + * @ingroup security-group + * @param[out] packed_security_group - the buffer containing the packed security_group. + * + * @return Returns the size required in the buffer (if the buffer is too small, nothing is written). + * + */ + uint32_t get_active_security_group(span packed_security_group) const; + private: apply_context& context; }; diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 82da9deb542..312366909a5 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -230,6 +230,17 @@ Builtin protocol feature: BLOCKCHAIN_PARAMETERS Allows privileged contracts to get and set subsets of blockchain parameters. 
*/ + ( builtin_protocol_feature_t::security_group, builtin_protocol_feature_spec{ + "SECURITY_GROUP", + fc::variant("72ec6337e369cbb33ef7716d3267db9d5678fe54555c25ca4c9f5b9dfb7739f3").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: SECURITY_GROUP + +Allows privileged contracts to add/remove participants for mutual TLS enforcement. +*/ + {} + } ) ; diff --git a/libraries/chain/snapshot.cpp b/libraries/chain/snapshot.cpp index 66966ef6be5..80e3f3f850d 100644 --- a/libraries/chain/snapshot.cpp +++ b/libraries/chain/snapshot.cpp @@ -47,9 +47,10 @@ void variant_snapshot_reader::validate() const { EOS_ASSERT(version.is_integer(), snapshot_validation_exception, "Variant snapshot version is not an integer"); - EOS_ASSERT(version.as_uint64() == (uint64_t)current_snapshot_version, snapshot_validation_exception, - "Variant snapshot is an unsuppored version. Expected : ${expected}, Got: ${actual}", - ("expected", current_snapshot_version)("actual",o["version"].as_uint64())); + uint64_t got_version = version.as_uint64(); + EOS_ASSERT(got_version >= minimum_snapshot_version && got_version<= current_snapshot_version, snapshot_validation_exception, + "Variant snapshot is an unsupported version. Expected : [${min_version}, ${current}] , Got: ${actual}", + ("min_version", minimum_snapshot_version) ("current", current_snapshot_version)("actual",got_version)); EOS_ASSERT(o.contains("sections"), snapshot_validation_exception, "Variant snapshot has no sections"); @@ -217,12 +218,11 @@ void istream_snapshot_reader::validate() const { "Binary snapshot has unexpected magic number!"); // validate version - auto expected_version = current_snapshot_version; - decltype(expected_version) actual_version; - snapshot.read((char*)&actual_version, sizeof(actual_version)); - EOS_ASSERT(actual_version == expected_version, snapshot_exception, - "Binary snapshot is an unsuppored version. 
Expected : ${expected}, Got: ${actual}", - ("expected", expected_version)("actual", actual_version)); + uint32_t version; + snapshot.read((char*)&version, sizeof(version)); + EOS_ASSERT(version > 0 && version <= current_snapshot_version, snapshot_exception, + "Binary snapshot is an unsupported version. version is ${actual} while code supports version(s) [1,${max}]", + ("max", current_snapshot_version)("actual", version)); while (validate_section()) {} } catch( const std::exception& e ) { \ @@ -328,7 +328,8 @@ void istream_snapshot_reader::set_section( const string& section_name ) { } bool istream_snapshot_reader::read_row( detail::abstract_snapshot_row_reader& row_reader ) { - row_reader.provide(snapshot); + versioned_unpack_stream unpack_strm(snapshot, chain_snapshot_version); + row_reader.provide(unpack_strm); return ++cur_row < num_rows; } diff --git a/libraries/chain/webassembly/runtimes/eos-vm.cpp b/libraries/chain/webassembly/runtimes/eos-vm.cpp index 1c53b62d654..66aed48eae5 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm.cpp @@ -275,6 +275,12 @@ REGISTER_HOST_FUNCTION(set_kv_parameters_packed, privileged_check); REGISTER_HOST_FUNCTION(is_privileged, privileged_check); REGISTER_HOST_FUNCTION(set_privileged, privileged_check); +// security group api +REGISTER_HOST_FUNCTION(add_security_group_participants, privileged_check); +REGISTER_HOST_FUNCTION(remove_security_group_participants, privileged_check); +REGISTER_HOST_FUNCTION(in_active_security_group); +REGISTER_HOST_FUNCTION(get_active_security_group); + // softfloat api REGISTER_INJECTED_HOST_FUNCTION(_eosio_f32_add); REGISTER_INJECTED_HOST_FUNCTION(_eosio_f32_sub); diff --git a/libraries/chain/webassembly/security_group.cpp b/libraries/chain/webassembly/security_group.cpp new file mode 100644 index 00000000000..2ebd538ac51 --- /dev/null +++ b/libraries/chain/webassembly/security_group.cpp @@ -0,0 +1,43 @@ + +#include +#include + +namespace eosio 
{ +namespace chain { +namespace webassembly { + +int64_t interface::add_security_group_participants(span packed_participants) { + datastream ds(packed_participants.data(), packed_participants.size()); + flat_set participants; + fc::raw::unpack(ds, participants); + return context.control.add_security_group_participants(participants); +} + +int64_t interface::remove_security_group_participants(span packed_participants) { + datastream ds(packed_participants.data(), packed_participants.size()); + flat_set participants; + fc::raw::unpack(ds, participants); + return context.control.remove_security_group_participants(participants); +} + +bool interface::in_active_security_group(span packed_participants) const { + datastream ds(packed_participants.data(), packed_participants.size()); + flat_set participants; + fc::raw::unpack(ds, participants); + return context.control.in_active_security_group(participants); +} + +uint32_t interface::get_active_security_group(span packed_security_group) const { + datastream size_strm; + const auto& active_security_group = context.control.active_security_group(); + fc::raw::pack(size_strm, active_security_group); + if (size_strm.tellp() <= packed_security_group.size()) { + datastream ds(packed_security_group.data(), packed_security_group.size()); + fc::raw::pack(ds, active_security_group); + } + return size_strm.tellp(); +} + +} // namespace webassembly +} // namespace chain +} // namespace eosio diff --git a/libraries/state_history/include/eosio/state_history/serialization.hpp b/libraries/state_history/include/eosio/state_history/serialization.hpp index f6522422593..aaa14773a98 100644 --- a/libraries/state_history/include/eosio/state_history/serialization.hpp +++ b/libraries/state_history/include/eosio/state_history/serialization.hpp @@ -359,13 +359,16 @@ ST& operator<<(ST& ds, const history_serial_wrapper& template ST& operator<<(ST& ds, const history_serial_wrapper& obj) { - fc::raw::pack(ds, fc::unsigned_int(1)); + const 
fc::unsigned_int global_property_version = 2; + fc::raw::pack(ds, global_property_version); fc::raw::pack(ds, as_type>(obj.obj.proposed_schedule_block_num)); fc::raw::pack(ds, make_history_serial_wrapper( obj.db, as_type(obj.obj.proposed_schedule))); fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(obj.obj.configuration))); fc::raw::pack(ds, as_type(obj.obj.chain_id)); - + fc::raw::pack(ds, as_type(obj.obj.kv_configuration)); + fc::raw::pack(ds, as_type(obj.obj.wasm_configuration)); + fc::raw::pack(ds, eosio::chain::get_gpo_extension(obj.obj)); return ds; } diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index dfec8b90f10..b57cbdfa246 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -225,6 +225,7 @@ namespace eosio { namespace testing { const variant_object& data, uint32_t expiration = DEFAULT_EXPIRATION_DELTA, uint32_t delay_sec = 0 ); + transaction_trace_ptr push_action_no_produce(action&& act, uint64_t authorizer); action get_action( account_name code, action_name acttype, vector auths, @@ -480,6 +481,10 @@ namespace eosio { namespace testing { tester(const std::function& control_setup, setup_policy policy = setup_policy::full, db_read_mode read_mode = db_read_mode::SPECULATIVE); + tester(const std::function& lambda) { + lambda(*this); + } + using base_tester::produce_block; signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 71d01654b04..cbce14de3ef 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -632,6 +632,20 @@ namespace eosio { namespace testing { return success(); } + transaction_trace_ptr base_tester::push_action_no_produce(action&& act, uint64_t authorizer) { + signed_transaction trx; + if (authorizer) { + act.authorization = 
vector{{account_name(authorizer), config::active_name}}; + } + trx.actions.emplace_back(std::move(act)); + set_transaction_headers(trx); + if (authorizer) { + trx.sign(get_private_key(account_name(authorizer), "active"), control->get_chain_id()); + } + + return push_transaction(trx); + } + transaction_trace_ptr base_tester::push_action( const account_name& code, const action_name& acttype, const account_name& actor, diff --git a/pipeline.jsonc b/pipeline.jsonc index 3217ea5df1d..0a138350169 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -40,7 +40,7 @@ "test": [ { - "commit": "16074742f8cfd481b029073e6f01bb920a1bad38" // test backwards-compatibility + "commit": "85ac2aae5f52c68b6b1d764b997bb020f3c496f8" // test backwards-compatibility } ] } diff --git a/plugins/net_plugin/CMakeLists.txt b/plugins/net_plugin/CMakeLists.txt index 3b8c1cd7b71..81130708d3b 100644 --- a/plugins/net_plugin/CMakeLists.txt +++ b/plugins/net_plugin/CMakeLists.txt @@ -1,7 +1,11 @@ file(GLOB HEADERS "include/eosio/net_plugin/*.hpp" ) add_library( net_plugin net_plugin.cpp + security_group_manager.cpp ${HEADERS} ) target_link_libraries( net_plugin chain_plugin producer_plugin appbase fc ) target_include_directories( net_plugin PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/appbase/include") + +add_subdirectory(test) + diff --git a/plugins/net_plugin/include/eosio/net_plugin/security_group_manager.hpp b/plugins/net_plugin/include/eosio/net_plugin/security_group_manager.hpp new file mode 100644 index 00000000000..79e89f1ab3a --- /dev/null +++ b/plugins/net_plugin/include/eosio/net_plugin/security_group_manager.hpp @@ -0,0 +1,30 @@ +#pragma once +#include + +#include + +#include + +namespace eosio { + /** \brief Manages the security group cache */ + class security_group_manager { + public: + using participant_list_t = boost::container::flat_set; + /** @brief Provides the current security group 
version */ + auto current_version() const { return version_; } + /** @brief Update the security group participants + * + * @param version The version number for this update + * @param participant_list The list of accounts for the security group + * @return True if an update was performed. + */ + bool update_cache(const uint32_t version, const participant_list_t& participant_list); + /** @brief Determine if a participant is in the security group */ + bool is_in_security_group(chain::account_name participant) const { + return cache_.empty() || (cache_.find(participant) != cache_.end()); + } + private: + uint32_t version_ {0}; ///! The security group version + participant_list_t cache_; ///! Cache of participants in the security group + }; +} diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 198a97b700c..78f1e11e68f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -24,11 +25,17 @@ #include #include #include +#include +#include +#include #include #include +#include using namespace eosio::chain::plugin_interface; +using namespace boost::asio; +namespace bfs = boost::filesystem; namespace eosio { static appbase::abstract_plugin& _net_plugin = app().register_plugin(); @@ -38,6 +45,7 @@ namespace eosio { using boost::asio::ip::tcp; using boost::asio::ip::address_v4; using boost::asio::ip::host_name; + using boost::system::error_code; using boost::multi_index_container; using fc::time_point; @@ -51,6 +59,8 @@ namespace eosio { using connection_wptr = std::weak_ptr; using io_work_t = boost::asio::executor_work_guard; + using ssl_context_ptr = std::unique_ptr; + template void verify_strand_in_this_thread(const Strand& strand, const char* func, int line) { @@ -206,6 +216,8 @@ namespace eosio { class net_plugin_impl : public std::enable_shared_from_this { public: unique_ptr acceptor; + ssl_context_ptr ssl_context; + bool 
ssl_enabled; std::atomic current_connection_id{0}; unique_ptr< sync_manager > sync_master; @@ -254,8 +266,9 @@ namespace eosio { bool use_socket_read_watermark = false; /** @} */ - mutable std::shared_mutex connections_mtx; + mutable std::shared_mutex connections_mtx; // protects both connections and security_group std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection + security_group_manager security_group; std::mutex connector_check_timer_mtx; unique_ptr connector_check_timer; @@ -336,6 +349,18 @@ namespace eosio { constexpr static uint16_t to_protocol_version(uint16_t v); connection_ptr find_connection(const string& host)const; // must call with held mutex + /** \brief Update security group information + * + * Retrieves the security group information from the indicated block and + * updates the cached security group information. If a participant is + * removed from the cached security group, it is marked as non-participating + * and some messages are not sent. 
+ * + * \param bs The block_state containing the security group update + */ + void update_security_group(const block_state_ptr& bs); + + void init_ssl_context(const std::string& cert, const std::string& pkey, const std::string& ca); }; const fc::string logger_name("net_plugin_impl"); @@ -573,6 +598,48 @@ namespace eosio { block_status_monitor& operator=( block_status_monitor&& ) = delete; }; + struct flex_socket { + using socket_ptr = std::shared_ptr; + using ssl_stream = ssl::stream; + using ssl_socket_ptr = std::shared_ptr; + using socket_variant = std::variant; + + private: + socket_variant socket; + public: + flex_socket() { + reset_socket(); + } + + inline tcp::socket::lowest_layer_type& raw_socket() { + if (my_impl->ssl_enabled){ + return ssl_socket()->lowest_layer(); + } + return tcp_socket()->lowest_layer(); + } + + inline socket_ptr& tcp_socket() { + return std::get(socket); + } + + inline ssl_socket_ptr& ssl_socket() { + return std::get(socket); + } + + void reset_socket() { + + if (my_impl->ssl_enabled) { + socket = ssl_socket_ptr{}; + EOS_ASSERT(my_impl->ssl_context, ssl_configuration_error, "ssl context is empty"); + ssl_socket().reset( new ssl_stream{my_impl->thread_pool->get_executor(), *my_impl->ssl_context} ); + } + else { + socket = socket_ptr{}; + tcp_socket().reset( new tcp::socket( my_impl->thread_pool->get_executor() ) ); + } + } + }; + class connection : public std::enable_shared_from_this { public: explicit connection( string endpoint ); @@ -580,7 +647,10 @@ namespace eosio { ~connection() {} - bool start_session(); + //this function calls callback synchronously in case of non-ssl connection + //and asynchronously in case of ssl after asynchronous ssl handshake + template + void start_session(bool server, _Callback callback); bool socket_is_open() const { return socket_open.load(); } // thread safe, atomic const string& peer_address() const { return peer_addr; } // thread safe, const @@ -610,10 +680,9 @@ namespace eosio { }; std::atomic 
connection_type{both}; - public: + flex_socket socket; // only accessed through strand after construction boost::asio::io_context::strand strand; - std::shared_ptr socket; // only accessed through strand after construction fc::message_buffer<1024*1024> pending_message_buffer; std::atomic outstanding_read_bytes{0}; // accessed only from strand threads @@ -789,6 +858,118 @@ namespace eosio { ( "_lport", local_endpoint_port ); return mvo; } + + // security group management + // + private: + std::optional participant_name_; + std::atomic participating_{true}; + + public: + /** @brief Returns the optional name of the participant */ + inline auto participant_name() { return participant_name_; } + /** @brief Set the participating status */ + inline void set_participating(bool status) { + if (my_impl->ssl_enabled) + participating_.store(status, std::memory_order_relaxed); + } + /** returns true if connection is in the security group */ + inline bool is_participating() const { + if (my_impl->ssl_enabled) + return participating_.load(std::memory_order_relaxed); + + return true; + } + inline void setup_participant() { + if(my_impl->ssl_enabled) { + account_name participant = participant_name_ ? 
*participant_name_ : account_name{}; + + auto is_participating = [](account_name part) { + std::shared_lock connection_guard(my_impl->connections_mtx); + return my_impl->security_group.is_in_security_group(part); + }; + + const bool participating = is_participating(participant); + + fc_dlog( logger, "[${peer}] participant: [${name}] participating: [${enabled}]", + ("peer", peer_name())("name", participant_name())("enabled", participating)); + set_participating(participating); + } + } + + std::string certificate_subject(ssl::verify_context &ctx) { + X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle()); + if (!cert) + return ""; + + char buf[256]; + //printing full subject string of rejected certificate + //openssl doesn't recommend this function for use due to lack of support of unicode and issues with '\' + //however there is no single function that can do the same. + //TODO: investigate usage of X509_NAME_print_ex , compare performance + //see http://openssl.6102.n7.nabble.com/openssl-org-1425-Request-make-X509-NAME-oneline-use-same-formatter-as-X509-NAME-print-ex-td33142.html + char* subject_str = X509_NAME_oneline(X509_get_subject_name(cert), buf, std::size(buf)); + + return {subject_str}; + } + + bool process_certificate(bool preverified, ssl::verify_context& ctx) { + peer_dlog(this, "preverified: [${p}] certificate subject: ${s}", ("p", preverified)("s", certificate_subject(ctx))); + //certificate depth means number of certificate issuers verified current certificate + //openssl provides those one by one starting from root certificate + //we don't use CA certificate or intermediate issuers, so skipping those + //we are interested only in the last certificate in the chain, i.e. 
the one that identifies client + auto depth = X509_STORE_CTX_get_error_depth(ctx.native_handle()); + if (depth > 0) { + //preverified is true when certificate matches verification chain, provided via load_verify_file + return preverified; + } + + //return pointer is managed by openssl + X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle()); + if (!cert) { + peer_dlog(this, "X509_STORE_CTX_get_current_cert returned null certificate"); + return false; + } + + extract_participant_name(cert); + + //we keep connection if peer has valid certificate but participant name is not authorized + //however that connection doesn't receive updates + return preverified; + } + + void reset_socket() { + socket.reset_socket(); + if (my_impl->ssl_enabled) { + socket.ssl_socket()->set_verify_callback( + [this](auto bverified, auto& ctx){ + return process_certificate(bverified, ctx); + }); + } + } + + private: + void extract_participant_name(X509* cert) + { + //max subject size is 256 bytes + char buf[256]; + //return pointer is managed by openssl + X509_NAME* subj = X509_get_subject_name(cert); + int length = X509_NAME_get_text_by_NID(subj, NID_organizationName, buf, std::size(buf)); + + if (length > 0) { + std::string organization{buf, (size_t)length}; + if (is_string_valid_name(organization)){ + participant_name_ = account_name{organization}; + //participant name will be later used in start_session to determine if + //participant is participating or not + } + else { + peer_dlog(this, "received unauthorized participant: ${s}", ("s", organization)); + } + } + } }; const string connection::unknown = ""; @@ -867,33 +1048,31 @@ namespace eosio { connection::connection( string endpoint ) : peer_addr( endpoint ), + socket(), strand( my_impl->thread_pool->get_executor() ), - socket( new tcp::socket( my_impl->thread_pool->get_executor() ) ), connection_id( ++my_impl->current_connection_id ), response_expected_timer( my_impl->thread_pool->get_executor() ), last_handshake_recv(), 
last_handshake_sent() { - fc_ilog( logger, "creating connection to ${n}", ("n", endpoint) ); + reset_socket(); + + if (endpoint.empty()) + fc_dlog( logger, "new connection object created" ); + else + fc_ilog( logger, "created connection to ${n}", ("n", endpoint) ); } connection::connection() - : peer_addr(), - strand( my_impl->thread_pool->get_executor() ), - socket( new tcp::socket( my_impl->thread_pool->get_executor() ) ), - connection_id( ++my_impl->current_connection_id ), - response_expected_timer( my_impl->thread_pool->get_executor() ), - last_handshake_recv(), - last_handshake_sent() + : connection(string{}) { - fc_dlog( logger, "new connection object created" ); } void connection::update_endpoints() { boost::system::error_code ec; boost::system::error_code ec2; - auto rep = socket->remote_endpoint(ec); - auto lep = socket->local_endpoint(ec2); + auto rep = socket.raw_socket().remote_endpoint(ec); + auto lep = socket.raw_socket().local_endpoint(ec2); std::lock_guard g_conn( conn_mtx ); remote_endpoint_ip = ec ? unknown : rep.address().to_string(); remote_endpoint_port = ec ? 
unknown : std::to_string(rep.port()); @@ -936,22 +1115,45 @@ namespace eosio { return stat; } - bool connection::start_session() { + template + void connection::start_session(bool server, _Callback callback) { verify_strand_in_this_thread( strand, __func__, __LINE__ ); update_endpoints(); boost::asio::ip::tcp::no_delay nodelay( true ); boost::system::error_code ec; - socket->set_option( nodelay, ec ); + socket.raw_socket().set_option( nodelay, ec ); if( ec ) { fc_elog( logger, "connection failed (set_option) ${peer}: ${e1}", ("peer", peer_name())( "e1", ec.message() ) ); close(); - return false; + callback(false); } else { - fc_dlog( logger, "connected to ${peer}", ("peer", peer_name()) ); - socket_open = true; - start_read_message(); - return true; + auto start_read = [this, callback](){ + socket_open = true; + setup_participant(); + fc_dlog( logger, "connected to ${peer}", ("peer", peer_name()) ); + start_read_message(); + callback(true); + }; + if (my_impl->ssl_enabled) { + socket.ssl_socket()->async_handshake(server ? 
ssl::stream_base::server : ssl::stream_base::client, + boost::asio::bind_executor(strand, + [start_read, c = shared_from_this(), socket=socket, callback](const auto& ec){ + //we use socket just to retain connection shared_ptr just in case it will be deleted + //when we will need it inside start_read_message + std::ignore = socket; + if (ec) { + fc_elog( logger, "ssl handshake error: ${e}", ("e", ec.message()) ); + c->close(); + return; + } + + start_read(); + })); + } + else { + start_read(); + } } } @@ -975,12 +1177,15 @@ namespace eosio { void connection::_close( connection* self, bool reconnect, bool shutdown ) { self->socket_open = false; + self->set_participating(false); boost::system::error_code ec; - if( self->socket->is_open() ) { - self->socket->shutdown( tcp::socket::shutdown_both, ec ); - self->socket->close( ec ); + auto& cur_sock = self->socket.raw_socket(); + if( cur_sock.is_open() ) { + cur_sock.shutdown( tcp::socket::shutdown_both, ec ); + cur_sock.close( ec ); } - self->socket.reset( new tcp::socket( my_impl->thread_pool->get_executor() ) ); + self->reset_socket(); + self->flush_queues(); self->connecting = false; self->syncing = false; @@ -1194,12 +1399,12 @@ namespace eosio { buffer_queue.fill_out_buffer( bufs ); strand.post( [c{std::move(c)}, bufs{std::move(bufs)}]() { - boost::asio::async_write( *c->socket, bufs, - boost::asio::bind_executor( c->strand, [c, socket=c->socket]( boost::system::error_code ec, std::size_t w ) { + //capture by value is const so we need mutable keyword + auto write_lambda = [c, socket=c->socket]( boost::system::error_code ec, std::size_t w ) mutable { try { c->buffer_queue.clear_out_queue(); // May have closed connection and cleared buffer_queue - if( !c->socket_is_open() || socket != c->socket ) { + if( !c->socket_is_open() || &socket.raw_socket() != &c->socket.raw_socket() ) { fc_ilog( logger, "async write socket ${r} before callback: ${p}", ("r", c->socket_is_open() ? 
"changed" : "closed")("p", c->peer_name()) ); c->close(); @@ -1231,7 +1436,14 @@ namespace eosio { } catch( ... ) { fc_elog( logger, "Exception in do_queue_write to ${p}", ("p", c->peer_name()) ); } - })); + }; + + if (my_impl->ssl_enabled){ + boost::asio::async_write( *c->socket.ssl_socket(), bufs, boost::asio::bind_executor( c->strand, write_lambda )); + } + else { + boost::asio::async_write( *c->socket.tcp_socket(), bufs, boost::asio::bind_executor( c->strand, write_lambda )); + } }); } @@ -1435,6 +1647,18 @@ namespace eosio { void connection::enqueue( const net_message& m ) { verify_strand_in_this_thread( strand, __func__, __LINE__ ); + // for tls connections, when the connection is not in the security group + // certain message types will not be transmitted + if(!is_participating()) { + const bool ignore = std::holds_alternative(m) || + std::holds_alternative(m) || + std::holds_alternative(m) || + std::holds_alternative(m) || + std::holds_alternative(m); + if(ignore) { + return; + } + } go_away_reason close_after_send = no_reason; if (std::holds_alternative(m)) { close_after_send = std::get(m).reason; @@ -1446,6 +1670,9 @@ namespace eosio { } void connection::enqueue_block( const signed_block_ptr& b, bool to_sync_queue) { + if(!is_participating()) { + return; + } fc_dlog( logger, "enqueue block ${num}", ("num", b->block_num()) ); verify_strand_in_this_thread( strand, __func__, __LINE__ ); @@ -2185,7 +2412,7 @@ namespace eosio { for_each_block_connection( [this, &id, &bnum, &b, &buff_factory]( auto& cp ) { peer_dlog( cp, "socket_is_open ${s}, connecting ${c}, syncing ${ss}", ("s", cp->socket_is_open())("c", cp->connecting.load())("ss", cp->syncing.load()) ); - if( !cp->current() ) return true; + if( !cp->current() || !cp->is_participating() ) return true; send_buffer_type sb = buff_factory.get_send_buffer( b, cp->protocol_version.load() ); if( !sb ) { peer_wlog( cp, "Sending go away for incomplete block #${n} ${id}...", @@ -2240,7 +2467,7 @@ namespace eosio { 
trx_buffer_factory buff_factory; for_each_connection( [this, &trx, &nts, &buff_factory]( auto& cp ) { - if( cp->is_blocks_only_connection() || !cp->current() ) { + if( cp->is_blocks_only_connection() || !cp->current() || !cp->is_participating() ) { return true; } nts.connection_id = cp->connection_id; @@ -2407,13 +2634,16 @@ namespace eosio { connecting = true; pending_message_buffer.reset(); buffer_queue.clear_out_queue(); - boost::asio::async_connect( *socket, endpoints, + boost::asio::async_connect( socket.raw_socket(), endpoints, boost::asio::bind_executor( strand, - [resolver, c = shared_from_this(), socket=socket]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) { - if( !err && socket->is_open() && socket == c->socket ) { - if( c->start_session() ) { - c->send_handshake(); - } + [resolver, + c = shared_from_this(), + socket=socket]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) mutable { + if( !err && socket.raw_socket().is_open() && &socket.raw_socket() == &c->socket.raw_socket() ) { + c->start_session(false, [c](bool success){ + if (success) + c->send_handshake(); + }); } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->close( false ); @@ -2425,13 +2655,13 @@ namespace eosio { connection_ptr new_connection = std::make_shared(); new_connection->connecting = true; new_connection->strand.post( [this, new_connection = std::move( new_connection )](){ - acceptor->async_accept( *new_connection->socket, - boost::asio::bind_executor( new_connection->strand, [new_connection, socket=new_connection->socket, this]( boost::system::error_code ec ) { + acceptor->async_accept( new_connection->socket.raw_socket(), + boost::asio::bind_executor( new_connection->strand, [new_connection, socket=new_connection->socket, this]( boost::system::error_code ec ) mutable { if( !ec ) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; - const 
auto& paddr_add = socket->remote_endpoint( rec ).address(); + const auto& paddr_add = socket.raw_socket().remote_endpoint( rec ).address(); string paddr_str; if( rec ) { fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message())); @@ -2452,10 +2682,12 @@ namespace eosio { if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count)) { fc_ilog( logger, "Accepted new connection: " + paddr_str ); new_connection->set_heartbeat_timeout( heartbeat_timeout ); - if( new_connection->start_session()) { - std::lock_guard g_unique( connections_mtx ); - connections.insert( new_connection ); - } + new_connection->start_session(true, [c = shared_from_this(), new_connection](bool success) { + if (success) { + std::lock_guard g_unique( c->connections_mtx ); + c->connections.insert( new_connection ); + } + }); } else { if( from_addr >= max_nodes_per_host ) { @@ -2466,8 +2698,8 @@ namespace eosio { } // new_connection never added to connections and start_session not called, lifetime will end boost::system::error_code ec; - socket->shutdown( tcp::socket::shutdown_both, ec ); - socket->close( ec ); + socket.raw_socket().shutdown( tcp::socket::shutdown_both, ec ); + socket.raw_socket().close( ec ); } } } else { @@ -2496,13 +2728,12 @@ namespace eosio { std::size_t minimum_read = std::atomic_exchange( &outstanding_read_bytes, 0 ); minimum_read = minimum_read != 0 ? 
minimum_read : message_header_size; - if (my_impl->use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; std::size_t socket_read_watermark = std::min(minimum_read, max_socket_read_watermark); boost::asio::socket_base::receive_low_watermark read_watermark_opt(socket_read_watermark); boost::system::error_code ec; - socket->set_option( read_watermark_opt, ec ); + socket.raw_socket().set_option( read_watermark_opt, ec ); if( ec ) { fc_elog( logger, "unable to set read watermark ${peer}: ${e1}", ("peer", peer_name())( "e1", ec.message() ) ); } @@ -2523,13 +2754,9 @@ namespace eosio { close( false ); return; } - - boost::asio::async_read( *socket, - pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, - boost::asio::bind_executor( strand, - [conn = shared_from_this(), socket=socket]( boost::system::error_code ec, std::size_t bytes_transferred ) { + auto handle_read = [conn = shared_from_this(), socket=socket]( boost::system::error_code ec, std::size_t bytes_transferred ) mutable { // may have closed connection and cleared pending_message_buffer - if( !conn->socket_is_open() || socket != conn->socket ) return; + if( !conn->socket_is_open() || &socket.raw_socket() != &conn->socket.raw_socket() ) return; bool close_connection = false; try { @@ -2612,7 +2839,19 @@ namespace eosio { fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); conn->close(); } - })); + }; + if (my_impl->ssl_enabled){ + boost::asio::async_read( *socket.ssl_socket(), + pending_message_buffer.get_buffer_sequence_for_boost_async_read(), + completion_handler, + boost::asio::bind_executor( strand, handle_read)); + } + else { + boost::asio::async_read( *socket.tcp_socket(), + pending_message_buffer.get_buffer_sequence_for_boost_async_read(), + completion_handler, + boost::asio::bind_executor( strand, handle_read)); + } } catch (...) 
{ fc_elog( logger, "Undefined exception in start_read_message, closing connection to: ${p}", ("p", peer_name()) ); close(); @@ -3428,10 +3667,48 @@ namespace eosio { } } + // called from application thread + void net_plugin_impl::update_security_group(const block_state_ptr& bs) { + // update cache + // + // Check the version before taking the lock. Since this is the only thread that + // touches the version information, no need to take the lock if the version is + // unchanged. + // + auto& update = bs->get_security_group_info(); + if(security_group.current_version() == update.version) { + return; + } + else { + std::lock_guard connection_guard(my_impl->connections_mtx); + if(!security_group.update_cache(update.version, update.participants)) { + return; + } + } + + std::shared_lock connection_guard(my_impl->connections_mtx); + // update connections + // + fc_dlog( logger, "SecurityGroup changed to version: ${v}", ("v", security_group.current_version()) ); + for(auto& connection : connections) { + const auto& participant = connection->participant_name(); + if(participant && security_group.is_in_security_group(participant.value())) { + if(!connection->is_participating()) { + connection->set_participating(true); + connection->send_handshake(); + } + } + else { + connection->set_participating(false); + } + }; + } + // called from application thread void net_plugin_impl::on_accepted_block(const block_state_ptr& bs) { update_chain_info(); controller& cc = chain_plug->chain(); + update_security_group(bs); dispatcher->strand.post( [this, bs]() { fc_dlog( logger, "signaled accepted_block, blk num = ${num}, id = ${id}", ("num", bs->block_num)("id", bs->id) ); @@ -3563,6 +3840,33 @@ namespace eosio { return chain::signature_type(); } + void net_plugin_impl::init_ssl_context(const std::string& cert, const std::string& pkey, const std::string& ca){ + ssl_context.reset( new ssl::context(ssl::context::sslv23) ); + + //TLS-only connection, no SSL + 
ssl_context->set_options(ssl::context::default_workarounds | + ssl::context::no_sslv2 | + ssl::context::no_sslv3 ); + + error_code ec; + + if (!ca.empty()){ + dlog("using verify file: ${p}", ("p", ca)); + ssl_context->load_verify_file(ca, ec); + EOS_ASSERT(!ec, ssl_configuration_error, "load_verify_file: ${e}", ("e", ec.message())); + + //this ensures peer has trusted certificate. no certificate-less connections + ssl_context->set_verify_mode(ssl::context::verify_peer | ssl::context::verify_fail_if_no_peer_cert); + } + dlog("using private key file: ${p}", ("p", pkey)); + ssl_context->use_private_key_file(pkey, ssl::context::pem, ec); + EOS_ASSERT(!ec, ssl_configuration_error, "use_private_key_file: ${e}", ("e", ec.message())); + + dlog("using chain file: ${p}", ("p", cert)); + ssl_context->use_certificate_chain_file(cert, ec); + EOS_ASSERT(!ec, ssl_configuration_error, "use_certificate_chain_file: ${e}", ("e", ec.message())); + } + // call from connection strand bool connection::populate_handshake( handshake_message& hello, bool force ) { namespace sc = std::chrono; @@ -3642,7 +3946,7 @@ namespace eosio { "Number of worker threads in net_plugin thread pool" ) ( "sync-fetch-span", bpo::value()->default_value(def_sync_fetch_span), "number of blocks to retrieve in a chunk from any individual peer during synchronization") ( "use-socket-read-watermark", bpo::value()->default_value(false), "Enable experimental socket read watermark optimization") - ( "peer-log-format", bpo::value()->default_value( "[\"${_name}\" ${_ip}:${_port}]" ), + ( "peer-log-format", bpo::value()->default_value( "[\"${_name}\" ${_ip}:${_port}] " ), "The string used to format peers when logging messages about them. 
Variables are escaped with ${}.\n" "Available Variables:\n" " _name \tself-reported name\n\n" @@ -3653,7 +3957,9 @@ " _lip \tlocal IP address connected to peer\n\n" " _lport \tlocal port number connected to peer\n\n") ( "p2p-keepalive-interval-ms", bpo::value()->default_value(def_keepalive_interval), "peer heartbeat keepalive message interval in milliseconds") - + ( "p2p-tls-security-group-ca-file", bpo::value(), "Certificate Authority's certificate file used for verifying peers TLS connection when security groups feature enabled" ) + ( "p2p-tls-own-certificate-file", bpo::value(), "Certificate file that will be used to authenticate running node if TLS is enabled") + ( "p2p-tls-private-key-file", bpo::value(), "Private key file that is used in conjunction with p2p-tls-own-certificate-file for server authorization in TLS connection. Together p2p-tls-private-key-file + p2p-tls-own-certificate-file automatically enables TLS-only connection for peers.") ; } @@ -3762,6 +4068,38 @@ my->chain_plug->enable_accept_transactions(); } + //if we have certificate option then TLS must be enabled + const bool tls_own_certificate_file = options.count("p2p-tls-own-certificate-file"); + EOS_ASSERT(tls_own_certificate_file == options.count("p2p-tls-private-key-file"), + ssl_incomplete_configuration, + "\"p2p-tls-own-certificate-file\" and \"p2p-tls-private-key-file\" either both need to be provided " + "or neither of them provided."); + EOS_ASSERT(!options.count("p2p-tls-security-group-ca-file") || tls_own_certificate_file, + ssl_incomplete_configuration, + "\"p2p-tls-security-group-ca-file\" cannot be provided without \"p2p-tls-own-certificate-file\" and \"p2p-tls-private-key-file\"."); + if ( tls_own_certificate_file ) { + auto certificate = options["p2p-tls-own-certificate-file"].as(); + auto pkey = options["p2p-tls-private-key-file"].as(); + auto ca_cert = options["p2p-tls-security-group-ca-file"].as(); + auto relative_to_absolute = 
[](bfs::path& file) { + if( file.is_relative()) { + file = bfs::current_path() / file; + } + }; + relative_to_absolute(certificate); + relative_to_absolute(pkey); + relative_to_absolute(ca_cert); + + EOS_ASSERT(fc::is_regular_file(certificate), ssl_incomplete_configuration, "p2p-tls-own-certificate-file doesn't contain regular file: ${p}", ("p", certificate.generic_string())); + EOS_ASSERT(fc::is_regular_file(pkey), ssl_incomplete_configuration, "p2p-tls-private-key-file doesn't contain regular file: ${p}", ("p", pkey.generic_string())); + my->ssl_enabled = true; + if (!ca_cert.empty()){ + EOS_ASSERT(fc::is_regular_file(ca_cert), ssl_incomplete_configuration, "p2p-tls-security-group-ca-file doesn't contain regular file: ${p}", ("p", ca_cert.generic_string())); + } + + my->init_ssl_context(certificate.generic_string(), pkey.generic_string(), ca_cert.generic_string()); + } + } FC_LOG_AND_RETHROW() } diff --git a/plugins/net_plugin/security_group_manager.cpp b/plugins/net_plugin/security_group_manager.cpp new file mode 100644 index 00000000000..30dbcc28328 --- /dev/null +++ b/plugins/net_plugin/security_group_manager.cpp @@ -0,0 +1,16 @@ +#include + +#include + +namespace eosio { + bool security_group_manager::update_cache(const uint32_t version, const participant_list_t& participant_list) { + if(version == version_) + return false; + EOS_ASSERT(version == version_ +1, eosio::chain::plugin_exception, + "The active security group version should only ever increase by one. 
Current version: " + "${current}, new version: ${new}", ("current", version_)("new", version)); + version_ = version; + cache_ = participant_list; + return true; + } +} diff --git a/plugins/net_plugin/test/CMakeLists.txt b/plugins/net_plugin/test/CMakeLists.txt new file mode 100644 index 00000000000..9b19540f673 --- /dev/null +++ b/plugins/net_plugin/test/CMakeLists.txt @@ -0,0 +1,24 @@ +file(GLOB UNIT_TESTS "*.cpp") + +add_executable( net_plugin_tests ${UNIT_TESTS} ) + +target_link_libraries( net_plugin_tests + net_plugin + chain_plugin + producer_plugin + appbase + fc + ${PLATFORM_SPECIFIC_LIBS} + eosio_testing + eosio_chain + ) +target_include_directories( net_plugin_tests PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include + ${CMAKE_SOURCE_DIR}/plugins/chain_interface/include + ${CMAKE_SOURCE_DIR}/libraries/appbase/include + ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include + ${CMAKE_SOURCE_DIR}/plugins/chain_plugin/include + ${CMAKE_SOURCE_DIR}/libraries/testing/include + ${CMAKE_SOURCE_DIR}/cmake-build-debug/unittests/include + ) + +add_test(NAME net_plugin_tests COMMAND plugins/net_plugin/test/net_plugin_tests WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/plugins/net_plugin/test/main.cpp b/plugins/net_plugin/test/main.cpp new file mode 100644 index 00000000000..810ec77d511 --- /dev/null +++ b/plugins/net_plugin/test/main.cpp @@ -0,0 +1,25 @@ +#include +#include + +void translate_fc_exception(const fc::exception &e) { + std::cerr << "\033[33m" << e.to_detail_string() << "\033[0m" << std::endl; + BOOST_TEST_FAIL("Caught Unexpected Exception"); +} + +boost::unit_test::test_suite* init_unit_test_suite(int argc, char* argv[]) { + // Turn off logging if --verbose parameter not provided + // To enable, add --verbose to the command line + bool is_verbose = false; + std::string verbose_arg = "--verbose"; + for (int i = 0; i < argc; i++) { + if (verbose_arg == argv[i]) { + is_verbose = true; + break; + } + } + if(!is_verbose) 
fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::off); + + // Register fc::exception translator + boost::unit_test::unit_test_monitor.register_exception_translator(&translate_fc_exception); + return nullptr; +} diff --git a/plugins/net_plugin/test/security_group_manager_tests.cpp b/plugins/net_plugin/test/security_group_manager_tests.cpp new file mode 100644 index 00000000000..7c60fb3a625 --- /dev/null +++ b/plugins/net_plugin/test/security_group_manager_tests.cpp @@ -0,0 +1,116 @@ +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +namespace { + using participant_list_t = boost::container::flat_set; + participant_list_t create_list(std::vector participants) { + participant_list_t participant_list; + for(auto participant : participants) { + participant_list.emplace(participant); + } + return participant_list; + } +} + +BOOST_AUTO_TEST_SUITE(security_group_tests) +using namespace eosio::testing; + +BOOST_AUTO_TEST_CASE(test_initial_population) { + auto populate = create_list({ 1, 2, 3, 4, 5, 6}); + eosio::security_group_manager manager; + BOOST_REQUIRE(manager.update_cache(1, populate)); + BOOST_REQUIRE(!manager.update_cache(1, populate)); + + for(auto participant : populate) { + BOOST_REQUIRE(manager.is_in_security_group(participant)); + } +} + +BOOST_AUTO_TEST_CASE(test_version) { + eosio::security_group_manager manager; + BOOST_REQUIRE(manager.current_version() == 0); + + auto populate = create_list({ 1, 2, 3, 4, 5, 6}); + BOOST_REQUIRE(manager.update_cache(1, populate)); + BOOST_REQUIRE(manager.current_version() == 1); +} + +BOOST_AUTO_TEST_CASE(test_remove_all) { + auto populate = create_list({1, 2, 3, 4, 5, 6}); + eosio::security_group_manager manager; + manager.update_cache(1, populate); + + participant_list_t clear; + BOOST_REQUIRE(manager.update_cache(2, clear)); + + for(auto participant : populate) { + BOOST_REQUIRE(manager.is_in_security_group(participant)); + } +} + 
+BOOST_AUTO_TEST_CASE(test_add_only) { + auto populate = create_list({1, 2, 3, 4, 5, 6}); + eosio::security_group_manager manager; + manager.update_cache(1, populate); + + auto add = create_list({7, 8, 9}); + for(auto participant : add) { + BOOST_REQUIRE(!manager.is_in_security_group(participant)); + } + + populate.insert(add.begin(), add.end()); + manager.update_cache(2, populate); + for(auto participant : populate) { + BOOST_REQUIRE(manager.is_in_security_group(participant)); + } +} + +BOOST_AUTO_TEST_CASE(test_remove_only) { + auto populate = create_list({1, 2, 3, 4, 5, 6}); + eosio::security_group_manager manager; + manager.update_cache(1, populate); + + auto update = create_list({2, 4, 6}); + manager.update_cache(2, update); + + auto removed = create_list({1, 3, 5}); + for(auto participant : removed) { + BOOST_REQUIRE(!manager.is_in_security_group(participant)); + } + + for (auto participant : update) { + BOOST_REQUIRE(manager.is_in_security_group(participant)); + } +} + +BOOST_AUTO_TEST_CASE(test_update) { + auto populate = create_list({1, 2, 3, 4, 5, 6}); + eosio::security_group_manager manager; + manager.update_cache(1, populate); + + auto update = create_list({2, 4, 6, 7, 8, 9}); + manager.update_cache(2, update); + + auto removed = create_list({1, 3, 5}); + for(auto participant : removed) { + BOOST_REQUIRE(!manager.is_in_security_group(participant)); + } + + auto added = create_list({7, 8, 9}); + for (auto participant : added) { + BOOST_REQUIRE(manager.is_in_security_group(participant)); + } +} +BOOST_AUTO_TEST_SUITE_END() diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 111cb246176..74a6e193920 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -46,6 +46,7 @@ using namespace eosio::launcher::config; const string block_dir = "blocks"; const string shared_mem_dir = "state"; +const int bios_node_num = -1; struct local_identity { vector addrs; @@ -403,8 +404,8 @@ struct launcher_def { 
bfs::path data_dir_base; bool skip_transaction_signatures = false; string eosd_extra_args; - std::map specific_nodeos_args; - std::map specific_nodeos_installation_paths; + std::map specific_nodeos_args; + std::map specific_nodeos_installation_paths; testnet_def network; string gelf_endpoint; vector aliases; @@ -486,9 +487,9 @@ launcher_def::set_options (bpo::options_description &cfg) { ("genesis,g",bpo::value()->default_value("./genesis.json"),"set the path to genesis.json") ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), (string(node_executable_name) + " does not require transaction signatures.").c_str()) (node_executable_name, bpo::value(&eosd_extra_args), ("forward " + string(node_executable_name) + " command line argument(s) to each instance of " + string(node_executable_name) + ", enclose arg(s) in quotes").c_str()) - ("specific-num", bpo::value>()->composing(), ("forward " + string(node_executable_name) + " command line argument(s) (using \"--specific-" + string(node_executable_name) + "\" flag) to this specific instance of " + string(node_executable_name) + ". This parameter can be entered multiple times and requires a paired \"--specific-" + string(node_executable_name) +"\" flag each time it is used").c_str()) + ("specific-num", bpo::value>()->composing(), ("forward " + string(node_executable_name) + " command line argument(s) (using \"--specific-" + string(node_executable_name) + "\" flag) to this specific instance of " + string(node_executable_name) + ". This parameter can be entered multiple times and requires a paired \"--specific-" + string(node_executable_name) +"\" flag each time it is used. 
Use -1 for bios node.").c_str()) (("specific-" + string(node_executable_name)).c_str(), bpo::value>()->composing(), ("forward " + string(node_executable_name) + " command line argument(s) to its paired specific instance of " + string(node_executable_name) + "(using \"--specific-num\"), enclose arg(s) in quotes").c_str()) - ("spcfc-inst-num", bpo::value>()->composing(), ("Specify a specific version installation path (using \"--spcfc-inst-"+ string(node_executable_name) + "\" flag) for launching this specific instance of " + string(node_executable_name) + ". This parameter can be entered multiple times and requires a paired \"--spcfc-inst-" + string(node_executable_name) + "\" flag each time it is used").c_str()) + ("spcfc-inst-num", bpo::value>()->composing(), ("Specify a specific version installation path (using \"--spcfc-inst-"+ string(node_executable_name) + "\" flag) for launching this specific instance of " + string(node_executable_name) + ". This parameter can be entered multiple times and requires a paired \"--spcfc-inst-" + string(node_executable_name) + "\" flag each time it is used. 
Use -1 for bios node.").c_str()) (("spcfc-inst-" + string(node_executable_name)).c_str(), bpo::value>()->composing(), ("Provide a specific version installation path to its paired specific instance of " + string(node_executable_name) + "(using \"--spcfc-inst-num\")").c_str()) ("delay,d",bpo::value(&start_delay)->default_value(0),"seconds delay before starting each node after the first") ("boot",bpo::bool_switch(&boot)->default_value(false),"After deploying the nodes and generating a boot script, invoke it.") @@ -514,9 +515,9 @@ inline enum_type& operator|=(enum_type&lhs, const enum_type& rhs) } template -void retrieve_paired_array_parameters (const variables_map &vmap, const std::string& num_selector, const std::string& paired_selector, std::map& selector_map) { +void retrieve_paired_array_parameters (const variables_map &vmap, const std::string& num_selector, const std::string& paired_selector, std::map& selector_map) { if (vmap.count(num_selector)) { - const auto specific_nums = vmap[num_selector].as>(); + const auto specific_nums = vmap[num_selector].as>(); const auto specific_args = vmap[paired_selector].as>(); if (specific_nums.size() != specific_args.size()) { cerr << "ERROR: every " << num_selector << " argument must be paired with a " << paired_selector << " argument" << endl; @@ -526,10 +527,15 @@ void retrieve_paired_array_parameters (const variables_map &vmap, const std::str for(uint i = 0; i < specific_nums.size(); ++i) { const auto& num = specific_nums[i]; - if (num >= total_nodes) { + if (num >= static_cast(total_nodes)) { cerr << "\"--" << num_selector << "\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; exit (-1); } + else if (num < bios_node_num) { + cerr << "\"--" << num_selector << "\" provided value= " << num << " is negative, and the only negative value allowed is " + << bios_node_num << "(bios indicator)" << endl; + exit (-1); + } selector_map[num] = specific_args[i]; } } @@ -623,26 +629,6 @@ 
launcher_def::initialize (const variables_map &vmap) { exit (-1); } - if (vmap.count("specific-num")) { - const auto specific_nums = vmap["specific-num"].as>(); - const auto specific_args = vmap["specific-" + string(node_executable_name)].as>(); - if (specific_nums.size() != specific_args.size()) { - cerr << "ERROR: every specific-num argument must be paired with a specific-" << node_executable_name << " argument" << endl; - exit (-1); - } - // don't include bios - const auto allowed_nums = total_nodes - 1; - for(uint i = 0; i < specific_nums.size(); ++i) - { - const auto& num = specific_nums[i]; - if (num >= allowed_nums) { - cerr << "\"--specific-num\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; - exit (-1); - } - specific_nodeos_args[num] = specific_args[i]; - } - } - char* erd_env_var = getenv ("EOSIO_HOME"); if (erd_env_var == nullptr || std::string(erd_env_var).empty()) { erd_env_var = getenv ("PWD"); @@ -1517,9 +1503,9 @@ launcher_def::launch (eosd_def &instance, string >s) { node_rt_info info; info.remote = !host->is_local(); + const auto node_num = (instance.name == "bios") ? 
bios_node_num : boost::lexical_cast(instance.get_node_num()); string install_path; - if (instance.name != "bios" && !specific_nodeos_installation_paths.empty()) { - const auto node_num = boost::lexical_cast(instance.get_node_num()); + if (!specific_nodeos_installation_paths.empty()) { if (specific_nodeos_installation_paths.count(node_num)) { install_path = specific_nodeos_installation_paths[node_num] + "/"; } @@ -1531,8 +1517,7 @@ launcher_def::launch (eosd_def &instance, string >s) { if (!eosd_extra_args.empty()) { eosdcmd += eosd_extra_args + " "; } - if (instance.name != "bios" && !specific_nodeos_args.empty()) { - const auto node_num = boost::lexical_cast(instance.get_node_num()); + if (!specific_nodeos_args.empty()) { if (specific_nodeos_args.count(node_num)) { eosdcmd += specific_nodeos_args[node_num] + " "; } diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 7e0d3500f65..6597c735808 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -17,6 +17,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/testUtils.py ${CMAKE_CURRENT_BINARY_D configure_file(${CMAKE_CURRENT_SOURCE_DIR}/WalletMgr.py ${CMAKE_CURRENT_BINARY_DIR}/WalletMgr.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Node.py ${CMAKE_CURRENT_BINARY_DIR}/Node.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Cluster.py ${CMAKE_CURRENT_BINARY_DIR}/Cluster.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/SecurityGroup.py ${CMAKE_CURRENT_BINARY_DIR}/SecurityGroup.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/TestHelper.py ${CMAKE_CURRENT_BINARY_DIR}/TestHelper.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_tests/dawn_515/test.sh ${CMAKE_CURRENT_BINARY_DIR}/p2p_tests/dawn_515/test.sh COPYONLY) @@ -62,6 +63,10 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_filter.wasm ${CMAKE_CURRENT_BINA configure_file(${CMAKE_CURRENT_SOURCE_DIR}/trace_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/trace_plugin_test.py COPYONLY) 
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_contrl_c_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_contrl_c_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/blockvault_tests.py ${CMAKE_CURRENT_BINARY_DIR}/blockvault_tests.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/generate-certificates.sh ${CMAKE_CURRENT_BINARY_DIR}/generate-certificates.sh COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/privacy_startup_network.py ${CMAKE_CURRENT_BINARY_DIR}/privacy_startup_network.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/privacy_simple_network.py ${CMAKE_CURRENT_BINARY_DIR}/privacy_simple_network.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/privacy_tls_test.py ${CMAKE_CURRENT_BINARY_DIR}/privacy_tls_test.py COPYONLY) #To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. plugin_test -- --verbose add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output) @@ -118,6 +123,12 @@ add_test(NAME light_validation_sync_test COMMAND tests/light_validation_sync_tes set_property(TEST light_validation_sync_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME eosio_blocklog_prune_test COMMAND tests/eosio_blocklog_prune_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST eosio_blocklog_prune_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME privacy_startup_network COMMAND tests/privacy_startup_network.py -p 2 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST privacy_startup_network PROPERTY LABELS nonparallelizable_tests) +add_test(NAME privacy_simple_network COMMAND tests/privacy_simple_network.py -p 2 -n 3 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST privacy_simple_network PROPERTY LABELS nonparallelizable_tests) +add_test(NAME privacy_tls_test COMMAND tests/privacy_tls_test.py -v --dump-error-detail WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) +set_property(TEST privacy_tls_test PROPERTY LABELS nonparallelizable_tests) # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/Cluster.py b/tests/Cluster.py index bfd7e0e0bb2..1b4ad093fbd 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -18,6 +18,7 @@ from testUtils import BlockLogAction from Node import BlockType from Node import Node +from SecurityGroup import SecurityGroup from WalletMgr import WalletMgr # Protocol Feature Setup Policy @@ -121,6 +122,8 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.alternateVersionLabels=Cluster.__defaultAlternateVersionLabels() self.biosNode = None + self.securityGroupEnabled = False + def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): self.__chainSyncStrategy=self.__chainSyncStrategies.get(chainSyncStrategy) @@ -159,6 +162,46 @@ def setAlternateVersionLabels(self, file): self.alternateVersionLabels[label]=path if Utils.Debug: Utils.Print("Version label \"%s\" maps to \"%s\"" % (label, path)) + @staticmethod + def generateCertificates(directoryName, certNumber): + """ + Generates TLS certificates + directoryName: directory where to save certificates, it will be created in Utils.ConfigDir + """ + #path is relative + privacyDir=os.path.join(Utils.ConfigDir, directoryName) + if not os.path.isdir(privacyDir): + if Utils.Debug: Utils.Print("creating dir {} in dir: {}".format(privacyDir, os.getcwd())) + os.mkdir(privacyDir) + + old_cwd=os.getcwd() + os.chdir(privacyDir) + if Utils.Debug: Utils.Print("change to dir: {}".format(os.getcwd())) + genCertScript=os.path.join(old_cwd, "tests", "generate-certificates.sh") + cmd = "{} --days 1 --CA-org Block.one --CA-CN test-domain --org-mask node{{NUMBER}} --cn-mask test-domain{{NUMBER}} --group-size {} --use-RSA".format(genCertScript, certNumber) + rtn = 
Utils.runCmdReturnStr(cmd, silentErrors=False) + + with open("generate.log", 'w') as f: + f.write("executed cmd: {}".format(cmd)) + f.write("=========================") + f.write("OUTPUT") + f.write("=========================") + f.write("{}".format(rtn)) + os.chdir(old_cwd) + if Utils.Debug: Utils.Print("changed back to dir: {}".format(os.getcwd())) + + @staticmethod + def getPrivacyArguments(privacyDir, index): + """ + Generates TLS arguments for nodeos + """ + privacyDir=os.path.join(Utils.ConfigDir, privacyDir) + participantName = Node.participantName(index+1) + certAuth = os.path.join(privacyDir, "CA_cert.pem") + nodeCert = os.path.join(privacyDir, "{}.crt".format(participantName)) + nodeKey = os.path.join(privacyDir, "{}_key.pem".format(participantName)) + return "--p2p-tls-own-certificate-file {} --p2p-tls-private-key-file {} --p2p-tls-security-group-ca-file {}".format(nodeCert, nodeKey, certAuth) + # launch local nodes and set self.nodes # pylint: disable=too-many-locals # pylint: disable=too-many-return-statements @@ -166,7 +209,8 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-statements def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, sharedProducers=0, extraNodeosArgs="", useBiosBootFile=True, specificExtraNodeosArgs=None, onlySetProds=False, - pfSetupPolicy=PFSetupPolicy.FULL, alternateVersionLabelsFile=None, associatedNodeLabels=None, loadSystemContract=True, manualProducerNodeConf={}): + pfSetupPolicy=PFSetupPolicy.FULL, alternateVersionLabelsFile=None, associatedNodeLabels=None, loadSystemContract=True, manualProducerNodeConf={}, + configSecurityGroup=False, printInfo=False): """Launch cluster. pnodes: producer nodes count unstartedNodes: non-producer nodes that are configured into the launch, but not started. Should be included in totalNodes. 
@@ -189,8 +233,14 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me associatedNodeLabels: Supply a dictionary of node numbers to use an alternate label for a specific node. loadSystemContract: indicate whether the eosio.system contract should be loaded (setting this to False causes useBiosBootFile to be treated as False) manualProducerNodeConf: additional producer public keys which is not automatically generated by launcher + configSecurityGroup: configure the network for TLS and setup a certificate authority so the security group can be used + printInfo: prints information about cluster """ - assert(isinstance(topo, str)) + if printInfo: + Utils.Print("SERVER: {}".format(self.host)) + Utils.Print("PORT: {}".format(self.port)) + + assert(isinstance(topo, (str,dict))) assert PFSetupPolicy.isValid(pfSetupPolicy) if alternateVersionLabelsFile is not None: assert(isinstance(alternateVersionLabelsFile, str)) @@ -214,6 +264,28 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me if pnodes + unstartedNodes > totalNodes: raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d) + unstartedNodes(%d)." 
% (totalNodes, pnodes, unstartedNodes)) + def insertSpecificExtraNodeosArgs(node, insertStr): + arg = specificExtraNodeosArgs.get(node, "") + specificExtraNodeosArgs[node] = arg + " " + insertStr + + if configSecurityGroup: + self.securityGroupEnabled = True + Cluster.generateCertificates("privacy", totalNodes + 1) + + if specificExtraNodeosArgs is None: + specificExtraNodeosArgs = {} + + for node in range(totalNodes): + arguments = Cluster.getPrivacyArguments("privacy", node) + if Utils.Debug: Utils.Print("adding arguments: {}".format(arguments)) + insertSpecificExtraNodeosArgs(node, arguments) + + arguments = Cluster.getPrivacyArguments("privacy", totalNodes) + if Utils.Debug: Utils.Print("adding arguments: {}".format(arguments)) + biosNodeNum = -1 + insertSpecificExtraNodeosArgs(biosNodeNum, arguments) + + if self.walletMgr is None: self.walletMgr=WalletMgr(True) @@ -254,12 +326,11 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me specificExtraNodeosArgs = {} for node, conf in manualProducerNodeConf.items(): - arg = specificExtraNodeosArgs.get(node, "") account = conf['key'] - arg = arg + " --plugin eosio::producer_plugin --signature-provider {}=KEY:{} ".format(account.ownerPublicKey, account.ownerPrivateKey) + arg = "--plugin eosio::producer_plugin --signature-provider {}=KEY:{} ".format(account.ownerPublicKey, account.ownerPrivateKey) for name in conf['names']: arg = arg + "--producer-name {} ".format(name) - specificExtraNodeosArgs[node] = arg + insertSpecificExtraNodeosArgs(node, arg) Utils.Print("specificExtraNodeosArgs=", specificExtraNodeosArgs) @@ -304,25 +375,66 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me cmdArr.append("--spcfc-inst-nodeos") cmdArr.append(path) - # must be last cmdArr.append before subprocess.call, so that everything is on the command line - # before constructing the shape.json file for "bridge" - if topo=="bridge": - shapeFilePrefix="shape_bridge" - 
shapeFile=shapeFilePrefix+".json" + def createDefaultShapeFile(newFile, cmdArr): cmdArrForOutput=copy.deepcopy(cmdArr) + cmdArrForOutput.append("--shape") + cmdArrForOutput.append("ring") cmdArrForOutput.append("--output") - cmdArrForOutput.append(shapeFile) - s=" ".join(cmdArrForOutput) + cmdArrForOutput.append(newFile) + s=" ".join([("'{0}'".format(element) if (' ' in element) else element) for element in cmdArrForOutput.copy()]) if Utils.Debug: Utils.Print("cmd: %s" % (s)) if 0 != subprocess.call(cmdArrForOutput): - Utils.Print("ERROR: Launcher failed to create shape file \"%s\"." % (shapeFile)) + Utils.Print("ERROR: Launcher failed to create shape file \"{}\".".format(newFile)) return False - Utils.Print("opening %s shape file: %s, current dir: %s" % (topo, shapeFile, os.getcwd())) - f = open(shapeFile, "r") - shapeFileJsonStr = f.read() - f.close() - shapeFileObject = json.loads(shapeFileJsonStr) + Utils.Print("opening shape file: {}, current dir: {}".format(newFile, os.getcwd())) + with open(newFile, 'r') as f: + newFileJsonStr = f.read() + return json.loads(newFileJsonStr) + + testnetPrefix = "testnet_" + def getTestnetNodeNum(nodeName): + p=re.compile(r'^{}(\d+)$'.format(testnetPrefix)) + m=p.match(nodeName) + return int(m.group(1)) + + if isinstance(topo, dict): + if Utils.Debug: Utils.Print("Creating custom shape topology with the following node connections: {}".format(json.dumps(topo, indent=4, sort_keys=True))) + customShapeFile=os.path.join(Utils.ConfigDir, "customShape.json") + customShapeFileObject = createDefaultShapeFile(customShapeFile, cmdArr) + nodeArray = customShapeFileObject["nodes"] + for nodePair in nodeArray: + assert(len(nodePair)==2) + nodeName=nodePair[0] + nodeObject=nodePair[1] + if nodeName == "bios": + continue + nodeNum=getTestnetNodeNum(nodeName) + customShapePeers = [] + if nodeNum not in topo: + if Utils.Debug: Utils.Print("node name: {} was not included in the topo structure: {}".format(nodeName, topo)) + nodeObject["peers"] = 
customShapePeers + continue + peers = topo[nodeNum] + Utils.Print("nodeNum: {}, peers: {}".format(nodeNum, peers)) + for peer in peers: + Utils.Print("peer: {}".format(peer)) + assert(peer < totalNodes) + customShapePeers.append("{}{:02}".format(testnetPrefix, peer)) + nodeObject["peers"] = customShapePeers + + with open(customShapeFile, 'w') as f: + f.write(json.dumps(customShapeFileObject, indent=4, sort_keys=True)) + + cmdArr.append("--shape") + cmdArr.append(customShapeFile) + + # must be last cmdArr.append before subprocess.call, so that everything is on the command line + # before constructing the shape.json file for "bridge" + elif topo=="bridge": + shapeFilePrefix="shape_bridge" + shapeFile=shapeFilePrefix+".json" + shapeFileObject = createDefaultShapeFile(shapeFile, cmdArr) Utils.Print("shapeFileObject=%s" % (shapeFileObject)) # retrieve the nodes, which as a map of node name to node definition, which the fc library prints out as # an array of array, the first level of arrays is the pair entries of the map, the second is an array @@ -351,11 +463,6 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me Utils.Print("producers=%s" % (producers)) shapeFileNodeMap = {} - def getNodeNum(nodeName): - p=re.compile(r'^testnet_(\d+)$') - m=p.match(nodeName) - return int(m.group(1)) - for shapeFileNodePair in shapeFileNodes: assert(len(shapeFileNodePair)==2) nodeName=shapeFileNodePair[0] @@ -365,7 +472,7 @@ def getNodeNum(nodeName): if nodeName=="bios": biosNodeObject=shapeFileNode continue - nodeNum=getNodeNum(nodeName) + nodeNum=getTestnetNodeNum(nodeName) Utils.Print("nodeNum=%d, shapeFileNode=%s" % (nodeNum, shapeFileNode)) assert("producers" in shapeFileNode) shapeFileNodeProds=shapeFileNode["producers"] @@ -456,6 +563,8 @@ def connectGroup(group, producerNodes, bridgeNodes) : if onlyBios: self.nodes=[biosNode] + self.totalNodes = totalNodes + # ensure cluster node are inter-connected by ensuring everyone has block 1 
Utils.Print("Cluster viability smoke test. Validate every cluster node has block 1. ") if not self.waitOnClusterBlockNumSync(1): @@ -474,8 +583,9 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("Bootstrap cluster.") if not loadSystemContract: useBiosBootFile=False #ensure we use Cluster.bootstrap - if onlyBios or not useBiosBootFile: - self.biosNode=self.bootstrap(biosNode, startedNodes, prodCount + sharedProducers, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract, manualProducerNodeConf) + if onlyBios or not useBiosBootFile or configSecurityGroup: + delayProductionTransfer = 35 if configSecurityGroup else None # when TLS delay is analyzed, then this delay and ignoring of useBiosBootFile can be removed + self.biosNode=self.bootstrap(biosNode, startedNodes, prodCount + sharedProducers, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract, manualProducerNodeConf, delayProductionTransfer=delayProductionTransfer) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False @@ -617,16 +727,16 @@ def doNodesHaveBlockNum(nodes, targetBlockNum, blockType, printCount): return ret @staticmethod - def getClientVersion(verbose=False): + def getClientVersion(): """Returns client version (string)""" - p = re.compile(r'^Build version:\s(\w+)\n$') + p = re.compile(r'^v?(.+)\n$') try: cmd="%s version client" % (Utils.EosClientPath) - if verbose: Utils.Print("cmd: %s" % (cmd)) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) response=Utils.checkOutput(cmd.split()) assert(response) assert(isinstance(response, str)) - if verbose: Utils.Print("response: <%s>" % (response)) + if Utils.Debug: Utils.Print("response: <%s>" % (response)) m=p.match(response) if m is None: Utils.Print("ERROR: client version regex mismatch") @@ -949,24 +1059,6 @@ def parseProducerKeys(configFile, nodeName): return producerKeys - @staticmethod - def parseProducers(nodeNum): - """Parse node config file for producers.""" - - 
configFile=Utils.getNodeConfigDir(nodeNum, "config.ini") - if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) - configStr=None - with open(configFile, 'r') as f: - configStr=f.read() - - pattern=r"^\s*producer-name\s*=\W*(\w+)\W*$" - producerMatches=re.findall(pattern, configStr, re.MULTILINE) - if producerMatches is None: - if Utils.Debug: Utils.Print("Failed to find producers.") - return None - - return producerMatches - @staticmethod def parseClusterKeys(totalNodes): """Parse cluster config file. Updates producer keys data members.""" @@ -1081,7 +1173,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): return biosNode - def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios=False, onlySetProds=False, loadSystemContract=True, manualProducerNodeConf=[]): + def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios=False, onlySetProds=False, loadSystemContract=True, manualProducerNodeConf=[], delayProductionTransfer=None): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" @@ -1211,6 +1303,10 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli if Utils.Debug: Utils.Print("setprods: %s" % (setProdsStr)) Utils.Print("Setting producers: %s." 
% (", ".join(prodNames))) opts="--permission eosio@active" + + if delayProductionTransfer: + time.sleep(delayProductionTransfer) + # pylint: disable=redefined-variable-type trans=biosNode.pushMessage("eosio", "setprods", setProdsStr, opts) if trans is None or not trans[0]: @@ -1412,6 +1508,21 @@ def discoverLocalNodes(self, totalNodes, timeout=None): if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) return nodes + @staticmethod + def extractParticipant(pgrepStr): + pattern = r"\s--p2p-tls-own-certificate-file(?:\s+.*?/|\s+)(node[1-5](?:.[1-5])*).crt" + m = re.search(pattern, pgrepStr, re.MULTILINE) + if m is not None: + Utils.Print("FOUND participant: {}, pgrepStr: {}".format(m.group(1), pgrepStr)) + return m.group(1) + + pattern = r"\s--p2p-tls-own-certificate-file" + m = re.search(pattern, pgrepStr, re.MULTILINE) + if m is not None: + Utils.Print("FOUND participant start: {}".format(m.group(0))) + + return None + # Populate a node matched to actual running instance def discoverLocalNode(self, nodeNum, psOut=None, timeout=None): if psOut is None: @@ -1424,7 +1535,8 @@ def discoverLocalNode(self, nodeNum, psOut=None, timeout=None): if m is None: Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) return None - instance=Node(self.host, self.port + nodeNum, nodeNum, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr) + participant = Cluster.extractParticipant(m.group(2)) + instance=Node(self.host, self.port + nodeNum, nodeNum, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, participant=participant) if Utils.Debug: Utils.Print("Node>", instance) return instance @@ -1437,7 +1549,8 @@ def discoverBiosNode(self, timeout=None): Utils.Print("ERROR: Failed to find %s pid. 
Pattern %s" % (Utils.EosServerName, pattern)) return None else: - return Node(Cluster.__BiosHost, Cluster.__BiosPort, "bios", pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr) + participant = Cluster.extractParticipant(m.group(2)) + return Node(Cluster.__BiosHost, Cluster.__BiosPort, "bios", pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, participant=participant) # Kills a percentange of Eos instances starting from the tail and update eosInstanceInfos state def killSomeEosInstances(self, killCount, killSignalStr=Utils.SigKillTag): @@ -1502,7 +1615,7 @@ def dumpErrorDetails(self): if self.useBiosBootFile: Cluster.dumpErrorDetailImpl(Cluster.__bootlog) - def killall(self, kill=True, silent=True, allInstances=False): + def killall(self, kill=True, silent=True, allInstances=False, cleanup=False): """Kill cluster nodeos instances. allInstances will kill all nodeos instances running on the system.""" signalNum=9 if kill else 15 cmd="%s -k %d" % (Utils.EosLauncherPath, signalNum) @@ -1524,6 +1637,9 @@ def killall(self, kill=True, silent=True, allInstances=False): os.kill(node.pid, signal.SIGKILL) except OSError as _: pass + + if cleanup: + self.cleanup() def bounce(self, nodes, silent=True): """Bounces nodeos instances as indicated by parameter nodes. 
@@ -1735,3 +1851,88 @@ def stripValues(lowestMaxes,greaterThan): while len(lowestMaxes)>0 and compareCommon(blockLogs, blockNameExtensions, first, lowestMaxes[0]): first=lowestMaxes[0]+1 lowestMaxes=stripValues(lowestMaxes,lowestMaxes[0]) + + def getAllNodes(self): + nodes = [] + nodes.extend(self.getNodes()) + if self.biosNode is not None: + nodes.append(self.biosNode) + return nodes + + def reportInfo(self, nodes=None): + Utils.Print("\n\n\n*****************************") + Utils.Print("All Nodes current info:") + if nodes is None: + nodes = self.getAllNodes() + assert isinstance(nodes, list) + for node in nodes: + Utils.Print("Info: {}".format(json.dumps(node.getInfo(), indent=4, sort_keys=True))) + Utils.Print("\n*****************************") + + def verifyInSync(self, sourceNodeNum=0, specificNodes=None): + assert isinstance(sourceNodeNum, int) + desc = "provided " if specificNodes else "" + + if specificNodes is None: + specificNodes = self.getAllNodes() + assert sourceNodeNum < len(specificNodes) + Utils.Print("Ensure all {}nodes are in-sync".format(desc)) + source = specificNodes[sourceNodeNum] + lib = source.getInfo()["last_irreversible_block_num"] + headBlockNum = source.getBlockNum() + headBlock = source.getBlock(headBlockNum) + Utils.Print("headBlock: {}".format(json.dumps(headBlock, indent=4, sort_keys=True))) + headBlockId = headBlock["id"] + error = None + for node in specificNodes: + if node.waitForBlock(headBlockNum, reportInterval = 1) is None: + error = "Node failed to get block number {}. Current node info: {}".format(headBlockNum, json.dumps(node.getInfo(), indent=4, sort_keys=True)) + break + + if node.waitForNextBlock() is None: + error = "Node failed to advance head. Current node info: {}".format(json.dumps(node.getInfo(), indent=4, sort_keys=True)) + break + + if node.getBlock(headBlockId) is None: + error = "Producer node has block number: {}, but it is not id: {}. 
Block: {}".format(headBlockNum, headBlockId, json.dumps(node.getBlock(headBlockNum), indent=4, sort_keys=True)) + break + + if node.waitForBlock(lib, blockType=BlockType.lib) is None: + error = "Producer node is failing to advance its lib ({}) with producer {} ({})".format(node.getInfo()["last_irreversible_block_num"], producerNum, lib) + break + + Utils.Print("Ensure all nodes are advancing lib") + if node.waitForBlock(lib + 1, blockType=BlockType.lib, reportInterval = 1) == None: + error = "Producer node failed to advance lib ahead one block to: {}".format(lib + 1) + break + + if error: + self.reportInfo() + Utils.errorExit(error) + + def getParticipantNum(self, nodeToIdentify): + for num, node in zip(range(len(self.nodes)), self.nodes): + if node == nodeToIdentify: + return num + assert nodeToIdentify == self.biosNode + return self.totalNodes + + def getProducingNodeIndex(self, blockProducer): + featureProdNum = 0 + while featureProdNum < pnodes: + if blockProducer in self.nodes[featureProdNum].getProducers(): + return featureProdNum + + featureProdNum += 1 + + assert blockProducer in self.biosNode.getProducers(), "Checked all nodes but could not find producer: {}".format(blockProducer) + return "bios" + + def getSecurityGroup(self, require=True): + assert not require or self.securityGroupEnabled, "Need to launch Cluster with configSecurityGroup=True to create a SecurityGroup" + if self.securityGroupEnabled: + for node in self.getAllNodes(): + Utils.Print("Creating securityGroup with participant: {}".format(node.getParticipant())) + return SecurityGroup(self.getAllNodes(), self.eosioAccount, defaultNode=self.nodes[0]) + + return None \ No newline at end of file diff --git a/tests/Node.py b/tests/Node.py index 39a06f5ea9f..7417f8d86d3 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -29,7 +29,7 @@ class Node(object): # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-arguments - def __init__(self, host, port, nodeId, pid=None, cmd=None, 
walletMgr=None): + def __init__(self, host, port, nodeId, pid=None, cmd=None, walletMgr=None, participant=None): self.host=host self.port=port self.pid=pid @@ -48,6 +48,8 @@ def __init__(self, host, port, nodeId, pid=None, cmd=None, walletMgr=None): self.transCache={} self.walletMgr=walletMgr self.missingTransaction=False + self.participant=participant + if participant is not None: Utils.Print("Creating participant: {}".format(participant)) self.popenProc=None # initial process is started by launcher, this will only be set on relaunch def eosClientArgs(self): @@ -55,7 +57,11 @@ def eosClientArgs(self): return self.endpointArgs + walletArgs + " " + Utils.MiscEosClientArgs def __str__(self): - return "Host: %s, Port:%d, NodeNum:%s, Pid:%s" % (self.host, self.port, self.nodeId, self.pid) + participantStr = ", Participant: {}".format(self.participant) if self.participant else "" + return "Host: {}, Port:{}, NodeNum: {}, Pid: {}{}".format(self.host, self.port, self.nodeId, self.pid, participantStr) + + def __eq__(self, obj): + return isinstance(obj, Node) and str(self) == str(obj) @staticmethod def validateTransaction(trans): @@ -230,6 +236,10 @@ def getBlock(self, blockNumOrId, silentErrors=False, exitOnError=False): msg="(block %s=%s)" % (numOrId, blockNumOrId) return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) + def getHeadOrLib(self, blockType=BlockType.head, silentErrors=False, exitOnError=False): + blockNum = self.getBlockNum(blockType=blockType) + return self.getBlock(blockNum, silentErrors=silentErrors, exitOnError=exitOnError) + def isBlockPresent(self, blockNum, blockType=BlockType.head): """Does node have head_block_num/last_irreversible_block_num >= blockNum""" assert isinstance(blockNum, int) @@ -488,15 +498,11 @@ def waitForTransFinalization(self, transId, timeout=None): ret=Utils.waitForTruth(lam, timeout) return ret - def waitForNextBlock(self, timeout=WaitSpec.default(), blockType=BlockType.head): 
+ def waitForNextBlock(self, timeout=WaitSpec.default(), blockType=BlockType.head, sleepTime=3): num=self.getBlockNum(blockType=blockType) - if isinstance(timeout, WaitSpec): - timeout = timeout.seconds(num, num+1) - lam = lambda: self.getHeadBlockNum() > num - ret=Utils.waitForTruth(lam, timeout) - return ret + return self.waitForBlock(num+1, timeout=timeout, blockType=blockType, sleepTime=sleepTime) - def waitForBlock(self, blockNum, timeout=WaitSpec.default(), blockType=BlockType.head, reportInterval=None, errorContext=None): + def waitForBlock(self, blockNum, timeout=WaitSpec.default(), blockType=BlockType.head, sleepTime=3, reportInterval=None, errorContext=None): currentBlockNum=self.getBlockNum(blockType=blockType) currentTime=time.time() if isinstance(timeout, WaitSpec): @@ -529,12 +535,12 @@ def __call__(self): currentBlockNum = self.node.getBlockNum(blockType=blockType) self.advanced = False if self.lastBlockNum is None or self.lastBlockNum < currentBlockNum: - self.advanced = True + self.advanced = True if self.lastBlockNum is not None else False elif self.lastBlockNum > currentBlockNum: Utils.Print("waitForBlock is waiting to reach block number: %d and the block number has rolled back from %d to %d." 
% (self.blockNum, self.lastBlockNum, currentBlockNum)) self.lastBlockNum = currentBlockNum - self.passed = self.lastBlockNum > self.blockNum + self.passed = self.lastBlockNum >= self.blockNum return self.passed def __enter__(self): @@ -821,6 +827,7 @@ def getTableColumns(self, contract, scope, table): # returns tuple with indication if transaction was successfully sent and either the transaction or else the exception output def pushMessage(self, account, action, data, opts, silentErrors=False, signatures=None): cmd="%s %s push action -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, action) + Utils.Print("cmd: {}".format(cmd)) cmdArr=cmd.split() # not using __sign_str, since cmdArr messes up the string if signatures is not None: @@ -1195,7 +1202,7 @@ def getBlockProducerByNum(self, blockNum, timeout=None, waitForBlock=True, exitO block=self.getBlock(blockNum, exitOnError=exitOnError) return Node.getBlockAttribute(block, "producer", blockNum, exitOnError=exitOnError) - def getBlockProducer(self, timeout=None, waitForBlock=True, exitOnError=True, blockType=BlockType.head): + def getBlockProducer(self, timeout=None, exitOnError=True, blockType=BlockType.head): blockNum=self.getBlockNum(blockType=blockType) block=self.getBlock(blockNum, exitOnError=exitOnError, blockType=blockType) return Node.getBlockAttribute(block, "producer", blockNum, exitOnError=exitOnError) @@ -1410,12 +1417,23 @@ def getSupportedProtocolFeatureDict(self, excludeDisabled=False, excludeUnactiva break return protocolFeatureDigestDict - def waitForHeadToAdvance(self, blocksToAdvance=1, timeout=None): - currentHead = self.getHeadBlockNum() + def waitForHeadToAdvance(self, blocksToAdvance=1, timeout=None, reportInterval=None): + originalHead = self.getHeadBlockNum() + targetHead = originalHead + blocksToAdvance if timeout is None: timeout = 6 + blocksToAdvance / 2 + count = 0 def isHeadAdvancing(): - return self.getHeadBlockNum() >= currentHead + blocksToAdvance + nonlocal count + 
nonlocal reportInterval + nonlocal targetHead + head = self.getHeadBlockNum() + count += 1 + done = head >= targetHead + if not done and reportInterval and count % reportInterval == 0: + Utils.Print("waitForHeadToAdvance to {}, currently at {}".format(targetHead, head)) + + return done return Utils.waitForTruth(isHeadAdvancing, timeout) def waitForLibToAdvance(self, timeout=30): @@ -1424,16 +1442,76 @@ def isLibAdvancing(): return self.getIrreversibleBlockNum() > currentLib return Utils.waitForTruth(isLibAdvancing, timeout) - # Require producer_api_plugin - def activatePreactivateFeature(self): - protocolFeatureDigestDict = self.getSupportedProtocolFeatureDict() - preactivateFeatureDigest = protocolFeatureDigestDict["PREACTIVATE_FEATURE"]["feature_digest"] - assert preactivateFeatureDigest + def waitUntilBeginningOfProdTurn(self, producerName, timeout=30, sleepTime=0.4): + beginningOfProdTurnHead = 0 + def isDesiredProdTurn(): + nonlocal beginningOfProdTurnHead + beginningOfProdTurnHead = self.getHeadBlockNum() + res = self.getBlock(beginningOfProdTurnHead)["producer"] == producerName and \ + self.getBlock(beginningOfProdTurnHead-1)["producer"] != producerName + return res + ret = Utils.waitForTruth(isDesiredProdTurn, timeout, sleepTime) + assert ret != None, "Expected producer to arrive within {} seconds".format(timeout) + return beginningOfProdTurnHead - self.scheduleProtocolFeatureActivations([preactivateFeatureDigest]) + # Require producer_api_plugin + def activateFeatures(self, features, blocksToAdvance=2): + assert blocksToAdvance >= 0 + featureDigests = [] + for feature in features: + protocolFeatureDigestDict = self.getSupportedProtocolFeatureDict() + assert feature in protocolFeatureDigestDict + featureDigest = protocolFeatureDigestDict[feature]["feature_digest"] + assert featureDigest + featureDigests.append(featureDigest) + + self.scheduleProtocolFeatureActivations(featureDigests) # Wait for the next block to be produced so the scheduled protocol feature 
is activated - assert self.waitForHeadToAdvance(blocksToAdvance=2), print("ERROR: TIMEOUT WAITING FOR PREACTIVATE") + assert self.waitForHeadToAdvance(blocksToAdvance=blocksToAdvance), print("ERROR: TIMEOUT WAITING FOR activating features: {}".format(",".join(features))) + + def activateAndVerifyFeatures(self, features): + self.activateFeatures(features, blocksToAdvance=0) + headBlockNum = self.getBlockNum() + blockNum = headBlockNum + producers = {} + while True: + block = self.getBlock(blockNum) + blockHeaderState = self.getBlockHeaderState(blockNum) + if self.containsFeatures(features, blockHeaderState): + return + + producer = block["producer"] + producers[producer] += 1 + + # feature should be in block for this node's producers, if it is at least 2 blocks after we sent the activate + minBlocksForGuarantee = 2 + assert producer not in self.getProducers() or blockNum - headBlockNum < minBlocksForGuarantee, \ + "It is {} blocks past the block when we activated the features and block num {} was produced by this \ + node, so features should have been set.".format(blockNum - headBlockNum, blockNum) + self.waitForBlock(blockNum + 1) + blockNum = self.getBlockNum() + + + + # Require producer_api_plugin + def activatePreactivateFeature(self): + return self.activateFeatures(["PREACTIVATE_FEATURE"]) + + def containsFeatures(self, features, blockHeaderState=None): + protocolFeatureDict = self.getSupportedProtocolFeatureDict() + if blockHeaderState is None: + blockHeaderState = self.getLatestBlockHeaderState() + for feature in features: + featureDigest = protocolFeatureDict[feature]["feature_digest"] + assert featureDigest, "{}'s Digest should not be empty".format(feature) + activatedProtocolFeatures = blockHeaderState["activated_protocol_features"]["protocol_features"] + if featureDigest not in activatedProtocolFeatures: + return False + return True + + def containsPreactivateFeature(self): + return self.containsFeatures(["PREACTIVATE_FEATURE"]) # Return an array of 
feature digests to be preactivated in a correct order respecting dependencies # Require producer_api_plugin @@ -1469,13 +1547,26 @@ def preactivateAllBuiltinProtocolFeature(self): def getLatestBlockHeaderState(self): headBlockNum = self.getHeadBlockNum() - cmdDesc = "get block {} --header-state".format(headBlockNum) - latestBlockHeaderState = self.processCleosCmd(cmdDesc, cmdDesc) - return latestBlockHeaderState - - def getActivatedProtocolFeatures(self): - latestBlockHeaderState = self.getLatestBlockHeaderState() - return latestBlockHeaderState["activated_protocol_features"]["protocol_features"] + return self.getBlockHeaderState(headBlockNum) + + def getBlockHeaderState(self, blockNum, errorOnNone=True): + cmdDesc = "get block {} --header-state".format(blockNum) + blockHeaderState = self.processCleosCmd(cmdDesc, cmdDesc) + if blockHeaderState is None and errorOnNone: + info = self.getInfo() + lib = info["last_irreversible_block_num"] + head = info["head_block_num"] + assert head == lib + 1, "getLatestBlockHeaderState failed to retrieve the latest block. This should be investigated." + Utils.errorExit("Called getLatestBlockHeaderState, which can only retrieve blocks in reversible database, but the test setup only has one producer so there" + + " is only 1 block in the reversible database. 
Test should be redesigned to aquire this information via another interface.") + return blockHeaderState + + def getActivatedProtocolFeatures(self, blockHeaderState=None): + if blockHeaderState is None: + blockHeaderState = self.getLatestBlockHeaderState() + if "activated_protocol_features" not in blockHeaderState or "protocol_features" not in blockHeaderState["activated_protocol_features"]: + Utils.errorExit("getLatestBlockHeaderState did not return expected output, should contain [\"activated_protocol_features\"][\"protocol_features\"]: {}".format(latestBlockHeaderState)) + return blockHeaderState["activated_protocol_features"]["protocol_features"] def modifyBuiltinPFSubjRestrictions(self, featureCodename, subjectiveRestriction={}): jsonPath = os.path.join(Utils.getNodeConfigDir(self.nodeId), @@ -1529,6 +1620,28 @@ def findStderrFiles(path): files.sort() return files + @staticmethod + def participantName(nodeNumber): + # this function converts number to eos name string + # eos name can have only numbers 1-5 + # e.g. 
0 -> 1, 6 -> 5.1, 12 -> 5.5.2 + def normalizeNumber(number): + assert(number > 0) + if number <= 5: + return str(number) + cnt = number + ret = "5" + while cnt > 5: + cnt = cnt - 5 + if cnt > 5: + ret = "{}.5".format(ret) + else: + ret = "{}.{}".format(ret, cnt) + assert(len(ret) <= 13) + + return ret + return "node{}".format(normalizeNumber(nodeNumber)) + def analyzeProduction(self, specificBlockNum=None, thresholdMs=500): dataDir=Utils.getNodeDataDir(self.nodeId) files=Node.findStderrFiles(dataDir) @@ -1607,3 +1720,27 @@ def waitForIrreversibleBlockProducedBy(self, producer, startBlockNum=0, retry=10 retry = retry - 1 startBlockNum = latestBlockNum + 1 return False + + @staticmethod + def parseProducers(nodeNum): + """Parse node config file for producers.""" + + configFile=Utils.getNodeConfigDir(nodeNum, "config.ini") + if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + configStr=None + with open(configFile, 'r') as f: + configStr=f.read() + + pattern=r"^\s*producer-name\s*=\W*(\w+)\W*$" + producerMatches=re.findall(pattern, configStr, re.MULTILINE) + if producerMatches is None: + if Utils.Debug: Utils.Print("Failed to find producers.") + return None + + return producerMatches + + def getProducers(self): + return Node.parseProducers(self.nodeId) + + def getParticipant(self): + return self.participant diff --git a/tests/SecurityGroup.py b/tests/SecurityGroup.py new file mode 100644 index 00000000000..9a5f94a23d1 --- /dev/null +++ b/tests/SecurityGroup.py @@ -0,0 +1,174 @@ +import copy +import json + +from Node import BlockType +from Node import Node +from Node import ReturnType +from testUtils import Utils + +# pylint: disable=too-many-public-methods +class SecurityGroup(object): + + # pylint: disable=too-many-instance-attributes + # pylint: disable=too-many-arguments + def __init__(self, nonParticipants, contractAccount, defaultNode=None, minAddRemEntriesToPublish=100, activateAndPublish=True): + self.participants = [] + self.contractAccount = 
contractAccount + assert len(nonParticipants) > 0 + # copy over all the running processes + self.nonParticipants = copy.copy(nonParticipants) + if Utils.Debug: Utils.Print("Creating SecurityGroup with the following nonParticipants: []".format(SecurityGroup.createAction(self.nonParticipants))) + def findDefault(nodes): + for node in nodes: + if node.pid: + return node + + Utils.errorExit("SecurityGroup is being constructed with no running nodes, there needs to be at least one running node") + + self.defaultNode = defaultNode if defaultNode else findDefault(self.nonParticipants) + self.publishProcessNum = minAddRemEntriesToPublish + if activateAndPublish: + SecurityGroup.activateFeature(self.defaultNode) + + self.contractTrans = SecurityGroup.publishContract(self.defaultNode, self.contractAccount) + else: + self.contractTrans = None + + self.publishTrans = None + + # activate the SECURITY_GROUP feature + @staticmethod + def activateFeature(node): + feature = "SECURITY_GROUP" + Utils.Print("Activating {} Feature".format(feature)) + node.activateAndVerifyFeatures({feature}) + + featureDict = node.getSupportedProtocolFeatureDict() + Utils.Print("feature dict: {}".format(json.dumps(featureDict, indent=4, sort_keys=True))) + + Utils.Print("{} Feature activated".format(feature)) + + # publish the eosio.secgrp contract + @staticmethod + def publishContract(node, account): + contract = "eosio.secgrp" + Utils.Print("Publish {} contract".format(contract)) + return node.publishContract(account, "unittests/test-contracts/security_group_test/", "{}.wasm".format(contract), "{}.abi".format(contract), waitForTransBlock=True) + + # move the provided nodes from the nonParticipants list to the participants list + def __addParticipants(self, nodes): + if len(nodes) == 0: + return + + if Utils.Debug: Utils.Print("Moving the following: {}, from the nonParticipants list: {}, to the participants list: {}".format(SecurityGroup.createAction(nodes), 
SecurityGroup.createAction(self.nonParticipants), SecurityGroup.createAction(self.participants))) + for node in nodes: + assert node in self.nonParticipants, "Cannot remove {} from nonParticipants list: {}".format(node, SecurityGroup.createAction(self.nonParticipants)) + self.nonParticipants.remove(node) + + self.participants.extend(nodes) + + # move the provided nodes from the participants list to the nonParticipants list + def __remParticipants(self, nodes): + if len(nodes) == 0: + return + + if Utils.Debug: Utils.Print("Moving the following: {}, from the participants list: {}, to the non-participants list: {}".format(SecurityGroup.createAction(nodes), SecurityGroup.createAction(self.participants), SecurityGroup.createAction(self.nonParticipants))) + for node in nodes: + self.participants.remove(node) + + self.nonParticipants.extend(nodes) + + # create the action payload for an add or remove action + @staticmethod + def createAction(nodes): + return None if len(nodes) == 0 else \ + "[[{}]]".format(','.join(['"{}"'.format(node.getParticipant()) for node in nodes])) + + # sends actions to add/remove the provided nodes to/from the network's security group + def editSecurityGroup(self, addNodes=[], removeNodes=[]): + + def copyIfNeeded(nodes): + # doing deep copy in case the passed in list IS one of our lists, which will be adjusted + if nodes is self.participants or nodes is self.nonParticipants: + return copy.copy(nodes) + return nodes + + addAction = SecurityGroup.createAction(addNodes) + self.__addParticipants(copyIfNeeded(addNodes)) + + removeAction = SecurityGroup.createAction(removeNodes) + self.__remParticipants(copyIfNeeded(removeNodes)) + + if addAction: + Utils.Print("adding {} to the security group".format(addAction)) + trans = self.defaultNode.pushMessage(self.contractAccount.name, "add", addAction, "--permission eosio@active") + Utils.Print("add trans: {}".format(json.dumps(trans, indent=4, sort_keys=True))) + + if removeAction: + Utils.Print("removing 
{} from the security group".format(removeAction)) + trans = self.defaultNode.pushMessage(self.contractAccount.name, "remove", removeAction, "--permission eosio@active") + Utils.Print("remove trans: {}".format(json.dumps(trans, indent=4, sort_keys=True))) + + self.publishProcessNum += 1 + self.publishTrans = self.defaultNode.pushMessage(self.contractAccount.name, "publish", "[{}]".format(self.publishProcessNum), "--permission eosio@active")[1] + Utils.Print("publish action trans: {}".format(json.dumps(self.publishTrans, indent=4, sort_keys=True))) + return self.publishTrans + + # verify that the transaction ID is found, and finalized, in every node in the participants list + def verifyParticipantsTransactionFinalized(self, transId): + Utils.Print("Verify participants are in sync") + assert transId + atLeastOne = False + for part in self.participants: + if part.pid is None: + continue + atLeastOne = True + if part.waitForTransFinalization(transId) == None: + Utils.errorExit("Transaction: {}, never finalized".format(transId)) + assert atLeastOne, "None of the participants are currently running, no reason to call verifyParticipantsTransactionFinalized" + + # verify that the block for the transaction ID is never finalized in nonParticipants + def verifyNonParticipants(self, transId): + Utils.Print("Verify non-participants don't receive blocks") + assert transId + publishBlock = self.defaultNode.getBlockIdByTransId(transId) + + # first ensure that enough time has passed that the nonParticipant is not just trailing behind + prodLib = self.defaultNode.getBlockNum(blockType=BlockType.lib) + waitForLib = prodLib + 3 * 12 + if self.defaultNode.waitForBlock(waitForLib, blockType=BlockType.lib) == None: + Utils.errorExit("Producer did not advance lib the expected amount. 
Starting lib: {}, exp lib: {}, actual state: {}".format(prodLib, waitForLib, self.defaultNode.getInfo())) + producerHead = self.defaultNode.getBlockNum() + + # verify each nonParticipant in the list has not advanced its lib to the publish block, since the block that would cause it to become finalized would + # never have been forwarded to a nonParticipant + for nonParticipant in self.nonParticipants: + if nonParticipant.pid is None: + continue + nonParticipantPostLIB = nonParticipant.getBlockNum(blockType=BlockType.lib) + assert nonParticipantPostLIB < publishBlock, "Participants not in security group should not have advanced LIB to {}, but it has advanced to {}".format(publishBlock, nonParticipantPostLIB) + nonParticipantHead = nonParticipant.getBlockNum() + assert nonParticipantHead < producerHead, "Participants (that are not producers themselves) should not advance head to {}, but it has advanced to {}".format(producerHead, nonParticipantHead) + + def getLatestPublishTransId(self): + return Node.getTransId(self.publishTrans) + + # verify that the participants' and nonParticipants' nodes are consistent based on the publish transaction + def verifySecurityGroup(self, publishTrans = None): + if publishTrans is None: + publishTrans = self.publishTrans + publishTransId = Node.getTransId(publishTrans) + self.verifyParticipantsTransactionFinalized(publishTransId) + self.verifyNonParticipants(publishTransId) + + def moveToSecurityGroup(self, index = 0): + assert abs(index) < len(self.nonParticipants) + trans = self.editSecurityGroup([self.nonParticipants[index]]) + Utils.Print("Take a non-participant and make a participant. 
Now there are {} participants and {} non-participants".format(len(self.participants), len(self.nonParticipants))) + return trans + + def removeFromSecurityGroup(self, index = -1): + assert abs(index) < len(self.participants) + trans = self.editSecurityGroup(removeNodes=[self.participants[index]]) + Utils.Print("Take a participant and make a non-participant. Now there are {} participants and {} non-participants".format(len(self.participants), len(self.nonParticipants))) + return trans diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 8b7e4957277..dbc891f9259 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -127,7 +127,7 @@ def create(self, name, accounts=None, exitOnError=True): portStatus="AVAILABLE" else: portStatus="NOT AVAILABLE" - if Utils.Debug: Utils.Print("%s was not accepted, delaying for %d seconds and trying again. port %d is %s. %s - {%s}" % (cmdDesc, delay, self.port, pgrepCmd, psOut)) + if Utils.Debug: Utils.Print("%s was not accepted, delaying for %d seconds and trying again. port %d is %s. %s - {%s}" % (cmdDesc, delay, self.port, portStatus, pgrepCmd, psOut)) time.sleep(delay) continue @@ -153,6 +153,7 @@ def create(self, name, accounts=None, exitOnError=True): if accounts: self.importKeys(accounts,wallet) + Utils.Print("Wallet \"%s\" password=%s." 
% (name, p.encode("utf-8"))) return wallet def importKeys(self, accounts, wallet, ignoreDupKeyWarning=False): diff --git a/tests/generate-certificates.sh b/tests/generate-certificates.sh new file mode 100755 index 00000000000..01d4a6dd565 --- /dev/null +++ b/tests/generate-certificates.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +function parse-args() { +while [[ $# > 0 ]] +do + case "$1" in + --days|-d) + DAYS=${2} + shift + ;; + --CA-org|-o) + CA_ORG=${2} + shift + ;; + --CA-CN|-n) + CA_CN=${2} + shift + ;; + --org-mask|-m) + ORG_MASK=${2} + shift + ;; + --cn-mask|-cm) + CN_MASK=${2} + shift + ;; + --group-size|-s) + GROUP_SIZE=${2} + shift + ;; + --use-EC|-e) + USE_EC=1 + ;; + --use-RSA|-r) + USE_EC=0 + ;; + esac + shift +done +} + +function get-algo-str { + #1 means elliptic curve. for elliptic curve we need parameter generated file + if [[ $1 == 1 ]] + then + echo "ec:ECPARAM.pem" + else + echo "rsa:2048" + fi +} + +function normalized-name { + if [ $2 -le 5 ] + then + NAME=$(sed "s/{NUMBER}/$2/" <<< "$1") + else + CNT=$2 + NUM="5" + while [ $CNT -gt 5 ] + do + CNT=$(( $CNT - 5 )) + if [ $CNT -gt 5 ] + then + NUM="${NUM}.5" + else + NUM="${NUM}.${CNT}" + fi + done + NAME=$(sed "s/{NUMBER}/$NUM/" <<< "$1") + fi + + echo $NAME +} + +if [[ $1 == "--help" ]] +then + echo "Usage:" + echo "--days: Number of days for certificate to expire" + echo "--CA-org: Certificate Authority organization name" + echo "--CA-CN: Certificate Authority common name" + echo "--org-mask: Participant certificates name mask in format of name{number}" + echo "--cn-mask: Participant certificates common name mask in format of name{number}" + echo "--group-size: Number of participants signed by generated CA" + echo "--use-EC: Use EC algorithm. Enabled by default." + echo "--use-RSA: Use RSA algorithm. 
Default is EC" + exit 0 +fi + +#default arguments: +DAYS=1 +USE_EC=1 +CA_ORG="Block.one" +CA_CN="test-domain" +ORG_MASK="node{NUMBER}" +CN_MASK="test-domain{NUMBER}" +GROUP_SIZE=4 + +#overrides default is set +parse-args "${@}" + +if [[ $USE_EC == 1 ]] +then + openssl genpkey -genparam -algorithm ec -pkeyopt ec_paramgen_curve:P-384 -out ECPARAM.pem +fi + +echo "*************************************************" +echo " generating CA_cert.pem " +echo "*************************************************" + +openssl req -newkey $(get-algo-str $USE_EC) -nodes -keyout CA_key.pem -x509 -days ${DAYS} -out CA_cert.pem -subj "/C=US/ST=VA/L=Blocksburg/O=${CA_ORG}/CN=${CA_CN}" +echo "*************************************************" +openssl x509 -in CA_cert.pem -text -noout + +echo "*************************************************" +echo " generating nodes certificates " +echo "*************************************************" + +#client certificate requests + private keys +for n in $(seq 1 $(($GROUP_SIZE)) ) +do + ORG_NAME=$(normalized-name "$ORG_MASK" $n) + CN_NAME=$(normalized-name "$CN_MASK" $n) + echo "*************************************************" + echo "generating certificate for $ORG_NAME / $CN_NAME " + echo "*************************************************" + openssl req -newkey $(get-algo-str $USE_EC) -nodes -keyout "${ORG_NAME}_key.pem" -out "${ORG_NAME}.csr" -subj "/C=US/ST=VA/L=Blockburg/O=${ORG_NAME}/CN=${CN_NAME}" + openssl x509 -req -in "${ORG_NAME}.csr" -CA CA_cert.pem -CAkey CA_key.pem -CAcreateserial -out "${ORG_NAME}.crt" -days ${DAYS} -sha256 + echo "*************************************************" + openssl x509 -in "${ORG_NAME}.crt" -text -noout + echo "" +done \ No newline at end of file diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 06ae59646a9..9baa3b1a7e5 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -207,7 +207,7 @@ def getMinHeadAndLib(prodNodes): 
producers=[] for i in range(0, totalNodes): node=cluster.getNode(i) - node.producers=Cluster.parseProducers(i) + node.producers=node.getProducers() numProducers=len(node.producers) Print("node has producers=%s" % (node.producers)) if numProducers==0: diff --git a/tests/nodeos_high_transaction_test.py b/tests/nodeos_high_transaction_test.py index 6108af9ffa0..8107e973ce5 100755 --- a/tests/nodeos_high_transaction_test.py +++ b/tests/nodeos_high_transaction_test.py @@ -116,7 +116,7 @@ allNodes=cluster.getNodes() for i in range(0, totalNodes): node=allNodes[i] - nodeProducers=Cluster.parseProducers(i) + nodeProducers=node.getProducers() numProducers=len(nodeProducers) Print("node has producers=%s" % (nodeProducers)) if numProducers==0: diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index 384be146fb9..555e9abe512 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -43,24 +43,6 @@ def restartNode(node: Node, chainArg=None, addSwapFlags=None, nodeosPath=None): timeout=5, cachePopen=True, nodeosPath=nodeosPath) assert isRelaunchSuccess, "Fail to relaunch" -def shouldNodeContainPreactivateFeature(node): - preactivateFeatureDigest = node.getSupportedProtocolFeatureDict()["PREACTIVATE_FEATURE"]["feature_digest"] - assert preactivateFeatureDigest, "preactivateFeatureDigest should not be empty" - blockHeaderState = node.getLatestBlockHeaderState() - assert blockHeaderState, "blockHeaderState should not be empty" - activatedProtocolFeatures = blockHeaderState["activated_protocol_features"]["protocol_features"] - return preactivateFeatureDigest in activatedProtocolFeatures - -beginningOfProdTurnHead = 0 -def waitUntilBeginningOfProdTurn(node, producerName, timeout=30, sleepTime=0.4): - def isDesiredProdTurn(): - beginningOfProdTurnHead = node.getHeadBlockNum() - res = node.getBlock(beginningOfProdTurnHead)["producer"] == 
producerName and \ - node.getBlock(beginningOfProdTurnHead-1)["producer"] != producerName - return res - ret = Utils.waitForTruth(isDesiredProdTurn, timeout, sleepTime) - assert ret != None, "Expected producer to arrive within 19 seconds (with 3 other producers)" - def waitForOneRound(): time.sleep(24) # We have 4 producers for this test @@ -176,15 +158,15 @@ def nodeHasBlocks(node, blockIds, blockNums): for i in range(3): Utils.Print("1st node tries activatePreactivateFeature time(s): {}".format(i+1)) # 1st node waits for the start of the production turn each time it tries activatePreactivateFeature() - waitUntilBeginningOfProdTurn(newNodes[0], "defproducera") + beginningOfProdTurnHead = newNodes[0].waitUntilBeginningOfProdTurn("defproducera") newNodes[0].activatePreactivateFeature() - if shouldNodeContainPreactivateFeature(newNodes[0]): + if newNodes[0].containsPreactivateFeature(): break diff = newNodes[0].getInfo()["head_block_num"] - beginningOfProdTurnHead assert diff >= 12, "1st node should contain PREACTIVATE FEATURE since we set it during its production window" - assert shouldNodeContainPreactivateFeature(newNodes[0]), "1st node should contain PREACTIVATE FEATURE" - assert not (shouldNodeContainPreactivateFeature(newNodes[1]) or shouldNodeContainPreactivateFeature(newNodes[2])), \ + assert newNodes[0].containsPreactivateFeature(), "1st node should contain PREACTIVATE FEATURE" + assert not (newNodes[1].containsPreactivateFeature() or newNodes[2].containsPreactivateFeature()), \ "2nd and 3rd node should not contain PREACTIVATE FEATURE" Utils.Print("+++ 2nd, 3rd and 4th node should be in sync, and 1st node should be out of sync +++") assert areNodesInSync([newNodes[1], newNodes[2], oldNode], pauseAll=True, resumeAll=False), "2nd, 3rd and 4th node should be in sync" @@ -192,7 +174,7 @@ def nodeHasBlocks(node, blockIds, blockNums): waitForOneRound() - assert not shouldNodeContainPreactivateFeature(newNodes[0]), "PREACTIVATE_FEATURE should be dropped" + assert 
not newNodes[0].containsPreactivateFeature(), "PREACTIVATE_FEATURE should be dropped" assert areNodesInSync(allNodes), "All nodes should be in sync" # Then we set the earliest_allowed_activation_time of 2nd node and 3rd node with valid value @@ -203,13 +185,13 @@ def nodeHasBlocks(node, blockIds, blockNums): setValidityOfActTimeSubjRestriction(newNodes[1], "PREACTIVATE_FEATURE", True) setValidityOfActTimeSubjRestriction(newNodes[2], "PREACTIVATE_FEATURE", True) - waitUntilBeginningOfProdTurn(newNodes[0], "defproducera") + newNodes[0].waitUntilBeginningOfProdTurn("defproducera") libBeforePreactivation = newNodes[0].getIrreversibleBlockNum() newNodes[0].activatePreactivateFeature() assert areNodesInSync(newNodes, pauseAll=True, resumeAll=False), "New nodes should be in sync" assert not areNodesInSync(allNodes, pauseAll=False, resumeAll=True), "Nodes should not be in sync after preactivation" - for node in newNodes: assert shouldNodeContainPreactivateFeature(node), "New node should contain PREACTIVATE_FEATURE" + for node in newNodes: assert node.containsPreactivateFeature(), "New node should contain PREACTIVATE_FEATURE" activatedBlockNum = newNodes[0].getHeadBlockNum() # The PREACTIVATE_FEATURE should have been activated before or at this block num assert waitUntilBlockBecomeIrr(newNodes[0], activatedBlockNum), \ @@ -236,7 +218,7 @@ def nodeHasBlocks(node, blockIds, blockNums): time.sleep(2) # Give some time to replay assert areNodesInSync(allNodes), "All nodes should be in sync" - assert shouldNodeContainPreactivateFeature(oldNode), "4th node should contain PREACTIVATE_FEATURE" + assert oldNode.containsPreactivateFeature(), "4th node should contain PREACTIVATE_FEATURE" testSuccessful = True finally: diff --git a/tests/nodeos_short_fork_take_over_test.py b/tests/nodeos_short_fork_take_over_test.py index 29aa223aee2..f09b860fb74 100755 --- a/tests/nodeos_short_fork_take_over_test.py +++ b/tests/nodeos_short_fork_take_over_test.py @@ -170,7 +170,7 @@ def 
getMinHeadAndLib(prodNodes): producers=[] for i in range(0, totalNodes): node=cluster.getNode(i) - node.producers=Cluster.parseProducers(i) + node.producers=node.getProducers() numProducers=len(node.producers) Print("node has producers=%s" % (node.producers)) if numProducers==0: diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index a3c157e8027..ae951ef096b 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -202,7 +202,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): for i in range(0, totalNodes): node=cluster.getNode(i) - node.producers=Cluster.parseProducers(i) + node.producers=node.getProducers() for prod in node.producers: trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True) diff --git a/tests/privacy_simple_network.py b/tests/privacy_simple_network.py new file mode 100755 index 00000000000..e9f161ff0fd --- /dev/null +++ b/tests/privacy_simple_network.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python3 + +from testUtils import Account +from testUtils import Utils +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import BlockType +from Node import Node +from Node import ReturnType +from TestHelper import TestHelper + +import decimal +import re +import signal +import json +import os +import time + +############################################################### +# privacy_simple_network +# +# Implements Privacy Test Case #2 (and other misc scenarios). It creates a simple network of mesh connected +# producers and non-producer nodes. It adds the producers to the security group and verifies they are in +# sync and the non-producers are not. Then, one by one it adds the non-producing nodes to the security +# group, and verifies that the correct nodes are in sync and the others are not. It also repeatedly changes +# the security group, not letting it finalize, to verify Test Case #2. 
+# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit +cmdError=Utils.cmdError +from core_symbol import CORE_SYMBOL + +args = TestHelper.parse_args({"--port","-p","-n","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run" + ,"--wallet-port"}) +port=args.port +pnodes=args.p +apiNodes=1 # minimum number of apiNodes that will be used in this test +# bios is also treated as an API node, but doesn't count against totalnodes count +minTotalNodes=pnodes+apiNodes +totalNodes=args.n if args.n >= minTotalNodes else minTotalNodes +if totalNodes > minTotalNodes: + apiNodes += totalNodes - minTotalNodes + +Utils.Debug=args.v +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +onlyBios=False +killAll=args.clean_run +walletPort=args.wallet_port + +cluster=Cluster(host=TestHelper.LOCAL_HOST, port=port, walletd=True) +walletMgr=WalletMgr(True, port=walletPort) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName=Utils.EosWalletName +ClientName="cleos" +timeout = .5 * 12 * pnodes + 60 # time for finalization with 1 producer + 60 seconds padding +Utils.setIrreversibleTimeout(timeout) + +try: + TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) + Print("SERVER: {}".format(TestHelper.LOCAL_HOST)) + Print("PORT: {}".format(port)) + + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + + # adjust prodCount to ensure that lib trails more than 1 block behind head + prodCount = 1 if pnodes > 1 else 2 + + if cluster.launch(pnodes=pnodes, totalNodes=totalNodes, prodCount=prodCount, onlyBios=False, configSecurityGroup=True) is False: + cmdError("launcher") + errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + Utils.Print("\n\n\n\n\nNext Round of Info:") + cluster.reportInfo() + + producers = 
[cluster.getNode(x) for x in range(pnodes) ] + apiNodes = [cluster.getNode(x) for x in range(pnodes, totalNodes)] + apiNodes.append(cluster.biosNode) + Utils.Print("producer participants: [{}]".format(", ".join([x.getParticipant() for x in producers]))) + Utils.Print("api participants: [{}]".format(", ".join([x.getParticipant() for x in apiNodes]))) + + securityGroup = cluster.getSecurityGroup() + cluster.reportInfo() + + Utils.Print("Add all producers to security group") + securityGroup.editSecurityGroup([cluster.getNodes()[x] for x in range(pnodes)]) + securityGroup.verifySecurityGroup() + + cluster.reportInfo() + + Utils.Print("One by one, add each API Node to the security group") + # one by one add each nonParticipant to the security group + while len(securityGroup.nonParticipants) > 0: + securityGroup.moveToSecurityGroup() + securityGroup.verifySecurityGroup() + cluster.reportInfo() + + + removeTrans = None + Utils.Print("One by one, remove each API Node from the security group") + # one by one remove each (original) nonParticipant from the security group + while len(securityGroup.participants) > pnodes: + removeTrans = securityGroup.removeFromSecurityGroup() + securityGroup.verifySecurityGroup() + cluster.reportInfo() + + + Utils.Print("Add all api nodes to security group at the same time") + securityGroup.editSecurityGroup(addNodes=securityGroup.nonParticipants) + securityGroup.verifySecurityGroup() + + cluster.reportInfo() + + # waiting for a block to change (2 blocks since transaction indication could be 1 behind) to prevent duplicate remove transactions + removeBlockNum = Node.getTransBlockNum(removeTrans) + securityGroup.defaultNode.waitForBlock(removeBlockNum + 2) + + # alternate adding/removing participants to ensure the security group doesn't change + initialBlockNum = None + blockNums = [] + lib = None + + # draw out sending each + blocksPerProducer = 12 + # space out each send so that by the time we have removed and added each api node (except one), 
that we have covered more than a + full production window + numberOfSends = len(apiNodes) * 2 - 1 + blocksToWait = (blocksPerProducer + numberOfSends - 1) / numberOfSends + 1 + + def wait(): + securityGroup.defaultNode.waitForBlock(blockNums[-1] + blocksToWait, sleepTime=0.4) + + while len(securityGroup.participants) > pnodes: + publishTrans = securityGroup.removeFromSecurityGroup() + blockNums.append(Node.getTransBlockNum(publishTrans)) + if initialBlockNum is None: + initialBlockNum = blockNums[-1] + wait() + + while True: + publishTrans = securityGroup.moveToSecurityGroup() + blockNums.append(Node.getTransBlockNum(publishTrans)) + if len(securityGroup.nonParticipants) > 1: + wait() + else: + break + + Utils.Print("Adjustments to security group were made in block nums: [{}], verifying no changes till block num: {} is finalized".format(", ".join([str(x) for x in blockNums]), blockNums[-1])) + lastBlockNum = blockNums[0] + for blockNum in blockNums[1:]: + # because the transaction's block number is only a possible block number, assume that the second block really was sent in the next block + if blockNum + 1 - lastBlockNum >= blocksPerProducer: + Utils.Print(("WARNING: Had a gap of {} blocks between publish actions due to sleep not being exact, so if the security group verification fails " + + "it is likely due to that").format(blockNum + 1 - lastBlockNum)) + lastBlockNum = blockNum + securityGroup.verifySecurityGroup() + + cluster.reportInfo() + + Utils.Print("Add all remaining non-participants to security group at the same time, so all api nodes can be removed as one group next") + securityGroup.editSecurityGroup(addNodes=securityGroup.nonParticipants) + securityGroup.verifySecurityGroup() + + cluster.reportInfo() + + Utils.Print("Remove all api nodes from security group at the same time") + securityGroup.editSecurityGroup(removeNodes=apiNodes) + securityGroup.verifySecurityGroup() + + cluster.reportInfo() + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, 
killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exit(0) diff --git a/tests/privacy_startup_network.py b/tests/privacy_startup_network.py new file mode 100755 index 00000000000..3f52c50d451 --- /dev/null +++ b/tests/privacy_startup_network.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python3 + +from testUtils import Account +from testUtils import Utils +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import BlockType +from Node import Node +from Node import ReturnType +from TestHelper import TestHelper + +import copy +import decimal +import re +import signal +import json +import os + +############################################################### +# privacy_startup_network +# +# Script implements Privacy Test Case #1. It pairs up producers with p2p connections with relay nodes +# and the relay nodes connected to at least 2 or more API nodes. The producers and relay nodes are +# added to the security Group and then it validates they are in sync and the api nodes do not receive +# blocks. Then it adds all but one api nodes and verifies they are in sync with producers, then all +# nodes are added and verifies that all nodes are in sync. +# +# NOTE: A relay node is a node that an entity running a producer uses to prevent outside nodes from +# affecting the producing node. An API Node is a node that is setup for the general community to +# connect to and will have more p2p connections. This script doesn't necessarily setup the API nodes +# the way that they are setup in the real world, but it is referencing them this way to explain what +# the test is intending to verify. 
+# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit +cmdError=Utils.cmdError +from core_symbol import CORE_SYMBOL + +args = TestHelper.parse_args({"--port","-p","-n","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run" + ,"--sanity-test","--wallet-port"}) +port=args.port +pnodes=args.p +relayNodes=pnodes # every pnode paired with a relay node +apiNodes=2 # minimum number of apiNodes that will be used in this test +minTotalNodes=pnodes+relayNodes+apiNodes +totalNodes=args.n if args.n >= minTotalNodes else minTotalNodes +if totalNodes >= minTotalNodes: + apiNodes += totalNodes - minTotalNodes +else: + Utils.Print("Requested {} total nodes, but since the minumum number of API nodes is {}, there will be {} total nodes".format(args.n, apiNodes, totalNodes)) + +Utils.Debug=args.v +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +onlyBios=False +killAll=args.clean_run +sanityTest=args.sanity_test +walletPort=args.wallet_port + +cluster=Cluster(host=TestHelper.LOCAL_HOST, port=port, walletd=True) +walletMgr=WalletMgr(True, port=walletPort) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill +dontBootstrap=sanityTest # intent is to limit the scope of the sanity test to just verifying that nodes can be started + +WalletdName=Utils.EosWalletName +ClientName="cleos" +timeout = .5 * 12 * pnodes + 60 # time for finalization with 1 producer + 60 seconds padding +Utils.setIrreversibleTimeout(timeout) + +try: + TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) + Print("SERVER: {}".format(TestHelper.LOCAL_HOST)) + Print("PORT: {}".format(port)) + + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + topo = {} + firstApiNodeNum = pnodes + relayNodes + apiNodeNums = [x for x in range(firstApiNodeNum, totalNodes)] + for producerNum in range(pnodes): + pairedRelayNodeNum = 
pnodes + producerNum + # p2p connection between producer and relay + topo[producerNum] = [pairedRelayNodeNum] + # p2p connections between relays + topo[pairedRelayNodeNum] = [x + pnodes for x in range(pnodes) if x != producerNum] + # p2p connections between relay and all api nodes + topo[pairedRelayNodeNum].extend(apiNodeNums) + Utils.Print("topo: {}".format(json.dumps(topo, indent=4, sort_keys=True))) + + # adjust prodCount to ensure that lib trails more than 1 block behind head + prodCount = 1 if pnodes > 1 else 2 + + if cluster.launch(pnodes=pnodes, totalNodes=totalNodes, prodCount=prodCount, onlyBios=False, dontBootstrap=dontBootstrap, configSecurityGroup=True, topo=topo) is False: + cmdError("launcher") + errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + cluster.biosNode.kill(signal.SIGTERM) + + producers = [cluster.getNode(x) for x in range(pnodes) ] + relays = [cluster.getNode(pnodes + x) for x in range(pnodes) ] + apiNodes = [cluster.getNode(x) for x in apiNodeNums] + + securityGroup = cluster.getSecurityGroup() + cluster.reportInfo() + + Utils.Print("Add all producers and relay nodes to security group") + prodsAndRelays = copy.copy(producers) + prodsAndRelays.extend(relays) + securityGroup.editSecurityGroup(prodsAndRelays) + securityGroup.verifySecurityGroup() + + allButLastApiNodes = apiNodes[:-1] + lastApiNode = [apiNodes[-1]] + + Utils.Print("Add all but last API node and verify they receive blocks and the last API node does not") + securityGroup.editSecurityGroup(addNodes=allButLastApiNodes) + securityGroup.verifySecurityGroup() + + Utils.Print("Add the last API node and verify it receives blocks") + securityGroup.editSecurityGroup(addNodes=lastApiNode) + securityGroup.verifySecurityGroup() + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exit(0) 
diff --git a/tests/privacy_tls_test.py b/tests/privacy_tls_test.py new file mode 100755 index 00000000000..91696a39347 --- /dev/null +++ b/tests/privacy_tls_test.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 + +from testUtils import Account +from testUtils import Utils +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import BlockType +from Node import Node +from Node import ReturnType +from TestHelper import TestHelper + +import decimal +import re +import signal +import json +import os + +def makeRootCertArgs(privacyDir): + privacyDir=os.path.join(Utils.ConfigDir, privacyDir) + certAuth = os.path.join(privacyDir, "CA_cert.pem") + nodeCert = os.path.join(privacyDir, "CA_cert.pem") + nodeKey = os.path.join(privacyDir, "CA_key.pem") + return "--p2p-tls-own-certificate-file {} --p2p-tls-private-key-file {} --p2p-tls-security-group-ca-file {}".format(nodeCert, nodeKey, certAuth) + +def makeWrongPrivateKeyArgs(index): + participantName = Node.participantName(index+1) + certAuth = os.path.join(os.path.join(Utils.ConfigDir, "privacy1"), "CA_cert.pem") + nodeCert = os.path.join(os.path.join(Utils.ConfigDir, "privacy1"), "{}.crt".format(participantName)) + nodeKey = os.path.join(os.path.join(Utils.ConfigDir, "privacy2"), "{}_key.pem".format(participantName)) + return "--p2p-tls-own-certificate-file {} --p2p-tls-private-key-file {} --p2p-tls-security-group-ca-file {}".format(nodeCert, nodeKey, certAuth) + + +############################################################### +# privacy_tls_network +# +# General test for TLS peer to peer connections with different certificates or without certificates at all +# +############################################################### + +Print=Utils.Print + +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v"}) + +pnodes=1 #always one as we testing just connection between peers so we don't need bios setup and need just eosio producer +totalNodes=8 #3 valid and 5 invalid cases, exclusing bios 
+dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +Utils.Debug=args.v + +testSuccessful=False +cluster=Cluster(host=TestHelper.LOCAL_HOST, port=TestHelper.DEFAULT_PORT, walletd=True) +try: + TestHelper.printSystemInfo("BEGIN") + cluster.killall(allInstances=True, cleanup=True) + + #this is for producer and 1 valid node + Cluster.generateCertificates("privacy1", 2) + #those are certificates for invalid cases + Cluster.generateCertificates("privacy2", 2) + + specificExtraNodeosArgs = {} + + #VALID CASES + #producer node + specificExtraNodeosArgs[-1] = Cluster.getPrivacyArguments("privacy1", 0) + #valid network member + specificExtraNodeosArgs[0] = Cluster.getPrivacyArguments("privacy1", 1) + #testing duplicate + specificExtraNodeosArgs[1] = Cluster.getPrivacyArguments("privacy1", 1) + #valid root certificate used as participant + specificExtraNodeosArgs[2] = makeRootCertArgs("privacy1") + + #INVALID CASES + #certificate out of group with same name #1 + specificExtraNodeosArgs[3] = Cluster.getPrivacyArguments("privacy2", 0) + #certificate out of group with same name #2 + specificExtraNodeosArgs[4] = Cluster.getPrivacyArguments("privacy2", 1) + #using invalid self-signed certificate + specificExtraNodeosArgs[5] = makeRootCertArgs("privacy2") + #valid CA and certificate but invalid password + specificExtraNodeosArgs[6] = makeWrongPrivateKeyArgs(1) + #no TLS arguments at all + specificExtraNodeosArgs[7] = "" + + if not cluster.launch(pnodes=pnodes, totalNodes=totalNodes, dontBootstrap=True, configSecurityGroup=False, specificExtraNodeosArgs=specificExtraNodeosArgs, printInfo=True): + Utils.cmdError("launcher") + Utils.errorExit("Failed to stand up eos cluster.") + + cluster.biosNode.waitForLibToAdvance() + + validNodes = [cluster.getNode(x) for x in range(3)] + [cluster.biosNode] + cluster.verifyInSync(specificNodes=validNodes) + if Utils.Debug: + Print("*****************************") + Print(" Valid nodes ") + Print("*****************************") + 
cluster.reportInfo(validNodes) + + invalidNodes = [cluster.getNode(x) for x in range(3, totalNodes)] + for node in invalidNodes: + assert node.getHeadBlockNum() == 1 + if Utils.Debug: + Print("*****************************") + Print(" Invalid nodes ") + Print("*****************************") + cluster.reportInfo(invalidNodes) + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, cluster.walletMgr, testSuccessful, True, True, keepLogs, True, dumpErrorDetails) \ No newline at end of file diff --git a/tests/validate-reflection.py b/tests/validate-reflection.py index 263eda54ce7..c846f08c5b7 100755 --- a/tests/validate-reflection.py +++ b/tests/validate-reflection.py @@ -426,7 +426,7 @@ def next_scope(self, end = None): return new_scope[0] class Namespace(ClassStruct): - namespace_class_pattern = re.compile(r'((?]*>)?)?\s*\{' % (EmptyScope.namespace_str, EmptyScope.struct_str, EmptyScope.class_str, EmptyScope.enum_str), re.MULTILINE | re.DOTALL) + namespace_class_pattern = re.compile(r'((?]*>)?)?\s*\{' % (EmptyScope.namespace_str, EmptyScope.struct_str, EmptyScope.class_str, EmptyScope.enum_str), re.MULTILINE | re.DOTALL) def __init__(self, name, inherit, start, content, parent_scope): assert inherit is None, "namespace %s should not inherit from %s" % (name, inherit) diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index 2b675f43b43..274ce0a8d86 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -59,6 +59,7 @@ namespace eosio { MAKE_READ_WASM_ABI(params_test, params_test, test-contracts) MAKE_READ_WASM_ABI(kv_table_test, kv_table_test, test-contracts) MAKE_READ_WASM_ABI(kv_addr_book, kv_addr_book, test-contracts) + MAKE_READ_WASM_ABI(security_group_test, security_group_test, test-contracts) }; } /// eosio::testing } /// eosio diff --git a/unittests/security_group_tests.cpp b/unittests/security_group_tests.cpp new file mode 100644 index 00000000000..4aeb98df909 --- /dev/null +++ 
b/unittests/security_group_tests.cpp @@ -0,0 +1,630 @@ +#include +#include +#include +#include +#include + + +using boost::container::flat_set; +using eosio::chain::name; + +namespace eosio { namespace chain { + +// block_header_state_v0 should has the same layout with block_header_state except +// the lack of the state_extension data member +struct block_header_state_v0 : public detail::block_header_state_common { + block_id_type id; + signed_block_header header; + detail::schedule_info pending_schedule; + protocol_feature_activation_set_ptr activated_protocol_features; + vector additional_signatures; + + /// this data is redundant with the data stored in header, but it acts as a cache that avoids + /// duplication of work + flat_multimap header_exts; +}; + +struct snapshot_global_property_object_v5 { + std::optional proposed_schedule_block_num; + producer_authority_schedule proposed_schedule; + chain_config configuration; + chain_id_type chain_id; + kv_database_config kv_configuration; + wasm_config wasm_configuration; + + snapshot_global_property_object_v5(const global_property_object& value) + : proposed_schedule_block_num(value.proposed_schedule_block_num) + , proposed_schedule(producer_authority_schedule::from_shared(value.proposed_schedule)) + , configuration(value.configuration) + , chain_id(value.chain_id) + , kv_configuration(value.kv_configuration) + , wasm_configuration(value.wasm_configuration) {} +}; + +bool operator == (const security_group_info_t& lhs, const security_group_info_t& rhs) { + return lhs.version == rhs.version && std::equal(lhs.participants.begin(), lhs.participants.end(), + rhs.participants.begin(), rhs.participants.end()); +} +}} + +FC_REFLECT_DERIVED( eosio::chain::block_header_state_v0, (eosio::chain::detail::block_header_state_common), + (id) + (header) + (pending_schedule) + (activated_protocol_features) + (additional_signatures) +) + +FC_REFLECT(eosio::chain::snapshot_global_property_object_v5, + 
(proposed_schedule_block_num)(proposed_schedule)(configuration)(chain_id)(kv_configuration)(wasm_configuration) + ) + + +BOOST_AUTO_TEST_SUITE(security_group_tests) + +using participants_t = boost::container::flat_set; + +BOOST_AUTO_TEST_CASE(test_unpack_legacy_block_state) { + eosio::testing::tester main; + + using namespace eosio::chain::literals; + + // First we create a valid block with valid transaction + main.create_account("newacc"_n); + auto b = main.produce_block(); + + auto bs = main.control->head_block_state(); + + // pack block_header_state as the legacy format + fc::datastream> out_strm; + fc::raw::pack(out_strm, reinterpret_cast(*bs)); + + BOOST_CHECK_NE(out_strm.storage().size(), 0); + + // make sure we can unpack block_header_state + { + fc::datastream in_strm(out_strm.storage().data(), out_strm.storage().size()); + eosio::chain::versioned_unpack_stream unpack_strm(in_strm, eosio::chain::block_header_state::minimum_snapshot_version_with_state_extension-1); + eosio::chain::block_header_state tmp; + BOOST_CHECK_NO_THROW(fc::raw::unpack(unpack_strm, tmp)); + BOOST_CHECK_EQUAL(bs->id, tmp.id); + BOOST_CHECK_EQUAL(in_strm.remaining(), 0); + } + + // manual pack legacy block_state + fc::raw::pack(out_strm, bs->block); + fc::raw::pack(out_strm, false); + + // make sure we can unpack block_state + { + fc::datastream in_strm(out_strm.storage().data(), out_strm.storage().size()); + eosio::chain::versioned_unpack_stream unpack_strm(in_strm, eosio::chain::block_header_state::minimum_snapshot_version_with_state_extension-1); + eosio::chain::block_state tmp; + BOOST_CHECK_NO_THROW(fc::raw::unpack(unpack_strm, tmp)); + BOOST_CHECK_EQUAL(bs->id, tmp.id); + BOOST_CHECK_EQUAL(bs->block->previous, tmp.block->previous); + BOOST_CHECK_EQUAL(in_strm.remaining(), 0); + } +} + +BOOST_AUTO_TEST_CASE(test_unpack_new_block_state) { + eosio::testing::tester main; + + using namespace eosio::chain::literals; + + // First we create a valid block with valid transaction + 
main.create_account("newacc"_n); + auto b = main.produce_block(); + + auto bs = main.control->head_block_state(); + bs->set_security_group_info( { .version =1, .participants={"adam"_n }} ); + + + // pack block_header_state as the legacy format + fc::datastream> out_strm; + fc::raw::pack(out_strm, *bs); + + BOOST_CHECK_NE(out_strm.storage().size(), 0); + + + // make sure we can unpack block_state + { + fc::datastream in_strm(out_strm.storage().data(), out_strm.storage().size()); + eosio::chain::versioned_unpack_stream unpack_strm(in_strm, eosio::chain::block_header_state::minimum_snapshot_version_with_state_extension); + eosio::chain::block_state tmp; + BOOST_CHECK_NO_THROW(fc::raw::unpack(unpack_strm, tmp)); + BOOST_CHECK_EQUAL(bs->id, tmp.id); + BOOST_CHECK_EQUAL(bs->block->previous, tmp.block->previous); + BOOST_CHECK_EQUAL(bs->get_security_group_info().version, tmp.get_security_group_info().version); + BOOST_TEST(bs->get_security_group_info().participants == tmp.get_security_group_info().participants); + BOOST_CHECK_EQUAL(in_strm.remaining(), 0); + } +} + +BOOST_AUTO_TEST_CASE(test_snapshot_global_property_object) { + eosio::testing::tester main; + + using namespace eosio::chain::literals; + + // First we create a valid block with valid transaction + main.create_account("newacc"_n); + auto b = main.produce_block(); + + auto gpo = main.control->get_global_properties(); + + using row_traits = eosio::chain::detail::snapshot_row_traits; + + { + // pack snapshot_global_property_object as the legacy format + fc::datastream> out_strm; + fc::raw::pack(out_strm, eosio::chain::snapshot_global_property_object_v5(gpo)); + + // make sure we can unpack snapshot_global_property_object + { + fc::datastream in_strm(out_strm.storage().data(), out_strm.storage().size()); + uint32_t version = 5; + eosio::chain::versioned_unpack_stream unpack_strm(in_strm, version); + + eosio::chain::snapshot_global_property_object row; + BOOST_CHECK_NO_THROW(fc::raw::unpack(unpack_strm, row)); + 
BOOST_CHECK_EQUAL(in_strm.remaining(), 0); + BOOST_CHECK_EQUAL(row.chain_id, gpo.chain_id); + } + } + + { + // pack snapshot_global_property_object as the new format + gpo.proposed_security_group_block_num = 10; + gpo.proposed_security_group_participants = {"adam"_n}; + + fc::datastream> out_strm; + fc::raw::pack(out_strm, row_traits::to_snapshot_row(gpo, main.control->db())); + + // make sure we can unpack snapshot_global_property_object + { + fc::datastream in_strm(out_strm.storage().data(), out_strm.storage().size()); + eosio::chain::versioned_unpack_stream unpack_strm(in_strm, eosio::chain::snapshot_global_property_object::minimum_version_with_extension); + + eosio::chain::snapshot_global_property_object row; + BOOST_CHECK_NO_THROW(fc::raw::unpack(unpack_strm, row)); + BOOST_CHECK_EQUAL(in_strm.remaining(), 0); + + BOOST_CHECK_EQUAL(row.chain_id, gpo.chain_id); + std::visit( + [&gpo](const auto& ext) { + BOOST_CHECK_EQUAL(ext.proposed_security_group_block_num, gpo.proposed_security_group_block_num); + flat_set gpo_sg_participants{gpo.proposed_security_group_participants.begin(), + gpo.proposed_security_group_participants.end()}; + BOOST_TEST(ext.proposed_security_group_participants == gpo_sg_participants); + }, + row.extension); + } + } +} + +BOOST_AUTO_TEST_CASE(test_participants_change) { + // Need to create a 2 chain version of this test, but will require using a transaction, + // instead of calling add_security_group_participants directly, so the second chain can + // process it as part of the block + eosio::testing::tester chain; + using namespace eosio::chain::literals; + + chain.create_accounts( {"alice"_n,"bob"_n,"charlie"_n} ); + chain.produce_block(); + + { + const auto& cur_security_group = chain.control->active_security_group(); + BOOST_REQUIRE_EQUAL(cur_security_group.version, 0); + BOOST_REQUIRE_EQUAL(cur_security_group.participants.size(), 0); + } + + participants_t new_participants({"alice"_n, "bob"_n}); + 
chain.control->add_security_group_participants(new_participants); + + BOOST_TEST(chain.control->proposed_security_group_participants() == new_participants); + BOOST_CHECK_EQUAL(chain.control->active_security_group().participants.size() , 0); + BOOST_CHECK(!chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + { + const auto& cur_security_group = chain.control->active_security_group(); + BOOST_REQUIRE_EQUAL(cur_security_group.version, 0); + } + + chain.produce_block(); + + { + const auto& cur_security_group = chain.control->active_security_group(); + BOOST_REQUIRE_EQUAL(cur_security_group.version, 1); + } + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size(), 0); + BOOST_TEST(chain.control->active_security_group().participants == new_participants); + BOOST_CHECK(chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + BOOST_CHECK(!chain.control->in_active_security_group(participants_t{"bob"_n, "charlie"_n})); + + chain.control->remove_security_group_participants({"alice"_n}); + BOOST_TEST(chain.control->proposed_security_group_participants() == participants_t{"bob"_n}); + BOOST_CHECK(chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + { + const auto& cur_security_group = chain.control->active_security_group(); + BOOST_REQUIRE_EQUAL(cur_security_group.version, 1); + } + + chain.produce_block(); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + BOOST_TEST(chain.control->active_security_group().participants == participants_t{"bob"_n}); + BOOST_CHECK(chain.control->in_active_security_group(participants_t{"bob"_n})); + BOOST_CHECK(!chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + { + const auto& cur_security_group = chain.control->active_security_group(); + BOOST_REQUIRE_EQUAL(cur_security_group.version, 2); + } + + chain.produce_block(); + + 
BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + BOOST_TEST(chain.control->active_security_group().participants == participants_t{"bob"_n}); + BOOST_CHECK(chain.control->in_active_security_group(participants_t{"bob"_n})); + + { + const auto& cur_security_group = chain.control->active_security_group(); + BOOST_REQUIRE_EQUAL(cur_security_group.version, 2); + } + +} + +void push_blocks( eosio::testing::tester& from, eosio::testing::tester& to ) { + while( to.control->fork_db_pending_head_block_num() + < from.control->fork_db_pending_head_block_num() ) + { + auto fb = from.control->fetch_block_by_number( to.control->fork_db_pending_head_block_num()+1 ); + to.push_block( fb ); + } +} + +// The webassembly in text format to add security group participants +static const char add_security_group_participants_wast[] = R"=====( +(module + (func $action_data_size (import "env" "action_data_size") (result i32)) + (func $read_action_data (import "env" "read_action_data") (param i32 i32) (result i32)) + (func $add_security_group_participants (import "env" "add_security_group_participants") (param i32 i32)(result i64)) + (memory 1) + (func (export "apply") (param i64 i64 i64) + (local $bytes_remaining i32) + (set_local $bytes_remaining (call $action_data_size)) + (drop (call $read_action_data (i32.const 0) (get_local $bytes_remaining))) + (drop (call $add_security_group_participants (i32.const 0) (get_local $bytes_remaining))) + ) +) +)====="; + +// The webassembly in text format to remove security group participants +static const char remove_security_group_participants_wast[] = R"=====( +(module + (func $action_data_size (import "env" "action_data_size") (result i32)) + (func $read_action_data (import "env" "read_action_data") (param i32 i32) (result i32)) + (func $remove_security_group_participants (import "env" "remove_security_group_participants") (param i32 i32)(result i64)) + (memory 1) + (func (export "apply") (param i64 i64 i64) + 
(local $bytes_remaining i32) + (set_local $bytes_remaining (call $action_data_size)) + (drop (call $read_action_data (i32.const 0) (get_local $bytes_remaining))) + (drop (call $remove_security_group_participants (i32.const 0) (get_local $bytes_remaining))) + ) +) +)====="; + +// The webassembly in text format to assert the given participants are all in active security group +static const char assert_in_security_group_wast[] = R"=====( +(module + (func $action_data_size (import "env" "action_data_size") (result i32)) + (func $read_action_data (import "env" "read_action_data") (param i32 i32) (result i32)) + (func $in_active_security_group (import "env" "in_active_security_group") (param i32 i32)(result i32)) + (func $eosio_assert (import "env" "eosio_assert") (param i32 i32)) + (memory 1) + (func (export "apply") (param i64 i64 i64) + (local $bytes_remaining i32) + (local $in_group i32) + (set_local $bytes_remaining (call $action_data_size)) + (drop (call $read_action_data (i32.const 0) (get_local $bytes_remaining))) + (set_local $in_group (call $in_active_security_group (i32.const 0) (get_local $bytes_remaining))) + (call $eosio_assert (get_local $in_group)(i32.const 512)) + ) + (data (i32.const 512) "in_active_security_group should return true") +) +)====="; + +// The webassembly in text format to assert the given participants are exactly the entire active security group +static const char assert_get_security_group_wast[] = R"=====( +(module + (func $action_data_size (import "env" "action_data_size") (result i32)) + (func $read_action_data (import "env" "read_action_data") (param i32 i32) (result i32)) + (func $get_active_security_group (import "env" "get_active_security_group") (param i32 i32)(result i32)) + (func $eosio_assert (import "env" "eosio_assert") (param i32 i32)) + (func $memcmp (import "env" "memcmp") (param i32 i32 i32) (result i32)) + (memory 1) + (func (export "apply") (param i64 i64 i64) + (local $bytes_remaining i32) + (local $required_bytes i32) 
+ (set_local $bytes_remaining (call $action_data_size)) + (drop (call $read_action_data (i32.const 0) (get_local $bytes_remaining))) + (set_local $required_bytes (call $get_active_security_group (i32.const 0) (i32.const 0))) + (call $eosio_assert (i32.eq (i32.const 13) (get_local $required_bytes))(i32.const 512)) + (drop (call $get_active_security_group (i32.const 256) (get_local $required_bytes))) + (call $eosio_assert (i32.eq (call $memcmp (i32.const 0) (i32.const 256) (get_local $required_bytes))(i32.const 0)) (i32.const 768)) + ) + (data (i32.const 512) "get_active_security_group should return the right size") + (data (i32.const 768) "get_active_security_group output buffer must match the input") +) +)====="; + +std::vector participants_payload( participants_t names ) { + fc::datastream> ds; + fc::raw::pack(ds, names); + return ds.storage(); +} + +BOOST_AUTO_TEST_CASE(test_participants_change_modified) { + eosio::testing::tester chain; + using namespace eosio::chain::literals; + + chain.create_accounts( {"alice"_n,"bob"_n,"charlie"_n} ); + chain.produce_block(); + + { + const auto& cur_security_group = chain.control->active_security_group(); + BOOST_REQUIRE_EQUAL(cur_security_group.version, 0); + BOOST_REQUIRE_EQUAL(cur_security_group.participants.size(), 0); + } + + chain.create_accounts({ "addmember"_n, "rmmember"_n }); + + chain.produce_block(); + + chain.set_code( "addmember"_n, add_security_group_participants_wast ); + chain.set_code( "rmmember"_n, remove_security_group_participants_wast ); + + chain.produce_block(); + + chain.push_action( "eosio"_n, "setpriv"_n, "eosio"_n, fc::mutable_variant_object()("account", "addmember"_n)("is_priv", 1)); + chain.push_action( "eosio"_n, "setpriv"_n, "eosio"_n, fc::mutable_variant_object()("account", "rmmember"_n)("is_priv", 1)); + + chain.produce_block(); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + 
BOOST_CHECK_EQUAL(chain.control->active_security_group().participants.size(), 0); + + BOOST_TEST_REQUIRE(chain.push_action_no_produce( eosio::chain::action({}, "addmember"_n, {}, participants_payload({"alice"_n})), "addmember"_n.to_uint64_t())); + BOOST_TEST_REQUIRE(chain.push_action_no_produce( eosio::chain::action({}, "addmember"_n, {}, participants_payload({"bob"_n})), "addmember"_n.to_uint64_t())); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 2); + BOOST_CHECK_EQUAL(chain.control->active_security_group().participants.size(), 0); + + chain.produce_block(); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + BOOST_CHECK(chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + + BOOST_TEST_REQUIRE(chain.push_action_no_produce( eosio::chain::action({}, "rmmember"_n, {}, participants_payload({"alice"_n})), "rmmember"_n.to_uint64_t())); + BOOST_TEST(chain.control->proposed_security_group_participants() == participants_t{"bob"_n}); + BOOST_CHECK(chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + chain.produce_block(); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + BOOST_TEST(chain.control->active_security_group().participants == participants_t{"bob"_n}); + BOOST_CHECK(chain.control->in_active_security_group(participants_t{"bob"_n})); +} + +BOOST_AUTO_TEST_CASE(test_participants_change_2_chains) { + /* Note: + * because produce_block calls start_block after finalizing the produced block, + * and push_block does not, chain2 will trail behind chain + */ + eosio::testing::tester chain; + eosio::testing::tester chain2; + using namespace eosio::chain::literals; + + chain.create_accounts( {"alice"_n,"bob"_n,"charlie"_n} ); + + chain2.push_block(chain.produce_block()); + + { + const auto& cur_security_group = chain.control->active_security_group(); + 
BOOST_REQUIRE_EQUAL(cur_security_group.version, 0); + BOOST_REQUIRE_EQUAL(cur_security_group.participants.size(), 0); + } + + chain.create_accounts({ "addmember"_n, "rmmember"_n }); + + chain2.push_block(chain.produce_block()); + + chain.set_code( "addmember"_n, add_security_group_participants_wast ); + chain.set_code( "rmmember"_n, remove_security_group_participants_wast ); + + chain2.push_block(chain.produce_block()); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + BOOST_CHECK_EQUAL(chain.control->active_security_group().participants.size(), 0); + BOOST_CHECK_EQUAL(chain2.control->proposed_security_group_participants().size(), 0); + BOOST_CHECK_EQUAL(chain2.control->active_security_group().participants.size(), 0); + + chain.push_action( "eosio"_n, "setpriv"_n, "eosio"_n, fc::mutable_variant_object()("account", "addmember"_n)("is_priv", 1)); + chain.push_action( "eosio"_n, "setpriv"_n, "eosio"_n, fc::mutable_variant_object()("account", "rmmember"_n)("is_priv", 1)); + + chain2.push_block(chain.produce_block()); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size(), 0); + BOOST_CHECK_EQUAL(chain.control->active_security_group().participants.size(), 0); + BOOST_CHECK_EQUAL(chain2.control->proposed_security_group_participants().size(), 0); + BOOST_CHECK_EQUAL(chain2.control->active_security_group().participants.size(), 0); + + BOOST_TEST_REQUIRE(chain.push_action_no_produce( eosio::chain::action({}, "addmember"_n, {}, participants_payload({"alice"_n})), "addmember"_n.to_uint64_t())); + BOOST_TEST_REQUIRE(chain.push_action_no_produce( eosio::chain::action({}, "addmember"_n, {}, participants_payload({"bob"_n})), "addmember"_n.to_uint64_t())); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size(), 2); + BOOST_CHECK_EQUAL(chain.control->active_security_group().participants.size(), 0); + BOOST_CHECK_EQUAL(chain2.control->proposed_security_group_participants().size(), 0); + 
BOOST_CHECK_EQUAL(chain2.control->active_security_group().participants.size(), 0); + + chain2.push_block(chain.produce_block()); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + BOOST_CHECK(chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + BOOST_CHECK_EQUAL(chain2.control->proposed_security_group_participants().size(), 2); + BOOST_CHECK_EQUAL(chain2.control->active_security_group().participants.size(), 0); + + chain2.push_block(chain.produce_block()); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + BOOST_CHECK(chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + BOOST_CHECK_EQUAL(chain2.control->proposed_security_group_participants().size(), 0); // + BOOST_CHECK(chain2.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + BOOST_TEST_REQUIRE(chain.push_action_no_produce(eosio::chain::action({}, "rmmember"_n, {}, participants_payload({"alice"_n})), "rmmember"_n.to_uint64_t())); + + BOOST_TEST(chain.control->proposed_security_group_participants() == participants_t{"bob"_n}); + BOOST_CHECK(chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + BOOST_CHECK_EQUAL(chain2.control->proposed_security_group_participants().size(), 0); // + BOOST_CHECK(chain2.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + chain2.push_block(chain.produce_block()); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + BOOST_TEST(chain.control->active_security_group().participants == participants_t{"bob"_n}); + BOOST_TEST(chain2.control->proposed_security_group_participants() == participants_t{"bob"_n}); + BOOST_CHECK(chain2.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + chain2.push_block(chain.produce_block()); + + BOOST_CHECK_EQUAL(chain.control->proposed_security_group_participants().size() , 0); + 
BOOST_TEST(chain.control->active_security_group().participants == participants_t{"bob"_n}); + BOOST_CHECK_EQUAL(chain2.control->proposed_security_group_participants().size() , 0); + BOOST_TEST(chain2.control->active_security_group().participants == participants_t{"bob"_n}); +} + +BOOST_AUTO_TEST_CASE(test_security_group_intrinsic) { + + eosio::testing::tester chain1; + using namespace eosio::chain::literals; + + chain1.create_accounts( {"alice"_n,"bob"_n,"charlie"_n} ); + chain1.produce_blocks(3); + + chain1.create_accounts({ "addmember"_n, "rmmember"_n, "ingroup"_n, "getgroup"_n }); + chain1.produce_block(); + + chain1.set_code( "addmember"_n, add_security_group_participants_wast ); + chain1.set_code( "rmmember"_n, remove_security_group_participants_wast ); + chain1.set_code( "ingroup"_n, assert_in_security_group_wast ); + chain1.set_code( "getgroup"_n, assert_get_security_group_wast ); + chain1.produce_block(); + + chain1.push_action( "eosio"_n, "setpriv"_n, "eosio"_n, fc::mutable_variant_object()("account", "addmember"_n)("is_priv", 1)); + chain1.push_action( "eosio"_n, "setpriv"_n, "eosio"_n, fc::mutable_variant_object()("account", "rmmember"_n)("is_priv", 1)); + chain1.produce_blocks(24); + + chain1.set_producers( {"alice"_n,"bob"_n} ); + chain1.produce_blocks(3); // Starts new blocks which promotes the proposed schedule to pending + BOOST_REQUIRE_EQUAL( chain1.control->active_producers().version, 1u ); + + BOOST_TEST_REQUIRE(chain1.push_action( eosio::chain::action({}, "addmember"_n, {}, participants_payload({"alice"_n})), "addmember"_n.to_uint64_t() ) == ""); + chain1.produce_block(); + BOOST_TEST_REQUIRE(chain1.push_action( eosio::chain::action({}, "addmember"_n, {}, participants_payload({"bob"_n})), "addmember"_n.to_uint64_t() ) == ""); + chain1.produce_blocks(10+11); + BOOST_CHECK_EQUAL(chain1.control->proposed_security_group_participants().size() , 2); + BOOST_CHECK_EQUAL(chain1.control->active_security_group().participants.size(), 0); + 
chain1.produce_blocks(1); + BOOST_CHECK_EQUAL(chain1.control->proposed_security_group_participants().size() , 0); + BOOST_CHECK(chain1.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + BOOST_TEST_REQUIRE(chain1.push_action( eosio::chain::action({}, "rmmember"_n, {}, participants_payload({"alice"_n})), "rmmember"_n.to_uint64_t() ) == ""); + BOOST_CHECK_EQUAL(chain1.control->proposed_security_group_participants().size() , 1); + chain1.produce_blocks(11+12); + BOOST_CHECK(!chain1.control->in_active_security_group(participants_t({"alice"_n}))); + + BOOST_TEST_REQUIRE(chain1.push_action( eosio::chain::action({}, "ingroup"_n, {}, participants_payload({"bob"_n})), "ingroup"_n.to_uint64_t() ) == ""); + + eosio::chain::security_group_info_t grp{ + .version = 2, + .participants = {"bob"_n} + }; + + fc::datastream> strm; + fc::raw::pack(strm, grp); + + BOOST_TEST_REQUIRE(chain1.push_action(eosio::chain::action({}, "getgroup"_n, {}, strm.storage()), "getgroup"_n.to_uint64_t()) == ""); + BOOST_TEST_REQUIRE(chain1.push_action( eosio::chain::action({}, "addmember"_n, {}, participants_payload({"charlie"_n})), "addmember"_n.to_uint64_t() ) == ""); + chain1.produce_blocks(11); + BOOST_TEST(chain1.control->proposed_security_group_participants() == participants_t({"bob"_n, "charlie"_n})); + chain1.control->abort_block(); + + /// Test snapshot recovery + + std::stringstream snapshot_strm; + auto writer = std::make_shared(snapshot_strm); + chain1.control->write_snapshot(writer); + writer->finalize(); + + auto cfg = chain1.get_config(); + fc::temp_directory tmp_dir; + cfg.blog.log_dir = tmp_dir.path() / "blocks"; + cfg.state_dir = tmp_dir.path() / "state"; + + auto reader = std::make_shared(snapshot_strm); + eosio::testing::tester chain2([&cfg, &reader](eosio::testing::tester& self) { self.init(cfg, reader); }); + { + const auto& active_security_group = chain2.control->active_security_group(); + BOOST_CHECK_EQUAL(2, active_security_group.version); + 
BOOST_TEST(active_security_group.participants == participants_t{"bob"_n}); + BOOST_TEST(chain2.control->proposed_security_group_participants() == participants_t({"bob"_n, "charlie"_n})); + } +} + + +BOOST_AUTO_TEST_CASE(test_security_group_contract) { + eosio::testing::tester chain; + using namespace eosio::chain::literals; + + chain.create_accounts({"secgrptest"_n,"alice"_n,"bob"_n,"charlie"_n}); + chain.produce_block(); + chain.set_code("secgrptest"_n, eosio::testing::contracts::security_group_test_wasm()); + chain.set_abi("secgrptest"_n, eosio::testing::contracts::security_group_test_abi().data()); + chain.produce_block(); + chain.push_action( "eosio"_n, "setpriv"_n, "eosio"_n, fc::mutable_variant_object()("account", "secgrptest"_n)("is_priv", 1)); + chain.produce_block(); + + chain.push_action("secgrptest"_n, "add"_n, "secgrptest"_n, fc::mutable_variant_object() + ( "nm", "alice" ) + ); + chain.push_action("secgrptest"_n, "add"_n, "secgrptest"_n, fc::mutable_variant_object() + ( "nm", "bob" ) + ); + chain.produce_block(); + BOOST_CHECK(chain.control->in_active_security_group(participants_t({"alice"_n, "bob"_n}))); + + chain.push_action("secgrptest"_n, "remove"_n, "secgrptest"_n, fc::mutable_variant_object() + ( "nm", "alice" ) + ); + chain.produce_block(); + BOOST_CHECK(chain.control->in_active_security_group(participants_t({"bob"_n}))); + + { + auto result = + chain.push_action("secgrptest"_n, "ingroup"_n, "secgrptest"_n, fc::mutable_variant_object()("nm", "alice")); + + BOOST_CHECK_EQUAL(false, fc::raw::unpack(result->action_traces[0].return_value)); + } + + { + auto result = chain.push_action("secgrptest"_n, "activegroup"_n, "secgrptest"_n, fc::mutable_variant_object()); + auto participants = fc::raw::unpack(result->action_traces[0].return_value); + BOOST_TEST(participants == participants_t({"bob"_n})); + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/state_history_tests.cpp b/unittests/state_history_tests.cpp index 711c89be5fe..196c95e922b 
100644 --- a/unittests/state_history_tests.cpp +++ b/unittests/state_history_tests.cpp @@ -858,7 +858,7 @@ BOOST_AUTO_TEST_CASE(test_deltas_global_property_history) { BOOST_REQUIRE(result.first); auto &it_global_property = result.second; BOOST_REQUIRE_EQUAL(it_global_property->rows.obj.size(), 1); - auto global_properties = chain.deserialize_data(it_global_property); + auto global_properties = chain.deserialize_data(it_global_property); auto configuration = std::get(global_properties[0].configuration); BOOST_REQUIRE_EQUAL(configuration.max_transaction_delay, 60); } diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index 6ac0c78bc51..56006197e82 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -35,3 +35,4 @@ add_subdirectory( wasm_config_bios ) add_subdirectory( params_test ) add_subdirectory( kv_table_test ) add_subdirectory( kv_addr_book ) +add_subdirectory( security_group_test ) diff --git a/unittests/test-contracts/security_group_test/CMakeLists.txt b/unittests/test-contracts/security_group_test/CMakeLists.txt new file mode 100644 index 00000000000..b69f12723ef --- /dev/null +++ b/unittests/test-contracts/security_group_test/CMakeLists.txt @@ -0,0 +1,9 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( security_group_test security_group_test security_group_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/security_group_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/security_group_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/security_group_test.abi ${CMAKE_CURRENT_BINARY_DIR}/security_group_test.abi COPYONLY ) +endif() + +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/eosio.secgrp.wasm ${CMAKE_CURRENT_BINARY_DIR}/eosio.secgrp.wasm COPYONLY ) +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/eosio.secgrp.abi ${CMAKE_CURRENT_BINARY_DIR}/eosio.secgrp.abi COPYONLY ) diff --git a/unittests/test-contracts/security_group_test/eosio.secgrp.abi 
b/unittests/test-contracts/security_group_test/eosio.secgrp.abi new file mode 100644 index 00000000000..445cdd5b6fb --- /dev/null +++ b/unittests/test-contracts/security_group_test/eosio.secgrp.abi @@ -0,0 +1,88 @@ +{ + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", + "version": "eosio::abi/1.2", + "types": [ + { + "new_type_name": "participants_t", + "type": "name[]" + } + ], + "structs": [ + { + "name": "add", + "base": "", + "fields": [ + { + "name": "participants", + "type": "participants_t" + } + ] + }, + { + "name": "clear", + "base": "", + "fields": [ + { + "name": "max", + "type": "uint32" + } + ] + }, + { + "name": "publish", + "base": "", + "fields": [ + { + "name": "max", + "type": "uint32" + } + ] + }, + { + "name": "remove", + "base": "", + "fields": [ + { + "name": "participants", + "type": "participants_t" + } + ] + } + ], + "actions": [ + { + "name": "add", + "type": "add", + "ricardian_contract": "" + }, + { + "name": "clear", + "type": "clear", + "ricardian_contract": "" + }, + { + "name": "publish", + "type": "publish", + "ricardian_contract": "" + }, + { + "name": "remove", + "type": "remove", + "ricardian_contract": "" + } + ], + "tables": [], + "kv_tables": {}, + "ricardian_clauses": [], + "variants": [], + "action_results": [ + { + "name": "clear", + "result_type": "bool" + }, + { + "name": "publish", + "result_type": "bool" + } + ] +} \ No newline at end of file diff --git a/unittests/test-contracts/security_group_test/eosio.secgrp.wasm b/unittests/test-contracts/security_group_test/eosio.secgrp.wasm new file mode 100644 index 00000000000..387eca0b1a6 Binary files /dev/null and b/unittests/test-contracts/security_group_test/eosio.secgrp.wasm differ diff --git a/unittests/test-contracts/security_group_test/security_group_test.abi b/unittests/test-contracts/security_group_test/security_group_test.abi new file mode 100644 index 00000000000..2d01efba645 --- /dev/null +++ 
b/unittests/test-contracts/security_group_test/security_group_test.abi @@ -0,0 +1,78 @@ +{ + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", + "version": "eosio::abi/1.2", + "types": [], + "structs": [ + { + "name": "activegroup", + "base": "", + "fields": [] + }, + { + "name": "add", + "base": "", + "fields": [ + { + "name": "nm", + "type": "name" + } + ] + }, + { + "name": "ingroup", + "base": "", + "fields": [ + { + "name": "nm", + "type": "name" + } + ] + }, + { + "name": "remove", + "base": "", + "fields": [ + { + "name": "nm", + "type": "name" + } + ] + } + ], + "actions": [ + { + "name": "activegroup", + "type": "activegroup", + "ricardian_contract": "" + }, + { + "name": "add", + "type": "add", + "ricardian_contract": "" + }, + { + "name": "ingroup", + "type": "ingroup", + "ricardian_contract": "" + }, + { + "name": "remove", + "type": "remove", + "ricardian_contract": "" + } + ], + "tables": [], + "kv_tables": {}, + "ricardian_clauses": [], + "variants": [], + "action_results": [ + { + "name": "activegroup", + "result_type": "name[]" + }, + { + "name": "ingroup", + "result_type": "bool" + } + ] +} \ No newline at end of file diff --git a/unittests/test-contracts/security_group_test/security_group_test.cpp b/unittests/test-contracts/security_group_test/security_group_test.cpp new file mode 100644 index 00000000000..c7171620621 --- /dev/null +++ b/unittests/test-contracts/security_group_test/security_group_test.cpp @@ -0,0 +1,38 @@ +#include +#include +#include + +using namespace eosio; + +class [[eosio::contract]] security_group_test : public contract { + public: + using contract::contract; + + [[eosio::action]] + void add( name nm ); + [[eosio::action]] + void remove( name nm ); + + [[eosio::action]] + bool ingroup( name nm ) const; + + [[eosio::action]] + std::set activegroup() const; + + using add_action = action_wrapper<"add"_n, &security_group_test::add>; + using remove_action = action_wrapper<"remove"_n, 
&security_group_test::remove>; + using in_active_group_action = action_wrapper<"ingroup"_n, &security_group_test::ingroup>; + using active_group_action = action_wrapper<"activegroup"_n, &security_group_test::activegroup>; +}; + +[[eosio::action]] void security_group_test::add(name nm) { eosio::add_security_group_participants({nm}); } + +[[eosio::action]] void security_group_test::remove(name nm) { eosio::remove_security_group_participants({nm}); } + +[[eosio::action]] bool security_group_test::ingroup(name nm) const { + return eosio::in_active_security_group({nm}); +} + +[[eosio::action]] std::set security_group_test::activegroup() const { + return eosio::get_active_security_group().participants; +} \ No newline at end of file diff --git a/unittests/test-contracts/security_group_test/security_group_test.wasm b/unittests/test-contracts/security_group_test/security_group_test.wasm new file mode 100755 index 00000000000..0e0112c621c Binary files /dev/null and b/unittests/test-contracts/security_group_test/security_group_test.wasm differ