From 9ff1ad40445b713ad3cd401af704c8ff7887fc78 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 4 May 2023 10:57:31 -0400 Subject: [PATCH 001/180] Add boost as submodule --- .gitmodules | 3 +++ CMakeLists.txt | 4 ++-- libraries/CMakeLists.txt | 4 ++++ libraries/boost | 1 + libraries/libfc/CMakeLists.txt | 2 +- 5 files changed, 11 insertions(+), 3 deletions(-) create mode 160000 libraries/boost diff --git a/.gitmodules b/.gitmodules index ab01b3d5c0..f6cea8d706 100644 --- a/.gitmodules +++ b/.gitmodules @@ -31,3 +31,6 @@ [submodule "libraries/cli11/cli11"] path = libraries/cli11/cli11 url = https://github.com/AntelopeIO/CLI11.git +[submodule "libraries/boost"] + path = libraries/boost + url = https://github.com/boostorg/boost diff --git a/CMakeLists.txt b/CMakeLists.txt index 049183b252..42067d3e91 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -101,11 +101,11 @@ else() set(no_whole_archive_flag "--no-whole-archive") endif() -set(Boost_USE_MULTITHREADED ON) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) # Most boost deps get implictly picked up via fc, as just about everything links to fc. In addition we pick up # the pthread dependency through fc. 
-find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) +#find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) if( APPLE AND UNIX ) # Apple Specific Options Here diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 4b041dd047..8e79b46c59 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -5,6 +5,10 @@ set(SOFTFLOAT_INSTALL_COMPONENT "dev") set(EOSVM_INSTALL_COMPONENT "dev") set(BN256_INSTALL_COMPONENT "dev") +set( BOOST_INCLUDE_LIBRARIES iostreams date_time system program_options chrono test ) +add_subdirectory( boost EXCLUDE_FROM_ALL ) +#set( Boost_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/boost ) + add_subdirectory( libfc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) diff --git a/libraries/boost b/libraries/boost new file mode 160000 index 0000000000..b6928ae5c9 --- /dev/null +++ b/libraries/boost @@ -0,0 +1 @@ +Subproject commit b6928ae5c92e21a04bbe17a558e6e066dbe632f6 diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index ac86842034..148197be5f 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -91,7 +91,7 @@ if(APPLE) add_library(zstd INTERFACE) endif() -find_package(Boost 1.66 REQUIRED COMPONENTS +find_package(Boost REQUIRED COMPONENTS date_time chrono unit_test_framework From 1cfbf0a76971efe63c420e212574cba6b98d327c Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 8 May 2023 10:13:39 -0400 Subject: [PATCH 002/180] Leap builds with boost 1.82 as submodule. 
--- CMakeLists.txt | 4 ++++ libraries/CMakeLists.txt | 4 ---- libraries/appbase | 2 +- libraries/chain/CMakeLists.txt | 9 +++++++++ libraries/chainbase | 2 +- libraries/libfc/CMakeLists.txt | 11 +++++------ plugins/chain_plugin/CMakeLists.txt | 4 ++-- programs/cleos/CMakeLists.txt | 2 +- programs/nodeos/CMakeLists.txt | 3 ++- tests/CMakeLists.txt | 4 ++-- 10 files changed, 27 insertions(+), 18 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 42067d3e91..3910a3381d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -103,6 +103,10 @@ endif() set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) + +#set( BOOST_INCLUDE_LIBRARIES headers iostreams date_time system program_options chrono test interprocess multi_index lexical_cast asio thread serialization multiprecision beast unit_test_framework ) +add_subdirectory( libraries/boost ) + # Most boost deps get implictly picked up via fc, as just about everything links to fc. In addition we pick up # the pthread dependency through fc. 
#find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 8e79b46c59..4b041dd047 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -5,10 +5,6 @@ set(SOFTFLOAT_INSTALL_COMPONENT "dev") set(EOSVM_INSTALL_COMPONENT "dev") set(BN256_INSTALL_COMPONENT "dev") -set( BOOST_INCLUDE_LIBRARIES iostreams date_time system program_options chrono test ) -add_subdirectory( boost EXCLUDE_FROM_ALL ) -#set( Boost_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/boost ) - add_subdirectory( libfc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) diff --git a/libraries/appbase b/libraries/appbase index c7ce7c2024..e785cc75ae 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit c7ce7c202497d772f8bbaf34a3ced0df136ec9fd +Subproject commit e785cc75aebda9a048f657ce4244014b40139fea diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 7796205658..6484e46f96 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -129,8 +129,17 @@ add_library( eosio_chain ${HEADERS} ) +## Boost::accumulators depends on Boost::numeric_ublas, which is still missing cmake support (see +## https://github.com/boostorg/cmake/issues/39). 
Until this is fixed, manually add Boost::numeric_ublas +## as an interface library +## ---------------------------------------------------------------------------------------------------- +add_library(boost_numeric_ublas INTERFACE) +add_library(Boost::numeric_ublas ALIAS boost_numeric_ublas) + target_link_libraries( eosio_chain PUBLIC bn256 fc chainbase eosio_rapidjson Logging IR WAST WASM softfloat builtins ${CHAIN_EOSVM_LIBRARIES} ${LLVM_LIBS} ${CHAIN_RT_LINKAGE} + Boost::signals2 Boost::hana Boost::property_tree Boost::multi_index Boost::asio Boost::lockfree + Boost::assign Boost::accumulators ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" diff --git a/libraries/chainbase b/libraries/chainbase index c1d30da95c..7b3badc5ec 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit c1d30da95c9f5e2e80d32732d3063671ff23b123 +Subproject commit 7b3badc5ecf5d1c6d41d9932811a1df994bbed51 diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index 148197be5f..683cdb31b8 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -91,11 +91,9 @@ if(APPLE) add_library(zstd INTERFACE) endif() -find_package(Boost REQUIRED COMPONENTS - date_time - chrono - unit_test_framework - iostreams) +if(NOT boost_headers_SOURCE_DIR) + find_package(Boost REQUIRED COMPONENTS date_time chrono unit_test_framework iostreams) +endif() find_path(GMP_INCLUDE_DIR NAMES gmp.h) find_library(GMP_LIBRARY gmp) @@ -130,7 +128,8 @@ if(APPLE) find_library(security_framework Security) find_library(corefoundation_framework CoreFoundation) endif() -target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Threads::Threads +target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Boost::interprocess Boost::multi_index + Boost::multiprecision Boost::beast Boost::asio Boost::thread Boost::unit_test_framework 
Threads::Threads OpenSSL::Crypto ZLIB::ZLIB ${PLATFORM_SPECIFIC_LIBS} ${CMAKE_DL_LIBS} secp256k1 ${security_framework} ${corefoundation_framework}) # Critically, this ensures that OpenSSL 1.1 & 3.0 both have a variant of BN_zero() with void return value. But it also allows access diff --git a/plugins/chain_plugin/CMakeLists.txt b/plugins/chain_plugin/CMakeLists.txt index 0648d20fb4..ae21541990 100644 --- a/plugins/chain_plugin/CMakeLists.txt +++ b/plugins/chain_plugin/CMakeLists.txt @@ -11,7 +11,7 @@ if(EOSIO_ENABLE_DEVELOPER_OPTIONS) target_compile_definitions(chain_plugin PUBLIC EOSIO_DEVELOPER) endif() -target_link_libraries( chain_plugin eosio_chain custom_appbase appbase resource_monitor_plugin ) +target_link_libraries( chain_plugin eosio_chain custom_appbase appbase resource_monitor_plugin Boost::bimap ) target_include_directories( chain_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include" "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/appbase/include" "${CMAKE_CURRENT_SOURCE_DIR}/../resource_monitor_plugin/include") -add_subdirectory( test ) \ No newline at end of file +add_subdirectory( test ) diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index bf184cf927..71b9d6c866 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -13,7 +13,7 @@ set(LOCALEDOMAIN ${CLI_CLIENT_EXECUTABLE_NAME}) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} - PRIVATE appbase version leap-cli11 chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE appbase version leap-cli11 chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} Boost::process Boost::dll ) if (CURL_FOUND) target_sources(${CLI_CLIENT_EXECUTABLE_NAME} 
PRIVATE do_http_post_libcurl.cpp) diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 23ac7269c2..493c9e3a43 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -40,7 +40,8 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} prometheus_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${build_id_flag} PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin - PRIVATE eosio_chain_wrap fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE eosio_chain_wrap fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} + Boost::dll ) include(additionalPlugins) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 0f868a8f6b..8fbaa2b002 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -104,9 +104,9 @@ add_subdirectory( performance_tests ) find_package(Threads) add_executable(ship_client ship_client.cpp) -target_link_libraries(ship_client abieos Boost::program_options Boost::system Threads::Threads) +target_link_libraries(ship_client abieos Boost::program_options Boost::system Boost::algorithm Boost::asio Boost::beast Threads::Threads) add_executable(ship_streamer ship_streamer.cpp) -target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Threads::Threads) +target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Boost::asio Boost::beast Threads::Threads) add_test(NAME ship_test COMMAND tests/ship_test.py -v --num-clients 10 --num-requests 5000 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST ship_test PROPERTY LABELS nonparallelizable_tests) From d22f340ba96e666459aa158c98b984e768b034e5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 8 May 2023 10:46:11 -0400 Subject: [PATCH 003/180] Remove outdated comments. 
--- CMakeLists.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3910a3381d..f44aba6cc3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -104,13 +104,8 @@ endif() set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -#set( BOOST_INCLUDE_LIBRARIES headers iostreams date_time system program_options chrono test interprocess multi_index lexical_cast asio thread serialization multiprecision beast unit_test_framework ) add_subdirectory( libraries/boost ) -# Most boost deps get implictly picked up via fc, as just about everything links to fc. In addition we pick up -# the pthread dependency through fc. -#find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) - if( APPLE AND UNIX ) # Apple Specific Options Here message( STATUS "Configuring Leap on macOS" ) From 0ae0289621f0f5a528aabdcdf739fd91a5ec8651 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 10 May 2023 10:26:33 -0400 Subject: [PATCH 004/180] Update submodules branches to tip --- libraries/appbase | 2 +- libraries/chainbase | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/appbase b/libraries/appbase index e785cc75ae..b9472c58c8 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit e785cc75aebda9a048f657ce4244014b40139fea +Subproject commit b9472c58c8d32b8e816cb4a8480d5b454f1bdbf4 diff --git a/libraries/chainbase b/libraries/chainbase index 7b3badc5ec..bf078f3ff6 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 7b3badc5ecf5d1c6d41d9932811a1df994bbed51 +Subproject commit bf078f3ff6559fffe93b8195df702729ae4ba4f5 From 04bcbdb6eb73785849c3e7c90e30eaeb2025013a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 17 May 2023 18:00:12 -0400 Subject: [PATCH 005/180] Remove boost install from cicd docker files and pinned build --- .cicd/platforms/ubuntu20.Dockerfile | 1 - 
.cicd/platforms/ubuntu22.Dockerfile | 1 - scripts/pinned_build.sh | 19 +------------------ 3 files changed, 1 insertion(+), 20 deletions(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index baccb7c937..4296c802b9 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -6,7 +6,6 @@ RUN apt-get update && apt-get upgrade -y && \ cmake \ git \ jq \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 1e5a936a4d..52ace75948 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -6,7 +6,6 @@ RUN apt-get update && apt-get upgrade -y && \ cmake \ git \ jq \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ diff --git a/scripts/pinned_build.sh b/scripts/pinned_build.sh index c57257e957..36bd7ad397 100755 --- a/scripts/pinned_build.sh +++ b/scripts/pinned_build.sh @@ -101,27 +101,10 @@ install_llvm() { export LLVM_DIR="${LLVM_DIR}" } -install_boost() { - BOOST_DIR="$1" - - if [ ! 
-d "${BOOST_DIR}" ]; then - echo "Installing Boost ${BOOST_VER} @ ${BOOST_DIR}" - try wget -O "boost_${BOOST_VER//\./_}.tar.gz" "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VER}/source/boost_${BOOST_VER//\./_}.tar.gz" - try tar -xvzf "boost_${BOOST_VER//\./_}.tar.gz" -C "${DEP_DIR}" - pushdir "${BOOST_DIR}" - try ./bootstrap.sh -with-toolset=clang --prefix="${BOOST_DIR}/bin" - ./b2 toolset=clang cxxflags="-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I\${CLANG_DIR}/include/c++/v1 -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fPIE" linkflags='-stdlib=libc++ -pie' link=static threading=multi --with-iostreams --with-date_time --with-system --with-program_options --with-chrono --with-test -q -j "${JOBS}" install - popdir "${DEP_DIR}" - rm "boost_${BOOST_VER//\./_}.tar.gz" - fi - export BOOST_DIR="${BOOST_DIR}" -} - pushdir "${DEP_DIR}" install_clang "${DEP_DIR}/clang-${CLANG_VER}" install_llvm "${DEP_DIR}/llvm-${LLVM_VER}" -install_boost "${DEP_DIR}/boost_${BOOST_VER//\./_}" # go back to the directory where the script starts popdir "${START_DIR}" @@ -130,7 +113,7 @@ pushdir "${LEAP_DIR}" # build Leap echo "Building Leap ${SCRIPT_DIR}" -try cmake -DCMAKE_TOOLCHAIN_FILE="${SCRIPT_DIR}/pinned_toolchain.cmake" -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="${LLVM_DIR}/lib/cmake" -DCMAKE_PREFIX_PATH="${BOOST_DIR}/bin" "${SCRIPT_DIR}/.." +try cmake -DCMAKE_TOOLCHAIN_FILE="${SCRIPT_DIR}/pinned_toolchain.cmake" -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="${LLVM_DIR}/lib/cmake" "${SCRIPT_DIR}/.." 
try make -j "${JOBS}" try cpack From ee18ab5b88429cf236d4625d63503d4eadd8d3a5 Mon Sep 17 00:00:00 2001 From: 766C6164 Date: Wed, 24 May 2023 20:34:48 -0400 Subject: [PATCH 006/180] Make tester consumers built with same boost as leap --- CMakeModules/EosioTester.cmake.in | 26 +++++++++++++------------- CMakeModules/EosioTesterBuild.cmake.in | 26 +++++++++++++------------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index a12004f73f..3d4151ef91 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -42,13 +42,8 @@ else ( APPLE ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) +add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) +include_directories(${boostorg_SOURCE_DIR}) find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) @@ -94,12 +89,18 @@ macro(add_eosio_test_executable test_name) ${libbn256} @GMP_LIBRARY@ - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} + Boost::date_time + Boost::filesystem + Boost::system + Boost::chrono + Boost::multi_index + Boost::multiprecision + Boost::interprocess + Boost::asio + Boost::signals2 + Boost::iostreams "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY} + Boost::unit_test_framework ${LLVM_LIBS} @@ -115,7 +116,6 @@ macro(add_eosio_test_executable test_name) endif() target_include_directories( ${test_name} PUBLIC - ${Boost_INCLUDE_DIRS} @OPENSSL_INCLUDE_DIR@ @CMAKE_INSTALL_PREFIX@ @CMAKE_INSTALL_FULL_INCLUDEDIR@ diff --git a/CMakeModules/EosioTesterBuild.cmake.in 
b/CMakeModules/EosioTesterBuild.cmake.in index aa67d25595..d82a6e84b5 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -39,13 +39,8 @@ else ( APPLE ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) +add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) +include_directories(${boostorg_SOURCE_DIR}) find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) @@ -91,12 +86,18 @@ macro(add_eosio_test_executable test_name) ${libbn256} @GMP_LIBRARY@ - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} + Boost::date_time + Boost::filesystem + Boost::system + Boost::chrono + Boost::multi_index + Boost::multiprecision + Boost::interprocess + Boost::asio + Boost::signals2 + Boost::iostreams "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY} + Boost::unit_test_framework ${LLVM_LIBS} @@ -112,7 +113,6 @@ macro(add_eosio_test_executable test_name) endif() target_include_directories( ${test_name} PUBLIC - ${Boost_INCLUDE_DIRS} @OPENSSL_INCLUDE_DIR@ @CMAKE_SOURCE_DIR@/libraries/chain/include @CMAKE_BINARY_DIR@/libraries/chain/include From 3ca72151e430a9afd9dd2481c7fdb15f936892b5 Mon Sep 17 00:00:00 2001 From: 766C6164 Date: Wed, 24 May 2023 20:39:40 -0400 Subject: [PATCH 007/180] Removed accidental leftover --- CMakeModules/EosioTester.cmake.in | 1 - CMakeModules/EosioTesterBuild.cmake.in | 1 - 2 files changed, 2 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 3d4151ef91..a4a668c237 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in 
@@ -43,7 +43,6 @@ endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) -include_directories(${boostorg_SOURCE_DIR}) find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index d82a6e84b5..609451efc2 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -40,7 +40,6 @@ endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) -include_directories(${boostorg_SOURCE_DIR}) find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) From 86c38831ff6ae1b5c8694f89a66b02fa9a4fbe45 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 23 Jun 2023 16:43:33 -0400 Subject: [PATCH 008/180] Output flag marking empty `std::optional` in `abi_serializer::_variant_to_binary` --- libraries/chain/abi_serializer.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 45e84e1b16..8010594acd 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -535,14 +535,15 @@ namespace eosio { namespace chain { bool disallow_additional_fields = false; for( uint32_t i = 0; i < st.fields.size(); ++i ) { const auto& field = st.fields[i]; - if( vo.contains( string(field.name).c_str() ) ) { + bool present = vo.contains(string(field.name).c_str()); + if( present || is_optional(field.type) ) { if( disallow_additional_fields ) EOS_THROW( pack_exception, "Unexpected field '${f}' found in input object while 
processing struct '${p}'", ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); { auto h1 = ctx.push_to_path( impl::field_path_item{ .parent_struct_itr = s_itr, .field_ordinal = i } ); auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); - _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, ctx); + _variant_to_binary(_remove_bin_extension(field.type), present ? vo[field.name] : fc::variant(nullptr), ds, ctx); } } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { disallow_additional_fields = true; From f9e91f19bef6eb6b434a2d166d10afa6a140f035 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 23 Jun 2023 16:45:28 -0400 Subject: [PATCH 009/180] Use value from `numeric_limits` rather than hard coded hex value. --- libraries/libfc/include/fc/time.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libraries/libfc/include/fc/time.hpp b/libraries/libfc/include/fc/time.hpp index 1c1b433bd5..29b0f7a211 100644 --- a/libraries/libfc/include/fc/time.hpp +++ b/libraries/libfc/include/fc/time.hpp @@ -1,6 +1,7 @@ #pragma once #include #include +#include #ifdef _MSC_VER #pragma warning (push) @@ -11,7 +12,7 @@ namespace fc { class microseconds { public: constexpr explicit microseconds( int64_t c = 0) :_count(c){} - static constexpr microseconds maximum() { return microseconds(0x7fffffffffffffffll); } + static constexpr microseconds maximum() { return microseconds(std::numeric_limits::max()); } friend constexpr microseconds operator + (const microseconds& l, const microseconds& r ) { return microseconds(l._count+r._count); } friend constexpr microseconds operator - (const microseconds& l, const microseconds& r ) { return microseconds(l._count-r._count); } From 93fe429bbc16bcc170de978bf791456a44660db9 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 23 Jun 2023 16:46:43 -0400 Subject: [PATCH 010/180] Update `abi_tests.cpp` to not enforce deadline in debug mode. 
--- unittests/abi_tests.cpp | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index 6ad37bd994..f77c8fe42d 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -40,7 +40,17 @@ FC_REFLECT(act_sig, (sig) ) BOOST_AUTO_TEST_SUITE(abi_tests) +#ifdef NDEBUG fc::microseconds max_serialization_time = fc::seconds(1); // some test machines are very slow +#else +fc::microseconds max_serialization_time = fc::microseconds::maximum(); // don't check in debug builds +#endif + +static fc::time_point get_deadline() { + if (max_serialization_time == fc::microseconds::maximum()) + return fc::time_point(fc::microseconds::maximum()); + return fc::time_point::now() + max_serialization_time; +} // verify that round trip conversion, via bytes, reproduces the exact same data fc::variant verify_byte_round_trip_conversion( const abi_serializer& abis, const type_name& type, const fc::variant& var ) @@ -49,8 +59,6 @@ fc::variant verify_byte_round_trip_conversion( const abi_serializer& abis, const auto var2 = abis.binary_to_variant(type, bytes, abi_serializer::create_yield_function( max_serialization_time )); - std::string r = fc::json::to_string(var2, fc::time_point::now() + max_serialization_time); - auto bytes2 = abis.variant_to_binary(type, var2, abi_serializer::create_yield_function( max_serialization_time )); BOOST_TEST( fc::to_hex(bytes) == fc::to_hex(bytes2) ); @@ -64,7 +72,7 @@ void verify_round_trip_conversion( const abi_serializer& abis, const type_name& auto bytes = abis.variant_to_binary(type, var, abi_serializer::create_yield_function( max_serialization_time )); BOOST_REQUIRE_EQUAL(fc::to_hex(bytes), hex); auto var2 = abis.binary_to_variant(type, bytes, abi_serializer::create_yield_function( max_serialization_time )); - BOOST_REQUIRE_EQUAL(fc::json::to_string(var2, fc::time_point::now() + max_serialization_time), expected_json); + BOOST_REQUIRE_EQUAL(fc::json::to_string(var2, 
get_deadline()), expected_json); auto bytes2 = abis.variant_to_binary(type, var2, abi_serializer::create_yield_function( max_serialization_time )); BOOST_REQUIRE_EQUAL(fc::to_hex(bytes2), hex); } @@ -94,7 +102,7 @@ fc::variant verify_type_round_trip_conversion( const abi_serializer& abis, const fc::variant var2; abi_serializer::to_variant(obj, var2, get_resolver(), abi_serializer::create_yield_function( max_serialization_time )); - std::string r = fc::json::to_string(var2, fc::time_point::now() + max_serialization_time); + std::string r = fc::json::to_string(var2, get_deadline()); auto bytes2 = abis.variant_to_binary(type, var2, abi_serializer::create_yield_function( max_serialization_time )); @@ -2976,7 +2984,7 @@ BOOST_AUTO_TEST_CASE(abi_to_variant__add_action__good_return_value) mutable_variant_object mvo; eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_yield_function(max_serialization_time)); eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); - std::string res = fc::json::to_string(mvo, fc::time_point::now() + max_serialization_time); + std::string res = fc::json::to_string(mvo, get_deadline()); BOOST_CHECK_EQUAL(res, expected_json); } @@ -3001,7 +3009,7 @@ BOOST_AUTO_TEST_CASE(abi_to_variant__add_action__bad_return_value) mutable_variant_object mvo; eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_yield_function(max_serialization_time)); eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); - std::string res = fc::json::to_string(mvo, fc::time_point::now() + max_serialization_time); + std::string res = fc::json::to_string(mvo, get_deadline()); BOOST_CHECK_EQUAL(res, expected_json); } @@ -3036,7 +3044,7 @@ BOOST_AUTO_TEST_CASE(abi_to_variant__add_action__no_return_value) mutable_variant_object mvo; eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_yield_function(max_serialization_time)); 
eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); - std::string res = fc::json::to_string(mvo, fc::time_point::now() + max_serialization_time); + std::string res = fc::json::to_string(mvo, get_deadline()); BOOST_CHECK_EQUAL(res, expected_json); } From f1eb13a02d25c4373a5545640193af3123adfa6d Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 23 Jun 2023 16:47:21 -0400 Subject: [PATCH 011/180] Add test case for using `std::optional` in abi type definitions. --- unittests/abi_tests.cpp | 86 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index f77c8fe42d..4048b3b3ec 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -1935,6 +1935,92 @@ BOOST_AUTO_TEST_CASE(abi_type_loop) auto is_type_exception = [](fc::exception const & e) -> bool { return e.to_detail_string().find("type already exists") != std::string::npos; }; BOOST_CHECK_EXCEPTION( abi_serializer abis(fc::json::from_string(repeat_abi).as(), abi_serializer::create_yield_function( max_serialization_time )), duplicate_abi_type_def_exception, is_type_exception ); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE(abi_std_optional) +{ try { + const char* repeat_abi = R"=====( + { + "version": "eosio::abi/1.2", + "types": [], + "structs": [ + { + "name": "fees", + "base": "", + "fields": [ + { + "name": "gas_price", + "type": "uint64?" + }, + { + "name": "miner_cut", + "type": "uint32?" + }, + { + "name": "bridge_fee", + "type": "uint32?" 
+ } + ] + } + ], + "actions": [ + { + "name": "fees", + "type": "fees", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "variants": [], + "action_results": [] + } + )====="; + + abi_serializer abis(fc::json::from_string(repeat_abi).as(), abi_serializer::create_yield_function( max_serialization_time )); + { + // check conversion when all optional members are provided + std::string test_data = R"=====( + { + "gas_price" : "42", + "miner_cut" : "2", + "bridge_fee" : "2" + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } + + { + // check conversion when the first optional members is missing + std::string test_data = R"=====( + { + "miner_cut" : "2", + "bridge_fee" : "2" + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } + + { + // check conversion when the first optional members is missing + std::string test_data = R"=====( + { + "gas_price" : "42", + "miner_cut" : "2", + "bridge_fee" : "2" + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } + + + } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE(abi_type_redefine) From 05fe45a4e6b95058f4c632c5304c49bda4a9aed5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 23 Jun 2023 17:28:38 -0400 Subject: [PATCH 012/180] Add a couple more test patterns. 
--- unittests/abi_tests.cpp | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index 4048b3b3ec..db36c1eeb9 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -2010,7 +2010,6 @@ BOOST_AUTO_TEST_CASE(abi_std_optional) std::string test_data = R"=====( { "gas_price" : "42", - "miner_cut" : "2", "bridge_fee" : "2" } )====="; @@ -2019,7 +2018,29 @@ BOOST_AUTO_TEST_CASE(abi_std_optional) verify_byte_round_trip_conversion(abis, "fees", var); } - + { + // check conversion when all optional members are provided + std::string test_data = R"=====( + { + "gas_price" : "42", + "miner_cut" : "2", + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } + + { + // check conversion when all optional members are provided + std::string test_data = R"=====( + { + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } } FC_LOG_AND_RETHROW() } From fe0533236dcd9b3ec51f17a9dafcc7936d1c067c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 30 Jun 2023 09:38:59 -0500 Subject: [PATCH 013/180] Create workflow to test backward compatibility of performance harness with prior leap releases. 
--- .../workflows/ph_backward_compatibility.yaml | 113 +++++++++++++++++- 1 file changed, 109 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 8da13d18d9..2a0b415d32 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -12,10 +12,115 @@ defaults: shell: bash jobs: - tmp: - name: Stub + d: + name: Discover Platforms runs-on: ubuntu-latest + outputs: + missing-platforms: ${{steps.discover.outputs.missing-platforms}} + p: ${{steps.discover.outputs.platforms}} steps: - - name: Workflow Stub + - name: Discover Platforms + id: discover + uses: AntelopeIO/discover-platforms-action@v1 + with: + platform-file: .cicd/platforms.json + password: ${{secrets.GITHUB_TOKEN}} + package-name: builders + + build-platforms: + name: Build Platforms + needs: d + if: needs.d.outputs.missing-platforms != '[]' + strategy: + fail-fast: false + matrix: + platform: ${{fromJSON(needs.d.outputs.missing-platforms)}} + runs-on: ["self-hosted", "enf-x86-beefy"] + permissions: + packages: write + contents: read + steps: + - name: Login to Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{github.repository_owner}} + password: ${{secrets.GITHUB_TOKEN}} + - name: Build and push + uses: docker/build-push-action@v3 + with: + push: true + tags: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + file: ${{fromJSON(needs.d.outputs.p)[matrix.platform].dockerfile}} + + Build: + needs: [d, build-platforms] + if: always() && needs.d.result == 'success' && (needs.build-platforms.result == 'success' || needs.build-platforms.result == 'skipped') + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + runs-on: ["self-hosted", "enf-x86-beefy"] + container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + 
- name: Build + id: build + run: | + # https://github.com/actions/runner/issues/2033 + chown -R $(id -u):$(id -g) $PWD + cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja + cmake --build build + tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst + - name: Upload builddir + uses: AntelopeIO/upload-artifact-large-chunks-action@v1 + with: + name: ${{matrix.platform}}-build + path: build.tar.zst + + tests: + name: Tests + needs: [d, Build] + if: always() && needs.Build.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + release: [3.1, 3.2, 4.0] + runs-on: ["self-hosted", "enf-x86-hightier"] + container: + image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + options: --security-opt seccomp=unconfined + steps: + - uses: actions/checkout@v3 + - name: Download builddir + uses: actions/download-artifact@v3 + with: + name: ${{matrix.platform}}-build + - name: Extract Build Directory + run: | + # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs + chown -R $(id -u):$(id -g) $PWD + zstdcat build.tar.zst | tar x + - name: Download Prev Leap Version + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 'leap*amd64.deb' + target: '${{matrix.release}}' + token: ${{github.token}} + - name: Extract and Place Rev Leap Version artifacts + run: | + mkdir tmp + dpkg -x leap*amd64.deb tmp + rm build/bin/nodeos + rm build/bin/cleos + mv tmp/usr/bin/nodeos build/bin + mv tmp/usr/bin/cleos build/bin + - name: Run Tests run: | - echo "Workflow Stub" + cd build + ctest --output-on-failure -j $(nproc) -R "performance_test_" --timeout 420 From f848de5bd61cbaf5b35eb98a84de1be99e217dec Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 30 Jun 2023 13:52:22 -0500 Subject: [PATCH 014/180] Try to placate different version artifact naming conventions. 
--- .github/workflows/ph_backward_compatibility.yaml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 2a0b415d32..91e385fae2 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -104,12 +104,22 @@ jobs: # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs chown -R $(id -u):$(id -g) $PWD zstdcat build.tar.zst | tar x - - name: Download Prev Leap Version + - if: ${{ matrix.release != '3.1' }} + name: Download Prev Leap Version (v3.2.x and after) uses: AntelopeIO/asset-artifact-download-action@v2 with: owner: AntelopeIO repo: leap - file: 'leap*amd64.deb' + file: 'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb + target: '${{matrix.release}}' + token: ${{github.token}} + - if: ${{ matrix.release == '3.1' }} + name: Download Prev Leap Version (v3.1.x) + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. 
leap-3.1.4-ubuntu20.04-x86_64.deb target: '${{matrix.release}}' token: ${{github.token}} - name: Extract and Place Rev Leap Version artifacts From 3a720421e08ee7ebc52bb5c9199351cb795aa40b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 30 Jun 2023 14:01:12 -0500 Subject: [PATCH 015/180] missed the extract step --- .github/workflows/ph_backward_compatibility.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 91e385fae2..0a2a2d25a7 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -125,7 +125,7 @@ jobs: - name: Extract and Place Rev Leap Version artifacts run: | mkdir tmp - dpkg -x leap*amd64.deb tmp + dpkg -x leap*.deb tmp rm build/bin/nodeos rm build/bin/cleos mv tmp/usr/bin/nodeos build/bin From 2e90e39f7998f83ae946e62878eeec5388fb2447 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 30 Jun 2023 14:09:41 -0500 Subject: [PATCH 016/180] Do not run tests in parallel. 
--- .github/workflows/ph_backward_compatibility.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 0a2a2d25a7..4c4db0db2c 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -133,4 +133,4 @@ jobs: - name: Run Tests run: | cd build - ctest --output-on-failure -j $(nproc) -R "performance_test_" --timeout 420 + ctest --output-on-failure -R "performance_test_" --timeout 420 From cd6926567c70092b9bec8b646fee93ace0b8d546 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 30 Jun 2023 14:28:28 -0500 Subject: [PATCH 017/180] Use low-tier for longer run time --- .github/workflows/ph_backward_compatibility.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 4c4db0db2c..f8ccca5bcc 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -89,7 +89,7 @@ jobs: matrix: platform: [ubuntu20, ubuntu22] release: [3.1, 3.2, 4.0] - runs-on: ["self-hosted", "enf-x86-hightier"] + runs-on: ["self-hosted", "enf-x86-lowtier"] container: image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} options: --security-opt seccomp=unconfined From a2aa513e467ae793e33e3be81e6633c5f97c7c6e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 30 Jun 2023 17:23:46 -0500 Subject: [PATCH 018/180] Split up tests into individual runs. 
--- .../workflows/ph_backward_compatibility.yaml | 47 +++++++++++++++++-- 1 file changed, 44 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index f8ccca5bcc..723ae91ede 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -130,7 +130,48 @@ jobs: rm build/bin/cleos mv tmp/usr/bin/nodeos build/bin mv tmp/usr/bin/cleos build/bin - - name: Run Tests - run: | cd build - ctest --output-on-failure -R "performance_test_" --timeout 420 + + - name: Run BP Op Mode Performance Test + run: | + ctest --output-on-failure performance_test_bp --timeout 420 + + - name: Run CPU Trx Spec Performance Test + run: | + ctest --output-on-failure performance_test_cpu_trx_spec --timeout 420 + + - name: Run API Node Op Mode Performance Test + run: | + ctest --output-on-failure performance_test_api --timeout 420 + + - name: Run Read Only Trxs Performance Test + run: | + ctest --output-on-failure performance_test_read_only_trxs --timeout 420 + + - name: Run P2P Performance Basic Test + run: | + ctest --output-on-failure performance_test_basic_p2p --timeout 420 + + - name: Run User Defined Transfer Trx Spec Performance Basic Tests + run: | + ctest --output-on-failure performance_test_basic_transfer_trx_spec --timeout 420 + + - name: Run User Defined New Acct Trx Spec Performance Basic Tests + run: | + ctest --output-on-failure performance_test_basic_new_acct_trx_spec --timeout 420 + + - name: Run User Defined CPU Trx Spec Performance Basic Tests + run: | + ctest --output-on-failure performance_test_basic_cpu_trx_spec --timeout 420 + + - name: Run User Defined Ram Trx Spec Performance Basic Tests + run: | + ctest --output-on-failure performance_test_basic_ram_trx_spec --timeout 420 + + - name: Run API Node Op Mode Performance Basic Test + run: | + ctest --output-on-failure performance_test_basic_http --timeout 420 + + - name: Run Read 
Only Trx Performance Basic Test + run: | + ctest --output-on-failure performance_test_basic_read_only_trxs --timeout 420 From 258fefc41d0843389941df191aea419484011a02 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 30 Jun 2023 17:39:41 -0500 Subject: [PATCH 019/180] Tests not found - move change dir build into first test. --- .github/workflows/ph_backward_compatibility.yaml | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 723ae91ede..174873a5cf 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -130,48 +130,37 @@ jobs: rm build/bin/cleos mv tmp/usr/bin/nodeos build/bin mv tmp/usr/bin/cleos build/bin - cd build - - name: Run BP Op Mode Performance Test run: | + cd build ctest --output-on-failure performance_test_bp --timeout 420 - - name: Run CPU Trx Spec Performance Test run: | ctest --output-on-failure performance_test_cpu_trx_spec --timeout 420 - - name: Run API Node Op Mode Performance Test run: | ctest --output-on-failure performance_test_api --timeout 420 - - name: Run Read Only Trxs Performance Test run: | ctest --output-on-failure performance_test_read_only_trxs --timeout 420 - - name: Run P2P Performance Basic Test run: | ctest --output-on-failure performance_test_basic_p2p --timeout 420 - - name: Run User Defined Transfer Trx Spec Performance Basic Tests run: | ctest --output-on-failure performance_test_basic_transfer_trx_spec --timeout 420 - - name: Run User Defined New Acct Trx Spec Performance Basic Tests run: | ctest --output-on-failure performance_test_basic_new_acct_trx_spec --timeout 420 - - name: Run User Defined CPU Trx Spec Performance Basic Tests run: | ctest --output-on-failure performance_test_basic_cpu_trx_spec --timeout 420 - - name: Run User Defined Ram Trx Spec Performance Basic Tests run: | ctest --output-on-failure 
performance_test_basic_ram_trx_spec --timeout 420 - - name: Run API Node Op Mode Performance Basic Test run: | ctest --output-on-failure performance_test_basic_http --timeout 420 - - name: Run Read Only Trx Performance Basic Test run: | ctest --output-on-failure performance_test_basic_read_only_trxs --timeout 420 From 99d23168bae18eef23cb36c270e278922da30cd5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 3 Jul 2023 09:47:35 -0500 Subject: [PATCH 020/180] Add regex flag to ctest. --- .../workflows/ph_backward_compatibility.yaml | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 174873a5cf..7b0bcc5d8d 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -133,34 +133,34 @@ jobs: - name: Run BP Op Mode Performance Test run: | cd build - ctest --output-on-failure performance_test_bp --timeout 420 + ctest --output-on-failure -R performance_test_bp --timeout 420 - name: Run CPU Trx Spec Performance Test run: | - ctest --output-on-failure performance_test_cpu_trx_spec --timeout 420 + ctest --output-on-failure -R performance_test_cpu_trx_spec --timeout 420 - name: Run API Node Op Mode Performance Test run: | - ctest --output-on-failure performance_test_api --timeout 420 + ctest --output-on-failure -R performance_test_api --timeout 420 - name: Run Read Only Trxs Performance Test run: | - ctest --output-on-failure performance_test_read_only_trxs --timeout 420 + ctest --output-on-failure -R performance_test_read_only_trxs --timeout 420 - name: Run P2P Performance Basic Test run: | - ctest --output-on-failure performance_test_basic_p2p --timeout 420 + ctest --output-on-failure -R performance_test_basic_p2p --timeout 420 - name: Run User Defined Transfer Trx Spec Performance Basic Tests run: | - ctest --output-on-failure performance_test_basic_transfer_trx_spec 
--timeout 420 + ctest --output-on-failure -R performance_test_basic_transfer_trx_spec --timeout 420 - name: Run User Defined New Acct Trx Spec Performance Basic Tests run: | - ctest --output-on-failure performance_test_basic_new_acct_trx_spec --timeout 420 + ctest --output-on-failure -R performance_test_basic_new_acct_trx_spec --timeout 420 - name: Run User Defined CPU Trx Spec Performance Basic Tests run: | - ctest --output-on-failure performance_test_basic_cpu_trx_spec --timeout 420 + ctest --output-on-failure -R performance_test_basic_cpu_trx_spec --timeout 420 - name: Run User Defined Ram Trx Spec Performance Basic Tests run: | - ctest --output-on-failure performance_test_basic_ram_trx_spec --timeout 420 + ctest --output-on-failure -R performance_test_basic_ram_trx_spec --timeout 420 - name: Run API Node Op Mode Performance Basic Test run: | - ctest --output-on-failure performance_test_basic_http --timeout 420 + ctest --output-on-failure -R performance_test_basic_http --timeout 420 - name: Run Read Only Trx Performance Basic Test run: | - ctest --output-on-failure performance_test_basic_read_only_trxs --timeout 420 + ctest --output-on-failure -R performance_test_basic_read_only_trxs --timeout 420 From 5a3cd673e9ce8cd63fd1bb5df6fc522bb9cac34d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 3 Jul 2023 10:04:57 -0500 Subject: [PATCH 021/180] GH-1349 Close on async_read closed socket. Always shutdown socket. 
--- plugins/net_plugin/net_plugin.cpp | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 963f1a8bb8..2e4952dabd 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1022,10 +1022,8 @@ namespace eosio { void connection::_close( connection* self, bool reconnect, bool shutdown ) { self->socket_open = false; boost::system::error_code ec; - if( self->socket->is_open() ) { - self->socket->shutdown( tcp::socket::shutdown_both, ec ); - self->socket->close( ec ); - } + self->socket->shutdown( tcp::socket::shutdown_both, ec ); + self->socket->close( ec ); self->socket.reset( new tcp::socket( my_impl->thread_pool.get_executor() ) ); self->flush_queues(); self->connecting = false; @@ -2489,7 +2487,18 @@ namespace eosio { boost::asio::bind_executor( strand, [conn = shared_from_this(), socket=socket]( boost::system::error_code ec, std::size_t bytes_transferred ) { // may have closed connection and cleared pending_message_buffer - if( !conn->socket_is_open() || socket != conn->socket ) return; + if (!conn->socket_is_open() && conn->socket_open) { // if socket_open then close not called + peer_dlog( conn, "async_read socket not open, closing"); + conn->close(); + return; + } + if (socket != conn->socket ) { // different socket, conn must have created a new socket, make sure previous is closed + peer_dlog( conn, "async_read diff socket closing"); + boost::system::error_code ec; + socket->shutdown( tcp::socket::shutdown_both, ec ); + socket->close( ec ); + return; + } bool close_connection = false; try { From d46d7e903ad51bca5ec1584b12b1fd33525a5766 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 3 Jul 2023 10:05:23 -0500 Subject: [PATCH 022/180] GH-1349 Cleanup duplicate check --- plugins/net_plugin/net_plugin.cpp | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp 
b/plugins/net_plugin/net_plugin.cpp index 2e4952dabd..bd93128d96 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1050,6 +1050,7 @@ namespace eosio { self->cancel_wait(); self->latest_msg_time = std::chrono::system_clock::time_point::min(); self->latest_blk_time = std::chrono::system_clock::time_point::min(); + self->org = std::chrono::nanoseconds{0}; if( reconnect && !shutdown ) { my_impl->start_conn_timer( std::chrono::milliseconds( 100 ), connection_wptr() ); @@ -2851,6 +2852,7 @@ namespace eosio { peer_lib_num = msg.last_irreversible_block_num; std::unique_lock g_conn( conn_mtx ); last_handshake_recv = msg; + auto c_time = last_handshake_sent.time; g_conn.unlock(); connecting = false; @@ -2876,14 +2878,9 @@ namespace eosio { return; } - if( peer_address().empty() ) { + if( incoming() ) { set_connection_type( msg.p2p_address ); - } - std::unique_lock g_conn( conn_mtx ); - if( peer_address().empty() || last_handshake_recv.node_id == fc::sha256()) { - auto c_time = last_handshake_sent.time; - g_conn.unlock(); peer_dlog( this, "checking for duplicate" ); std::shared_lock g_cnts( my_impl->connections_mtx ); for(const auto& check : my_impl->connections) { @@ -2929,9 +2926,7 @@ namespace eosio { } } } else { - peer_dlog( this, "skipping duplicate check, addr == ${pa}, id = ${ni}", - ("pa", peer_address())( "ni", last_handshake_recv.node_id ) ); - g_conn.unlock(); + peer_dlog(this, "skipping duplicate check, addr == ${pa}, id = ${ni}", ("pa", peer_address())("ni", msg.node_id)); } if( msg.chain_id != my_impl->chain_id ) { From 91cf4db380214cebaa3fdcde7aa99f71cb76a53d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 3 Jul 2023 10:07:12 -0500 Subject: [PATCH 023/180] Give tests a little longer to complete. 
--- .../workflows/ph_backward_compatibility.yaml | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 7b0bcc5d8d..209443270a 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -133,34 +133,34 @@ jobs: - name: Run BP Op Mode Performance Test run: | cd build - ctest --output-on-failure -R performance_test_bp --timeout 420 + ctest --output-on-failure -R performance_test_bp --timeout 480 - name: Run CPU Trx Spec Performance Test run: | - ctest --output-on-failure -R performance_test_cpu_trx_spec --timeout 420 + ctest --output-on-failure -R performance_test_cpu_trx_spec --timeout 480 - name: Run API Node Op Mode Performance Test run: | - ctest --output-on-failure -R performance_test_api --timeout 420 + ctest --output-on-failure -R performance_test_api --timeout 480 - name: Run Read Only Trxs Performance Test run: | - ctest --output-on-failure -R performance_test_read_only_trxs --timeout 420 + ctest --output-on-failure -R performance_test_read_only_trxs --timeout 480 - name: Run P2P Performance Basic Test run: | - ctest --output-on-failure -R performance_test_basic_p2p --timeout 420 + ctest --output-on-failure -R performance_test_basic_p2p --timeout 480 - name: Run User Defined Transfer Trx Spec Performance Basic Tests run: | - ctest --output-on-failure -R performance_test_basic_transfer_trx_spec --timeout 420 + ctest --output-on-failure -R performance_test_basic_transfer_trx_spec --timeout 480 - name: Run User Defined New Acct Trx Spec Performance Basic Tests run: | - ctest --output-on-failure -R performance_test_basic_new_acct_trx_spec --timeout 420 + ctest --output-on-failure -R performance_test_basic_new_acct_trx_spec --timeout 480 - name: Run User Defined CPU Trx Spec Performance Basic Tests run: | - ctest --output-on-failure -R 
performance_test_basic_cpu_trx_spec --timeout 420 + ctest --output-on-failure -R performance_test_basic_cpu_trx_spec --timeout 480 - name: Run User Defined Ram Trx Spec Performance Basic Tests run: | - ctest --output-on-failure -R performance_test_basic_ram_trx_spec --timeout 420 + ctest --output-on-failure -R performance_test_basic_ram_trx_spec --timeout 480 - name: Run API Node Op Mode Performance Basic Test run: | - ctest --output-on-failure -R performance_test_basic_http --timeout 420 + ctest --output-on-failure -R performance_test_basic_http --timeout 480 - name: Run Read Only Trx Performance Basic Test run: | - ctest --output-on-failure -R performance_test_basic_read_only_trxs --timeout 420 + ctest --output-on-failure -R performance_test_basic_read_only_trxs --timeout 480 From a8f35c73559b44f8080250e96ec16e5284581829 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 3 Jul 2023 11:27:13 -0400 Subject: [PATCH 024/180] no longer eagerly use tcmalloc if it's found --- CMakeLists.txt | 7 ++++--- CMakeModules/EosioTester.cmake.in | 6 ------ CMakeModules/EosioTesterBuild.cmake.in | 6 ------ 3 files changed, 4 insertions(+), 15 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e49f000255..3a59d71074 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -159,9 +159,10 @@ endif() message( STATUS "Using '${EOSIO_ROOT_KEY}' as public key for 'eosio' account" ) -find_package( Gperftools QUIET ) -if( GPERFTOOLS_FOUND ) - message( STATUS "Found gperftools; compiling Leap with TCMalloc") +option(ENABLE_TCMALLOC "use tcmalloc (requires gperftools)" OFF) +if( ENABLE_TCMALLOC ) + find_package( Gperftools REQUIRED ) + message( STATUS "Compiling Leap with TCMalloc") #if doing this by the book, simply link_libraries( ${GPERFTOOLS_TCMALLOC} ) here. That will #give the performance benefits of tcmalloc but since it won't be linked last #the heap profiler & checker may not be accurate. 
This here is rather undocumented behavior diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 297f0f0b72..54544db6e7 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -14,12 +14,6 @@ if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) set(LLVM_DIR @LLVM_DIR@) endif() -find_package( Gperftools QUIET ) -if( GPERFTOOLS_FOUND ) - message( STATUS "Found gperftools; compiling tests with TCMalloc") - list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) -endif() - if(NOT "@LLVM_FOUND@" STREQUAL "") find_package(LLVM @LLVM_VERSION@ EXACT REQUIRED CONFIG) llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native DebugInfoDWARF orcjit) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index f00f1020cb..2c34c7e595 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -12,12 +12,6 @@ if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) set(LLVM_DIR @LLVM_DIR@) endif() -find_package( Gperftools QUIET ) -if( GPERFTOOLS_FOUND ) - message( STATUS "Found gperftools; compiling tests with TCMalloc") - list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) -endif() - if(NOT "@LLVM_FOUND@" STREQUAL "") find_package(LLVM @LLVM_VERSION@ EXACT REQUIRED CONFIG) llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native DebugInfoDWARF orcjit) From d42f0e784a761f958b0ce65313de9383d5857104 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 3 Jul 2023 10:34:33 -0500 Subject: [PATCH 025/180] Split test scenarios into separate jobs. 
--- .../workflows/ph_backward_compatibility.yaml | 236 ++++++++++++++++-- 1 file changed, 213 insertions(+), 23 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 209443270a..ec99e9ea1e 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -80,8 +80,8 @@ jobs: name: ${{matrix.platform}}-build path: build.tar.zst - tests: - name: Tests + ph-basic-tests: + name: Performance Harness Basic Tests needs: [d, Build] if: always() && needs.Build.result == 'success' strategy: @@ -133,34 +133,224 @@ jobs: - name: Run BP Op Mode Performance Test run: | cd build - ctest --output-on-failure -R performance_test_bp --timeout 480 - - name: Run CPU Trx Spec Performance Test + ctest --output-on-failure -R performance_test_basic --timeout 480 + + bp-op-mode-tests: + name: BP Op Mode Performance Tests + needs: [d, Build] + if: always() && needs.Build.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + release: [3.1, 3.2, 4.0] + runs-on: ["self-hosted", "enf-x86-lowtier"] + container: + image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + options: --security-opt seccomp=unconfined + steps: + - uses: actions/checkout@v3 + - name: Download builddir + uses: actions/download-artifact@v3 + with: + name: ${{matrix.platform}}-build + - name: Extract Build Directory run: | - ctest --output-on-failure -R performance_test_cpu_trx_spec --timeout 480 - - name: Run API Node Op Mode Performance Test + # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs + chown -R $(id -u):$(id -g) $PWD + zstdcat build.tar.zst | tar x + - if: ${{ matrix.release != '3.1' }} + name: Download Prev Leap Version (v3.2.x and after) + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 
'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb + target: '${{matrix.release}}' + token: ${{github.token}} + - if: ${{ matrix.release == '3.1' }} + name: Download Prev Leap Version (v3.1.x) + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. leap-3.1.4-ubuntu20.04-x86_64.deb + target: '${{matrix.release}}' + token: ${{github.token}} + - name: Extract and Place Rev Leap Version artifacts run: | - ctest --output-on-failure -R performance_test_api --timeout 480 - - name: Run Read Only Trxs Performance Test + mkdir tmp + dpkg -x leap*.deb tmp + rm build/bin/nodeos + rm build/bin/cleos + mv tmp/usr/bin/nodeos build/bin + mv tmp/usr/bin/cleos build/bin + - name: Run BP Op Mode Performance Test run: | - ctest --output-on-failure -R performance_test_read_only_trxs --timeout 480 - - name: Run P2P Performance Basic Test + cd build + ctest --output-on-failure -R performance_test_bp --timeout 480 + + cpu-trx-spec-tests: + name: CPU Trx Spec Performance Tests + needs: [d, Build] + if: always() && needs.Build.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + release: [3.1, 3.2, 4.0] + runs-on: ["self-hosted", "enf-x86-lowtier"] + container: + image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + options: --security-opt seccomp=unconfined + steps: + - uses: actions/checkout@v3 + - name: Download builddir + uses: actions/download-artifact@v3 + with: + name: ${{matrix.platform}}-build + - name: Extract Build Directory run: | - ctest --output-on-failure -R performance_test_basic_p2p --timeout 480 - - name: Run User Defined Transfer Trx Spec Performance Basic Tests + # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs + chown -R $(id -u):$(id -g) $PWD + zstdcat build.tar.zst | tar x + - if: ${{ 
matrix.release != '3.1' }} + name: Download Prev Leap Version (v3.2.x and after) + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb + target: '${{matrix.release}}' + token: ${{github.token}} + - if: ${{ matrix.release == '3.1' }} + name: Download Prev Leap Version (v3.1.x) + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. leap-3.1.4-ubuntu20.04-x86_64.deb + target: '${{matrix.release}}' + token: ${{github.token}} + - name: Extract and Place Rev Leap Version artifacts run: | - ctest --output-on-failure -R performance_test_basic_transfer_trx_spec --timeout 480 - - name: Run User Defined New Acct Trx Spec Performance Basic Tests + mkdir tmp + dpkg -x leap*.deb tmp + rm build/bin/nodeos + rm build/bin/cleos + mv tmp/usr/bin/nodeos build/bin + mv tmp/usr/bin/cleos build/bin + - name: Run CPU Trx Spec Performance Test run: | - ctest --output-on-failure -R performance_test_basic_new_acct_trx_spec --timeout 480 - - name: Run User Defined CPU Trx Spec Performance Basic Tests + cd build + ctest --output-on-failure -R performance_test_cpu_trx_spec --timeout 480 + + api-op-mode-tests: + name: API Node Op Mode Performance Tests + needs: [d, Build] + if: always() && needs.Build.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + release: [3.1, 3.2, 4.0] + runs-on: ["self-hosted", "enf-x86-lowtier"] + container: + image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + options: --security-opt seccomp=unconfined + steps: + - uses: actions/checkout@v3 + - name: Download builddir + uses: actions/download-artifact@v3 + with: + name: ${{matrix.platform}}-build + - name: Extract Build Directory run: | - ctest --output-on-failure -R performance_test_basic_cpu_trx_spec 
--timeout 480 - - name: Run User Defined Ram Trx Spec Performance Basic Tests + # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs + chown -R $(id -u):$(id -g) $PWD + zstdcat build.tar.zst | tar x + - if: ${{ matrix.release != '3.1' }} + name: Download Prev Leap Version (v3.2.x and after) + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb + target: '${{matrix.release}}' + token: ${{github.token}} + - if: ${{ matrix.release == '3.1' }} + name: Download Prev Leap Version (v3.1.x) + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. leap-3.1.4-ubuntu20.04-x86_64.deb + target: '${{matrix.release}}' + token: ${{github.token}} + - name: Extract and Place Rev Leap Version artifacts run: | - ctest --output-on-failure -R performance_test_basic_ram_trx_spec --timeout 480 - - name: Run API Node Op Mode Performance Basic Test + mkdir tmp + dpkg -x leap*.deb tmp + rm build/bin/nodeos + rm build/bin/cleos + mv tmp/usr/bin/nodeos build/bin + mv tmp/usr/bin/cleos build/bin + - name: Run API Node Op Mode Performance Test run: | - ctest --output-on-failure -R performance_test_basic_http --timeout 480 - - name: Run Read Only Trx Performance Basic Test + cd build + ctest --output-on-failure -R performance_test_api --timeout 480 + + read-only-trx-tests: + name: Read Only Trxs Performance Tests + needs: [d, Build] + if: always() && needs.Build.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + release: [3.1, 3.2, 4.0] + runs-on: ["self-hosted", "enf-x86-lowtier"] + container: + image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + options: --security-opt seccomp=unconfined + steps: + - uses: 
actions/checkout@v3 + - name: Download builddir + uses: actions/download-artifact@v3 + with: + name: ${{matrix.platform}}-build + - name: Extract Build Directory run: | - ctest --output-on-failure -R performance_test_basic_read_only_trxs --timeout 480 + # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs + chown -R $(id -u):$(id -g) $PWD + zstdcat build.tar.zst | tar x + - if: ${{ matrix.release != '3.1' }} + name: Download Prev Leap Version (v3.2.x and after) + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb + target: '${{matrix.release}}' + token: ${{github.token}} + - if: ${{ matrix.release == '3.1' }} + name: Download Prev Leap Version (v3.1.x) + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. leap-3.1.4-ubuntu20.04-x86_64.deb + target: '${{matrix.release}}' + token: ${{github.token}} + - name: Extract and Place Rev Leap Version artifacts + run: | + mkdir tmp + dpkg -x leap*.deb tmp + rm build/bin/nodeos + rm build/bin/cleos + mv tmp/usr/bin/nodeos build/bin + mv tmp/usr/bin/cleos build/bin + - name: Run Read Only Trxs Performance Test + run: | + cd build + ctest --output-on-failure -R performance_test_read_only_trxs --timeout 480 \ No newline at end of file From 58b0f32894305ed7d655e61da5a93cb1b2f0ea16 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 3 Jul 2023 10:49:56 -0500 Subject: [PATCH 026/180] Revert "Split test scenarios into separate jobs." This reverts commit d42f0e784a761f958b0ce65313de9383d5857104. 
--- .../workflows/ph_backward_compatibility.yaml | 236 ++---------------- 1 file changed, 23 insertions(+), 213 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index ec99e9ea1e..209443270a 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -80,63 +80,8 @@ jobs: name: ${{matrix.platform}}-build path: build.tar.zst - ph-basic-tests: - name: Performance Harness Basic Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' - strategy: - fail-fast: false - matrix: - platform: [ubuntu20, ubuntu22] - release: [3.1, 3.2, 4.0] - runs-on: ["self-hosted", "enf-x86-lowtier"] - container: - image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - options: --security-opt seccomp=unconfined - steps: - - uses: actions/checkout@v3 - - name: Download builddir - uses: actions/download-artifact@v3 - with: - name: ${{matrix.platform}}-build - - name: Extract Build Directory - run: | - # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs - chown -R $(id -u):$(id -g) $PWD - zstdcat build.tar.zst | tar x - - if: ${{ matrix.release != '3.1' }} - name: Download Prev Leap Version (v3.2.x and after) - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: leap - file: 'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb - target: '${{matrix.release}}' - token: ${{github.token}} - - if: ${{ matrix.release == '3.1' }} - name: Download Prev Leap Version (v3.1.x) - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: leap - file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. 
leap-3.1.4-ubuntu20.04-x86_64.deb - target: '${{matrix.release}}' - token: ${{github.token}} - - name: Extract and Place Rev Leap Version artifacts - run: | - mkdir tmp - dpkg -x leap*.deb tmp - rm build/bin/nodeos - rm build/bin/cleos - mv tmp/usr/bin/nodeos build/bin - mv tmp/usr/bin/cleos build/bin - - name: Run BP Op Mode Performance Test - run: | - cd build - ctest --output-on-failure -R performance_test_basic --timeout 480 - - bp-op-mode-tests: - name: BP Op Mode Performance Tests + tests: + name: Tests needs: [d, Build] if: always() && needs.Build.result == 'success' strategy: @@ -189,168 +134,33 @@ jobs: run: | cd build ctest --output-on-failure -R performance_test_bp --timeout 480 - - cpu-trx-spec-tests: - name: CPU Trx Spec Performance Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' - strategy: - fail-fast: false - matrix: - platform: [ubuntu20, ubuntu22] - release: [3.1, 3.2, 4.0] - runs-on: ["self-hosted", "enf-x86-lowtier"] - container: - image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - options: --security-opt seccomp=unconfined - steps: - - uses: actions/checkout@v3 - - name: Download builddir - uses: actions/download-artifact@v3 - with: - name: ${{matrix.platform}}-build - - name: Extract Build Directory - run: | - # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs - chown -R $(id -u):$(id -g) $PWD - zstdcat build.tar.zst | tar x - - if: ${{ matrix.release != '3.1' }} - name: Download Prev Leap Version (v3.2.x and after) - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: leap - file: 'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. 
leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb - target: '${{matrix.release}}' - token: ${{github.token}} - - if: ${{ matrix.release == '3.1' }} - name: Download Prev Leap Version (v3.1.x) - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: leap - file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. leap-3.1.4-ubuntu20.04-x86_64.deb - target: '${{matrix.release}}' - token: ${{github.token}} - - name: Extract and Place Rev Leap Version artifacts - run: | - mkdir tmp - dpkg -x leap*.deb tmp - rm build/bin/nodeos - rm build/bin/cleos - mv tmp/usr/bin/nodeos build/bin - mv tmp/usr/bin/cleos build/bin - name: Run CPU Trx Spec Performance Test run: | - cd build ctest --output-on-failure -R performance_test_cpu_trx_spec --timeout 480 - - api-op-mode-tests: - name: API Node Op Mode Performance Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' - strategy: - fail-fast: false - matrix: - platform: [ubuntu20, ubuntu22] - release: [3.1, 3.2, 4.0] - runs-on: ["self-hosted", "enf-x86-lowtier"] - container: - image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - options: --security-opt seccomp=unconfined - steps: - - uses: actions/checkout@v3 - - name: Download builddir - uses: actions/download-artifact@v3 - with: - name: ${{matrix.platform}}-build - - name: Extract Build Directory - run: | - # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs - chown -R $(id -u):$(id -g) $PWD - zstdcat build.tar.zst | tar x - - if: ${{ matrix.release != '3.1' }} - name: Download Prev Leap Version (v3.2.x and after) - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: leap - file: 'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. 
leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb - target: '${{matrix.release}}' - token: ${{github.token}} - - if: ${{ matrix.release == '3.1' }} - name: Download Prev Leap Version (v3.1.x) - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: leap - file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. leap-3.1.4-ubuntu20.04-x86_64.deb - target: '${{matrix.release}}' - token: ${{github.token}} - - name: Extract and Place Rev Leap Version artifacts - run: | - mkdir tmp - dpkg -x leap*.deb tmp - rm build/bin/nodeos - rm build/bin/cleos - mv tmp/usr/bin/nodeos build/bin - mv tmp/usr/bin/cleos build/bin - name: Run API Node Op Mode Performance Test run: | - cd build ctest --output-on-failure -R performance_test_api --timeout 480 - - read-only-trx-tests: - name: Read Only Trxs Performance Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' - strategy: - fail-fast: false - matrix: - platform: [ubuntu20, ubuntu22] - release: [3.1, 3.2, 4.0] - runs-on: ["self-hosted", "enf-x86-lowtier"] - container: - image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - options: --security-opt seccomp=unconfined - steps: - - uses: actions/checkout@v3 - - name: Download builddir - uses: actions/download-artifact@v3 - with: - name: ${{matrix.platform}}-build - - name: Extract Build Directory + - name: Run Read Only Trxs Performance Test run: | - # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs - chown -R $(id -u):$(id -g) $PWD - zstdcat build.tar.zst | tar x - - if: ${{ matrix.release != '3.1' }} - name: Download Prev Leap Version (v3.2.x and after) - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: leap - file: 'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. 
leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb - target: '${{matrix.release}}' - token: ${{github.token}} - - if: ${{ matrix.release == '3.1' }} - name: Download Prev Leap Version (v3.1.x) - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: leap - file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. leap-3.1.4-ubuntu20.04-x86_64.deb - target: '${{matrix.release}}' - token: ${{github.token}} - - name: Extract and Place Rev Leap Version artifacts + ctest --output-on-failure -R performance_test_read_only_trxs --timeout 480 + - name: Run P2P Performance Basic Test run: | - mkdir tmp - dpkg -x leap*.deb tmp - rm build/bin/nodeos - rm build/bin/cleos - mv tmp/usr/bin/nodeos build/bin - mv tmp/usr/bin/cleos build/bin - - name: Run Read Only Trxs Performance Test + ctest --output-on-failure -R performance_test_basic_p2p --timeout 480 + - name: Run User Defined Transfer Trx Spec Performance Basic Tests run: | - cd build - ctest --output-on-failure -R performance_test_read_only_trxs --timeout 480 \ No newline at end of file + ctest --output-on-failure -R performance_test_basic_transfer_trx_spec --timeout 480 + - name: Run User Defined New Acct Trx Spec Performance Basic Tests + run: | + ctest --output-on-failure -R performance_test_basic_new_acct_trx_spec --timeout 480 + - name: Run User Defined CPU Trx Spec Performance Basic Tests + run: | + ctest --output-on-failure -R performance_test_basic_cpu_trx_spec --timeout 480 + - name: Run User Defined Ram Trx Spec Performance Basic Tests + run: | + ctest --output-on-failure -R performance_test_basic_ram_trx_spec --timeout 480 + - name: Run API Node Op Mode Performance Basic Test + run: | + ctest --output-on-failure -R performance_test_basic_http --timeout 480 + - name: Run Read Only Trx Performance Basic Test + run: | + ctest --output-on-failure -R performance_test_basic_read_only_trxs --timeout 480 From 5499dd010eccdab793d85829dc0e9aa395f99ded Mon Sep 17 00:00:00 2001 
From: Peter Oschwald Date: Mon, 3 Jul 2023 10:52:06 -0500 Subject: [PATCH 027/180] Always run test scenarios. --- .github/workflows/ph_backward_compatibility.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 209443270a..262036955b 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -131,36 +131,47 @@ jobs: mv tmp/usr/bin/nodeos build/bin mv tmp/usr/bin/cleos build/bin - name: Run BP Op Mode Performance Test + if: always() run: | cd build ctest --output-on-failure -R performance_test_bp --timeout 480 - name: Run CPU Trx Spec Performance Test + if: always() run: | ctest --output-on-failure -R performance_test_cpu_trx_spec --timeout 480 - name: Run API Node Op Mode Performance Test + if: always() run: | ctest --output-on-failure -R performance_test_api --timeout 480 - name: Run Read Only Trxs Performance Test + if: always() run: | ctest --output-on-failure -R performance_test_read_only_trxs --timeout 480 - name: Run P2P Performance Basic Test + if: always() run: | ctest --output-on-failure -R performance_test_basic_p2p --timeout 480 - name: Run User Defined Transfer Trx Spec Performance Basic Tests + if: always() run: | ctest --output-on-failure -R performance_test_basic_transfer_trx_spec --timeout 480 - name: Run User Defined New Acct Trx Spec Performance Basic Tests + if: always() run: | ctest --output-on-failure -R performance_test_basic_new_acct_trx_spec --timeout 480 - name: Run User Defined CPU Trx Spec Performance Basic Tests + if: always() run: | ctest --output-on-failure -R performance_test_basic_cpu_trx_spec --timeout 480 - name: Run User Defined Ram Trx Spec Performance Basic Tests + if: always() run: | ctest --output-on-failure -R performance_test_basic_ram_trx_spec --timeout 480 - name: Run API Node Op Mode Performance Basic Test + if: always() run: | ctest 
--output-on-failure -R performance_test_basic_http --timeout 480 - name: Run Read Only Trx Performance Basic Test + if: always() run: | ctest --output-on-failure -R performance_test_basic_read_only_trxs --timeout 480 From ea37b69738f18e79d5ff10eeabde0d3a4ee446e0 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 3 Jul 2023 11:20:43 -0500 Subject: [PATCH 028/180] make sure to change to the build dir. --- .github/workflows/ph_backward_compatibility.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 262036955b..347cceedd5 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -138,40 +138,50 @@ jobs: - name: Run CPU Trx Spec Performance Test if: always() run: | + cd build ctest --output-on-failure -R performance_test_cpu_trx_spec --timeout 480 - name: Run API Node Op Mode Performance Test if: always() run: | + cd build ctest --output-on-failure -R performance_test_api --timeout 480 - name: Run Read Only Trxs Performance Test if: always() run: | + cd build ctest --output-on-failure -R performance_test_read_only_trxs --timeout 480 - name: Run P2P Performance Basic Test if: always() run: | + cd build ctest --output-on-failure -R performance_test_basic_p2p --timeout 480 - name: Run User Defined Transfer Trx Spec Performance Basic Tests if: always() run: | + cd build ctest --output-on-failure -R performance_test_basic_transfer_trx_spec --timeout 480 - name: Run User Defined New Acct Trx Spec Performance Basic Tests if: always() run: | + cd build ctest --output-on-failure -R performance_test_basic_new_acct_trx_spec --timeout 480 - name: Run User Defined CPU Trx Spec Performance Basic Tests if: always() run: | + cd build ctest --output-on-failure -R performance_test_basic_cpu_trx_spec --timeout 480 - name: Run User Defined Ram Trx Spec Performance Basic Tests if: always() run: | + cd 
build ctest --output-on-failure -R performance_test_basic_ram_trx_spec --timeout 480 - name: Run API Node Op Mode Performance Basic Test if: always() run: | + cd build ctest --output-on-failure -R performance_test_basic_http --timeout 480 - name: Run Read Only Trx Performance Basic Test if: always() run: | + cd build ctest --output-on-failure -R performance_test_basic_read_only_trxs --timeout 480 From 79885499da6f67e853cb833f90aa3bf6a05ddde2 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 3 Jul 2023 13:15:15 -0400 Subject: [PATCH 029/180] Address PR comment (removed change unneeded in backport). --- libraries/libfc/include/fc/time.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/libraries/libfc/include/fc/time.hpp b/libraries/libfc/include/fc/time.hpp index 29b0f7a211..1c1b433bd5 100644 --- a/libraries/libfc/include/fc/time.hpp +++ b/libraries/libfc/include/fc/time.hpp @@ -1,7 +1,6 @@ #pragma once #include #include -#include #ifdef _MSC_VER #pragma warning (push) @@ -12,7 +11,7 @@ namespace fc { class microseconds { public: constexpr explicit microseconds( int64_t c = 0) :_count(c){} - static constexpr microseconds maximum() { return microseconds(std::numeric_limits::max()); } + static constexpr microseconds maximum() { return microseconds(0x7fffffffffffffffll); } friend constexpr microseconds operator + (const microseconds& l, const microseconds& r ) { return microseconds(l._count+r._count); } friend constexpr microseconds operator - (const microseconds& l, const microseconds& r ) { return microseconds(l._count-r._count); } From 6dd606d3dc27182aefd4cfccdc9fd05afaf16c54 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 3 Jul 2023 16:12:04 -0400 Subject: [PATCH 030/180] add `*.gdb_history` to `.gitignore` --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index d4636fd221..012739af57 100644 --- a/.gitignore +++ b/.gitignore @@ -75,6 +75,8 @@ witness_node_data_dir *.pyc *.pyo +*.gdb_history + 
Testing/* build.tar.gz [Bb]uild*/* From bd7ff14a6136219ff2f9c902718d63e892dc7a83 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 3 Jul 2023 20:15:08 -0500 Subject: [PATCH 031/180] GH-1349 If socket closed, but close not call, call close --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index bd93128d96..1f051eca00 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1242,7 +1242,7 @@ namespace eosio { try { c->buffer_queue.clear_out_queue(); // May have closed connection and cleared buffer_queue - if( !c->socket_is_open() || socket != c->socket ) { + if( !c->socket->is_open() || socket != c->socket ) { peer_ilog( c, "async write socket ${r} before callback", ("r", c->socket_is_open() ? "changed" : "closed") ); c->close(); return; @@ -2488,7 +2488,7 @@ namespace eosio { boost::asio::bind_executor( strand, [conn = shared_from_this(), socket=socket]( boost::system::error_code ec, std::size_t bytes_transferred ) { // may have closed connection and cleared pending_message_buffer - if (!conn->socket_is_open() && conn->socket_open) { // if socket_open then close not called + if (!conn->socket->is_open() && conn->socket_is_open()) { // if socket_open then close not called peer_dlog( conn, "async_read socket not open, closing"); conn->close(); return; From d5d78a6321f8ed0aa97f4b9ea79cf91ab6d1903e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 3 Jul 2023 21:52:41 -0500 Subject: [PATCH 032/180] GH-980 Removed deprecated from help for speculative mode --- plugins/chain_plugin/chain_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 6a08f37c79..574ddc71ec 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -294,7 +294,7 @@ void 
chain_plugin::set_program_options(options_description& cli, options_descrip "In \"head\" mode: database contains state changes up to the head block; transactions received by the node are relayed if valid.\n" "In \"irreversible\" mode: database contains state changes up to the last irreversible block; " "transactions received via the P2P network are not relayed and transactions cannot be pushed via the chain API.\n" - "In \"speculative\" mode: (DEPRECATED: head mode recommended) database contains state changes by transactions in the blockchain " + "In \"speculative\" mode: database contains state changes by transactions in the blockchain " "up to the head block as well as some transactions not yet included in the blockchain; transactions received by the node are relayed if valid.\n" ) ( "api-accept-transactions", bpo::value()->default_value(true), "Allow API transactions to be evaluated and relayed if valid.") From 6dbe74c8921044ac7a05b32fc7781e45f6b568d5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 3 Jul 2023 21:57:21 -0500 Subject: [PATCH 033/180] GH-980 Update docs --- docs/01_nodeos/03_plugins/chain_plugin/index.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/01_nodeos/03_plugins/chain_plugin/index.md b/docs/01_nodeos/03_plugins/chain_plugin/index.md index 458ff88f6f..de2fc6b771 100644 --- a/docs/01_nodeos/03_plugins/chain_plugin/index.md +++ b/docs/01_nodeos/03_plugins/chain_plugin/index.md @@ -131,8 +131,7 @@ Config Options for eosio::chain_plugin: received via the P2P network are not relayed and transactions cannot be pushed via the chain API. 
- In "speculative" mode: (DEPRECATED: - head mode recommended) database + In "speculative" mode: database contains state changes by transactions in the blockchain up to the head block as well as some transactions not yet From 59b0d08749ee864d0c431ca2f68a9147a03085a5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 5 Jul 2023 07:42:17 -0500 Subject: [PATCH 034/180] GH-1349 close mismatched socket --- plugins/net_plugin/net_plugin.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 1f051eca00..7271d393cc 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1242,11 +1242,18 @@ namespace eosio { try { c->buffer_queue.clear_out_queue(); // May have closed connection and cleared buffer_queue - if( !c->socket->is_open() || socket != c->socket ) { - peer_ilog( c, "async write socket ${r} before callback", ("r", c->socket_is_open() ? "changed" : "closed") ); + if (!c->socket->is_open() && c->socket_is_open()) { // if socket_open then close not called + peer_ilog(c, "async write socket closed before callback"); c->close(); return; } + if (socket != c->socket ) { // different socket, c must have created a new socket, make sure previous is closed + peer_ilog( c, "async write socket changed before callback"); + boost::system::error_code ec; + socket->shutdown( tcp::socket::shutdown_both, ec ); + socket->close( ec ); + return; + } if( ec ) { if( ec.value() != boost::asio::error::eof ) { From f41ca84851ab6dddd8dcf241030f273a8ccfc48a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 5 Jul 2023 10:58:08 -0400 Subject: [PATCH 035/180] Update comments. 
--- unittests/abi_tests.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index db36c1eeb9..04bb00892f 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -1993,7 +1993,7 @@ BOOST_AUTO_TEST_CASE(abi_std_optional) } { - // check conversion when the first optional members is missing + // check conversion when the first optional member is missing std::string test_data = R"=====( { "miner_cut" : "2", @@ -2006,7 +2006,7 @@ BOOST_AUTO_TEST_CASE(abi_std_optional) } { - // check conversion when the first optional members is missing + // check conversion when the second optional member is missing std::string test_data = R"=====( { "gas_price" : "42", @@ -2019,7 +2019,7 @@ BOOST_AUTO_TEST_CASE(abi_std_optional) } { - // check conversion when all optional members are provided + // check conversion when the last optional member is missing std::string test_data = R"=====( { "gas_price" : "42", @@ -2032,7 +2032,7 @@ BOOST_AUTO_TEST_CASE(abi_std_optional) } { - // check conversion when all optional members are provided + // check conversion when all optional members are missing std::string test_data = R"=====( { } From 8b7f9f7fafbde6dfc446e91341bbef79b732f38f Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 5 Jul 2023 14:16:49 -0400 Subject: [PATCH 036/180] Address PR comment and small cleanup in `safe_add()`. 
--- libraries/libfc/include/fc/time.hpp | 13 +++++++------ unittests/abi_tests.cpp | 4 +--- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/libraries/libfc/include/fc/time.hpp b/libraries/libfc/include/fc/time.hpp index 53a284d788..96b7156781 100644 --- a/libraries/libfc/include/fc/time.hpp +++ b/libraries/libfc/include/fc/time.hpp @@ -12,7 +12,8 @@ namespace fc { class microseconds { public: constexpr explicit microseconds( int64_t c = 0) :_count(c){} - static constexpr microseconds maximum() { return microseconds(0x7fffffffffffffffll); } + static constexpr microseconds maximum() { return microseconds(std::numeric_limits::max()); } + static constexpr microseconds minimum() { return microseconds(std::numeric_limits::min()); } friend constexpr microseconds operator + (const microseconds& l, const microseconds& r ) { return microseconds(l._count+r._count); } friend constexpr microseconds operator - (const microseconds& l, const microseconds& r ) { return microseconds(l._count-r._count); } @@ -52,12 +53,12 @@ namespace fc { // protect against overflow/underflow constexpr time_point& safe_add( const microseconds& m ) { - if (m.count() > 0 && elapsed > fc::microseconds::maximum() - m) { + if (m.count() > 0 && elapsed > microseconds::maximum() - m) { elapsed = microseconds::maximum(); - } else if (m.count() < 0 && elapsed.count() < std::numeric_limits::min() - m.count()) { - elapsed = microseconds(std::numeric_limits::min()); + } else if (m.count() < 0 && elapsed < microseconds::minimum() - m) { + elapsed = microseconds::minimum(); } else { - elapsed += m; + elapsed += m; } return *this; } @@ -94,7 +95,7 @@ namespace fc { constexpr explicit time_point_sec( const time_point& t ) :utc_seconds( t.time_since_epoch().count() / 1000000ll ){} - static constexpr time_point_sec maximum() { return time_point_sec(0xffffffff); } + static constexpr time_point_sec maximum() { return time_point_sec(std::numeric_limits::max()); } static constexpr time_point_sec min() { 
return time_point_sec(0); } constexpr time_point to_time_point()const { return time_point( fc::seconds( utc_seconds) ); } diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index 0051e0447c..3506096632 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -48,9 +48,7 @@ fc::microseconds max_serialization_time = fc::microseconds::maximum(); // don't #endif static fc::time_point get_deadline() { - if (max_serialization_time == fc::microseconds::maximum()) - return fc::time_point(fc::microseconds::maximum()); - return fc::time_point::now() + max_serialization_time; + return fc::time_point::now().safe_add(max_serialization_time); } // verify that round trip conversion, via bytes, reproduces the exact same data From d43a7e720eed473fc7d3bc8cba6bcd146184f124 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 5 Jul 2023 16:22:41 -0400 Subject: [PATCH 037/180] Fix build with boost submodule after catchup with main --- libraries/chainbase | 2 +- libraries/libfc/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chainbase b/libraries/chainbase index 07e7d20d3d..00556160c4 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 07e7d20d3d164bc56a926432f1625426bc1d29b1 +Subproject commit 00556160c4bafae6f2b19dc359f56bc56e76c11e diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index 8957a5ff89..3b00430669 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -115,7 +115,7 @@ if(APPLE) find_library(security_framework Security) find_library(corefoundation_framework CoreFoundation) endif() -target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Boost::interprocess Boost::multi_index +target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Boost::interprocess Boost::multi_index Boost::dll Boost::multiprecision Boost::beast Boost::asio Boost::thread 
Boost::unit_test_framework Threads::Threads OpenSSL::Crypto ZLIB::ZLIB ${PLATFORM_SPECIFIC_LIBS} ${CMAKE_DL_LIBS} secp256k1 ${security_framework} ${corefoundation_framework}) From 02011fb527eaa4f960f6415c639718ba3ffd2323 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 5 Jul 2023 16:27:48 -0400 Subject: [PATCH 038/180] catchup appbase --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index ae8944308a..0a82417e0a 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit ae8944308acb526a7ced103685f29b9aafe6741e +Subproject commit 0a82417e0a9ca521190c3f761902dd4267c5576c From a990d74a2965d7a4dae7c1166299c52611f549d9 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 5 Jul 2023 16:56:35 -0400 Subject: [PATCH 039/180] update chainbase --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index 00556160c4..3fbcd9c687 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 00556160c4bafae6f2b19dc359f56bc56e76c11e +Subproject commit 3fbcd9c68758f43f1dd7aacd34f0ce98f6714da5 From f7affe87a465a41e808ecc8e4e67c5261417820a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 6 Jul 2023 11:31:27 -0400 Subject: [PATCH 040/180] Update `chainbase` and `appbase` to tip --- libraries/appbase | 2 +- libraries/chainbase | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/appbase b/libraries/appbase index 0a82417e0a..f078f79ac0 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 0a82417e0a9ca521190c3f761902dd4267c5576c +Subproject commit f078f79ac031837a312db4dc54903fe4104cf859 diff --git a/libraries/chainbase b/libraries/chainbase index 3fbcd9c687..148aac7461 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 3fbcd9c68758f43f1dd7aacd34f0ce98f6714da5 +Subproject commit 
148aac7461fffbe8730ba0b55367dde6fdaa0e08 From 633fda695caa8e8f4f498e41a84634af35e41a99 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 6 Jul 2023 13:23:59 -0500 Subject: [PATCH 041/180] GH-1340 Correctly handle possibly missing prev_block --- tests/ship_streamer.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/ship_streamer.cpp b/tests/ship_streamer.cpp index f95e9eb3d3..8ce32f096f 100644 --- a/tests/ship_streamer.cpp +++ b/tests/ship_streamer.cpp @@ -162,8 +162,10 @@ int main(int argc, char* argv[]) { this_block_id = result_document[1]["this_block"]["block_id"].GetString(); } std::string prev_block_id; - if( result_document[1]["prev_block"].HasMember("block_id") && result_document[1]["prev_block"]["block_id"].IsString() ) { - prev_block_id = result_document[1]["prev_block"]["block_id"].GetString(); + if( result_document[1].HasMember("prev_block") && result_document[1]["prev_block"].IsObject() ) { + if ( result_document[1]["prev_block"].HasMember("block_id") && result_document[1]["prev_block"]["block_id"].IsString() ) { + prev_block_id = result_document[1]["prev_block"]["block_id"].GetString(); + } } if( !irreversible_only && !this_block_id.empty() && !prev_block_id.empty() ) { // verify forks were sent From 30161af688df71d2d458d79281850ca077e46ed5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 6 Jul 2023 13:26:27 -0500 Subject: [PATCH 042/180] GH-1340 Add integration test that verifies ship delta data available immediately after loading a snapshot --- tests/TestHarness/Cluster.py | 2 +- tests/TestHarness/testUtils.py | 4 +- tests/ship_streamer_test.py | 85 ++++++++++++++++++++++++++++++++-- 3 files changed, 85 insertions(+), 6 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index fa6a290eda..c512f4dfe1 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1731,6 +1731,6 @@ def waitForTrxGeneratorsSpinup(self, nodeId: int, numGenerators: int, timeout: 
i firstTrxs.append(line.rstrip('\n')) Utils.Print(f"first transactions: {firstTrxs}") status = node.waitForTransactionsInBlock(firstTrxs) - if status is None: + if status is None or status is False: Utils.Print('ERROR: Failed to spin up transaction generators: never received first transactions') return status diff --git a/tests/TestHarness/testUtils.py b/tests/TestHarness/testUtils.py index 9a08c68ad1..bca96d550d 100644 --- a/tests/TestHarness/testUtils.py +++ b/tests/TestHarness/testUtils.py @@ -159,11 +159,13 @@ def getNodeDataDir(ext, relativeDir=None, trailingSlash=False): return path @staticmethod - def rmNodeDataDir(ext, rmState=True, rmBlocks=True): + def rmNodeDataDir(ext, rmState=True, rmBlocks=True, rmStateHist=True): if rmState: shutil.rmtree(Utils.getNodeDataDir(ext, "state")) if rmBlocks: shutil.rmtree(Utils.getNodeDataDir(ext, "blocks")) + if rmStateHist: + shutil.rmtree(Utils.getNodeDataDir(ext, "state-history"), ignore_errors=True) @staticmethod def getNodeConfigDir(ext, relativeDir=None, trailingSlash=False): diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py index 7d3816dfd3..9ee47c9338 100755 --- a/tests/ship_streamer_test.py +++ b/tests/ship_streamer_test.py @@ -55,6 +55,13 @@ WalletdName=Utils.EosWalletName shipTempDir=None +def getLatestSnapshot(nodeId): + snapshotDir = os.path.join(Utils.getNodeDataDir(nodeId), "snapshots") + snapshotDirContents = os.listdir(snapshotDir) + assert len(snapshotDirContents) > 0 + snapshotDirContents.sort() + return os.path.join(snapshotDir, snapshotDirContents[-1]) + try: TestHelper.printSystemInfo("BEGIN") @@ -71,7 +78,7 @@ shipNodeNum = 1 specificExtraNodeosArgs={} - specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts --trace-history --chain-state-history --plugin eosio::net_api_plugin " + specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts --trace-history --chain-state-history --plugin 
eosio::net_api_plugin --plugin eosio::producer_api_plugin " # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin " @@ -123,13 +130,18 @@ trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True) # create accounts via eosio as otherwise a bid is needed + transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) for account in accounts: Print(f"Create new account {account.name} via {cluster.eosioAccount.name} with private key: {account.activePrivateKey}") - trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=True, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True) - transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) + trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True) + nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + for account in accounts: Print(f"Transfer funds {transferAmount} from account {cluster.eosioAccount.name} to {account.name}") - nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True) + nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False) + nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + for account in accounts: trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True) + nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) # *** vote using accounts *** @@ -150,6 +162,19 @@ cluster.waitOnClusterSync(blockAdvancing=3) Print("Shutdown unneeded bios node") cluster.biosNode.kill(signal.SIGTERM) + + 
Print("Configure and launch txn generators") + targetTpsPerGenerator = 10 + testTrxGenDurationSec=60*60 + numTrxGenerators=2 + cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name, accounts[1].name], + acctPrivKeysList=[accounts[0].activePrivateKey,accounts[1].activePrivateKey], nodeId=prodNode1.nodeId, + tpsPerGenerator=targetTpsPerGenerator, numGenerators=numTrxGenerators, durationSec=testTrxGenDurationSec, + waitToComplete=False) + + status = cluster.waitForTrxGeneratorsSpinup(nodeId=prodNode1.nodeId, numGenerators=numTrxGenerators) + assert status is not None and status is not False, "ERROR: Failed to spinup Transaction Generators" + prodNode0.waitForProducer("defproducerc") block_range = 350 @@ -226,9 +251,61 @@ block_num += 1 assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}" + Print("Generate snapshot") + shipNode.createSnapshot() + Print("Shutdown state_history_plugin nodeos") shipNode.kill(signal.SIGTERM) + Print("Shutdown bridge node") + nonProdNode.kill(signal.SIGTERM) + + Print("Test starting ship from snapshot") + Utils.rmNodeDataDir(shipNodeNum) + isRelaunchSuccess = shipNode.relaunch(chainArg=" --snapshot {}".format(getLatestSnapshot(shipNodeNum))) + assert isRelaunchSuccess, "relaunch from snapshot failed" + + afterSnapshotBlockNum = shipNode.getBlockNum() + + Print("Verify we can stream from ship after start from a snapshot with no incoming trxs") + start_block_num = afterSnapshotBlockNum + block_range = 0 + end_block_num = start_block_num + block_range + cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas" + if Utils.Debug: Utils.Print(f"cmd: {cmd}") + clients = [] + files = [] + starts = [] + for i in range(0, args.num_clients): + start = time.perf_counter() + outFile = open(f"{shipClientFilePrefix}{i}_snapshot.out", "w") + errFile = open(f"{shipClientFilePrefix}{i}_snapshot.err", "w") + 
Print(f"Start client {i}") + popen=Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile) + starts.append(time.perf_counter()) + clients.append((popen, cmd)) + files.append((outFile, errFile)) + Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") + + Print(f"Stopping all {args.num_clients} clients") + for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts): + popen.wait() + Print(f"Stopped client {index}. Ran for {time.perf_counter() - start:.3f} seconds.") + out.close() + err.close() + outFile = open(f"{shipClientFilePrefix}{index}_snapshot.out", "r") + data = json.load(outFile) + block_num = start_block_num + for i in data: + # fork can cause block numbers to be repeated + this_block_num = i['get_blocks_result_v0']['this_block']['block_num'] + if this_block_num < block_num: + block_num = this_block_num + assert block_num == this_block_num, f"{block_num} != {this_block_num}" + assert isinstance(i['get_blocks_result_v0']['deltas'], str) # verify deltas in result + block_num += 1 + assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}" + testSuccessful = True finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) From 6c14f56ebef17017381555e5841352e2b9c8b1a1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 6 Jul 2023 13:34:09 -0500 Subject: [PATCH 043/180] GH-1340 Make SHiP data available after a snapshot load even if no new blocks applied. Also do not repeatable send empty get_blocks_response messages but rather start sending first available block data. 
--- .../eosio/state_history_plugin/session.hpp | 2 +- .../state_history_plugin.cpp | 54 ++++++++++++++----- .../tests/session_test.cpp | 2 + 3 files changed, 45 insertions(+), 13 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp index 195d7da762..6b2e80f1d9 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp @@ -447,7 +447,7 @@ struct session : session_base, std::enable_shared_from_thislogger(), "replying get_blocks_request_v0 = ${req}", ("req", req)); - to_send_block_num = req.start_block_num; + to_send_block_num = std::max(req.start_block_num, plugin->get_first_available_block_num()); for (auto& cp : req.have_positions) { if (req.start_block_num <= cp.block_num) continue; diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 7d301f30df..d32842fd93 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -66,6 +66,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this trace_log; std::optional chain_state_log; + uint32_t first_available_block = 0; bool trace_debug_mode = false; std::optional applied_transaction_connection; std::optional block_start_connection; @@ -137,10 +138,17 @@ struct state_history_plugin_impl : std::enable_shared_from_this get_block_id(uint32_t block_num) { - if (trace_log) - return trace_log->get_block_id(block_num); - if (chain_state_log) - return chain_state_log->get_block_id(block_num); + std::optional id; + if( trace_log ) { + id = trace_log->get_block_id( block_num ); + if( id ) + return id; + } + if( chain_state_log ) { + id = chain_state_log->get_block_id( block_num ); + if( id ) + return id; + } try { return 
chain_plug->chain().get_block_id_for_num(block_num); } catch (...) { @@ -166,6 +174,11 @@ struct state_history_plugin_impl : std::enable_shared_from_this void post_task_main_thread_medium(Task&& task) { app().post(priority::medium, std::forward(task)); @@ -269,15 +282,18 @@ struct state_history_plugin_impl : std::enable_shared_from_thischain(); + std::lock_guard g(mtx); + head_id = chain.head_block_id(); + lib_id = chain.last_irreversible_block_id(); + head_timestamp = chain.head_block_time(); + } + // called from main thread void on_accepted_block(const block_state_ptr& block_state) { - { - const auto& chain = chain_plug->chain(); - std::lock_guard g(mtx); - head_id = chain.head_block_id(); - lib_id = chain.last_irreversible_block_id(); - head_timestamp = chain.head_block_time(); - } + update_current(); try { store_traces(block_state); @@ -492,12 +508,26 @@ void state_history_plugin::plugin_initialize(const variables_map& options) { void state_history_plugin::plugin_startup() { try { - auto bsp = my->chain_plug->chain().head_block_state(); + const auto& chain = my->chain_plug->chain(); + my->update_current(); + auto bsp = chain.head_block_state(); if( bsp && my->chain_state_log && my->chain_state_log->empty() ) { fc_ilog( _log, "Storing initial state on startup, this can take a considerable amount of time" ); my->store_chain_state( bsp ); fc_ilog( _log, "Done storing initial state on startup" ); } + my->first_available_block = chain.earliest_available_block_num(); + if (my->trace_log) { + auto first_trace_block = my->trace_log->block_range().first; + if( first_trace_block > 0 ) + my->first_available_block = std::min( my->first_available_block, first_trace_block ); + } + if (my->chain_state_log) { + auto first_state_block = my->chain_state_log->block_range().first; + if( first_state_block > 0 ) + my->first_available_block = std::min( my->first_available_block, first_state_block ); + } + fc_ilog(_log, "First available block for SHiP ${b}", ("b", 
my->first_available_block)); my->listen(); // use of executor assumes only one thread my->thread_pool.start( 1, [](const fc::exception& e) { diff --git a/plugins/state_history_plugin/tests/session_test.cpp b/plugins/state_history_plugin/tests/session_test.cpp index 3069ca5660..b1f13752b0 100644 --- a/plugins/state_history_plugin/tests/session_test.cpp +++ b/plugins/state_history_plugin/tests/session_test.cpp @@ -130,6 +130,8 @@ struct mock_state_history_plugin { eosio::state_history::block_position get_block_head() { return block_head; } eosio::state_history::block_position get_last_irreversible() { return block_head; } + uint32_t get_first_available_block_num() const { return 0; } + void add_session(std::shared_ptr s) { session_mgr.insert(std::move(s)); } From 0ee195e398bbdac9634fa82e20832e0a27f1bbf2 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 6 Jul 2023 15:02:40 -0400 Subject: [PATCH 044/180] Add package zlib1g-dev to dockerfiles --- .cicd/platforms/ubuntu20.Dockerfile | 1 + .cicd/platforms/ubuntu22.Dockerfile | 1 + 2 files changed, 2 insertions(+) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index 4296c802b9..24d197e7f0 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -12,4 +12,5 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ + zlib1g-dev \ zstd diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 52ace75948..440ec5dc0a 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,4 +12,5 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ + zlib1g-dev \ zstd From 0d7d2fd798196e025fbf5a6a7cc35a5012060b9d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 6 Jul 2023 15:58:42 -0500 Subject: [PATCH 045/180] GH-1340 Code cleanup --- tests/TestHarness/Cluster.py | 2 +- tests/ship_streamer.cpp | 14 
++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index c512f4dfe1..6606224611 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1731,6 +1731,6 @@ def waitForTrxGeneratorsSpinup(self, nodeId: int, numGenerators: int, timeout: i firstTrxs.append(line.rstrip('\n')) Utils.Print(f"first transactions: {firstTrxs}") status = node.waitForTransactionsInBlock(firstTrxs) - if status is None or status is False: + if not status: Utils.Print('ERROR: Failed to spin up transaction generators: never received first transactions') return status diff --git a/tests/ship_streamer.cpp b/tests/ship_streamer.cpp index 8ce32f096f..94a3c40fc9 100644 --- a/tests/ship_streamer.cpp +++ b/tests/ship_streamer.cpp @@ -154,17 +154,19 @@ int main(int argc, char* argv[]) { // validate after streaming, so that invalid entry is included in the output uint32_t this_block_num = 0; if( result_document[1].HasMember("this_block") && result_document[1]["this_block"].IsObject() ) { - if( result_document[1]["this_block"].HasMember("block_num") && result_document[1]["this_block"]["block_num"].IsUint() ) { - this_block_num = result_document[1]["this_block"]["block_num"].GetUint(); + const auto& this_block = result_document[1]["this_block"]; + if( this_block.HasMember("block_num") && this_block["block_num"].IsUint() ) { + this_block_num = this_block["block_num"].GetUint(); } std::string this_block_id; - if( result_document[1]["this_block"].HasMember("block_id") && result_document[1]["this_block"]["block_id"].IsString() ) { - this_block_id = result_document[1]["this_block"]["block_id"].GetString(); + if( this_block.HasMember("block_id") && this_block["block_id"].IsString() ) { + this_block_id = this_block["block_id"].GetString(); } std::string prev_block_id; if( result_document[1].HasMember("prev_block") && result_document[1]["prev_block"].IsObject() ) { - if ( 
result_document[1]["prev_block"].HasMember("block_id") && result_document[1]["prev_block"]["block_id"].IsString() ) { - prev_block_id = result_document[1]["prev_block"]["block_id"].GetString(); + const auto& prev_block = result_document[1]["prev_block"]; + if ( prev_block.HasMember("block_id") && prev_block["block_id"].IsString() ) { + prev_block_id = prev_block["block_id"].GetString(); } } if( !irreversible_only && !this_block_id.empty() && !prev_block_id.empty() ) { From 97aa4b80316b9a615846ab266da942de062010be Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 7 Jul 2023 11:17:05 -0500 Subject: [PATCH 046/180] GH-1354 Add test case for deferred trx id before/after protocol feature replace_deferred --- unittests/protocol_feature_tests.cpp | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 7447e9ceef..a6b9d556df 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -412,6 +412,14 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try { cfg.disable_all_subjective_mitigations = true; c.init( cfg ); + transaction_trace_ptr trace; + auto h = c.control->applied_transaction.connect( [&](std::tuple x) { + auto& t = std::get<0>(x); + if( t && !eosio::chain::is_onblock(*t)) { + trace = t; + } + } ); + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( "alice"_n ), alice_ram_usage0 ); c.push_action( "test"_n, "defercall"_n, "alice"_n, fc::mutable_variant_object() @@ -448,6 +456,8 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try { dtrxs = c.get_scheduled_transactions(); BOOST_CHECK_EQUAL( dtrxs.size(), 0 ); + // must be equal before builtin_protocol_feature_t::replace_deferred to support replay of blocks before activation + BOOST_CHECK( first_dtrx_id.str() == trace->id.str() ); c.produce_block(); @@ -507,6 +517,13 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try { BOOST_CHECK_EQUAL( 
dtrxs.size(), 1 ); BOOST_CHECK_EQUAL( first_dtrx_id2, dtrxs[0] ); + c.produce_block(); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 0 ); + // Not equal after builtin_protocol_feature_t::replace_deferred activated + BOOST_CHECK( first_dtrx_id2.str() != trace->id.str() ); + } FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { From 71ecec44f88c6a93a0d04dfb04cbf3a8880f00ff Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 7 Jul 2023 11:20:58 -0500 Subject: [PATCH 047/180] GH-1354 Provide transaction_context trx id for trace since it can differ from provided packed_transaction for deferred trxs before replace_deferred protocol feature is activated. --- libraries/chain/controller.cpp | 6 +++--- .../chain/include/eosio/chain/transaction_context.hpp | 2 ++ libraries/chain/transaction_context.cpp | 8 +++++--- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 65d679bb03..23e4077ebe 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1154,7 +1154,7 @@ struct controller_impl { transaction_checktime_timer trx_timer(timer); const packed_transaction trx( std::move( etrx ) ); - transaction_context trx_context( self, trx, std::move(trx_timer), start ); + transaction_context trx_context( self, trx, trx.id(), std::move(trx_timer), start ); trx_context.block_deadline = block_deadline; trx_context.max_transaction_time_subjective = max_transaction_time; @@ -1315,7 +1315,7 @@ struct controller_impl { uint32_t cpu_time_to_bill_us = billed_cpu_time_us; transaction_checktime_timer trx_timer(timer); - transaction_context trx_context( self, *trx->packed_trx(), std::move(trx_timer) ); + transaction_context trx_context( self, *trx->packed_trx(), gtrx.trx_id, std::move(trx_timer) ); trx_context.leeway = fc::microseconds(0); // avoid stealing cpu resource trx_context.block_deadline = block_deadline; 
trx_context.max_transaction_time_subjective = max_transaction_time; @@ -1528,7 +1528,7 @@ struct controller_impl { const signed_transaction& trn = trx->packed_trx()->get_signed_transaction(); transaction_checktime_timer trx_timer(timer); - transaction_context trx_context(self, *trx->packed_trx(), std::move(trx_timer), start, trx->read_only); + transaction_context trx_context(self, *trx->packed_trx(), trx->id(), std::move(trx_timer), start, trx->read_only); if ((bool)subjective_cpu_leeway && pending->_block_status == controller::block_status::incomplete) { trx_context.leeway = *subjective_cpu_leeway; } diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 0126dee936..83a428c84d 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -37,6 +37,7 @@ namespace eosio { namespace chain { transaction_context( controller& c, const packed_transaction& t, + const transaction_id_type& trx_id, // trx_id diff than t.id() before replace_deferred transaction_checktime_timer&& timer, fc::time_point start = fc::time_point::now(), bool read_only=false); @@ -121,6 +122,7 @@ namespace eosio { namespace chain { controller& control; const packed_transaction& packed_trx; + const transaction_id_type& id; std::optional undo_session; transaction_trace_ptr trace; fc::time_point start; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index bd7d32ff30..cde37e5731 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -46,11 +46,13 @@ namespace eosio { namespace chain { transaction_context::transaction_context( controller& c, const packed_transaction& t, + const transaction_id_type& trx_id, transaction_checktime_timer&& tmr, fc::time_point s, bool read_only) :control(c) ,packed_trx(t) + ,id(trx_id) ,undo_session() 
,trace(std::make_shared()) ,start(s) @@ -62,7 +64,7 @@ namespace eosio { namespace chain { if (!c.skip_db_sessions()) { undo_session.emplace(c.mutable_db().start_undo_session(true)); } - trace->id = packed_trx.id(); + trace->id = id; trace->block_num = c.head_block_num() + 1; trace->block_time = c.pending_block_time(); trace->producer_block_id = c.pending_producer_block_id(); @@ -271,7 +273,7 @@ namespace eosio { namespace chain { validate_referenced_accounts( trx, enforce_whiteblacklist && control.is_producing_block() ); } init( initial_net_usage); - record_transaction( packed_trx.id(), trx.expiration ); /// checks for dupes + record_transaction( id, trx.expiration ); /// checks for dupes } void transaction_context::init_for_deferred_trx( fc::time_point p ) @@ -700,7 +702,7 @@ namespace eosio { namespace chain { uint32_t trx_size = 0; const auto& cgto = control.mutable_db().create( [&]( auto& gto ) { - gto.trx_id = packed_trx.id(); + gto.trx_id = id; gto.payer = first_auth; gto.sender = account_name(); /// delayed transactions have no sender gto.sender_id = transaction_id_to_sender_id( gto.trx_id ); From 967e2178270a4ec6a4fd0f5bc16ffb015d61ab9b Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 7 Jul 2023 13:29:26 -0400 Subject: [PATCH 048/180] Update appbase to tip --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index f078f79ac0..02a08a374a 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit f078f79ac031837a312db4dc54903fe4104cf859 +Subproject commit 02a08a374a6018b9f9d067a7dfc35936d10a4c6d From 5ca2421b758cad9afdf12e9be96a3389b6e45da4 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 7 Jul 2023 15:51:14 -0400 Subject: [PATCH 049/180] Add `ubuntu-dev-tools` to .cicd Dockerfiles --- .cicd/platforms/ubuntu20.Dockerfile | 1 + .cicd/platforms/ubuntu22.Dockerfile | 1 + 2 files changed, 2 insertions(+) diff --git a/.cicd/platforms/ubuntu20.Dockerfile 
b/.cicd/platforms/ubuntu20.Dockerfile index 24d197e7f0..c60c53f5bb 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -12,5 +12,6 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ + ubuntu-dev-tools \ zlib1g-dev \ zstd diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 440ec5dc0a..fd943f7043 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,5 +12,6 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ + ubuntu-dev-tools \ zlib1g-dev \ zstd From 3a0960e00441f6e0a820009bc142b08d52cd84f9 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 09:00:57 -0500 Subject: [PATCH 050/180] Use full nodeosVersion. cleos time-limit option was added in 3.2 but is not in 3.1 so need to be able to differentiate on minor version number. --- tests/TestHarness/Node.py | 7 +++++-- tests/performance_tests/performance_test_basic.py | 12 ++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index 76b7aa868c..888048d6dd 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -75,8 +75,8 @@ def configureVersion(self): self.fetchBlock = lambda blockNum: self.processUrllibRequest("chain", "get_block", {"block_num_or_id":blockNum}, silentErrors=False, exitOnError=True) self.fetchKeyCommand = lambda: "[trx][trx][ref_block_num]" self.fetchRefBlock = lambda trans: trans["trx"]["trx"]["ref_block_num"] - self.cleosLimit = "" self.fetchHeadBlock = lambda node, headBlock: node.processUrllibRequest("chain", "get_block", {"block_num_or_id":headBlock}, silentErrors=False, exitOnError=True) + self.cleosLimit = "" else: self.fetchTransactionCommand = lambda: "get transaction_trace" @@ -84,8 +84,11 @@ def configureVersion(self): self.fetchBlock = lambda blockNum: 
self.processUrllibRequest("trace_api", "get_block", {"block_num":blockNum}, silentErrors=False, exitOnError=True) self.fetchKeyCommand = lambda: "[transaction][transaction_header][ref_block_num]" self.fetchRefBlock = lambda trans: trans["block_num"] - self.cleosLimit = "--time-limit 999" self.fetchHeadBlock = lambda node, headBlock: node.processUrllibRequest("chain", "get_block_info", {"block_num":headBlock}, silentErrors=False, exitOnError=True) + if 'v3.1' in self.nodeosVers: + self.cleosLimit = "" + else: + self.cleosLimit = "--time-limit 999" def __str__(self): return "Host: %s, Port:%d, NodeNum:%s, Pid:%s" % (self.host, self.port, self.nodeId, self.pid) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index a22b373c04..30c7f330d4 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -124,7 +124,7 @@ def __post_init__(self): def configureValidationNodes(): validationNodeSpecificNodeosStr = "" - if self.nodeosVers == "v2": + if "v2" in self.nodeosVers: validationNodeSpecificNodeosStr += '--plugin eosio::history_api_plugin --filter-on "*" ' else: #If prodsEnableTraceApi, then Cluster configures all nodes with trace_api_plugin so no need to duplicate here @@ -148,8 +148,8 @@ def configureApiNodes(): if self.apiNodeCount > 0: configureApiNodes() - assert self.nodeosVers != "v1" and self.nodeosVers != "v0", f"nodeos version {Utils.getNodeosVersion().split('.')[0]} is unsupported by performance test" - if self.nodeosVers == "v2": + assert "v1" not in self.nodeosVers and "v0" not in self.nodeosVers, f"nodeos version {Utils.getNodeosVersion()} is unsupported by performance test" + if "v2" in self.nodeosVers: self.writeTrx = lambda trxDataFile, blockNum, trx: [trxDataFile.write(f"{trx['trx']['id']},{blockNum},{trx['cpu_usage_us']},{trx['net_usage_words']}\n")] self.createBlockData = lambda block, blockTransactionTotal, blockNetTotal, 
blockCpuTotal: log_reader.blockData(blockId=block["payload"]["id"], blockNum=block['payload']['block_num'], transactions=blockTransactionTotal, net=blockNetTotal, cpu=blockCpuTotal, producer=block["payload"]["producer"], status=block["payload"]["confirmed"], _timestamp=block["payload"]["timestamp"]) self.updateTrxDict = lambda blockNum, transaction, trxDict: trxDict.update(dict([(transaction['trx']['id'], log_reader.trxData(blockNum, transaction['cpu_usage_us'], transaction['net_usage_words']))])) @@ -286,7 +286,7 @@ def fileOpenMode(self, filePath) -> str: def isOnBlockTransaction(self, transaction): # v2 history does not include onblock - if self.clusterConfig.nodeosVers == "v2": + if "v2" in self.clusterConfig.nodeosVers: return False else: if transaction['actions'][0]['account'] != 'eosio' or transaction['actions'][0]['action'] != 'onblock': @@ -642,8 +642,8 @@ def setupClusterConfig(args) -> ClusterConfig: httpPluginArgs = HttpPluginArgs(httpMaxBytesInFlightMb=args.http_max_bytes_in_flight_mb, httpMaxInFlightRequests=args.http_max_in_flight_requests, httpMaxResponseTimeMs=args.http_max_response_time_ms, httpThreads=args.http_threads) netPluginArgs = NetPluginArgs(netThreads=args.net_threads, maxClients=0) - nodeosVers=Utils.getNodeosVersion().split('.')[0] - resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=not nodeosVers == "v2") + nodeosVers=Utils.getNodeosVersion() + resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=not "v2" in nodeosVers) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, resourceMonitorPluginArgs=resourceMonitorPluginArgs) From 441f5d308c089690e3179d4f623ddf6db1f1c1a6 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 10:50:40 -0500 Subject: [PATCH 051/180] Read 
only transaction feature support was added in 4.0, don't test in prior releases. --- .github/workflows/ph_backward_compatibility.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 347cceedd5..e8fe161207 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -145,7 +145,8 @@ jobs: run: | cd build ctest --output-on-failure -R performance_test_api --timeout 480 - - name: Run Read Only Trxs Performance Test + - if: ${{ matrix.release != '3.1' && matrix.release != '3.2' }} + name: Run Read Only Trxs Performance Test if: always() run: | cd build @@ -180,7 +181,8 @@ jobs: run: | cd build ctest --output-on-failure -R performance_test_basic_http --timeout 480 - - name: Run Read Only Trx Performance Basic Test + - if: ${{ matrix.release != '3.1' && matrix.release != '3.2' }} + name: Run Read Only Trx Performance Basic Test if: always() run: | cd build From e6ea54b81755195cbfaf7a949909782488d08572 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 10:51:21 -0500 Subject: [PATCH 052/180] Read only transaction feature support was added in 4.0, remove unsupported command line options in earlier versions. 
--- tests/performance_tests/performance_test_basic.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 30c7f330d4..bdd2163b86 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -139,7 +139,8 @@ def configureApiNodes(): apiNodeSpecificNodeosStr = "" apiNodeSpecificNodeosStr += "--plugin eosio::chain_api_plugin " apiNodeSpecificNodeosStr += "--plugin eosio::net_api_plugin " - apiNodeSpecificNodeosStr += f"--read-only-threads {self.apiNodesReadOnlyThreadCount} " + if "v4" in self.nodeosVers: + apiNodeSpecificNodeosStr += f"--read-only-threads {self.apiNodesReadOnlyThreadCount} " if apiNodeSpecificNodeosStr: self.specificExtraNodeosArgs.update({f"{nodeId}" : apiNodeSpecificNodeosStr for nodeId in self._apiNodeIds}) From 8cb771a710071fb0b7d4aa8e02e158fb02cb2fae Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 10:54:19 -0500 Subject: [PATCH 053/180] Fix multiple if statements in steps. 
--- .github/workflows/ph_backward_compatibility.yaml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index e8fe161207..cb9a22baff 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -145,9 +145,8 @@ jobs: run: | cd build ctest --output-on-failure -R performance_test_api --timeout 480 - - if: ${{ matrix.release != '3.1' && matrix.release != '3.2' }} + - if: always() && ${{ matrix.release != '3.1' && matrix.release != '3.2' }} name: Run Read Only Trxs Performance Test - if: always() run: | cd build ctest --output-on-failure -R performance_test_read_only_trxs --timeout 480 @@ -181,9 +180,8 @@ jobs: run: | cd build ctest --output-on-failure -R performance_test_basic_http --timeout 480 - - if: ${{ matrix.release != '3.1' && matrix.release != '3.2' }} + - if: always() && ${{ matrix.release != '3.1' && matrix.release != '3.2' }} name: Run Read Only Trx Performance Basic Test - if: always() run: | cd build ctest --output-on-failure -R performance_test_basic_read_only_trxs --timeout 480 From ff1a83761f119f9f50122cb30ef60830294e0633 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 11:33:20 -0500 Subject: [PATCH 054/180] Shouldn't run read only tests in if < v4.0 --- .github/workflows/ph_backward_compatibility.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index cb9a22baff..40651bdc98 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -145,7 +145,7 @@ jobs: run: | cd build ctest --output-on-failure -R performance_test_api --timeout 480 - - if: always() && ${{ matrix.release != '3.1' && matrix.release != '3.2' }} + - if: ${{ matrix.release != '3.1' && matrix.release != '3.2' 
}} name: Run Read Only Trxs Performance Test run: | cd build @@ -180,7 +180,7 @@ jobs: run: | cd build ctest --output-on-failure -R performance_test_basic_http --timeout 480 - - if: always() && ${{ matrix.release != '3.1' && matrix.release != '3.2' }} + - if: ${{ matrix.release != '3.1' && matrix.release != '3.2' }} name: Run Read Only Trx Performance Basic Test run: | cd build From bb98e4d78de3bbc1fb076516f7c6064f0b6e5b6a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 10 Jul 2023 12:25:34 -0500 Subject: [PATCH 055/180] GH-1331 Use std::map instead of flat_map as flat_map insert perf is terrible for large maps --- .../chain/include/eosio/chain/log_catalog.hpp | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/libraries/chain/include/eosio/chain/log_catalog.hpp b/libraries/chain/include/eosio/chain/log_catalog.hpp index d56b623fe8..d86173390d 100644 --- a/libraries/chain/include/eosio/chain/log_catalog.hpp +++ b/libraries/chain/include/eosio/chain/log_catalog.hpp @@ -1,10 +1,10 @@ #pragma once -#include -#include -#include #include #include +#include +#include #include +#include namespace eosio { namespace chain { @@ -37,10 +37,10 @@ struct log_catalog { using block_num_t = uint32_t; struct mapped_type { - block_num_t last_block_num; + block_num_t last_block_num = 0; bfs::path filename_base; }; - using collection_t = boost::container::flat_map; + using collection_t = std::map; using size_type = typename collection_t::size_type; static constexpr size_type npos = std::numeric_limits::max(); @@ -138,7 +138,7 @@ struct log_catalog { std::optional get_block_position(uint32_t block_num) { try { if (active_index != npos) { - auto active_item = collection.nth(active_index); + auto active_item = std::next(collection.begin(), active_index); if (active_item->first <= block_num && block_num <= active_item->second.last_block_num) { return log_index.nth_block_position(block_num - log_data.first_block_num()); } @@ -152,7 +152,7 @@ 
struct log_catalog { auto name = it->second.filename_base; log_data.open(name.replace_extension("log")); log_index.open(name.replace_extension("index")); - active_index = collection.index_of(it); + active_index = std::distance(collection.begin(), it); //collection.index_of(it); return log_index.nth_block_position(block_num - log_data.first_block_num()); } return {}; @@ -205,7 +205,7 @@ struct log_catalog { /// Add a new entry into the catalog. /// /// Notice that \c start_block_num must be monotonically increasing between the invocations of this function - /// so that the new entry would be inserted at the end of the flat_map; otherwise, \c active_index would be + /// so that the new entry would be inserted at the 'end' of the map; otherwise, \c active_index would be /// invalidated and the mapping between the log data their block range would be wrong. This function is only used /// during the splitting of block log. Using this function for other purpose should make sure if the monotonically /// increasing block num guarantee can be met. @@ -221,19 +221,20 @@ struct log_catalog { if (collection.size() >= max_retained_files) { items_to_erase = max_retained_files > 0 ? collection.size() - max_retained_files : collection.size(); + auto end = std::next( collection.begin(), items_to_erase); - for (auto it = collection.begin(); it < collection.begin() + items_to_erase; ++it) { + for (auto it = collection.begin(); it != end; ++it) { auto orig_name = it->second.filename_base; if (archive_dir.empty()) { // delete the old files when no backup dir is specified bfs::remove(orig_name.replace_extension("log")); bfs::remove(orig_name.replace_extension("index")); } else { - // move the the archive dir + // move the archive dir rename_bundle(orig_name, archive_dir / orig_name.filename()); } } - collection.erase(collection.begin(), collection.begin() + items_to_erase); + collection.erase(collection.begin(), end); active_index = active_index == npos || active_index < items_to_erase ? 
npos : active_index - items_to_erase; @@ -259,7 +260,7 @@ struct log_catalog { active_index = npos; auto it = collection.upper_bound(block_num); - if (it == collection.begin() || block_num > (it - 1)->second.last_block_num) { + if (it == collection.begin() || block_num > std::prev(it)->second.last_block_num) { std::for_each(it, collection.end(), remove_files); collection.erase(it, collection.end()); return 0; @@ -268,7 +269,7 @@ struct log_catalog { auto name = truncate_it->second.filename_base; bfs::rename(name.replace_extension("log"), new_name.replace_extension("log")); bfs::rename(name.replace_extension("index"), new_name.replace_extension("index")); - std::for_each(truncate_it + 1, collection.end(), remove_files); + std::for_each(std::next(truncate_it), collection.end(), remove_files); auto result = truncate_it->first; collection.erase(truncate_it, collection.end()); return result; From 8651553d8a40361d20c45ba65fd162043b1aa66f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 10 Jul 2023 12:26:21 -0500 Subject: [PATCH 056/180] GH-1331 Avoid copies for performance --- .../chain/include/eosio/chain/log_catalog.hpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/libraries/chain/include/eosio/chain/log_catalog.hpp b/libraries/chain/include/eosio/chain/log_catalog.hpp index d86173390d..c934cc1445 100644 --- a/libraries/chain/include/eosio/chain/log_catalog.hpp +++ b/libraries/chain/include/eosio/chain/log_catalog.hpp @@ -12,8 +12,8 @@ namespace chain { namespace bfs = boost::filesystem; template -void for_each_file_in_dir_matches(const bfs::path& dir, std::string pattern, Lambda&& lambda) { - const std::regex my_filter(pattern); +void for_each_file_in_dir_matches(const bfs::path& dir, std::string_view pattern, Lambda&& lambda) { + const std::regex my_filter(pattern.begin(), pattern.size()); std::smatch what; bfs::directory_iterator end_itr; // Default ctor yields past-the-end for (bfs::directory_iterator p(dir); p != 
end_itr; ++p) { @@ -85,9 +85,10 @@ struct log_catalog { archive_dir = make_absolute_dir(log_dir, archive_path); } - for_each_file_in_dir_matches(retained_dir, std::string(name) + suffix_pattern, [this](bfs::path path) { + std::string pattern = std::string(name) + suffix_pattern; + for_each_file_in_dir_matches(retained_dir, pattern, [this](bfs::path path) { auto log_path = path; - auto index_path = path.replace_extension("index"); + const auto& index_path = path.replace_extension("index"); auto path_without_extension = log_path.parent_path() / log_path.stem().string(); LogData log(log_path); @@ -95,8 +96,10 @@ struct log_catalog { verifier.verify(log, log_path); // check if index file matches the log file - if (!index_matches_data(index_path, log)) - log.construct_index(index_path); + if (!index_matches_data(index_path, log)) { + ilog("Recreating index for: ${i}", ("i", index_path.string())); + log.construct_index( index_path ); + } auto existing_itr = collection.find(log.first_block_num()); if (existing_itr != collection.end()) { @@ -113,7 +116,7 @@ struct log_catalog { } } - collection.insert_or_assign(log.first_block_num(), mapped_type{log.last_block_num(), path_without_extension}); + collection.insert_or_assign(log.first_block_num(), mapped_type{log.last_block_num(), std::move(path_without_extension)}); }); } @@ -217,7 +220,7 @@ struct log_catalog { bfs::path new_path = retained_dir / buf; rename_bundle(dir / name, new_path); size_type items_to_erase = 0; - collection.emplace(start_block_num, mapped_type{end_block_num, new_path}); + collection.emplace(start_block_num, mapped_type{end_block_num, std::move(new_path)}); if (collection.size() >= max_retained_files) { items_to_erase = max_retained_files > 0 ? 
collection.size() - max_retained_files : collection.size(); From 776d6757153207c1fd5a1ac00957a2ca4a2498b0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 10 Jul 2023 12:27:13 -0500 Subject: [PATCH 057/180] GH-1331 Use LogIndex for verification of index to log data. Simpler and faster. --- .../chain/include/eosio/chain/log_catalog.hpp | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/libraries/chain/include/eosio/chain/log_catalog.hpp b/libraries/chain/include/eosio/chain/log_catalog.hpp index c934cc1445..c918ce487c 100644 --- a/libraries/chain/include/eosio/chain/log_catalog.hpp +++ b/libraries/chain/include/eosio/chain/log_catalog.hpp @@ -124,18 +124,13 @@ struct log_catalog { if (!bfs::exists(index_path)) return false; - auto num_blocks_in_index = bfs::file_size(index_path) / sizeof(uint64_t); - if (num_blocks_in_index != log.num_blocks()) + LogIndex log_i; + log_i.open(index_path); + + if (log_i.num_blocks() != log.num_blocks()) return false; - // make sure the last 8 bytes of index and log matches - fc::cfile index_file; - index_file.set_file_path(index_path); - index_file.open("r"); - index_file.seek_end(-sizeof(uint64_t)); - uint64_t pos; - index_file.read(reinterpret_cast(&pos), sizeof(pos)); - return pos == log.last_block_position(); + return log_i.back() == log.last_block_position(); } std::optional get_block_position(uint32_t block_num) { From 800a37e6055f73001c4ad496dd64b32126ec941f Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 13:03:49 -0500 Subject: [PATCH 058/180] Collapse perf tests back into regular expressions to simplifly and reduce future maintenance --- .../workflows/ph_backward_compatibility.yaml | 55 ++----------------- 1 file changed, 5 insertions(+), 50 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 40651bdc98..1ae2adfcf5 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ 
b/.github/workflows/ph_backward_compatibility.yaml @@ -130,58 +130,13 @@ jobs: rm build/bin/cleos mv tmp/usr/bin/nodeos build/bin mv tmp/usr/bin/cleos build/bin - - name: Run BP Op Mode Performance Test - if: always() + - if: ${{ matrix.release == '3.1' || matrix.release == '3.2' }} + name: Run Performance Tests (=v4.0) run: | cd build - ctest --output-on-failure -R performance_test_basic_read_only_trxs --timeout 480 + ctest --output-on-failure -R performance_test --timeout 480 From c63042bb93291c44fda1933a6fe7c8321e91631f Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 13:10:32 -0500 Subject: [PATCH 059/180] Try reducing steps using regex for deb name matching across versions. --- .github/workflows/ph_backward_compatibility.yaml | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 1ae2adfcf5..4a3ceaa3d5 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -104,22 +104,12 @@ jobs: # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs chown -R $(id -u):$(id -g) $PWD zstdcat build.tar.zst | tar x - - if: ${{ matrix.release != '3.1' }} - name: Download Prev Leap Version (v3.2.x and after) + - name: Download Prev Leap Version uses: AntelopeIO/asset-artifact-download-action@v2 with: owner: AntelopeIO repo: leap - file: 'leap_.*-${{matrix.platform}}.04_amd64.deb' # Ex. leap_3.2.3-ubuntu20.04_amd64.deb leap_4.0.3-ubuntu20.04_amd64.deb - target: '${{matrix.release}}' - token: ${{github.token}} - - if: ${{ matrix.release == '3.1' }} - name: Download Prev Leap Version (v3.1.x) - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: leap - file: 'leap-.*-${{matrix.platform}}.04-x86_64.deb' # Ex. 
leap-3.1.4-ubuntu20.04-x86_64.deb + file: '(leap).*${{matrix.platform}}.04.*(x86_64|amd64).deb' target: '${{matrix.release}}' token: ${{github.token}} - name: Extract and Place Rev Leap Version artifacts From cb447c579dfaba041661200e936849c1ebb0ad89 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 13:11:32 -0500 Subject: [PATCH 060/180] Try removing workaround, as it shouldn't be necessary here. --- .github/workflows/ph_backward_compatibility.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 4a3ceaa3d5..cb5ac49338 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -69,8 +69,6 @@ jobs: - name: Build id: build run: | - # https://github.com/actions/runner/issues/2033 - chown -R $(id -u):$(id -g) $PWD cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja cmake --build build tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst @@ -101,8 +99,6 @@ jobs: name: ${{matrix.platform}}-build - name: Extract Build Directory run: | - # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs - chown -R $(id -u):$(id -g) $PWD zstdcat build.tar.zst | tar x - name: Download Prev Leap Version uses: AntelopeIO/asset-artifact-download-action@v2 From 40d6b1f3e2cd7bb50b7ace71c183e11d162c0fbd Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 10 Jul 2023 13:23:13 -0500 Subject: [PATCH 061/180] GH-1331 Use better variable name --- libraries/chain/include/eosio/chain/log_catalog.hpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/chain/include/eosio/chain/log_catalog.hpp b/libraries/chain/include/eosio/chain/log_catalog.hpp index c918ce487c..ed18b862fd 100644 --- a/libraries/chain/include/eosio/chain/log_catalog.hpp +++ 
b/libraries/chain/include/eosio/chain/log_catalog.hpp @@ -150,7 +150,7 @@ struct log_catalog { auto name = it->second.filename_base; log_data.open(name.replace_extension("log")); log_index.open(name.replace_extension("index")); - active_index = std::distance(collection.begin(), it); //collection.index_of(it); + active_index = std::distance(collection.begin(), it); return log_index.nth_block_position(block_num - log_data.first_block_num()); } return {}; @@ -219,9 +219,9 @@ struct log_catalog { if (collection.size() >= max_retained_files) { items_to_erase = max_retained_files > 0 ? collection.size() - max_retained_files : collection.size(); - auto end = std::next( collection.begin(), items_to_erase); + auto last = std::next( collection.begin(), items_to_erase); - for (auto it = collection.begin(); it != end; ++it) { + for (auto it = collection.begin(); it != last; ++it) { auto orig_name = it->second.filename_base; if (archive_dir.empty()) { // delete the old files when no backup dir is specified @@ -232,7 +232,7 @@ struct log_catalog { rename_bundle(orig_name, archive_dir / orig_name.filename()); } } - collection.erase(collection.begin(), end); + collection.erase(collection.begin(), last); active_index = active_index == npos || active_index < items_to_erase ? npos : active_index - items_to_erase; From b7dea9bfde2dc023b71229ebd877d7ff61cd7acf Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 13:29:38 -0500 Subject: [PATCH 062/180] Install the leap deb package to bring along dependencies and then use those nodeos and cleos versions. 
--- .github/workflows/ph_backward_compatibility.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index cb5ac49338..779a6cc559 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -110,12 +110,12 @@ jobs: token: ${{github.token}} - name: Extract and Place Rev Leap Version artifacts run: | - mkdir tmp - dpkg -x leap*.deb tmp + apt-get update + apt install -y leap*.deb rm build/bin/nodeos rm build/bin/cleos - mv tmp/usr/bin/nodeos build/bin - mv tmp/usr/bin/cleos build/bin + cp /usr/bin/nodeos build/bin + cp /usr/bin/cleos build/bin - if: ${{ matrix.release == '3.1' || matrix.release == '3.2' }} name: Run Performance Tests ( Date: Mon, 10 Jul 2023 13:42:40 -0500 Subject: [PATCH 063/180] GH-1331 Remove unneeded include --- libraries/chain/include/eosio/chain/log_catalog.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/log_catalog.hpp b/libraries/chain/include/eosio/chain/log_catalog.hpp index ed18b862fd..a1f998c5fd 100644 --- a/libraries/chain/include/eosio/chain/log_catalog.hpp +++ b/libraries/chain/include/eosio/chain/log_catalog.hpp @@ -2,7 +2,6 @@ #include #include #include -#include #include #include From bc46133ef840b502ca725ff8fe6fd4f4a621233b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 13:43:08 -0500 Subject: [PATCH 064/180] Working to fix install of leap deb pkg. 
--- .github/workflows/ph_backward_compatibility.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 779a6cc559..18d68ff908 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -110,8 +110,9 @@ jobs: token: ${{github.token}} - name: Extract and Place Rev Leap Version artifacts run: | + ls -l apt-get update - apt install -y leap*.deb + apt-get install -y ./leap*.deb rm build/bin/nodeos rm build/bin/cleos cp /usr/bin/nodeos build/bin From b2b22ab99063086e801c4d42ed3a4cb9618b37e2 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 14:09:19 -0500 Subject: [PATCH 065/180] Cleanup. --- .github/workflows/ph_backward_compatibility.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 18d68ff908..b43d71c9fe 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -108,9 +108,8 @@ jobs: file: '(leap).*${{matrix.platform}}.04.*(x86_64|amd64).deb' target: '${{matrix.release}}' token: ${{github.token}} - - name: Extract and Place Rev Leap Version artifacts + - name: Install leap & replace binaries for PH use run: | - ls -l apt-get update apt-get install -y ./leap*.deb rm build/bin/nodeos From 265ee66b539e54f0564f2d1b3129f7a7cedc95fe Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 10 Jul 2023 14:15:54 -0500 Subject: [PATCH 066/180] Sanity check nodeos version. 
--- .github/workflows/ph_backward_compatibility.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index b43d71c9fe..2b017a5e7b 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -116,6 +116,7 @@ jobs: rm build/bin/cleos cp /usr/bin/nodeos build/bin cp /usr/bin/cleos build/bin + ./build/bin/nodeos --version - if: ${{ matrix.release == '3.1' || matrix.release == '3.2' }} name: Run Performance Tests ( Date: Tue, 11 Jul 2023 08:06:10 -0500 Subject: [PATCH 067/180] GH-1331 Replace boost filesystem include with std --- libraries/chain/include/eosio/chain/log_catalog.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/log_catalog.hpp b/libraries/chain/include/eosio/chain/log_catalog.hpp index 4df1d97a8b..70a60574f7 100644 --- a/libraries/chain/include/eosio/chain/log_catalog.hpp +++ b/libraries/chain/include/eosio/chain/log_catalog.hpp @@ -1,7 +1,7 @@ #pragma once #include #include -#include +#include #include #include From df0791c1d09bf925450fe0367f867dbde28798a8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 11 Jul 2023 08:33:24 -0500 Subject: [PATCH 068/180] GH-1275 Signal accepted_block after it is marked valid --- libraries/chain/controller.cpp | 28 +++++++++------------ unittests/chain_tests.cpp | 41 +++++++++++++++++++++++++++++++ unittests/deep-mind/deep-mind.log | 8 +++--- 3 files changed, 56 insertions(+), 21 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2c5476c58c..2598b03a81 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -443,8 +443,6 @@ struct controller_impl { if( read_mode == db_read_mode::IRREVERSIBLE ) { controller::block_report br; apply_block( br, *bitr, controller::block_status::complete, trx_meta_cache_lookup{} ); - head = (*bitr); - 
fork_db.mark_valid( head ); } emit( self.irreversible_block, *bitr ); @@ -1931,7 +1929,7 @@ struct controller_impl { /** * @post regardless of the success of commit block there is no active pending block */ - void commit_block( bool add_to_fork_db ) { + void commit_block( controller::block_status s ) { auto reset_pending_on_exit = fc::make_scoped_exit([this]{ pending.reset(); }); @@ -1940,24 +1938,26 @@ struct controller_impl { EOS_ASSERT( std::holds_alternative(pending->_block_stage), block_validate_exception, "cannot call commit_block until pending block is completed" ); - auto bsp = std::get(pending->_block_stage)._block_state; + const auto& bsp = std::get(pending->_block_stage)._block_state; - if( add_to_fork_db ) { + if( s == controller::block_status::incomplete ) { fork_db.add( bsp ); fork_db.mark_valid( bsp ); emit( self.accepted_block_header, bsp ); - head = fork_db.head(); - EOS_ASSERT( bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); + EOS_ASSERT( bsp == fork_db.head(), fork_database_exception, "committed block did not become the new head in fork database"); + } else if (s != controller::block_status::irreversible) { + fork_db.mark_valid( bsp ); } + head = bsp; // at block level, no transaction specific logging is possible - if (auto dm_logger = get_deep_mind_logger(false)) { + if (auto* dm_logger = get_deep_mind_logger(false)) { dm_logger->on_accepted_block(bsp); } emit( self.accepted_block, bsp ); - if( add_to_fork_db ) { + if( s == controller::block_status::incomplete ) { log_irreversible(); } } catch (...) 
{ @@ -2157,7 +2157,7 @@ struct controller_impl { pending->_block_stage = completed_block{ bsp }; br = pending->_block_report; // copy before commit block destroys pending - commit_block(false); + commit_block(s); br.total_time = fc::time_point::now() - start; return; } catch ( const std::bad_alloc& ) { @@ -2309,7 +2309,6 @@ struct controller_impl { controller::block_report br; if( s == controller::block_status::irreversible ) { apply_block( br, bsp, s, trx_meta_cache_lookup{} ); - head = bsp; // On replay, log_irreversible is not called and so no irreversible_block signal is emitted. // So emit it explicitly here. @@ -2335,8 +2334,6 @@ struct controller_impl { if( new_head->header.previous == head->id ) { try { apply_block( br, new_head, s, trx_lookup ); - fork_db.mark_valid( new_head ); - head = new_head; } catch ( const std::exception& e ) { fork_db.remove( new_head->id ); throw; @@ -2369,8 +2366,6 @@ struct controller_impl { br = controller::block_report{}; apply_block( br, *ritr, (*ritr)->is_valid() ? 
controller::block_status::validated : controller::block_status::complete, trx_lookup ); - fork_db.mark_valid( *ritr ); - head = *ritr; } catch ( const std::bad_alloc& ) { throw; } catch ( const boost::interprocess::bad_alloc& ) { @@ -2401,7 +2396,6 @@ struct controller_impl { for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { br = controller::block_report{}; apply_block( br, *ritr, controller::block_status::validated /* we previously validated these blocks*/, trx_lookup ); - head = *ritr; } std::rethrow_exception(except); } // end if exception @@ -2967,7 +2961,7 @@ block_state_ptr controller::finalize_block( block_report& br, const signer_callb void controller::commit_block() { validate_db_available_size(); - my->commit_block(true); + my->commit_block(block_status::incomplete); } deque controller::abort_block() { diff --git a/unittests/chain_tests.cpp b/unittests/chain_tests.cpp index 1be6138dee..c94c10c2bd 100644 --- a/unittests/chain_tests.cpp +++ b/unittests/chain_tests.cpp @@ -144,4 +144,45 @@ BOOST_AUTO_TEST_CASE( decompressed_size_under_limit ) try { } FC_LOG_AND_RETHROW() +// verify accepted_block signals validated blocks +BOOST_AUTO_TEST_CASE( signal_validated_blocks ) try { + tester chain; + tester validator; + + block_state_ptr accepted_bsp; + auto c = chain.control->accepted_block.connect([&](const block_state_ptr& b) { + BOOST_CHECK(b); + BOOST_CHECK(chain.control->fetch_block_state_by_id(b->id) == b); + BOOST_CHECK(chain.control->fetch_block_state_by_number(b->block_num) == b); // verify it can be found (has to be validated) + BOOST_CHECK(chain.control->fetch_block_by_id(b->id) == b->block); + BOOST_CHECK(chain.control->fetch_block_by_number(b->block_num) == b->block); + BOOST_REQUIRE(chain.control->fetch_block_header_by_number(b->block_num)); + BOOST_CHECK(chain.control->fetch_block_header_by_number(b->block_num)->calculate_id() == b->id); + BOOST_REQUIRE(chain.control->fetch_block_header_by_id(b->id)); + 
BOOST_CHECK(chain.control->fetch_block_header_by_id(b->id)->calculate_id() == b->id); + accepted_bsp = b; + }); + block_state_ptr validated_bsp; + auto c2 = validator.control->accepted_block.connect([&](const block_state_ptr& b) { + BOOST_CHECK(b); + BOOST_CHECK(validator.control->fetch_block_state_by_id(b->id) == b); + BOOST_CHECK(validator.control->fetch_block_state_by_number(b->block_num) == b); // verify it can be found (has to be validated) + BOOST_CHECK(validator.control->fetch_block_by_id(b->id) == b->block); + BOOST_CHECK(validator.control->fetch_block_by_number(b->block_num) == b->block); + BOOST_REQUIRE(validator.control->fetch_block_header_by_number(b->block_num)); + BOOST_CHECK(validator.control->fetch_block_header_by_number(b->block_num)->calculate_id() == b->id); + BOOST_REQUIRE(validator.control->fetch_block_header_by_id(b->id)); + BOOST_CHECK(validator.control->fetch_block_header_by_id(b->id)->calculate_id() == b->id); + validated_bsp = b; + }); + + chain.produce_blocks(1); + validator.push_block(accepted_bsp->block); + + auto trace_ptr = chain.create_account("hello"_n); + chain.produce_block(); + validator.push_block(accepted_bsp->block); + +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/deep-mind/deep-mind.log b/unittests/deep-mind/deep-mind.log index 27f01404ec..d2be7b1237 100644 --- a/unittests/deep-mind/deep-mind.log +++ b/unittests/deep-mind/deep-mind.log @@ -29,7 +29,7 @@ DMLOG TRX_OP CREATE onblock ef240e45433c433de4061120632aa06e32ec3e77048abf55c62e DMLOG APPLIED_TRANSACTION 2 
ef240e45433c433de4061120632aa06e32ec3e77048abf55c62e0612c22548ed02000000013b3d4b010000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e801006400000000000000000000000000000000000000000001010000010000000000ea305506d4766d9dbedb630ad9546f583a9809539cf09d38fd1554b4216503113ff4e501000000000000000100000000000000010000000000ea3055010000000000000000000000000000ea30550000000000ea305500000000221acfa4010000000000ea305500000000a8ed323274003b3d4b000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044423079ed372a4dda0bf89c3a594df409eaa8c1535451b7d5ca6a3d7a37691200000000000000000000000000000000ef240e45433c433de4061120632aa06e32ec3e77048abf55c62e0612c22548ed02000000013b3d4b010000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e80000000000000000 DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":0,"value_ex":0,"consumed":0},"average_block_cpu_usage":{"last_ordinal":0,"value_ex":0,"consumed":0},"pending_net_usage":0,"pending_cpu_usage":100,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1048576,"virtual_cpu_limit":200000} DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":2,"value_ex":0,"consumed":0},"average_block_cpu_usage":{"last_ordinal":2,"value_ex":833334,"consumed":100},"pending_net_usage":0,"pending_cpu_usage":0,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1049625,"virtual_cpu_limit":200200} -DMLOG ACCEPTED_BLOCK 2 
02000000020000000000000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010001000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba10100000000000000010000000000ea305502000000010000000000ea305500000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e8013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0020701fd1d2d6fbca71ad1df5bd09a987d6863f301b93acfc1c34857e4b2f53821a0b4ca8483cf594f845f3f4fc155dbbc98009cb9c7b7b60d449f922dc00abcb0f0000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0001013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0020701fd1d2d6fbca71ad1df5bd09a987d6863f301b93acfc1c34857e4b2f53821a0b4ca8483cf594f845f3f4fc155dbbc98009cb9c7b7b60d449f922dc00abcb0f000000 +DMLOG ACCEPTED_BLOCK 2 
02000000020000000000000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010001000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba10100000000000000010000000000ea305502000000010000000000ea305500000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e8013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0020701fd1d2d6fbca71ad1df5bd09a987d6863f301b93acfc1c34857e4b2f53821a0b4ca8483cf594f845f3f4fc155dbbc98009cb9c7b7b60d449f922dc00abcb0f0000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0001013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0020701fd1d2d6fbca71ad1df5bd09a987d6863f301b93acfc1c34857e4b2f53821a0b4ca8483cf594f845f3f4fc155dbbc98009cb9c7b7b60d449f922dc00abcb0f000001 DMLOG START_BLOCK 3 DMLOG CREATION_OP ROOT 0 DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1262304002,"value_ex":0,"consumed":0},"cpu_usage":{"last_ordinal":1262304002,"value_ex":1157,"consumed":101},"ram_usage":2724} @@ -121,7 +121,7 @@ DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1 DMLOG APPLIED_TRANSACTION 3 
04ba316cf9ddd86690833edc0f4548f8c07f0d66c09dca029b0a1fb96f16c62803000000023b3d4b01000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd0100d007000010000000000000000080000000000000000001010000010000000000ea3055302a2f1713925c939a997367c967b457bfc2c580304f9686b1de22fc5946e40616000000000000001600000000000000010000000000ea3055160000000000000001010000000000ea30550000000000ea30550000002a9bed3232010000000000ea305500000000a8ed32322035c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b0000000000000000000004ba316cf9ddd86690833edc0f4548f8c07f0d66c09dca029b0a1fb96f16c62803000000023b3d4b01000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd0000000000000000 DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":2,"value_ex":0,"consumed":0},"average_block_cpu_usage":{"last_ordinal":2,"value_ex":833334,"consumed":100},"pending_net_usage":9440,"pending_cpu_usage":40100,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1049625,"virtual_cpu_limit":200200} DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":3,"value_ex":78666667,"consumed":9440},"average_block_cpu_usage":{"last_ordinal":3,"value_ex":334993056,"consumed":40101},"pending_net_usage":0,"pending_cpu_usage":0,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1050675,"virtual_cpu_limit":200400} -DMLOG ACCEPTED_BLOCK 3 
03000000030000000200000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100012d5b1b639d6ae94fcdd0536b224644931573d1ccb2a0c548613cd1feea18888b0200000000000000010000000000ea305503000000010000000000ea305502000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a09139800000000000000205d7ce507e9dbea47687e80fceaf2794b22bd883902adeb8c97de9f7283b614b0590bc4251ba5410cb035f88e60ffdf6fccecd10d83edfe36021227d1ee9e18830000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0001023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a09139800000000000000205d7ce507e9dbea47687e80fceaf2794b22bd883902adeb8c97de9f7283b614b0590bc4251ba5410cb035f88e60ffdf6fccecd10d83edfe36021227d1ee9e18831400d0070000fb05010100203b7de491b51d3d74624078bc2c5dc4420985f0350afb6923a5585b5621750c9f126d7cff0efeade2068c7b618fc754b2abb5bff8cdb9bd0ecb4432b72ae1ed380100a82f78daed5c7b8c5ce755ff1ef7357b67e3ebc6d94c3609f9e662d0b8a4659bb8eb2575dbbddbc476694b9cca2dfea3b0bbd99d647776bdbb9e1da70e0adead081045158a7894b6405524a4d21424545aa8cacb0d0815a94891fa20414284ff2a025511a245ad54737ee77cf7ceeccb71f09a87545b9e7be77b9cef7ce79cef3cbf71f44fe94f1bf5d03d9f1951f447e343fdf3d87be873f2879efef473830dea77fff59e7bbef7f440d3bfd197d9f57368d1bfa54767949ab11b9736d48cd9b8840f7a0b372ed11f35136cf0436fe80dfac0b80dbc2afa67f84d6306e6063201ad97a8ff9234d00880f033d54c84469e48cd68b03c8b3ea54dd0909531c1fc52d0b0ed95c70e2dae4f3fd29eed5de8
b6a767e77a8b8fcdf6daf32a42d7cd6bdd76d9548e51317aeaedd5f5c5d5e9d9f5f576b7a72c9aa273ed73ebed9e4af025c3b4d595e9f9d9deecf4fae2cfb4558d9b09defcf4409f1a2aa7cead3d2e53ebddf6f90b8b40e6426f41a568ba89e04eaf75171f5b5c6e3f4ac8d519393476dbebab17ba73ede9e5c5738bbd75358c9e70f6e155c24ae17d44a6aeaeadaeb7e7f1327f61aedd5d5737a1d3a1f3e1e5d5b9a5b985d9c595e9b5d9eeecb9768ffae9756e8956e29db9475f6918efa23e77a1db6daff4a67b8be7daea00d316339982ed81b579743afff0f4238b2bf3d38be347558696da34d17361b9b778af3a88ef0707693c3db73adf56868958aed36dcfb5097257d61a2280580ef09890d1fac2ec3d6f1c57af61e4a877bdb74a6445ffcd681aa6a60b6bf3e02dda0ed993275414abb8369444511c0f0d594b9f517c8b1e31237624a07ff4371cd123d60e51efd0adb7da86ff63ab8f46725b10ea353d34145aad7434623774b17959a51baaf8d45f568fb8a6c3d9b5b5e5c7d5eb6a07b42a745a7bfdd83d47c727ee7bd39b87fe66539f0854767bbaa9b5dd3093f2d7a9078655417f5be683f4a5c81ecb752737e3f44d5a9f9cccad539d22ee1417cfe76a9c1a9c29b29e53ef1ad64e4faa62e3c4b0a9dbb45007e81ff5e90e663b4d2fe83d39aca9bdf8cdcb2a33ce1e489d4d8d4ac7b5def8415a6e29a755c64d9d66d262f59651832ba175dc6cd2f3ad0a40313352c533b4f3ffd03ada2854d3601718b7043ccf3b757258611fef0076d96d07d2ecce62649cc0127ae5968b8d4e1e38ddc96ecbb17da75c405b74f67c6e4ed034553cd1c92da19207457c3ed70f0c1b0c21ac685a71b19387d4d78c9c75da192c1c776901daf9131d02648088f62d173b2e62184ec68434c5f29bca465367881c84970c54f4d1c22c80549d0a2430a126fe9ede4b742b469a9637a28be0ed843e6191fd00d024d49de6bd366d0a5a6777d2dc74429b0dde36f5df9e6bec7a5859225a9339fce1c9dc60ae39a894d39e26292146a426345d7a93f272c2484b6b9e2e1154e1a0398c01a6a8778011febd839629d7b3d95d34d54c62415e4c31a2584ca6381a31acea26051d200bf4245168a23feb1ca6d5d2043cd2d9e1eda8f8f61f4e43950da9f42744a85e22fae9c3a08b2e5e0021137ecde82da8ded0adb2d78ef257a75be822622d65756a7949d1bae92fd774c0846b1104fa0872b354c43fcee7e5eb2cceaa08c0b2a62194695a9245a3dc961b6c411509c9112f456fcd80799088f838bb54d8415018cf5c23410b00c783082a10f50e84dded3abb44840118013088481f4a76fd881cda17441ad78fc81dfb8288bb7e440eef0b22adeb47e4ee7d4164ecfa1139ba2f884c5c3f22c7f705
91cb6a174cf45e9898014c4c05e33982a10750d17ba2a2050223a0592d1118361ae9778cd51be612eb3957aa3975c4aadc4cb9a78eab14d660aa456f43fc36466f357e9ba03728426c01e32d8f870db33cdef01bc66b7ec378b62d9fc883fbd4017a0b8ae4b1fbd44dfc96d1db30bf35e8ad8e193c2eaec645d5b8b01a17f0fa0d5edf1c57b70aee99c7e5f60a97d10a97db2a5c1abc0b8cbbb9dae36baa3d1eacf69809ce8a9118e10581c42db234bd1d1264d57dea2e2107b5fd4035eece6adc1d6459c844b286602bf4adefd3fe7f92f6da533efd522076fd194daed5619535e0fa38f56e78155bff121a57aefcf1b77ee7d73ffde2d44f929380af57ae7cf6db5fc35720b9b9b9f9fca7fff04f3e72cf43c356be5efe95ef50ef43c3817cddfc230c7ef770e22c7c910f12ba05b9544fd1d3d923f6297dccb263414ecb8f8ed693d42f71e55b1f7e71ea3dbcc4339f7cf1c57ff8e047bef6f98d3ed0bfffbddfa0efef1e8e05ea3c3dc8c59e119833c76c4b409205c8de305a8f539ef639d94705e5437ffbf257805a244096e9419a6541802c1cb3ce03719decded17a94fab537bffde13e10c0fc28808402e4494c08c8c5f6fbdba4fd251e4ed2c9de385a0f531979861ee1b8392de34e1fb3137ed844273b365a0ffcb01e3da271b326c3d68ed9861fd6e8643f365ab77ed83be9118f9b5332ecd4313be98791a20538e3c73d013cc6cd451977f198cdfcb8ac931d1fad6b3fec7df4a88d9bb332ecec313be6878d75b2b78c52f891dd415f9ed190a6d7283eb3194e0bf99b27b324fdb2d131046c8ce4ab19389231e8eea0198a568f24ccc8823c7e4064cec5c507d8f58eb3db9a86d1a0a6039d62ed3cbbc37007e32c240f3f2848d65b2e98526010b5769ab010ae038f30f1b0e277b025f8f92fc012a09310635fd260540df077b6d2bce4647f5eea12572b34fae9bc53d4007b414c1f3719351cc2e45a47da98c714f14094031716fa8220d5eabc4ea926751db1ae09479bbacec3d7e6082462fb1461abca25c5157dde4507b51a2086c978c36344650a3d2378e671fa73468757a36d79743d753d30ed296b52d09ec5612f0283b22d4fd91dd44c795b25e102f218997a4c0750d45614c9842289d0ac0145dae9d3e6886dbd0245a283666f5a0cf7652e3b927edb50e84a24f9b8b911f2f6450ad6157d667654f6725c1e13781095c6095c40a756866653a3bc550e555cd032934211daf1045303a7069d09efb9ea4c8ed96760595ee05e97205a1662d29e4bb22a1c7fa6ae9359cfe89cb9c55d2f6881ee71268c99452f700b562d5b1a1523aec20199181db4bb70e1e346d870f3e0d1c79cac96feaa3511197562c7a6be91227a4a1e93f2382d8fb3c29aa3f218ab38045e819050
a478bb8c2816e738036dbe496c7b2b734d58365171658c8f34c2d75d5846ebcdc8eced1c6b0d722c138e3564d24cae847bf4581304060ec559728fe871baa9f138454a891e93cda1abf069c8c125c2790976e1d4a6de7960ee4ebf6775c207e6867108142639236748b4227fcf8884fefb560ebe02cf66fa3cdbd4b229614a764ab856bb1ad78840bb706d53ced910b85613ae65c0d8d5ae81718cc54bb2c31a2ca4eaaf98418892b289d978cc2ec8db647f6dac54cd430309821d9c450e083949b2b45f31bbb673bbb9f7b9f5d2f05e4e35e586844ea48239adfc6095dd46019b2246227596a5a3900f24d5c897ec33dbed18927e2e14b3ff4db5b71e8e2b5d9c94ba38f1eb267d5d9c6c93aaa4b4fd7071f6949a44a4060a93c5252b46af76aa9f17f9a8ed38d5a72be161d1b986537d7a40386604cfb395626a99fbd91010518ab173cd9a77ad2db8572bbef6ec575ffbe030ab7ea44c3397c7d43ab6ec7d8b182e223fcef421e535c0d2a77032e9f85b56ebe8815339b682d93966a4d726348cef82e03b431009d0e9a53c06b221840833428f28fca9af13a231231a6e4174461ef38209a000d1b08f682888f2bc15993a2f324be42e6596e6cd88d6f1d0e22c4fa5fdf440fb99b23d19907119c6f957efacdd4fed792a6a1ab27f2015ce672d957a25426f3763619dfd083b3a2f3e074727ad952a33fd4598347de34ddae92d7af1ecdede06fb1ba52dfb22f46243ccbad8b2c957f040763767c99ee6ec2a0ec8cc80ffb1b6c5b5d8d59c5d456f95562cbc8a15bb8c8481bec479f2cb8a83576477103b2134297833766a03e859f16345c3e5014e2ce144f8fbe347e87338f7d17ff9cc37de40bccf5038390595c4d11069b50772d522cd826f2758303e7b993d600b7e247ed49492c8ee0436d4cac3615d2f87d4113d31a3127ecb3a651878d20f7e6058a7a20b8abb3b790492d3493b816202e9da850e1020c1715cd2e19ac0034c1412e8900b3329c7b818a4a038c326b5442e947a482ee11feb6eff967ecc4af4b0a93df57212ab2306e25629e6b054cca1e742d857cce136e90dbd62862e15511a70ca4eeda2a343d6d1c66ba3ad815acb1c45be8e75370825dac2727c717440afb364676ff3ca3de21e7a1b14e6ad2e40eca2bd1db718648f2a151f5d9be326fa1af179c04a964f23407ad373ff00fdbc66e20a9868a6e24b34d070054ab45329e15f30da6e38613b54129f42944b2cca25c1d2568a599fe40cc08a40086639cbca8bf9c04cb15c21c6dd3f90287bec23b44687a34186a6010df5a3dc6e83a6fb395d55ca871ec8e932b4f4dff50d2261b00709d51e2095b84c7b8084d0ecdfa6bf6e593346bcf1a069a6147c3bae9271dabb19d2f18e2ca7f470d0d4db7989efc2d4
71029d4b6e48579071e69a73cee2097b75459d7711f21379d4fbfd27096e54c49d664487980c1249ee79d2435ea9f20e12d9526d891c083a7af613b97950aaaa2e5ecadeeb7bcb8de5c949d699d0facebc0b03a983cc81613726c1eee85b728274a564f0835229d2eeb4f5cbd2495adaa14e7857b52a5bc14dd007466aba21a8e469a2b7d124d84a934068120dd224649a18a189014d42170dd0049ed95b0cb248f5bedcb868a9703bd0447291c8da1c40b3e93940be207c54a4a6b886bc7b117510e2401155977b7f1545d441506511065af8da8aa8bb2162b13bfbaa8ba8af0e9143fb8248e3fa11b9635f1071d78fc8e17d41a475fd88dcbd2f888c5d3f2247f7059189eb47e4f8be20b27b11752f4caeb188ba072aba84b05b11f5b7c52f0ff7d1fa243badcfa0a68d5cb2cdfa88ed89c5ba180a3b617822313ce4122f650f55db492aa32ac3c5b925e55d591f52c61c4103346f04d4499660a128307e701712259ca6a0686e2bb738620389fe53f74397cc27502417c677740825f24bab6b48755e104ec1521e88c7b8f1ce61d6e6e46052e81dba402e3489b3cf8fa03f5130266727d7127d87f065450042870b65e4efa896783641cea40b386e534211cd496d89d4789ce65d6a7642602ea55261d877e1a00417a5b0469efa6b46c81821b6fe0b6b62899edd12a79ce47a13416de4108f3b1855443db8d34456556e6d69dc1c433585c2a0f0a4bfcf147074c48d4027e4ea1c9132aceea269dcb2cb0ee54c30d0ed0301b22bf0edfa910ba49183f2e21b12d20588700a0d3bcc63b343a374ba98ce0a914bc8ac629a6cad8684a5810d61c3622925253cf062a7b86bcbd8d82585e3b1a0d551445308dce98108b526112af5d4ab6b75779010321fe9dd61c70f725aa32665158d143697eb10a2b01cc41c82e32d92405471e94a3e90612401c97eca45083c25b8268fb4d1d41e0ce8076632174bd2a67fa5ad2106a2649c079c11d2888b9504c57fc69b03ba4896dcfc1037be2c3b66998e24f0e18f983d667203d9e6e771760b4d8c789c4cfcd873c20fe2dfe94e19df97c5a6b314ac09050981a3ac1d5bd9ad0c0195f7337251b13375c94553fa09faf8d9f7de4e6c232e51b0fa5d4d7e93d4cd82c39c1c3a46b84cf2da25da4ffb1217d21d874a0a071c1712754422ac5c05e864ef1b958188092d5f02909091a01ecd43cf46f60724b28fd9aa7b26c6583e41264cea100a706249b344b44b6622b49296b48eeb94c50a30904f218e9b5c4f844a75c8b130982d4c948a59fa211b0a0b858d14ae8b0ae228c9ee0c4228a4b96bb72004210dc270e5d930600b1c3026c54f683635ab00d6fa688af860cb443a244c1583c0389a4a7e01d9bc3728f5641e4c4d3cf524498b2e363ad80cf5
b1f9206340d0ab2081149a08de95e7fc098c40c9b084430c670cf840c2c30f80c1001c72a3194cc61aa744850e3d04b1b03d3ab8d9413ec822bd068f000b0550d7b21ea77848e6d0820405be34e44ba3c3bb979b21d294f9a6ac6c324898105f3eef85321bd08c03a944affa37399518f854a264b612a46b78e9665837e93605c7df919d97b17e9c682fbe3dbc5d7dd9d216f910179773b795c36d3596d57b7a3f85d95244a87095c41ae3ab3cbe7a2fd4522e197c1fc80d02f26553a9bb6d92b5975c9529ea3da1226175581e8e9d003afca4be5a223c8d1dd6b1ca4d86d089879b7c07a5515d1e6079e220f730fc4f674e6e99ea7c4a6fcbec5b315b97b3f59eb3ab0923db26f00ea026b3fed1701dc9cabe6d5492748924e97c0ed7882d6435fae7b86830703b4af160f1a12cd9b407799af2ae171cad3c821f620a5c698a59f511d988b0c5f7a8016e3f291dc2ab0777d1456fbf1dd503b80a996be23700e23d231d6c71ef05b7b3011d3bf7fefb062960728e82342d8b6b900cc5e50dbec311c38292e1586a4afa350f91f328e15902d5b4151ce636bcf6509cd8a85526bf902f5e62d5e00b4f7cc58ebdddca313462bd02c9e921b5ca387a6374204d9fd7261057f07f5de10d68ba6d6a8ec28b4a668ed804fecbeb540c5394c5d81d5f712a95e0a70ced28d8eedc5edb8e1a7e478d6bd851c38f7ba51d855e77e73bb7c585403f322b4766db062503831a25811a7bd801efdd8148311e194556f468346b4cab1ae221176535ef4aa65ff6d6eed590ea1a69b4cfc4317b11a74ca76571b9a9bfb6b2295454fcae08e7607b2565b3aaa404a2baab4a4a807d04be9262717acec8035703032e989c159d754a640147f079ae90f81a37d0872a65dff3ac04ce72a710f181af81841c78579d196a20b6ac8184acb2b8936f32c9302e78707dade56f56a20632263d6b825352ba0e16c569cb65eec0578e41c4c1dab154bf387e0dfaa5635b2e17c0a3adc0700c2faa861597e8700e1ffad5e320f5fa3b9b280b2c81e86e0616488598c1f5dbefe7769ac8451714c7a02d898f57d1edb4a36dea1dc96dafe17d65bcf82a3dd99b868e47bf293ef9d5676f19d0f2b401d6f296b53c59956552f441a5e80df39698a53c4dfd83ec68f9e6aab746f596f937291396399eb1dd6d848574f66d44c0587438c5cd2ca9ec036cf37f0b0de3ebb0c8d80d9a1672b079a95dac8b45a2e2f439ee36e2e48b8db192b550550564771bc377292cdb98a735bb4ffca3a5fdf47ccec8e3b4f77ce450ca314cf8d69fe8047a3f22878e20fcdaff19f79e7434a3c746ebefac0dca7bf7dfbc36328542a6edb820b046600432719855c908c5604614532916a51dc32363fdba353d22d40c25b264e141fc88e82de6f8
51fa0349af1889da620490914b38808c3880440e860248c3c16513f65ae35786fd00d2ec08206309203d9c12f92a808ca6b80254c19100d29401a447c5226ea72f6500697d00197b3be92355e5d713a3238999b16dc1a2646ac606e245d6be134c3ebc8d41b32bcfd0ec6ed1e3c48a97becfd8ffff8cf51750b65c46aa38fcb211ed36e06ddc30edc657387689ea5ae68c04575f54db8239f95583c21d259e3d51a9c80984574c3ab62bd2debfb351fa2b49df5f09d88a559dc9167f25e0247f69659ca9fc9586f82b6ec05f69f5fd9506dfb13c25f8bc593c83898168ef7819edb16790fea93656c29531b92dc3e9b631e7adb35c01e3727499d6e15008d849b3385d64ef9638319907d92dcef6af04245d64f6d8be210d990cdc472248b8432a9797f8f46523e3e668992de55ca7de35d729a1aa53e9b3b8ea53ba3241e5b634cec1ad82dbf229f257908c2c9ec50b0e635956966141f1157268c47b09e0bdc470e7254625ff212e1ae2bd9832f41c702bb4fca25bfb4b4174e61acb79826461243f15364c32fc34462ea121730a88b0635c868d7c0e5c2e0918c13f3ec1ee2049d102d7fe49ea16fc85002be94fc0ae8acafc3b702f455adcf7b5f2e46906e10294915cc077a9785d5d9574627f8904bb8a21f13edb8a7ed9063b20a15ccd22152117b762a0148b24c4e5c5ad7e469696ab344d799b2b4dffd1a6fc93fef49d8fcc2e2eb7e75d6fd5cd2e2fafcecdf6da6e6df6d1f6ba5a7db8d39eebd197f575e95fecb5bbb3bdd5ee34ded7ddca6acf2daeb87317967b8bd38b2bf3ed8b8a7f0c99def9fe2e0d55ed6e77b5ebf07f5b2cae3c5a4d567cacd310ed8a33e0e9bd73b32b0036476db4baacbb0ed8bdd98797a9e111374bfd0bedae9b5b5de97567e77a8aeb00e9eb77e0786e757ef191c7f744efe581e5fcd06b5cee63cfa9f44df21f4350bb47786176e551225777f1dc6cf771b7d47edcbd7fa1bde22163d7b32b1ebe62cd9ae66bddd5deeadceab2f3ff71488969ffff18e132651a3cdac61cb22ce9dd1756da17d70806ed50684aa83eb278b13d3ffdf0e3bdf63ab05cef752fcc097569ee1f349552ff05ee7357f400d00700008101010100204b21f3cba072cc493e70861540df4677b498b0505a8b8e2a346b85a0c2dd2fc4263c4a7d8629026c4eb594ad96fac2bfe5f8ffebb9c841c353920b7b8ec11abc0100d90778da8d563b8f1c4510eedbf7e37cf209d9e60808402496c0dcdaac4e8ece01090112afe83043ef74ed4e6b677a86ee9edd5b3b2121b049888d842c84c0c1456702eb20b036424242c2e00408800c24fe03d53db3f33a58e860b6bbeaebeaeaaaafaab7f55bff9d1a796df0e5798263c37cc89f2fbe657e1eb8c7cb92e0de5f83c1eded95e4fded
2d08150faf5ea5237e69f7855db2d3c199e351e5915a339c0b900d4103681849dff5c09daa3818bc34ec5057f319d54036b6c640752cc1617c024a17515d1a6b2f945c2f48a3ab3d09ca0b7dd68ab9d097078d292cd4267e9c39f089a70faea351378c85563b11c8802bf44c383eccc0cf20cd39e55a9d31df4c766ee487eed4f528174e4425baab412ab2fd44400f1dab73046827567402f6ece195a73495139455b44ee4ead4bb1db3594b2a94b929fa51367179f0f4882adc00722dea6c6edb0798d3452a7fd60d858643ed8c2598c8297bf18227220efe2f948148a1851bbb515c72a47ce34cbbeec655133b0106781de0c9aa059f8f41f3200b19833148090c41870e1c465c528b9b73c1c2798a3a57b5c2c0cfe276de28b9f0b90027552b7e6375c085d35a0691f6ac7a7768c39351b2a4eabb54b8e0dba3486d2b597131b1f0b3553ab68cff9c15a9dec3adc83b0327b5764a645b3bbd7c77b2ce294f6a755cf4a278e473d7c1692b91a74e75d083a9b5d828596cb8218364a6175132eb4b782fe61202581d2b906ec926dcee4a2cd2302de6ec9354785ea52d5bd5900bda21ea652849adab4030243b676debdc60af83126d32d91c2d34a85341c20682e6d233ab41b8f02f154e6a05e4e9b897c2b319c990c52e3a859123b533d932bbdf76c276c527c2e4b21ceb4d8cd8aa8bb1b56dac6d90260d1b8db10c036bbaa54063abace4ba8ea2241c3da3f77980ddaa92bd2e7628c7629ab617f54c2527174b05a6ae8a8236da3229af186acd0293fea689c65e7716ccb0eb61a892b5e548eeca2475a55ec7d3d32658c78357533c329d62a2b5eda28a6cb492c93f3758e35524f9ac128236578e11276e742c286468aca330a42cf661ab98b783ebbd58643cafff27cf7b71c4685a678db575669c5f1543c3e0735af70bef07a975ec4a819b769132cbcc6379f1637c36f3278f7c7debe2cb1f7c7eadd434c8feb73fdd3bfaf4956223c0f1fcb4fec587792193fd4fee3cc31edc2956278e5f1fdd7cfc59566c1fbd39fc19d8d14999a138ee42707492b171f5c0afa848c877af9e78c7cb22f570ec3f77fb789951c882be4940930cf4f0d1db6fdc5f16528fe3ddaf0eee2fb324e3d8fb1e057942cd851ffef1fb8fc5fcd920f8af3f2e66c9fcffb84b7ff865b7ce875708c9ff60d8f137aa5a1fa900d00700001001010020742877c36a520b152b1337ea1ecd37b0c98ad07289c32fec392e7eebab9f0ac71f7bc8c718cfa75317b2e15702372a9222c4616783ee7b3f0ec6358f8c328eea00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232201a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365
d02dc4379a8b72410000d00700001001010020058e23368b919493d6ac61d27f66b829a53893e88ddde857d3b82d913960960d22fa36f397752b98c295e3b31927f740127c0a99e76f8bfeea88f44466b8fbfd00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea990000d0070000100101001f43fe868e263d8134cf705aa85e26ce78ebb058edd558865fa3d240f5cb9e50c2389e9c8276eac800b7233a552045b2e79124c97e5156a0649849cc7f5d09eee600005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f0000d0070000100101001f29e82b08ccf15e2187f29fea11ee3f4974f41b51e45b19f353348d8848b86fb71cadd88630456b7a1c60803c7b402487d41fbf18f0b0a13b4cca1f740447938300005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff5260000d0070000100101002047a8b784c3765b5c63ac52e3d8461b80bc2d3e3f62434f8accb277d9f2487cfd3c0728fcd26b5119a11288e5db46bc5b547877e220971609d1cef8cba443340800005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed32322068dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974280000d007000010010100203e701fbafd4149bc95b55a6bfc3b78246f5c2668ccc05ed4059a36ceb38f140b31e3b69e15f2579571e5bde39e034947271599c200e540b3949112bef163074c00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c430000d0070000100101001f0cc7352e60f4f8476783d6d1b48766a111c56fee2c1a552e76a75c92bc17de172f994ffc854c09717c904054819ca7a17379ddecaf531c439b35337ba099b81300005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232208ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4050000d0070000100101002040965063a83be2d53b36c8d7e0775f503c2caa1407e586314562aa
ce52c272fe60659e196413a6c9db4168470bcabb9a5851121c10c7b665f363f6cd4d1e4bda00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232202652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed250000d0070000100101002074ea7468b2a031c4cd53bf10ec3ac66b0c4b5c8779e045f1ef8d9c7b116be649217ff340107d0163397b99918ee2ce822b66cd6fce7b385af97a04671136e2ee00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0000d007000010010100204dfb21ca5140582379bc026792c16b4cf97827143a4a9cd99ae70b3e6016cd6316bcbb9f1cb1233f12a0bbcd9debafa64724d0459b5c8d3cb67ceddfb2e3962500005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d670000d0070000100101002033446a3a94ade71dff3edb786259679487ab701bbc147490b1d4159fecf545fa22fee0698db16bf616465e5cebb985bfc4d9ed1ec4a55e38997dd4b4bbc427eb00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c20000d0070000100101001f3f67edd35bf731a07f40c638e8812112cd7d1baa39ec7dac4a1b2f0c83ac8bd53689b56dba69a7386e3860a6f8976695ac0bc2b5dacae91080f1d54df2dac0c000005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b44767070000d0070000100101001f1e030564013603d54f9e983b63cd940f8ff09ae038b14813f4021bb0c09ebb640d90cb4f8d57be2809f492a51737b671a5f549d4efa8e7efdaeaa9663c09d1ad00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450710000d007000010010100205cea642eecf05568ce8c5564e63349eea3b816108914ba2ab5efffbb8ea467265f0b6d474f03ed02a3bf529fd6e55a595cbf8dd1adf4311cb9c51e862f8a535400005206e10b5e0200
5132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232205443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b40000d0070000100101001f4556076cc86e0840bf69664f1ef8fcd4d91abda313d08e7840d24ba45cb429cf12b7d3a1f64250c19d1b975e7b107853beff70ebfc4c27c44f825dc05cdc9cd600005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e990000d0070000100101001f354d903ad0f2c6cc9d9a377d681ffaa00475d1e559e48074b4c8cce3111d5c172903b2f179ad4d736dda4e7d1b6a859baeab9dde5e5e495ce09733ec4650634400005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb400000d0070000100101001f1766fa716a828da244c9ce52919b7a19acb38dbd110d1bb0039bb2477c17e4465dceecb8330ed5ee9de1330930dfcfa1a5e8149ce8536a82c0093642adf7328200005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232206bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc0000d00700001001010020488923db1c78fa430a3a9eab75f4ee467c7b9a3d3b4eb3bd08e183c82ef79b9102a4d2a7d1ec79c96b404911ae1b10f579bd82a660011c1ca2b872b30ef7dcac00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed32322035c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b000000 +DMLOG ACCEPTED_BLOCK 3 
03000000030000000200000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100012d5b1b639d6ae94fcdd0536b224644931573d1ccb2a0c548613cd1feea18888b0200000000000000010000000000ea305503000000010000000000ea305502000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a09139800000000000000205d7ce507e9dbea47687e80fceaf2794b22bd883902adeb8c97de9f7283b614b0590bc4251ba5410cb035f88e60ffdf6fccecd10d83edfe36021227d1ee9e18830000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0001023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a09139800000000000000205d7ce507e9dbea47687e80fceaf2794b22bd883902adeb8c97de9f7283b614b0590bc4251ba5410cb035f88e60ffdf6fccecd10d83edfe36021227d1ee9e18831400d0070000fb05010100203b7de491b51d3d74624078bc2c5dc4420985f0350afb6923a5585b5621750c9f126d7cff0efeade2068c7b618fc754b2abb5bff8cdb9bd0ecb4432b72ae1ed380100a82f78daed5c7b8c5ce755ff1ef7357b67e3ebc6d94c3609f9e662d0b8a4659bb8eb2575dbbddbc476694b9cca2dfea3b0bbd99d647776bdbb9e1da70e0adead081045158a7894b6405524a4d21424545aa8cacb0d0815a94891fa20414284ff2a025511a245ad54737ee77cf7ceeccb71f09a87545b9e7be77b9cef7ce79cef3cbf71f44fe94f1bf5d03d9f1951f447e343fdf3d87be873f2879efef473830dea77fff59e7bbef7f440d3bfd197d9f57368d1bfa54767949ab11b9736d48cd9b8840f7a0b372ed11f35136cf0436fe80dfac0b80dbc2afa67f84d6306e6063201ad97a8ff9234d00880f033d54c84469e48cd68b03c8b3ea54dd0909531c1fc52d0b0ed95c70e2dae4f3fd29eed5de8
b6a767e77a8b8fcdf6daf32a42d7cd6bdd76d9548e51317aeaedd5f5c5d5e9d9f5f576b7a72c9aa273ed73ebed9e4af025c3b4d595e9f9d9deecf4fae2cfb4558d9b09defcf4409f1a2aa7cead3d2e53ebddf6f90b8b40e6426f41a568ba89e04eaf75171f5b5c6e3f4ac8d519393476dbebab17ba73ede9e5c5738bbd75358c9e70f6e155c24ae17d44a6aeaeadaeb7e7f1327f61aedd5d5737a1d3a1f3e1e5d5b9a5b985d9c595e9b5d9eeecb9768ffae9756e8956e29db9475f6918efa23e77a1db6daff4a67b8be7daea00d316339982ed81b579743afff0f4238b2bf3d38be347558696da34d17361b9b778af3a88ef0707693c3db73adf56868958aed36dcfb5097257d61a2280580ef09890d1fac2ec3d6f1c57af61e4a877bdb74a6445ffcd681aa6a60b6bf3e02dda0ed993275414abb8369444511c0f0d594b9f517c8b1e31237624a07ff4371cd123d60e51efd0adb7da86ff63ab8f46725b10ea353d34145aad7434623774b17959a51baaf8d45f568fb8a6c3d9b5b5e5c7d5eb6a07b42a745a7bfdd83d47c727ee7bd39b87fe66539f0854767bbaa9b5dd3093f2d7a9078655417f5be683f4a5c81ecb752737e3f44d5a9f9cccad539d22ee1417cfe76a9c1a9c29b29e53ef1ad64e4faa62e3c4b0a9dbb45007e81ff5e90e663b4d2fe83d39aca9bdf8cdcb2a33ce1e489d4d8d4ac7b5def8415a6e29a755c64d9d66d262f59651832ba175dc6cd2f3ad0a40313352c533b4f3ffd03ada2854d3601718b7043ccf3b757258611fef0076d96d07d2ecce62649cc0127ae5968b8d4e1e38ddc96ecbb17da75c405b74f67c6e4ed034553cd1c92da19207457c3ed70f0c1b0c21ac685a71b19387d4d78c9c75da192c1c776901daf9131d02648088f62d173b2e62184ec68434c5f29bca465367881c84970c54f4d1c22c80549d0a2430a126fe9ede4b742b469a9637a28be0ed843e6191fd00d024d49de6bd366d0a5a6777d2dc74429b0dde36f5df9e6bec7a5859225a9339fce1c9dc60ae39a894d39e26292146a426345d7a93f272c2484b6b9e2e1154e1a0398c01a6a8778011febd839629d7b3d95d34d54c62415e4c31a2584ca6381a31acea26051d200bf4245168a23feb1ca6d5d2043cd2d9e1eda8f8f61f4e43950da9f42744a85e22fae9c3a08b2e5e0021137ecde82da8ded0adb2d78ef257a75be822622d65756a7949d1bae92fd774c0846b1104fa0872b354c43fcee7e5eb2cceaa08c0b2a62194695a9245a3dc961b6c411509c9112f456fcd80799088f838bb54d8415018cf5c23410b00c783082a10f50e84dded3abb44840118013088481f4a76fd881cda17441ad78fc81dfb8288bb7e440eef0b22adeb47e4ee7d4164ecfa1139ba2f884c5c3f22c7f705
91cb6a174cf45e9898014c4c05e33982a10750d17ba2a2050223a0592d1118361ae9778cd51be612eb3957aa3975c4aadc4cb9a78eab14d660aa456f43fc36466f357e9ba03728426c01e32d8f870db33cdef01bc66b7ec378b62d9fc883fbd4017a0b8ae4b1fbd44dfc96d1db30bf35e8ad8e193c2eaec645d5b8b01a17f0fa0d5edf1c57b70aee99c7e5f60a97d10a97db2a5c1abc0b8cbbb9dae36baa3d1eacf69809ce8a9118e10581c42db234bd1d1264d57dea2e2107b5fd4035eece6adc1d6459c844b286602bf4adefd3fe7f92f6da533efd522076fd194daed5619535e0fa38f56e78155bff121a57aefcf1b77ee7d73ffde2d44f929380af57ae7cf6db5fc35720b9b9b9f9fca7fff04f3e72cf43c356be5efe95ef50ef43c3817cddfc230c7ef770e22c7c910f12ba05b9544fd1d3d923f6297dccb263414ecb8f8ed693d42f71e55b1f7e71ea3dbcc4339f7cf1c57ff8e047bef6f98d3ed0bfffbddfa0efef1e8e05ea3c3dc8c59e119833c76c4b409205c8de305a8f539ef639d94705e5437ffbf257805a244096e9419a6541802c1cb3ce03719decded17a94fab537bffde13e10c0fc28808402e4494c08c8c5f6fbdba4fd251e4ed2c9de385a0f531979861ee1b8392de34e1fb3137ed844273b365a0ffcb01e3da271b326c3d68ed9861fd6e8643f365ab77ed83be9118f9b5332ecd4313be98791a20538e3c73d013cc6cd451977f198cdfcb8ac931d1fad6b3fec7df4a88d9bb332ecec313be6878d75b2b78c52f891dd415f9ed190a6d7283eb3194e0bf99b27b324fdb2d131046c8ce4ab19389231e8eea0198a568f24ccc8823c7e4064cec5c507d8f58eb3db9a86d1a0a6039d62ed3cbbc37007e32c240f3f2848d65b2e98526010b5769ab010ae038f30f1b0e277b025f8f92fc012a09310635fd260540df077b6d2bce4647f5eea12572b34fae9bc53d4007b414c1f3719351cc2e45a47da98c714f14094031716fa8220d5eabc4ea926751db1ae09479bbacec3d7e6082462fb1461abca25c5157dde4507b51a2086c978c36344650a3d2378e671fa73468757a36d79743d753d30ed296b52d09ec5612f0283b22d4fd91dd44c795b25e102f218997a4c0750d45614c9842289d0ac0145dae9d3e6886dbd0245a283666f5a0cf7652e3b927edb50e84a24f9b8b911f2f6450ad6157d667654f6725c1e13781095c6095c40a756866653a3bc550e555cd032934211daf1045303a7069d09efb9ea4c8ed96760595ee05e97205a1662d29e4bb22a1c7fa6ae9359cfe89cb9c55d2f6881ee71268c99452f700b562d5b1a1523aec20199181db4bb70e1e346d870f3e0d1c79cac96feaa3511197562c7a6be91227a4a1e93f2382d8fb3c29aa3f218ab38045e819050
a478bb8c2816e738036dbe496c7b2b734d58365171658c8f34c2d75d5846ebcdc8eced1c6b0d722c138e3564d24cae847bf4581304060ec559728fe871baa9f138454a891e93cda1abf069c8c125c2790976e1d4a6de7960ee4ebf6775c207e6867108142639236748b4227fcf8884fefb560ebe02cf66fa3cdbd4b229614a764ab856bb1ad78840bb706d53ced910b85613ae65c0d8d5ae81718cc54bb2c31a2ca4eaaf98418892b289d978cc2ec8db647f6dac54cd430309821d9c450e083949b2b45f31bbb673bbb9f7b9f5d2f05e4e35e586844ea48239adfc6095dd46019b2246227596a5a3900f24d5c897ec33dbed18927e2e14b3ff4db5b71e8e2b5d9c94ba38f1eb267d5d9c6c93aaa4b4fd7071f6949a44a4060a93c5252b46af76aa9f17f9a8ed38d5a72be161d1b986537d7a40386604cfb395626a99fbd91010518ab173cd9a77ad2db8572bbef6ec575ffbe030ab7ea44c3397c7d43ab6ec7d8b182e223fcef421e535c0d2a77032e9f85b56ebe8815339b682d93966a4d726348cef82e03b431009d0e9a53c06b221840833428f28fca9af13a231231a6e4174461ef38209a000d1b08f682888f2bc15993a2f324be42e6596e6cd88d6f1d0e22c4fa5fdf440fb99b23d19907119c6f957efacdd4fed792a6a1ab27f2015ce672d957a25426f3763619dfd083b3a2f3e074727ad952a33fd4598347de34ddae92d7af1ecdede06fb1ba52dfb22f46243ccbad8b2c957f040763767c99ee6ec2a0ec8cc80ffb1b6c5b5d8d59c5d456f95562cbc8a15bb8c8481bec479f2cb8a83576477103b2134297833766a03e859f16345c3e5014e2ce144f8fbe347e87338f7d17ff9cc37de40bccf5038390595c4d11069b50772d522cd826f2758303e7b993d600b7e247ed49492c8ee0436d4cac3615d2f87d4113d31a3127ecb3a651878d20f7e6058a7a20b8abb3b790492d3493b816202e9da850e1020c1715cd2e19ac0034c1412e8900b3329c7b818a4a038c326b5442e947a482ee11feb6eff967ecc4af4b0a93df57212ab2306e25629e6b054cca1e742d857cce136e90dbd62862e15511a70ca4eeda2a343d6d1c66ba3ad815acb1c45be8e75370825dac2727c717440afb364676ff3ca3de21e7a1b14e6ad2e40eca2bd1db718648f2a151f5d9be326fa1af179c04a964f23407ad373ff00fdbc66e20a9868a6e24b34d070054ab45329e15f30da6e38613b54129f42944b2cca25c1d2568a599fe40cc08a40086639cbca8bf9c04cb15c21c6dd3f90287bec23b44687a34186a6010df5a3dc6e83a6fb395d55ca871ec8e932b4f4dff50d2261b00709d51e2095b84c7b8084d0ecdfa6bf6e593346bcf1a069a6147c3bae9271dabb19d2f18e2ca7f470d0d4db7989efc2d4
71029d4b6e48579071e69a73cee2097b75459d7711f21379d4fbfd27096e54c49d664487980c1249ee79d2435ea9f20e12d9526d891c083a7af613b97950aaaa2e5ecadeeb7bcb8de5c949d699d0facebc0b03a983cc81613726c1eee85b728274a564f0835229d2eeb4f5cbd2495adaa14e7857b52a5bc14dd007466aba21a8e469a2b7d124d84a934068120dd224649a18a189014d42170dd0049ed95b0cb248f5bedcb868a9703bd0447291c8da1c40b3e93940be207c54a4a6b886bc7b117510e2401155977b7f1545d441506511065af8da8aa8bb2162b13bfbaa8ba8af0e9143fb8248e3fa11b9635f1071d78fc8e17d41a475fd88dcbd2f888c5d3f2247f7059189eb47e4f8be20b27b11752f4caeb188ba072aba84b05b11f5b7c52f0ff7d1fa243badcfa0a68d5cb2cdfa88ed89c5ba180a3b617822313ce4122f650f55db492aa32ac3c5b925e55d591f52c61c4103346f04d4499660a128307e701712259ca6a0686e2bb738620389fe53f74397cc27502417c677740825f24bab6b48755e104ec1521e88c7b8f1ce61d6e6e46052e81dba402e3489b3cf8fa03f5130266727d7127d87f065450042870b65e4efa896783641cea40b386e534211cd496d89d4789ce65d6a7642602ea55261d877e1a00417a5b0469efa6b46c81821b6fe0b6b62899edd12a79ce47a13416de4108f3b1855443db8d34456556e6d69dc1c433585c2a0f0a4bfcf147074c48d4027e4ea1c9132aceea269dcb2cb0ee54c30d0ed0301b22bf0edfa910ba49183f2e21b12d20588700a0d3bcc63b343a374ba98ce0a914bc8ac629a6cad8684a5810d61c3622925253cf062a7b86bcbd8d82585e3b1a0d551445308dce98108b526112af5d4ab6b75779010321fe9dd61c70f725aa32665158d143697eb10a2b01cc41c82e32d92405471e94a3e90612401c97eca45083c25b8268fb4d1d41e0ce8076632174bd2a67fa5ad2106a2649c079c11d2888b9504c57fc69b03ba4896dcfc1037be2c3b66998e24f0e18f983d667203d9e6e771760b4d8c789c4cfcd873c20fe2dfe94e19df97c5a6b314ac09050981a3ac1d5bd9ad0c0195f7337251b13375c94553fa09faf8d9f7de4e6c232e51b0fa5d4d7e93d4cd82c39c1c3a46b84cf2da25da4ffb1217d21d874a0a071c1712754422ac5c05e864ef1b958188092d5f02909091a01ecd43cf46f60724b28fd9aa7b26c6583e41264cea100a706249b344b44b6622b49296b48eeb94c50a30904f218e9b5c4f844a75c8b130982d4c948a59fa211b0a0b858d14ae8b0ae228c9ee0c4228a4b96bb72004210dc270e5d930600b1c3026c54f683635ab00d6fa688af860cb443a244c1583c0389a4a7e01d9bc3728f5641e4c4d3cf524498b2e363ad80cf5
b1f9206340d0ab2081149a08de95e7fc098c40c9b084430c670cf840c2c30f80c1001c72a3194cc61aa744850e3d04b1b03d3ab8d9413ec822bd068f000b0550d7b21ea77848e6d0820405be34e44ba3c3bb979b21d294f9a6ac6c324898105f3eef85321bd08c03a944affa37399518f854a264b612a46b78e9665837e93605c7df919d97b17e9c682fbe3dbc5d7dd9d216f910179773b795c36d3596d57b7a3f85d95244a87095c41ae3ab3cbe7a2fd4522e197c1fc80d02f26553a9bb6d92b5975c9529ea3da1226175581e8e9d003afca4be5a223c8d1dd6b1ca4d86d089879b7c07a5515d1e6079e220f730fc4f674e6e99ea7c4a6fcbec5b315b97b3f59eb3ab0923db26f00ea026b3fed1701dc9cabe6d5492748924e97c0ed7882d6435fae7b86830703b4af160f1a12cd9b407799af2ae171cad3c821f620a5c698a59f511d988b0c5f7a8016e3f291dc2ab0777d1456fbf1dd503b80a996be23700e23d231d6c71ef05b7b3011d3bf7fefb062960728e82342d8b6b900cc5e50dbec311c38292e1586a4afa350f91f328e15902d5b4151ce636bcf6509cd8a85526bf902f5e62d5e00b4f7cc58ebdddca313462bd02c9e921b5ca387a6374204d9fd7261057f07f5de10d68ba6d6a8ec28b4a668ed804fecbeb540c5394c5d81d5f712a95e0a70ced28d8eedc5edb8e1a7e478d6bd851c38f7ba51d855e77e73bb7c585403f322b4766db062503831a25811a7bd801efdd8148311e194556f468346b4cab1ae221176535ef4aa65ff6d6eed590ea1a69b4cfc4317b11a74ca76571b9a9bfb6b2295454fcae08e7607b2565b3aaa404a2baab4a4a807d04be9262717acec8035703032e989c159d754a640147f079ae90f81a37d0872a65dff3ac04ce72a710f181af81841c78579d196a20b6ac8184acb2b8936f32c9302e78707dade56f56a20632263d6b825352ba0e16c569cb65eec0578e41c4c1dab154bf387e0dfaa5635b2e17c0a3adc0700c2faa861597e8700e1ffad5e320f5fa3b9b280b2c81e86e0616488598c1f5dbefe7769ac8451714c7a02d898f57d1edb4a36dea1dc96dafe17d65bcf82a3dd99b868e47bf293ef9d5676f19d0f2b401d6f296b53c59956552f441a5e80df39698a53c4dfd83ec68f9e6aab746f596f937291396399eb1dd6d848574f66d44c0587438c5cd2ca9ec036cf37f0b0de3ebb0c8d80d9a1672b079a95dac8b45a2e2f439ee36e2e48b8db192b550550564771bc377292cdb98a735bb4ffca3a5fdf47ccec8e3b4f77ce450ca314cf8d69fe8047a3f22878e20fcdaff19f79e7434a3c746ebefac0dca7bf7dfbc36328542a6edb820b046600432719855c908c5604614532916a51dc32363fdba353d22d40c25b264e141fc88e82de6f8
51fa0349af1889da620490914b38808c3880440e860248c3c16513f65ae35786fd00d2ec08206309203d9c12f92a808ca6b80254c19100d29401a447c5226ea72f6500697d00197b3be92355e5d713a3238999b16dc1a2646ac606e245d6be134c3ebc8d41b32bcfd0ec6ed1e3c48a97becfd8ffff8cf51750b65c46aa38fcb211ed36e06ddc30edc657387689ea5ae68c04575f54db8239f95583c21d259e3d51a9c80984574c3ab62bd2debfb351fa2b49df5f09d88a559dc9167f25e0247f69659ca9fc9586f82b6ec05f69f5fd9506dfb13c25f8bc593c83898168ef7819edb16790fea93656c29531b92dc3e9b631e7adb35c01e3727499d6e15008d849b3385d64ef9638319907d92dcef6af04245d64f6d8be210d990cdc472248b8432a9797f8f46523e3e668992de55ca7de35d729a1aa53e9b3b8ea53ba3241e5b634cec1ad82dbf229f257908c2c9ec50b0e635956966141f1157268c47b09e0bdc470e7254625ff212e1ae2bd9832f41c702bb4fca25bfb4b4174e61acb79826461243f15364c32fc34462ea121730a88b0635c868d7c0e5c2e0918c13f3ec1ee2049d102d7fe49ea16fc85002be94fc0ae8acafc3b702f455adcf7b5f2e46906e10294915cc077a9785d5d9574627f8904bb8a21f13edb8a7ed9063b20a15ccd22152117b762a0148b24c4e5c5ad7e469696ab344d799b2b4dffd1a6fc93fef49d8fcc2e2eb7e75d6fd5cd2e2fafcecdf6da6e6df6d1f6ba5a7db8d39eebd197f575e95fecb5bbb3bdd5ee34ded7ddca6acf2daeb87317967b8bd38b2bf3ed8b8a7f0c99def9fe2e0d55ed6e77b5ebf07f5b2cae3c5a4d567cacd310ed8a33e0e9bd73b32b0036476db4baacbb0ed8bdd98797a9e111374bfd0bedae9b5b5de97567e77a8aeb00e9eb77e0786e757ef191c7f744efe581e5fcd06b5cee63cfa9f44df21f4350bb47786176e551225777f1dc6cf771b7d47edcbd7fa1bde22163d7b32b1ebe62cd9ae66bddd5deeadceab2f3ff71488969ffff18e132651a3cdac61cb22ce9dd1756da17d70806ed50684aa83eb278b13d3ffdf0e3bdf63ab05cef752fcc097569ee1f349552ff05ee7357f400d00700008101010100204b21f3cba072cc493e70861540df4677b498b0505a8b8e2a346b85a0c2dd2fc4263c4a7d8629026c4eb594ad96fac2bfe5f8ffebb9c841c353920b7b8ec11abc0100d90778da8d563b8f1c4510eedbf7e37cf209d9e60808402496c0dcdaac4e8ece01090112afe83043ef74ed4e6b677a86ee9edd5b3b2121b049888d842c84c0c1456702eb20b036424242c2e00408800c24fe03d53db3f33a58e860b6bbeaebeaeaaaafaab7f55bff9d1a796df0e5798263c37cc89f2fbe657e1eb8c7cb92e0de5f83c1eded95e4fded
2d08150faf5ea5237e69f7855db2d3c199e351e5915a339c0b900d4103681849dff5c09daa3818bc34ec5057f319d54036b6c640752cc1617c024a17515d1a6b2f945c2f48a3ab3d09ca0b7dd68ab9d097078d292cd4267e9c39f089a70faea351378c85563b11c8802bf44c383eccc0cf20cd39e55a9d31df4c766ee487eed4f528174e4425baab412ab2fd44400f1dab73046827567402f6ece195a73495139455b44ee4ead4bb1db3594b2a94b929fa51367179f0f4882adc00722dea6c6edb0798d3452a7fd60d858643ed8c2598c8297bf18227220efe2f948148a1851bbb515c72a47ce34cbbeec655133b0106781de0c9aa059f8f41f3200b19833148090c41870e1c465c528b9b73c1c2798a3a57b5c2c0cfe276de28b9f0b90027552b7e6375c085d35a0691f6ac7a7768c39351b2a4eabb54b8e0dba3486d2b597131b1f0b3553ab68cff9c15a9dec3adc83b0327b5764a645b3bbd7c77b2ce294f6a755cf4a278e473d7c1692b91a74e75d083a9b5d828596cb8218364a6175132eb4b782fe61202581d2b906ec926dcee4a2cd2302de6ec9354785ea52d5bd5900bda21ea652849adab4030243b676debdc60af83126d32d91c2d34a85341c20682e6d233ab41b8f02f154e6a05e4e9b897c2b319c990c52e3a859123b533d932bbdf76c276c527c2e4b21ceb4d8cd8aa8bb1b56dac6d90260d1b8db10c036bbaa54063abace4ba8ea2241c3da3f77980ddaa92bd2e7628c7629ab617f54c2527174b05a6ae8a8236da3229af186acd0293fea689c65e7716ccb0eb61a892b5e548eeca2475a55ec7d3d32658c78357533c329d62a2b5eda28a6cb492c93f3758e35524f9ac128236578e11276e742c286468aca330a42cf661ab98b783ebbd58643cafff27cf7b71c4685a678db575669c5f1543c3e0735af70bef07a975ec4a819b769132cbcc6379f1637c36f3278f7c7debe2cb1f7c7eadd434c8feb73fdd3bfaf4956223c0f1fcb4fec587792193fd4fee3cc31edc2956278e5f1fdd7cfc59566c1fbd39fc19d8d14999a138ee42707492b171f5c0afa848c877af9e78c7cb22f570ec3f77fb789951c882be4940930cf4f0d1db6fdc5f16528fe3ddaf0eee2fb324e3d8fb1e057942cd851ffef1fb8fc5fcd920f8af3f2e66c9fcffb84b7ff865b7ce875708c9ff60d8f137aa5a1fa900d00700001001010020742877c36a520b152b1337ea1ecd37b0c98ad07289c32fec392e7eebab9f0ac71f7bc8c718cfa75317b2e15702372a9222c4616783ee7b3f0ec6358f8c328eea00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232201a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365
d02dc4379a8b72410000d00700001001010020058e23368b919493d6ac61d27f66b829a53893e88ddde857d3b82d913960960d22fa36f397752b98c295e3b31927f740127c0a99e76f8bfeea88f44466b8fbfd00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea990000d0070000100101001f43fe868e263d8134cf705aa85e26ce78ebb058edd558865fa3d240f5cb9e50c2389e9c8276eac800b7233a552045b2e79124c97e5156a0649849cc7f5d09eee600005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f0000d0070000100101001f29e82b08ccf15e2187f29fea11ee3f4974f41b51e45b19f353348d8848b86fb71cadd88630456b7a1c60803c7b402487d41fbf18f0b0a13b4cca1f740447938300005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff5260000d0070000100101002047a8b784c3765b5c63ac52e3d8461b80bc2d3e3f62434f8accb277d9f2487cfd3c0728fcd26b5119a11288e5db46bc5b547877e220971609d1cef8cba443340800005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed32322068dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974280000d007000010010100203e701fbafd4149bc95b55a6bfc3b78246f5c2668ccc05ed4059a36ceb38f140b31e3b69e15f2579571e5bde39e034947271599c200e540b3949112bef163074c00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c430000d0070000100101001f0cc7352e60f4f8476783d6d1b48766a111c56fee2c1a552e76a75c92bc17de172f994ffc854c09717c904054819ca7a17379ddecaf531c439b35337ba099b81300005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232208ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4050000d0070000100101002040965063a83be2d53b36c8d7e0775f503c2caa1407e586314562aa
ce52c272fe60659e196413a6c9db4168470bcabb9a5851121c10c7b665f363f6cd4d1e4bda00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232202652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed250000d0070000100101002074ea7468b2a031c4cd53bf10ec3ac66b0c4b5c8779e045f1ef8d9c7b116be649217ff340107d0163397b99918ee2ce822b66cd6fce7b385af97a04671136e2ee00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0000d007000010010100204dfb21ca5140582379bc026792c16b4cf97827143a4a9cd99ae70b3e6016cd6316bcbb9f1cb1233f12a0bbcd9debafa64724d0459b5c8d3cb67ceddfb2e3962500005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d670000d0070000100101002033446a3a94ade71dff3edb786259679487ab701bbc147490b1d4159fecf545fa22fee0698db16bf616465e5cebb985bfc4d9ed1ec4a55e38997dd4b4bbc427eb00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c20000d0070000100101001f3f67edd35bf731a07f40c638e8812112cd7d1baa39ec7dac4a1b2f0c83ac8bd53689b56dba69a7386e3860a6f8976695ac0bc2b5dacae91080f1d54df2dac0c000005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b44767070000d0070000100101001f1e030564013603d54f9e983b63cd940f8ff09ae038b14813f4021bb0c09ebb640d90cb4f8d57be2809f492a51737b671a5f549d4efa8e7efdaeaa9663c09d1ad00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450710000d007000010010100205cea642eecf05568ce8c5564e63349eea3b816108914ba2ab5efffbb8ea467265f0b6d474f03ed02a3bf529fd6e55a595cbf8dd1adf4311cb9c51e862f8a535400005206e10b5e0200
5132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232205443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b40000d0070000100101001f4556076cc86e0840bf69664f1ef8fcd4d91abda313d08e7840d24ba45cb429cf12b7d3a1f64250c19d1b975e7b107853beff70ebfc4c27c44f825dc05cdc9cd600005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e990000d0070000100101001f354d903ad0f2c6cc9d9a377d681ffaa00475d1e559e48074b4c8cce3111d5c172903b2f179ad4d736dda4e7d1b6a859baeab9dde5e5e495ce09733ec4650634400005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb400000d0070000100101001f1766fa716a828da244c9ce52919b7a19acb38dbd110d1bb0039bb2477c17e4465dceecb8330ed5ee9de1330930dfcfa1a5e8149ce8536a82c0093642adf7328200005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232206bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc0000d00700001001010020488923db1c78fa430a3a9eab75f4ee467c7b9a3d3b4eb3bd08e183c82ef79b9102a4d2a7d1ec79c96b404911ae1b10f579bd82a660011c1ca2b872b30ef7dcac00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed32322035c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b000001 DMLOG START_BLOCK 4 DMLOG FEATURE_OP ACTIVATE 1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241 {"feature_digest":"1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241","subjective_restrictions":{"enabled":true,"preactivation_required":true,"earliest_allowed_activation_time":"1970-01-01T00:00:00.000"},"description_digest":"f3c3d91c4603cde2397268bfed4e662465293aab10cd9416db0d442b8cec2949","dependencies":[],"protocol_feature_type":"builtin","specification":[{"name":"builtin_feature_codename","value":"ONLY_LINK_TO_EXISTING_PERMISSION"}]} 
DMLOG FEATURE_OP ACTIVATE ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99 {"feature_digest":"ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99","subjective_restrictions":{"enabled":true,"preactivation_required":true,"earliest_allowed_activation_time":"1970-01-01T00:00:00.000"},"description_digest":"9908b3f8413c8474ab2a6be149d3f4f6d0421d37886033f27d4759c47a26d944","dependencies":[],"protocol_feature_type":"builtin","specification":[{"name":"builtin_feature_codename","value":"REPLACE_DEFERRED"}]} @@ -156,7 +156,7 @@ DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1 DMLOG APPLIED_TRANSACTION 4 d276f624b0262b174fe373d5bcb026f4b5c87dd77d794b246b77a75e4d22525504000000033b3d4b01000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef0100d00700008201000000000000000010040000000000000001010000010000000000ea30552deb8b0eef2f2bfd027d20727a96e4b30eb6ccdc27488670d57bf488395c48fc19000000000000001900000000000000010000000000ea3055190000000000000002020000000000ea30550000000000ea305500000000b863b2c2010000000000ea305500000000a8ed323293120000000000ea305589120e656f73696f3a3a6162692f312e320117626c6f636b5f7369676e696e675f617574686f726974792276617269616e745f626c6f636b5f7369676e696e675f617574686f726974795f763019086162695f686173680002056f776e6572046e616d6504686173680b636865636b73756d32353608616374697661746500010e666561747572655f6469676573740b636865636b73756d32353609617574686f726974790004097468726573686f6c640675696e743332046b6579730c6b65795f7765696768745b5d086163636f756e7473197065726d697373696f6e5f6c6576656c5f7765696768745b5d0577616974730d776169745f7765696768745b5d1a626c6f636b5f7369676e696e675f617574686f726974795f76300002097468726573686f6c640675696e743332046b6579730c6b65795f7765696768745b5d15626c6f636b636861696e5f706172616d65746572730011136d61785f626c6f636b5f6e65745f75736167650675696e7436341a7461726765745f626c6f636b5f6e65745f75736167655f7063740675696e743332196d61785f7472616e73616374696f6e5f6e65745f75736167650675696e7433321e6
26173655f7065725f7472616e73616374696f6e5f6e65745f75736167650675696e743332106e65745f75736167655f6c65657761790675696e74333223636f6e746578745f667265655f646973636f756e745f6e65745f75736167655f6e756d0675696e74333223636f6e746578745f667265655f646973636f756e745f6e65745f75736167655f64656e0675696e743332136d61785f626c6f636b5f6370755f75736167650675696e7433321a7461726765745f626c6f636b5f6370755f75736167655f7063740675696e743332196d61785f7472616e73616374696f6e5f6370755f75736167650675696e743332196d696e5f7472616e73616374696f6e5f6370755f75736167650675696e743332186d61785f7472616e73616374696f6e5f6c69666574696d650675696e7433321e64656665727265645f7472785f65787069726174696f6e5f77696e646f770675696e743332156d61785f7472616e73616374696f6e5f64656c61790675696e743332166d61785f696e6c696e655f616374696f6e5f73697a650675696e743332176d61785f696e6c696e655f616374696f6e5f64657074680675696e743136136d61785f617574686f726974795f64657074680675696e7431360b63616e63656c64656c617900020e63616e63656c696e675f61757468107065726d697373696f6e5f6c6576656c067472785f69640b636865636b73756d3235360a64656c657465617574680002076163636f756e74046e616d650a7065726d697373696f6e046e616d650a6b65795f7765696768740002036b65790a7075626c69635f6b6579067765696768740675696e743136086c696e6b617574680004076163636f756e74046e616d6504636f6465046e616d650474797065046e616d650b726571756972656d656e74046e616d650a6e65776163636f756e7400040763726561746f72046e616d65046e616d65046e616d65056f776e657209617574686f726974790661637469766509617574686f72697479076f6e6572726f7200020973656e6465725f69640775696e743132380873656e745f747278056279746573107065726d697373696f6e5f6c6576656c0002056163746f72046e616d650a7065726d697373696f6e046e616d65177065726d697373696f6e5f6c6576656c5f77656967687400020a7065726d697373696f6e107065726d697373696f6e5f6c6576656c067765696768740675696e7431361270726f64756365725f617574686f7269747900020d70726f64756365725f6e616d65046e616d6509617574686f7269747917626c6f636b5f7369676e696e675f617574686f726974790c72657161637469766174656400010e666561747572655f64696765737
40b636865636b73756d323536077265716175746800010466726f6d046e616d65067365746162690002076163636f756e74046e616d65036162690562797465730a736574616c696d6974730004076163636f756e74046e616d650972616d5f627974657305696e7436340a6e65745f77656967687405696e7436340a6370755f77656967687405696e74363407736574636f64650004076163636f756e74046e616d6506766d747970650575696e743809766d76657273696f6e0575696e743804636f646505627974657309736574706172616d73000106706172616d7315626c6f636b636861696e5f706172616d657465727307736574707269760002076163636f756e74046e616d650769735f707269760575696e74380873657470726f64730001087363686564756c651470726f64756365725f617574686f726974795b5d0a756e6c696e6b617574680003076163636f756e74046e616d6504636f6465046e616d650474797065046e616d650a757064617465617574680004076163636f756e74046e616d650a7065726d697373696f6e046e616d6506706172656e74046e616d65046175746809617574686f726974790b776169745f776569676874000208776169745f7365630675696e743332067765696768740675696e743136100000002a9bed32320861637469766174650000bc892a4585a6410b63616e63656c64656c6179000040cbdaa8aca24a0a64656c65746561757468000000002d6b03a78b086c696e6b617574680000409e9a2264b89a0a6e65776163636f756e7400000000e0d27bd5a4076f6e6572726f7200905436db6564acba0c72657161637469766174656400000000a0656dacba07726571617574680000000000b863b2c206736574616269000000ce4eba68b2c20a736574616c696d6974730000000040258ab2c207736574636f6465000000c0d25c53b3c209736574706172616d730000000060bb5bb3c207736574707269760000000038d15bb3c20873657470726f6473000040cbdac0e9e2d40a756e6c696e6b61757468000040cbdaa86c52d50a757064617465617574680001000000a061d3dc31036936340000086162695f68617368000000012276617269616e745f626c6f636b5f7369676e696e675f617574686f726974795f7630011a626c6f636b5f7369676e696e675f617574686f726974795f76300000000000000000000000d276f624b0262b174fe373d5bcb026f4b5c87dd77d794b246b77a75e4d22525504000000033b3d4b01000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef010000000000ea3055890000000000000000000000000000 DMLOG RLIMIT_OP STATE UPD 
{"average_block_net_usage":{"last_ordinal":3,"value_ex":78666667,"consumed":9440},"average_block_cpu_usage":{"last_ordinal":3,"value_ex":334993056,"consumed":40101},"pending_net_usage":7920,"pending_cpu_usage":4100,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1050675,"virtual_cpu_limit":200400} DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":4,"value_ex":144011111,"consumed":7999},"average_block_cpu_usage":{"last_ordinal":4,"value_ex":366368114,"consumed":4433},"pending_net_usage":0,"pending_cpu_usage":0,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1051726,"virtual_cpu_limit":200600} -DMLOG ACCEPTED_BLOCK 4 04000000040000000300000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010003000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd2d5b1b639d6ae94fcdd0536b224644931573d1ccb2a0c548613cd1feea18888ba1daaeb8ca4a99a2fdb182ebb462ceb26de69d76f024586207a1159226ea43de0300000000000000010000000000ea305504000000010000000000ea305503000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a8
1a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b001f5505dc47adebb683205c6cd618df9385777f5ffb29d307479ace7cc07533ec5d3b9e648ee95c2a1dbea8d971cd9e980c6185867556035db58109f2678ce179e30000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001130ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b72412652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b447670735c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c25443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b468dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974286bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead45071d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526ef43112c6543b88db2283a2e077278c315ae2c84719a
8b25f25cc88565fbea99f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0001033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b001f5505dc47adebb683205c6cd618df9385777f5ffb29d307479ace7cc07533ec5d3b9e648ee95c2a1dbea8d971cd9e980c6185867556035db58109f2678ce179e30200d0070000dc060101002026bcc48f2a2332b2fcb39133cc9226405049c824dba512d79415009b403b107d7dade3a3173c6cc9657b7662dd1fd267156e076ac91df0aea1d8172e65d676fe0100b13578daed3c0b90645755f7f33e3dfdba77df862569665773fb31606fdcc8246c66c710cdbe91ec872424c480c10fb3bd33bd3bddf3dd9e9e65d7c2edc56ce1f2b10a1525099602150d0906ab02456128ac4d245504b58c24a108258a965552201acb2a410a89e773efebd73d33614327912a
9964a7efbbef9e73ef3dff73eeed09ff317a8b166ffef2e3e705fc48fc25be3efe46f8bdef15ef7ae0e17c87f8c837afbcf22fefcc75fd393cd45717b147de215f765888c3aadb158775177f7ba7e147760f0b79fab0d73d7d1abafc2efe409fe60f95fd3e6ddf89c3018251bf3c0df84e3b4c80f6340d94d023bb841861e10560a769795497401899821ef5b43fa61b4b27a2d923d3479b4bb3d3cd893d42634fa9b1bcda5c9eaeafae36da1d21b12b9e596bb71b4b9de97663a6d13cd1680b0fbbfdfa91651822b05de6f1d3ab73f52baf9a108a70f7faaee09edca8abaeb892fb62dbd76eae341667af9818e7ee208f69641ad633b7d069be5af8f8ecf55006795c2303482258f032ac777abe714a04d863561b9de9230bcb33f33373f5e6d2f44abd5d5f6c741aed5568cecc376679c7c162637166e5940809e6d8f78329e0b08b11f54a7b796579b5318b8dd9b51918234688aa8e849de66283c9b71dd1d6673a40d0dc684255586937973aabd30bbc9a8b1c8972bb291256e0de6a67b9dd20f645d4d56e1c5f6b424f7dad33274ad8b58517d63cd15c681c83d596b1f325d8d96eac2eafb5671ad30bcdc56667556cc1372fdb781fd38d93622b41aeb41bb4ec7aa7317db451efacb51b2226aaf1b2f9617b73d5bd9d76c367c53666393c2f2f4dcfd63bf5e9d5e6af36c445d40d7867a773ef9818dbf202393db33cdb102fc1fe226c1e49885b273e95a1636d651697857ddb7b949c83b54bbdff3af1d26db1d816c771292ea8f8e2822a5c22652c2bfc5390f643560af8290ad094ee9f2ac882829f82e7cb15592efb5a0a195cacbb323d735e445d91fed323d9473822fdfacacac229f18a918ba44865547ae39b7ee1cdbff84bbff296e9d9c6d1e247df2daff3445c8bbea28511f1e5f1981146bec1db3775e6cc7f3f71efd93bffeeaeae51f08cddf16b7ce8beff8bbff9bb1fb8f78dbdde6b5cef47fff6f3a2d7fd33d87de66b0f7cf3fce3efcee1f8593bfa530ffec7adbdde6ba1f7db7ffcc4173ef2a1c71fcae1d84738ce3cf1d90f3f35d5eb4db9f7ccb7eec8f7be967b3ff69dbffffaddb9f9aee3ee4f7eeb8bf9c1fbb1f753ff75f7fbefcaf71ea0b1e77fe73b7d630f52efeffdf5379ec8f7dec078bffc271ff8e283b9e95e4fdd8f3df0f14fdf7965aff7267feabdf73ef5d457de7367dfe09b7d23782be71fbce349718bb72fa5e7a71efff8377ffddf456ca2c11f3f37dcf1a96fcf80e0edfd0852115f11b5a5ee2a98b42c52b115ba525153ef81e7343e91c856a226e0897bcfee4bb411ad746b2b5d399e8809e8004095c61d23ae477068cb168e37121a5519c16bb94fa4dd03f8367def7911cbc8e848896842caeecb618ef904de4ca81200c30ca59a123df467f6
612f4e938a6b05e28d5e2d551796268dd851956a9f06fcf12b135196a9a8f21ad3445d5796466d13a224a2c8882840f1fd89c8079d17f67711172bf6ea7369f47a5ec857c57c22c78070b0f42f09d844819a31b4bcac85af45fcaa517a34b286af60c5f3f116e8f9aa688d89e8c305582192b30618e09797a8f9347c1defff21d83f7556556414c014e2ada38676eea505a2b587bdadaa7628005a6f0cad532f07ed65d0a5a1a0e3a1a0b70f055d190a7ae750d06628e8b1a1a06b4341ef1e0a7a7c28e83d43414f0e057dcd50d0e7453fb8dc0c5ce5c05506fe3080cbef0f2e07c0115844df915a76d569b23be3f38926b38373c5adb83025aa5e1a57d1a02968fb60e2f08551ad6aa0d0de02ce436561fca9c23b8c67822b84389b785393e7ce567d78d78575190fde55bd12dbdf54eea7b5ddcbd61720f76a304d0e76af16381ddafb5655e0ec386b44a63c51a901d3cd686240d39b5e033ccea73be9c9563c026b2cb4766980341a3f3c9cc8db8b9103cd07561f440dec2398fef7435802f98087db7f5af0fe352820fa01684d815daec04777af8e611a01e8120f97ac0921d1c0832da277bbeb5f3ff16f57800b10e8450ed2fa00227e7502f41e379a377efe61763b0678378eef692df11ea68f17ab37d0b077fd050e03c1887725021711034d6aadc4c736ec03569804ae5d6925a16d1b8d688185f884988d029f9014f0c33385f9781c0885fd00047bc151013fc7ee3944a24d5a6cbbf438ef9cc6d44066e6ecc3843acc1308761c7648a13da166a1090e88b8185f35ca780a791a59425aacf1c5893fa1f6102961647c2bb274ea349055be0d7efdda9b76c01a90668226cdb880f403c0ddc4055d533b13851fe6268057f826860fe6564aa0ca3057419e2ab0819d25e40434ed7a693b1a6c583c1121a2c355e120c5756574dc97453c5325f1af2b8bdebc205707500daa02506a8c57e65ac876501b4321050a83421c0a70000b044a3261aae2272e7627e187099f26df7d36d0234e2f311e505558f618cf297005a0bce06690bb3e29034ceb83f8d73030d80f4107a804ae02175652107bc0f37e5c2374e2c2029238fe0f5407561957133fedb2edf84f1453248f9fbedd3b54c646fcca6a98ca6a017b81ba1c19cd27e1188caf824e49901c3fae8eb2d0062cb456460324fcb80980d6b60304d8b56adc52b8fe9042b7f4a9cf9296042664e10a310cf114ef30bdb46f10ca7f7504f080c92802f9c0981a2f350006b6230273e293425755cae6a444468c46b5aa65dabecf66a484562c3265b2621159b112bc032be69b08ad58c45aea3b2bf6c9476805ca9450c89483652be66330da22bb19e1b4c001b4625e66c57c67c5ecf468b500054ed93364a496b00cd4a06a84734548359e
3232457c61b0a788d0f050e307126a9353f322da6dea8a7b9a2f41ed40fa820965f003f50ad0a3dd82373b9daced4c7cb2382036a85a3ebe8ce123ca54cbc05380aa85f295a9d608cd6789ad415841af006301f5208420989495d8c328b0df671da0f8d504d11d4a2b30cfa40506ac73152cc418597eb4a982d9f01932966a5facc834b005fd06751ac97ec0633b48bf023686011a438c77802f199530f682e7b8f70c84f480f6f4802d126b0a4d15b28e26e019ff80e551a737b7c88da079ab51f0eea15946bb63836c8f788dfe17d685161276b3670a25a016278455583328c00cfa6806fdcc0c8200a17df882eef96da00c980672f1b493445f232ee1989e1ebd2973ee1aa1d11a4ed5a0a5a8350e2d49ad496821b8a6712fc1d63d600b0a27ae161793aec7d07a29b52ad0da8e4030e06a11521f8e0bb2717e36cec325f0fc93efc0a9dfc159e199f3cf88dbcf4247c5767cf5dadb294ca0a7a72fbdfd2cbe2c9cc3c76f6fc557156c7f3724a01ab4bfa76fa7659f3d7b96350fa6895fe0692a442024da366a21212fa216922fee4bb3e47c7a291bd2730f6741184a23d804930bc20c0981228e422c7697967ed79a647c8fbf807af3696511d801c623483ffae47d4fa216a6f761a31abab1206434cc4b9fb8efc9cbc040827683bde9242198e5e0d08e24c4e5800517148b8145ad2cc4bb303492e40dd005a08ca2d7a82c2490ff49544f819a0eab26b9c72c34e04108dea2180f8c6f60734b5e6ec8cea7d0024753951a737270431ece024113cf225921503ee1f50ac47699be617409dd34353ecf510c097e34341e394d9ad29234a429614710e08a7837bc0d39daf351ed04aed6c368881d30600083012b0a7145214282d372ee59a2f991687e607b38130564d8e94167f498d29eb34431e95b8a1b22ef0e53810b535872e0552339381d4ffcf9d45fbbbf1a90d79110e7834f0ccfa17fbc0577a3881296e7be5ba0b206024d02a6d0645589b8cf8a297d1bc18469777f5959d4a48da556fc86513281840b8247df5293e8e75b6a021961533eca09c8487c8b8838c441cf0e9efec632c584269c8f7fce02520465c29be80d587294e788832f8f3f7001881da9047a1afffc28f945b0607faa155ab09efd42e31d5ff23c9b3191993191993191376382cd98c8cc98c8cc98f8ff67c6c01fed483f0399e34ee0d54fa79fa6161ba8e8f7fb5d0e57a884e595425ee123f34a5b5e493b85a216f34adac928d3b4bc82d63d60bc9857125d30f10a5bcc2b89ccf49057d8c7bc72e3fc6c9ce7720b8d44d48344d47d44d4fd44d43922ea1c11754644dde3154d13bfc0d3548840cc2b6c31afb0c5bc8268257e39c52cc09d7b7e1410fcb005042afe310a02bef410e709cca83f93d2735ea497
d441f8fae38957b6358702565cc809908de53a437ac63b44eee89555c55998e7b2304042cff1a5a316810586219053c9a8972f615ec2193b2760e86d2996f5d293c7d3c2817c6ea5b3b16cb25f2b35ac1c97cdcb7549f0052e2dc2e0fd47483640f2a8629918ef930996081485e72803b6c034c61f98db417f0bab2c355bd3225cf96c1ba3bd1a2663353586f542aa8b38ee939060b9718ca21475e0d9e02e4138e9e0e4b3c059808b070068e129859e992c531e5968c553a35c732924544ea1fd95b8aa628b7acf485bd1b3a54fc8bd3546bd9ace8728563e44cbb8fd91befae8e58202215e32058e182a3d238e03a19e91c713d5b9ff2cb2260d5b90850655346a67e4f1b20d9228e6f339e653102c91bbc2b80883611731791868e3070f641642587a6399abaecc232f72317d308f549b4ffc9b7284633cc8ea00d2e52c9930beade8dea1fab2880207e65ea2e78d3c44e125d0e36a215c1202dbd55c48ee42d46fab45dae51eb8274c3cba379465fa0cfc8428e8444a78a9d2ef4197e67c4361bea1b19000f98626c1d556788d8664838bd8982f00ed2025a8eade5a6c44ebd68041782f039230d13cd57f608a8328fe482dda3a05f11187fa05ae7f62c4ef5118ee22faad363b7840290da2017a76463eaf8a762b7fdc6c156d42ddc6927b704335bb95c5fe736240c1080fcafd3e869e84a7c7a8743289824f0a40551eece51c0975cb157ab9c4a077e99b139f4bd472af46dde71a3696be6bb6dcb94b1f64ac57b2364de6b4e91aa74d6724aad30d439a37d8f893e769e3d0f9375279cc80ff6be797f37a032e0ff43b334758740b8cbe5c405a693037c1a45a1c882bce21dea760439ecb6136db50c56ea8f2836ea86281bfdf867cebc693f5bb82bcda77236337321e1854c80daa38fa54360909b0b216308de8cc20ae16f0a3521d21528d9802961ee29791f5861d7d48cba8dfc7a159264a65d574df5205abcdde40a5d91b23b3e05bb50315dfce1f311f0081fe54587f0a2eb6a16debf8d028ad88bd48bc778ab4d0e0f903f8203063f1a8f52a86e49bb18e51decffd220972f57e0fac7b80f5fe80d688050520f8e116566390fcada4805707ba586ef7d285563282188a88b96809bfd24a22b2afa6c819bf971eccd8d7e23a658931682e491e6c9952aa3ae9b947042afe368922927651e97d10ce229d40446684d2f2c844a648af50950b3c8c174d55573a13d35c1ef172bef3af34955b47723904791b4d07817400c8a75a1b39511da1a6a04cb03967af02ae929ca84f4ed447278a35a610a559630d639d130d73859324c0c442b31fd558b37d1d3b502c50e0d1d58d5c418709fd0ed0dd84b6c29368c7108d041dc918a29121459c204286447890099dc010bcef01ee2bda6f
7de4c1967d07f03ebe2b3306c5c57560487990213e533a807d4596da452aaf61593fa25748ed11c79002d599606b052415e851c444c07584ae1424b874a2f93c8b0b43db498b49e9e3eb49441082b165e5c43f0c7411b8a97b25ab5e4ca03926f090c82e2cc03a786235c7b31f308b6b155a4ea90497fa6d0933f106a387ec5cc9fa6fc74a908930c53c8c2a9810385c8bfa4f12e5cf03c7fc350c27f07887aa753af5d61294a010e209f6cf28841aab7d9ec3a3b1ed7720bcd06415500ed0108160a1448c60e0d7db4da5b5f9be702f552f178210858abc85002b6a8129ce5723d603b06f10c348148e80d959c40e143b3eb1915464e4e846422f9ed8082b4f396992287a5bb87c285136b73279b79050da32e85616db2d5955b4831208db93b6078935e74607a6cca31811e1468be0c32a126b2660ad68a73924a528aa5cd2364cc24b4920c3650cab0a1c56791c568d70a9142660e10cd8ee05ce6e85fd760fd79bf0d1295d5020bbb795ed9ec8d93de9e0c9d4fa4ecd0ad6eef9836aa699e0b24ac55ddad0161b086ee5235eaae386561b4d9915a38c2b29e31b5b81fcbca47890f7af32d5c3c85b5bed188cddbcaac7aeda1d7af36da9090e55b14e4d38a4b39a7e967a687cd3673565de6a9281cbac261eb8f5520f0fad26397f14504d6567b69a9435f8f7c487a3774a0ad406cf8a5f88c8d606a5607a6e748e15da3fe58eed36b97676fd600aa3f83ce4c2ae9d6d048da1c8855d3b7b6ed0f150d0db8782ae0c05bd7328683314f4d850d0b5a1a0770f053d3e14f49ea1a0278782be6628e875d7ce3603bfc06b679b69f846d7cedef7c3520485f4852c183984f778be4711933de87b7a1363bae172fc81e5f8b81c9f8de914a54aeeaa4b1a57f9865d16856417cdf07a01e1edbfe4e2e36595d09a5e9fcf90f11c0f231d94df117bb9a54891eb637cb98597a8f87649112fb70426a2cb2d015d6e29620201599e09f0720b5f08c2d4b7ef72cb88292221471cac5bfe089ac911dc174ebbee728b72975b94bba207c1133c0c5ed12bda3c12270a90123c1f67ac1c68e72fab59a20d5c59d3f6ca5a855b7c658d8a248020e40ac93f034d0a5c22f1f91a99dcab6fe3eac8cd994f8b5bf1647605eb735a7bee44e5874612fa448078ef5bde8779de17f2bc0f99f7851cefc30be1bd6f42e4bdef60dd827daa2058de8717c4fb7003de8719ef7de63dcf373cefedf5d5ab334e3e29b5ee3fbce7545373b40671dee4e2fd7c0c2ee8ce027f65c0f878c0eedb0376778f80ef29daf0b5a6c693e07e4a08368771170f037730af28a5a300173a21bafdee432eba654329d0ce12a8072e022b152eb78b29b7db4aa7f3fcfd81ef71c15b0f6c8ebf0d40970f38d9a34c48df8317d340bc609120
63b740f21276b8ceddbe5a6ccd5d2611eb2eb078587e5128d43267d1a54d00037b37974278266f4044cae10c37c6e90f04819c6cf81855de3aeaee2b0478af84614a7952445f9154b450b9fd733dc2c8ab54ccf91fefdac35d7b9bef5a6fb46bcd3e70fdae75ffae6397cf795cc87118f5c618e58053b67b8e6f1b1d60f2252c10944ab16854f196c7a6fd279474390d1588e9da479ab4129b3ad65a57293105adcbc5582a8fefa02abc40d9153db1e00b25ee664c4dd56838a4bbf204caf9f11d0915d7f1545fd0acd19dba77917abd14a27abd8852e8e430643994b62874951ae73387679d7ecb854cbf65f3e99db687681bbcf52a1f824e83ca7fe2e14ce5430c22fdbe8cc9cbaef1af579681708c260ed14c849c0c0f9a8959c80c7bb24281199651d064e1718c978be1d6ef57b1271bd82c85809caea6559af34599e43ef922cc02025eed0f8c3741a50750d9cb669009c5bf3c1afdcf7ab3c40a01d93e4c058aa45191eeafdaca817e91ec945d818f2b805fc7936007d5dfa93c414ecc043ba8a463e732feeb379c6b83d3c79c5556f4ff28d90fb6343407a46ac03abc50f7317be46de84b171cae507d767beecb17aac4a75e5ebbc75b5751d838dbe12b851bd9562fae8ff69b6a3ab2a55bc75cfdb1676a826eca93553cc88720028b531ed5015d15b9d2a2d7ecbef3c5298fab4eaa2aec8977c4851e4a86e85e229daf494a3df6ba3a5dcc9721f11805fa4a94ab7cf0111bf1d1f1c96f706dde5be7e65eccdabc3de00ed1a2afafcd1fb187db7db579cc5a2617b0366fa9ea2355b5bdcc9f04842f57f2c3d760ce06a86a6bac1a565670c1505636f7736573c73aa3e399d13e9109364a5103a4ef1e62e3ba2c154f481163bf4d8d3ee4cb527f497d7d517d72b157541fe7a2bae2a23a857414fdabac20adb282b4ca0ad28a0bd2dcea2fb94f6e5072cf95da8933546affa3df7e74eca6b24cef86cf5ca51d47e8f4b71ebdfb223cb747fec8ce86757689d23cb9d0abad031f6d6d1d39daabadff409b42cab8d6645fe53dbb00edaaeff4ed9a8d8ad1f4151557959754952743deabcae3418d2dc303d222be26b92bf59de8905bf6b3339df759c9a3ea7189cbea11ddaf85416f6bbd4a8ad780db464f01c8b900ed56c791a75ba4bd5d4bcbb2c967ee78c973c74b24bbb672aef86c490c6a80e4fd20767b7937ca7d6582a5d35d74f807feda8476c7adc3d98a70bdadd06c2b34db8a70d056a05de0fb1f7c99b9d5f327e18141150cf98b3e035e2b1c74cbf64b1ff06879566b51c97c904c7c595d58bb3ace5fe9c03331d8139fc5d0c5264e2b4576e53cee3d17f8608d9f4b74bb04268a8f2195c7d906dc40f77576090bb1d90c6260063130033d3fbfc84e4ad92574ca1e29597456
667be8dc7b87ce3d3b74ee79373eefe6b319bebb0fb8c7f6aa31a3507bed552d11fd8bc70710e745cea1a3462b32bbfcbc4509694fd6f80b59a40a052a596401b03ab08163f7dda58c81c8cdd61efa44243bea9d6cd195e4752242ae9788273311a138a39fecd4d52f28d4d52f2bf65a8e1517c50e2517c2ec7ea1b60ba876e563de8df078ebf0b0f1c4af39262a9e1b7d6e342389a0afae3a111beba35e5eca72d453eba9a7d653af276eeefb1f0371d0792ae67c10bf2593bbe48ca4ae9030e2b7644998e99a204b5c0dcfca21e44bf43c5fee40eccc1de5ce7d576e2a2b5bc42b5e4f2618bf3a836200c46b733c83be0cff144400de906274fe86430003ecb72cc0fce35d109fac2702e35776e8eb6d74b5cd435df0ca744b80bd1187c6907a1fa7c818298733611cee8e8dc5848dd75dfc0cefe2cbecd16e66ecf27a9b11ee79c1cfb769241f213339630e9969c3db9df33136dd84d8f627c1317dc3d7f41736a25347ebcd85c6ace92c9bfac2c2f24cbdd3302bf5638d55b1b6d438b9d298e9c0cb46bbbddc36cd2573b479b2313b7de454a7b16a669697563bedb599ce725b74e61a66798987f19fbb3133f5a5a5e58e39d280d602ce30db6c03b68553c2fda4ef7c48444f79f609ff3c0ee06bd4174dbdd3692cae7478556f6d376949ab1d83b33496664551d83fc3033861a51d93fd359eab8d58692f77966796178c1bd35c35b890de5fece13f80a5e09f7e6ef30a5cab0fff02f8b77ca405dbc101ab0c00c3db7520c634b6794ea0d8e2da42a739dd5c9a6d9c144c1ffc9b40cda563d978612935032f6046c6bb8ab09dfa9105e8386aeaf07eaed1469277dab091f5932f2ecf368f9eda645e3b811d734113f05f1d6a3b2ececcd5978e0135dacdc57afb94996f9c326f9d6b2c5994b89bfa9245bc294171dffdf43cd6e80811022d0bf06f04fe15e15f24f04fb108d15c3a515f68ce9a13f576b3be847bc2ad3c17a1045b1b795f4b00d7ff02b86d754e00d007000082010101001f5be06ae972efdca372f145881855ab035fc9b761243d0042d6d39703ea6e94a86cd3b37db4b18cdc3f21d6403bc80f91005cf01ed88a02858ec4a7b9fe9ba77f0100e60778da8d56bd6f1c45149ffb3edf25966308c6292882682205e24bb0ac54a6a0a1a0e0a340262c733bef6e47b73bbb9999bdf3414d116890e88284228420852ba7890c85752d121209690205d081c4ffc09bd9bdddbd353e3cc57ebcf7e6cdbcf77eef37d3faa3fb418dbcf7f4f131c151310ff2f7b577cdeba17b385b10dcffa7d7fb627d2eb9b3be0aa1e2e1cd9bb4cf5fd97ab957d9e8fba13b72141f0a2e860e8db5174aaea797c754722ab4738ade195fdb6ca313c7a3ca23d546381120eb8206503792aeeb813b
5271d07b75bb4d5dcdc75403a9ac0e80ea5882c3f810942e5aad64ae497d457b129417faac1973a1aff7ea2398aa73f87026c0879edebb854edd30165a6d462003ae3028e1f830063f33694c28d7eabc7966b24ba78743aa6758f6a29def7a940b27a212c3d52015b9f04c40f7d35409d04eace810ac93ed1b97349543d065ad13b93a5d66d34cd6920a653285712cbab8de7ba14f154e00b9d46a2df7ed034ce83495bfe88642c3be7606124ce6954d5c6127220ece6aca40a4a68588dd285ed8c862c4997659c465179b01267899c1f3650f3e1f80e641963206039012181aed3bb01f7149addd840b164e52ab8b652f0cfc2c6fcf1925173e17e0a46ac53f9a2fb07152cb20d29e556f6ddbf4e4f02aaaba2e152ef87629525d4dfee6685c2bc3b969f6cf59b1553a38157167cc49b5953682edbd4e3e3bf9cfb14baa35fce94471dfe7ae839fcd449e6eaa8d3b18598ff5058f753764907ce969947c7525dc8eb98400e6cb0a845b3209a7bb129b3c4cc9207b240c917779d3b202e48256887a194aec43058221d8396bd9cdf576da28d1a6928dfe54833a912424207497ae594ec2c6290c41aa05cb93795f48cf7a244316bbb8a99ca6aae7336116651ece69c47a0e93372744b694115bc6d254a4521fc830b0fe9b0a34b26ea9ec35142599e918bdcf0324be52215790ac1c6bd3b0b4d4314d9dc4980a4c8b15052df465aa5f72d41c0706090d93989d9571304602c4ac25ff162ec956b08ada52a4229566f2f1dfec69d689241f97826a71e51871e2b86d8d4286ceda0a93c4621f9e3d5994bd5b9d586450aefd2f943b71c468da4af5a5ad644298e3bd6eecf352770b470ca9b6ed9f0237258a4520ade1417ce54b3c94f35391fc70e7caeb9f7cfbda022f90dd1f9fde3ff8fa8d62afe3b83aaa7df759deab64f7abbb97d9c3bbc506c4f1fba38f9f7c93f5d3e7ef6cff0aece068117938ee41707094a16c7e7d98438c909fde3cf20e674548e1d87de9d3c359060d428e1fbdfff68359a1d8383efc7eefc12c2b2b8e9d9f519097d00477fcd79fbf146b6503f6df7a52ac88b9c9dca38f7fdbaaf1ed1b84e4f70dbcf59ce182525976da937f01d36959fa0000 +DMLOG ACCEPTED_BLOCK 4 
04000000040000000300000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010003000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd2d5b1b639d6ae94fcdd0536b224644931573d1ccb2a0c548613cd1feea18888ba1daaeb8ca4a99a2fdb182ebb462ceb26de69d76f024586207a1159226ea43de0300000000000000010000000000ea305504000000010000000000ea305503000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b001f5505dc47adebb683205c6cd618df9385777f5ffb29d307479ace7cc0
7533ec5d3b9e648ee95c2a1dbea8d971cd9e980c6185867556035db58109f2678ce179e30000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001130ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b72412652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b447670735c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c25443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b468dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974286bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead45071d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0001033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac0
0c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b001f5505dc47adebb683205c6cd618df9385777f5ffb29d307479ace7cc07533ec5d3b9e648ee95c2a1dbea8d971cd9e980c6185867556035db58109f2678ce179e30200d0070000dc060101002026bcc48f2a2332b2fcb39133cc9226405049c824dba512d79415009b403b107d7dade3a3173c6cc9657b7662dd1fd267156e076ac91df0aea1d8172e65d676fe0100b13578daed3c0b90645755f7f33e3dfdba77df862569665773fb31606fdcc8246c66c710cdbe91ec872424c480c10fb3bd33bd3bddf3dd9e9e65d7c2edc56ce1f2b10a1525099602150d0906ab02456128ac4d245504b58c24a108258a965552201acb2a410a89e773efebd73d33614327912a9964a7efbbef9e73ef3dff73eeed09ff317a8b166ffef2e3e705fc48fc25be3efe46f8bdef15ef7ae0e17c87f8c837afbcf22fefcc75fd393cd45717b147de215f765888c3aadb158775177f7ba7e147760f0b79fab0d73d7d1abafc2efe409fe60f95fd3e6ddf89c3018251bf3c0df84e3b4c80f6340d94d023bb841861e10560a769795497401899821ef5b43fa61b4b27a2d923d3479b4bb3d3cd893d42634fa9b1bcda5c9eaeafae36da1d21b12b9e596bb71b4b9de97663a6d13cd1680b0fbbfdfa91651822b05de6f1d3ab73f52baf9a108a70f7faaee09edca8abaeb892fb62dbd76eae341667af9818e7ee208f69641ad633b7d069be5af8f8ecf55006795c2303482258f032ac777abe714a04d863561b9de9230bcb33f33373f5e6d2f44abd5d5f6c741aed5568cecc376679c7c162637166e5940809e6d8f78329e0b08b11f54a7b796579b5318b8dd9b51918234688aa8e849de66283c9b71dd1d6673a
40d0dc684255586937973aabd30bbc9a8b1c8972bb291256e0de6a67b9dd20f645d4d56e1c5f6b424f7dad33274ad8b58517d63cd15c681c83d596b1f325d8d96eac2eafb5671ad30bcdc56667556cc1372fdb781fd38d93622b41aeb41bb4ec7aa7317db451efacb51b2226aaf1b2f9617b73d5bd9d76c367c53666393c2f2f4dcfd63bf5e9d5e6af36c445d40d7867a773ef9818dbf202393db33cdb102fc1fe226c1e49885b273e95a1636d651697857ddb7b949c83b54bbdff3af1d26db1d816c771292ea8f8e2822a5c22652c2bfc5390f643560af8290ad094ee9f2ac882829f82e7cb15592efb5a0a195cacbb323d735e445d91fed323d9473822fdfacacac229f18a918ba44865547ae39b7ee1cdbff84bbff296e9d9c6d1e247df2daff3445c8bbea28511f1e5f1981146bec1db3775e6cc7f3f71efd93bffeeaeae51f08cddf16b7ce8beff8bbff9bb1fb8f78dbdde6b5cef47fff6f3a2d7fd33d87de66b0f7cf3fce3efcee1f8593bfa530ffec7adbdde6ba1f7db7ffcc4173ef2a1c71fcae1d84738ce3cf1d90f3f35d5eb4db9f7ccb7eec8f7be967b3ff69dbffffaddb9f9aee3ee4f7eeb8bf9c1fbb1f753ff75f7fbefcaf71ea0b1e77fe73b7d630f52efeffdf5379ec8f7dec078bffc271ff8e283b9e95e4fdd8f3df0f14fdf7965aff7267feabdf73ef5d457de7367dfe09b7d23782be71fbce349718bb72fa5e7a71efff8377ffddf456ca2c11f3f37dcf1a96fcf80e0edfd0852115f11b5a5ee2a98b42c52b115ba525153ef81e7343e91c856a226e0897bcfee4bb411ad746b2b5d399e8809e8004095c61d23ae477068cb168e37121a5519c16bb94fa4dd03f8367def7911cbc8e848896842caeecb618ef904de4ca81200c30ca59a123df467f6612f4e938a6b05e28d5e2d551796268dd851956a9f06fcf12b135196a9a8f21ad3445d5796466d13a224a2c8882840f1fd89c8079d17f67711172bf6ea7369f47a5ec857c57c22c78070b0f42f09d844819a31b4bcac85af45fcaa517a34b286af60c5f3f116e8f9aa688d89e8c305582192b30618e09797a8f9347c1defff21d83f7556556414c014e2ada38676eea505a2b587bdadaa7628005a6f0cad532f07ed65d0a5a1a0e3a1a0b70f055d190a7ae750d06628e8b1a1a06b4341ef1e0a7a7c28e83d43414f0e057dcd50d0e7453fb8dc0c5ce5c05506fe3080cbef0f2e07c0115844df915a76d569b23be3f38926b38373c5adb83025aa5e1a57d1a02968fb60e2f08551ad6aa0d0de02ce436561fca9c23b8c67822b84389b785393e7ce567d78d78575190fde55bd12dbdf54eea7b5ddcbd61720f76a304d0e76af16381ddafb5655e0ec386b44a63c51a901d3cd686240d39b5e033ccea73be9c9563c026b
2cb4766980341a3f3c9cc8db8b9103cd07561f440dec2398fef7435802f98087db7f5af0fe352820fa01684d815daec04777af8e611a01e8120f97ac0921d1c0832da277bbeb5f3ff16f57800b10e8450ed2fa00227e7502f41e379a377efe61763b0678378eef692df11ea68f17ab37d0b077fd050e03c1887725021711034d6aadc4c736ec03569804ae5d6925a16d1b8d688185f884988d029f9014f0c33385f9781c0885fd00047bc151013fc7ee3944a24d5a6cbbf438ef9cc6d44066e6ecc3843acc1308761c7648a13da166a1090e88b8185f35ca780a791a59425aacf1c5893fa1f6102961647c2bb274ea349055be0d7efdda9b76c01a90668226cdb880f403c0ddc4055d533b13851fe6268057f826860fe6564aa0ca3057419e2ab0819d25e40434ed7a693b1a6c583c1121a2c355e120c5756574dc97453c5325f1af2b8bdebc205707500daa02506a8c57e65ac876501b4321050a83421c0a70000b044a3261aae2272e7627e187099f26df7d36d0234e2f311e505558f618cf297005a0bce06690bb3e29034ceb83f8d73030d80f4107a804ae02175652107bc0f37e5c2374e2c2029238fe0f5407561957133fedb2edf84f1453248f9fbedd3b54c646fcca6a98ca6a017b81ba1c19cd27e1188caf824e49901c3fae8eb2d0062cb456460324fcb80980d6b60304d8b56adc52b8fe9042b7f4a9cf9296042664e10a310cf114ef30bdb46f10ca7f7504f080c92802f9c0981a2f350006b6230273e293425755cae6a444468c46b5aa65dabecf66a484562c3265b2621159b112bc032be69b08ad58c45aea3b2bf6c9476805ca9450c89483652be66330da22bb19e1b4c001b4625e66c57c67c5ecf468b500054ed93364a496b00cd4a06a84734548359e3232457c61b0a788d0f050e307126a9353f322da6dea8a7b9a2f41ed40fa820965f003f50ad0a3dd82373b9daced4c7cb2382036a85a3ebe8ce123ca54cbc05380aa85f295a9d608cd6789ad415841af006301f5208420989495d8c328b0df671da0f8d504d11d4a2b30cfa40506ac73152cc418597eb4a982d9f01932966a5facc834b005fd06751ac97ec0633b48bf023686011a438c77802f199530f682e7b8f70c84f480f6f4802d126b0a4d15b28e26e019ff80e551a737b7c88da079ab51f0eea15946bb63836c8f788dfe17d685161276b3670a25a016278455583328c00cfa6806fdcc0c8200a17df882eef96da00c980672f1b493445f232ee1989e1ebd2973ee1aa1d11a4ed5a0a5a8350e2d49ad496821b8a6712fc1d63d600b0a27ae161793aec7d07a29b52ad0da8e4030e06a11521f8e0bb2717e36cec325f0fc93efc0a9dfc159e199f3cf88dbcf4247c5767cf5dadb294ca0a7a72fbdfd2cbe2c9c
c3c76f6fc557156c7f3724a01ab4bfa76fa7659f3d7b96350fa6895fe0692a442024da366a21212fa216922fee4bb3e47c7a291bd2730f6741184a23d804930bc20c0981228e422c7697967ed79a647c8fbf807af3696511d801c623483ffae47d4fa216a6f761a31abab1206434cc4b9fb8efc9cbc040827683bde9242198e5e0d08e24c4e5800517148b8145ad2cc4bb303492e40dd005a08ca2d7a82c2490ff49544f819a0eab26b9c72c34e04108dea2180f8c6f60734b5e6ec8cea7d0024753951a737270431ece024113cf225921503ee1f50ac47699be617409dd34353ecf510c097e34341e394d9ad29234a429614710e08a7837bc0d39daf351ed04aed6c368881d30600083012b0a7145214282d372ee59a2f991687e607b38130564d8e94167f498d29eb34431e95b8a1b22ef0e53810b535872e0552339381d4ffcf9d45fbbbf1a90d79110e7834f0ccfa17fbc0577a3881296e7be5ba0b206024d02a6d0645589b8cf8a297d1bc18469777f5959d4a48da556fc86513281840b8247df5293e8e75b6a021961533eca09c8487c8b8838c441cf0e9efec632c584269c8f7fce02520465c29be80d587294e788832f8f3f7001881da9047a1afffc28f945b0607faa155ab09efd42e31d5ff23c9b3191993191993191376382cd98c8cc98c8cc98f8ff67c6c01fed483f0399e34ee0d54fa79fa6161ba8e8f7fb5d0e57a884e595425ee123f34a5b5e493b85a216f34adac928d3b4bc82d63d60bc9857125d30f10a5bcc2b89ccf49057d8c7bc72e3fc6c9ce7720b8d44d48344d47d44d4fd44d43922ea1c11754644dde3154d13bfc0d3548840cc2b6c31afb0c5bc8268257e39c52cc09d7b7e1410fcb005042afe310a02bef410e709cca83f93d2735ea497d441f8fae38957b6358702565cc809908de53a437ac63b44eee89555c55998e7b2304042cff1a5a316810586219053c9a8972f615ec2193b2760e86d2996f5d293c7d3c2817c6ea5b3b16cb25f2b35ac1c97cdcb7549f0052e2dc2e0fd47483640f2a8629918ef930996081485e72803b6c034c61f98db417f0bab2c355bd3225cf96c1ba3bd1a2663353586f542aa8b38ee939060b9718ca21475e0d9e02e4138e9e0e4b3c059808b070068e129859e992c531e5968c553a35c732924544ea1fd95b8aa628b7acf485bd1b3a54fc8bd3546bd9ace8728563e44cbb8fd91befae8e58202215e32058e182a3d238e03a19e91c713d5b9ff2cb2260d5b90850655346a67e4f1b20d9228e6f339e653102c91bbc2b80883611731791868e3070f641642587a6399abaecc232f72317d308f549b4ffc9b7284633cc8ea00d2e52c9930beade8dea1fab2880207e65ea2e78d3c44e125d0e36a215c1202dbd55c48ee42d46f
ab45dae51eb8274c3cba379465fa0cfc8428e8444a78a9d2ef4197e67c4361bea1b19000f98626c1d556788d8664838bd8982f00ed2025a8eade5a6c44ebd68041782f039230d13cd57f608a8328fe482dda3a05f11187fa05ae7f62c4ef5118ee22faad363b7840290da2017a76463eaf8a762b7fdc6c156d42ddc6927b704335bb95c5fe736240c1080fcafd3e869e84a7c7a8743289824f0a40551eece51c0975cb157ab9c4a077e99b139f4bd472af46dde71a3696be6bb6dcb94b1f64ac57b2364de6b4e91aa74d6724aad30d439a37d8f893e769e3d0f9375279cc80ff6be797f37a032e0ff43b334758740b8cbe5c405a693037c1a45a1c882bce21dea760439ecb6136db50c56ea8f2836ea86281bfdf867cebc693f5bb82bcda77236337321e1854c80daa38fa54360909b0b216308de8cc20ae16f0a3521d21528d9802961ee29791f5861d7d48cba8dfc7a159264a65d574df5205abcdde40a5d91b23b3e05bb50315dfce1f311f0081fe54587f0a2eb6a16debf8d028ad88bd48bc778ab4d0e0f903f8203063f1a8f52a86e49bb18e51decffd220972f57e0fac7b80f5fe80d688050520f8e116566390fcada4805707ba586ef7d285563282188a88b96809bfd24a22b2afa6c819bf971eccd8d7e23a658931682e491e6c9952aa3ae9b947042afe368922927651e97d10ce229d40446684d2f2c844a648af50950b3c8c174d55573a13d35c1ef172bef3af34955b47723904791b4d07817400c8a75a1b39511da1a6a04cb03967af02ae929ca84f4ed447278a35a610a559630d639d130d73859324c0c442b31fd558b37d1d3b502c50e0d1d58d5c418709fd0ed0dd84b6c29368c7108d041dc918a29121459c204286447890099dc010bcef01ee2bda6f7de4c1967d07f03ebe2b3306c5c57560487990213e533a807d4596da452aaf61593fa25748ed11c79002d599606b052415e851c444c07584ae1424b874a2f93c8b0b43db498b49e9e3eb49441082b165e5c43f0c7411b8a97b25ab5e4ca03926f090c82e2cc03a786235c7b31f308b6b155a4ea90497fa6d0933f106a387ec5cc9fa6fc74a908930c53c8c2a9810385c8bfa4f12e5cf03c7fc350c27f07887aa753af5d61294a010e209f6cf28841aab7d9ec3a3b1ed7720bcd06415500ed0108160a1448c60e0d7db4da5b5f9be702f552f178210858abc85002b6a8129ce5723d603b06f10c348148e80d959c40e143b3eb1915464e4e846422f9ed8082b4f396992287a5bb87c285136b73279b79050da32e85616db2d5955b4831208db93b6078935e74607a6cca31811e1468be0c32a126b2660ad68a73924a528aa5cd2364cc24b4920c3650cab0a1c56791c568d70a9142660e10cd8ee05ce6e85fd760fd79b
f0d1295d5020bbb795ed9ec8d93de9e0c9d4fa4ecd0ad6eef9836aa699e0b24ac55ddad0161b086ee5235eaae386561b4d9915a38c2b29e31b5b81fcbca47890f7af32d5c3c85b5bed188cddbcaac7aeda1d7af36da9090e55b14e4d38a4b39a7e967a687cd3673565de6a9281cbac261eb8f5520f0fad26397f14504d6567b69a9435f8f7c487a3774a0ad406cf8a5f88c8d606a5607a6e748e15da3fe58eed36b97676fd600aa3f83ce4c2ae9d6d048da1c8855d3b7b6ed0f150d0db8782ae0c05bd7328683314f4d850d0b5a1a0770f053d3e14f49ea1a0278782be6628e875d7ce3603bfc06b679b69f846d7cedef7c3520485f4852c183984f778be4711933de87b7a1363bae172fc81e5f8b81c9f8de914a54aeeaa4b1a57f9865d16856417cdf07a01e1edbfe4e2e36595d09a5e9fcf90f11c0f231d94df117bb9a54891eb637cb98597a8f87649112fb70426a2cb2d015d6e29620201599e09f0720b5f08c2d4b7ef72cb88292221471cac5bfe089ac911dc174ebbee728b72975b94bba207c1133c0c5ed12bda3c12270a90123c1f67ac1c68e72fab59a20d5c59d3f6ca5a855b7c658d8a248020e40ac93f034d0a5c22f1f91a99dcab6fe3eac8cd994f8b5bf1647605eb735a7bee44e5874612fa448078ef5bde8779de17f2bc0f99f7851cefc30be1bd6f42e4bdef60dd827daa2058de8717c4fb7003de8719ef7de63dcf373cefedf5d5ab334e3e29b5ee3fbce7545373b40671dee4e2fd7c0c2ee8ce027f65c0f878c0eedb0376778f80ef29daf0b5a6c693e07e4a08368771170f037730af28a5a300173a21bafdee432eba654329d0ce12a8072e022b152eb78b29b7db4aa7f3fcfd81ef71c15b0f6c8ebf0d40970f38d9a34c48df8317d340bc60912063b740f21276b8ceddbe5a6ccd5d2611eb2eb078587e5128d43267d1a54d00037b37974278266f4044cae10c37c6e90f04819c6cf81855de3aeaee2b0478af84614a7952445f9154b450b9fd733dc2c8ab54ccf91fefdac35d7b9bef5a6fb46bcd3e70fdae75ffae6397cf795cc87118f5c618e58053b67b8e6f1b1d60f2252c10944ab16854f196c7a6fd279474390d1588e9da479ab4129b3ad65a57293105adcbc5582a8fefa02abc40d9153db1e00b25ee664c4dd56838a4bbf204caf9f11d0915d7f1545fd0acd19dba77917abd14a27abd8852e8e430643994b62874951ae73387679d7ecb854cbf65f3e99db687681bbcf52a1f824e83ca7fe2e14ce5430c22fdbe8cc9cbaef1af579681708c260ed14c849c0c0f9a8959c80c7bb24281199651d064e1718c978be1d6ef57b1271bd82c85809caea6559af34599e43ef922cc02025eed0f8c3741a50750d9cb669009c5bf3c1afdcf7ab3c40a01d93e4c058a
a45191eeafdaca817e91ec945d818f2b805fc7936007d5dfa93c414ecc043ba8a463e732feeb379c6b83d3c79c5556f4ff28d90fb6343407a46ac03abc50f7317be46de84b171cae507d767beecb17aac4a75e5ebbc75b5751d838dbe12b851bd9562fae8ff69b6a3ab2a55bc75cfdb1676a826eca93553cc88720028b531ed5015d15b9d2a2d7ecbef3c5298fab4eaa2aec8977c4851e4a86e85e229daf494a3df6ba3a5dcc9721f11805fa4a94ab7cf0111bf1d1f1c96f706dde5be7e65eccdabc3de00ed1a2afafcd1fb187db7db579cc5a2617b0366fa9ea2355b5bdcc9f04842f57f2c3d760ce06a86a6bac1a565670c1505636f7736573c73aa3e399d13e9109364a5103a4ef1e62e3ba2c154f481163bf4d8d3ee4cb527f497d7d517d72b157541fe7a2bae2a23a857414fdabac20adb282b4ca0ad28a0bd2dcea2fb94f6e5072cf95da8933546affa3df7e74eca6b24cef86cf5ca51d47e8f4b71ebdfb223cb747fec8ce86757689d23cb9d0abad031f6d6d1d39daabadff409b42cab8d6645fe53dbb00edaaeff4ed9a8d8ad1f4151557959754952743deabcae3418d2dc303d222be26b92bf59de8905bf6b3339df759c9a3ea7189cbea11ddaf85416f6bbd4a8ad780db464f01c8b900ed56c791a75ba4bd5d4bcbb2c967ee78c973c74b24bbb672aef86c490c6a80e4fd20767b7937ca7d6582a5d35d74f807feda8476c7adc3d98a70bdadd06c2b34db8a70d056a05de0fb1f7c99b9d5f327e18141150cf98b3e035e2b1c74cbf64b1ff06879566b51c97c904c7c595d58bb3ace5fe9c03331d8139fc5d0c5264e2b4576e53cee3d17f8608d9f4b74bb04268a8f2195c7d906dc40f77576090bb1d90c6260063130033d3fbfc84e4ad92574ca1e29597456667be8dc7b87ce3d3b74ee79373eefe6b319bebb0fb8c7f6aa31a3507bed552d11fd8bc70710e745cea1a3462b32bbfcbc4509694fd6f80b59a40a052a596401b03ab08163f7dda58c81c8cdd61efa44243bea9d6cd195e4752242ae9788273311a138a39fecd4d52f28d4d52f2bf65a8e1517c50e2517c2ec7ea1b60ba876e563de8df078ebf0b0f1c4af39262a9e1b7d6e342389a0afae3a111beba35e5eca72d453eba9a7d653af276eeefb1f0371d0792ae67c10bf2593bbe48ca4ae9030e2b7644998e99a204b5c0dcfca21e44bf43c5fee40eccc1de5ce7d576e2a2b5bc42b5e4f2618bf3a836200c46b733c83be0cff144400de906274fe86430003ecb72cc0fce35d109fac2702e35776e8eb6d74b5cd435df0ca744b80bd1187c6907a1fa7c818298733611cee8e8dc5848dd75dfc0cefe2cbecd16e66ecf27a9b11ee79c1cfb769241f213339630e9969c3db9df33136dd84d8f627c1317dc3d7f41736a2
5347ebcd85c6ace92c9bfac2c2f24cbdd3302bf5638d55b1b6d438b9d298e9c0cb46bbbddc36cd2573b479b2313b7de454a7b16a669697563bedb599ce725b74e61a66798987f19fbb3133f5a5a5e58e39d280d602ce30db6c03b68553c2fda4ef7c48444f79f609ff3c0ee06bd4174dbdd3692cae7478556f6d376949ab1d83b33496664551d83fc3033861a51d93fd359eab8d58692f77966796178c1bd35c35b890de5fece13f80a5e09f7e6ef30a5cab0fff02f8b77ca405dbc101ab0c00c3db7520c634b6794ea0d8e2da42a739dd5c9a6d9c144c1ffc9b40cda563d978612935032f6046c6bb8ab09dfa9105e8386aeaf07eaed1469277dab091f5932f2ecf368f9eda645e3b811d734113f05f1d6a3b2ececcd5978e0135dacdc57afb94996f9c326f9d6b2c5994b89bfa9245bc294171dffdf43cd6e80811022d0bf06f04fe15e15f24f04fb108d15c3a515f68ce9a13f576b3be847bc2ad3c17a1045b1b795f4b00d7ff02b86d754e00d007000082010101001f5be06ae972efdca372f145881855ab035fc9b761243d0042d6d39703ea6e94a86cd3b37db4b18cdc3f21d6403bc80f91005cf01ed88a02858ec4a7b9fe9ba77f0100e60778da8d56bd6f1c45149ffb3edf25966308c6292882682205e24bb0ac54a6a0a1a0e0a340262c733bef6e47b73bbb9999bdf3414d116890e88284228420852ba7890c85752d121209690205d081c4ffc09bd9bdddbd353e3cc57ebcf7e6cdbcf77eef37d3faa3fb418dbcf7f4f131c151310ff2f7b577cdeba17b385b10dcffa7d7fb627d2eb9b3be0aa1e2e1cd9bb4cf5fd97ab957d9e8fba13b72141f0a2e860e8db5174aaea797c754722ab4738ade195fdb6ca313c7a3ca23d546381120eb8206503792aeeb813b5271d07b75bb4d5dcdc75403a9ac0e80ea5882c3f810942e5aad64ae497d457b129417faac1973a1aff7ea2398aa73f87026c0879edebb854edd30165a6d462003ae3028e1f830063f33694c28d7eabc7966b24ba78743aa6758f6a29def7a940b27a212c3d52015b9f04c40f7d35409d04eace810ac93ed1b97349543d065ad13b93a5d66d34cd6920a653285712cbab8de7ba14f154e00b9d46a2df7ed034ce83495bfe88642c3be7606124ce6954d5c6127220ece6aca40a4a68588dd285ed8c862c4997659c465179b01267899c1f3650f3e1f80e641963206039012181aed3bb01f7149addd840b164e52ab8b652f0cfc2c6fcf1925173e17e0a46ac53f9a2fb07152cb20d29e556f6ddbf4e4f02aaaba2e152ef87629525d4dfee6685c2bc3b969f6cf59b1553a38157167cc49b5953682edbd4e3e3bf9cfb14baa35fce94471dfe7ae839fcd449e6eaa8d3b18598ff5058f753764907ce969947c7525dc8eb9
8400e6cb0a845b3209a7bb129b3c4cc9207b240c917779d3b202e48256887a194aec43058221d8396bd9cdf576da28d1a6928dfe54833a912424207497ae594ec2c6290c41aa05cb93795f48cf7a244316bbb8a99ca6aae7336116651ece69c47a0e93372744b694115bc6d254a4521fc830b0fe9b0a34b26ea9ec35142599e918bdcf0324be52215790ac1c6bd3b0b4d4314d9dc4980a4c8b15052df465aa5f72d41c0706090d93989d9571304602c4ac25ff162ec956b08ada52a4229566f2f1dfec69d689241f97826a71e51871e2b86d8d4286ceda0a93c4621f9e3d5994bd5b9d586450aefd2f943b71c468da4af5a5ad644298e3bd6eecf352770b470ca9b6ed9f0237258a4520ade1417ce54b3c94f35391fc70e7caeb9f7cfbda022f90dd1f9fde3ff8fa8d62afe3b83aaa7df759deab64f7abbb97d9c3bbc506c4f1fba38f9f7c93f5d3e7ef6cff0aece068117938ee41707094a16c7e7d98438c909fde3cf20e674548e1d87de9d3c359060d428e1fbdfff68359a1d8383efc7eefc12c2b2b8e9d9f519097d00477fcd79fbf146b6503f6df7a52ac88b9c9dca38f7fdbaaf1ed1b84e4f70dbcf59ce182525976da937f01d36959fa0001 DMLOG START_BLOCK 5 DMLOG CREATION_OP ROOT 0 DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1262304004,"value_ex":100468,"consumed":1},"cpu_usage":{"last_ordinal":1262304004,"value_ex":256961,"consumed":101},"ram_usage":199629} @@ -177,4 +177,4 @@ DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"alice","net_usage":{"last_ordinal":1 DMLOG APPLIED_TRANSACTION 5 
6cdc68c8257e32da2f6cd8c4f6e6bb7798a47e7da6ac14a7cea45da9cd70c81c05000000043b3d4b010000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e7510100d007000012000000000000000090000000000000000001010000010000000000ea3055f3d881d2f7fbf2f7cb6081aff84e7aca1dd3914a0948ef4fc9422e734e8d4d571c000000000000001c00000000000000010000000000855c34010000000000000002020000000000ea30550000000000ea30550040cbdaa86c52d5010000000000855c3400000000a8ed3232310000000000855c34000000008090b1ca00000000a8ed32320100000000010000000000ea305500000000a8ed3232010000000000000000000000006cdc68c8257e32da2f6cd8c4f6e6bb7798a47e7da6ac14a7cea45da9cd70c81c05000000043b3d4b010000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e751010000000000855c34400100000000000000000000000000 DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":4,"value_ex":144011111,"consumed":7999},"average_block_cpu_usage":{"last_ordinal":4,"value_ex":366368114,"consumed":4433},"pending_net_usage":376,"pending_cpu_usage":4100,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1051726,"virtual_cpu_limit":200600} DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":5,"value_ex":145944352,"consumed":519},"average_block_cpu_usage":{"last_ordinal":5,"value_ex":397481713,"consumed":4464},"pending_net_usage":0,"pending_cpu_usage":0,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1052778,"virtual_cpu_limit":200800} -DMLOG ACCEPTED_BLOCK 5 
05000000050000000400000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100018b5b706080c8d5ec9456986e611761b17ec82e672f8176e581625f54535c32150400000000000000010000000000ea305505000000010000000000ea305504000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100000000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e751043b3d4b0000000000ea30550000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef08a40bd5b3162bbbe3fb5d540dabc705bada65dd0a276381665c9ed73d4afa451bb28b302f397e60628e8e3a7d3ecb23a8a0bf20f606dd26b93b61511cb87673000000000000001f69207fd55ddaddad96457d633a97f216211989106548974158e1cf70d1f8e5e5262d674a40041040cf394ae3496bb3852b67e15d9c771dfb4a818f6166f590a90000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001130ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b72412652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b447670735c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c25443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b468dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974286bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead45071d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526ef43112c6543b88db2283a2e077278c315ae2c84719a
8b25f25cc88565fbea99f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0001043b3d4b0000000000ea30550000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef08a40bd5b3162bbbe3fb5d540dabc705bada65dd0a276381665c9ed73d4afa451bb28b302f397e60628e8e3a7d3ecb23a8a0bf20f606dd26b93b61511cb87673000000000000001f69207fd55ddaddad96457d633a97f216211989106548974158e1cf70d1f8e5e5262d674a40041040cf394ae3496bb3852b67e15d9c771dfb4a818f6166f590a90200d00700001d0101001f55be758d9f4e3d253e069c66875beafbffe405c897ee2da2dd4577b2953a3379758676fd8906c5c3eb7043a30dc8d939af3eef5e73bf7f57f253e854f803dd810000bd0107e10b5e0400a7e16ae000000000010000000000ea305500409e9a2264b89a010000000000ea305500000000a8ed32328a010000000000ea30550000000000855c3401000000010002bb30f6894f29bb6fca635b1df728ad77e48fdd6123ce5e4455b0f71e072e7df80100010000000000855c3400804a1401ea305501000001000000010003ebcf44b45a71d4f225768f602d1e2e2b25ef779ee9897fe744bf1a16e85423d50100010000000000855c3400804a1401ea30550100000000d0070000120101001f504584a3e50ad7d75a7ffd3254fbd363824a5269d57a1d3443644067e42117515242fb12efe17be14590b398489b951a8823f8b5aed4d7de11be3a971498ddb100006307e10b5e0400a7e16ae000000000010000000000ea30550040cbdaa86c52d5010000000000855c3400000000a8ed3232310000000000855c34000000008090b1ca00000000a8ed32320100000000010000000000ea305500000000a8ed3232010000000000 +DMLOG ACCEPTED_BLOCK 5 
05000000050000000400000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100018b5b706080c8d5ec9456986e611761b17ec82e672f8176e581625f54535c32150400000000000000010000000000ea305505000000010000000000ea305504000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100000000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e751043b3d4b0000000000ea30550000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef08a40bd5b3162bbbe3fb5d540dabc705bada65dd0a276381665c9ed73d4afa451bb28b302f397e60628e8e3a7d3ecb23a8a0bf20f606dd26b93b61511cb87673000000000000001f69207fd55ddaddad96457d633a97f216211989106548974158e1cf70d1f8e5e5262d674a40041040cf394ae3496bb3852b67e15d9c771dfb4a818f6166f590a90000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001130ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b72412652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b447670735c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c25443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b468dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974286bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead45071d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526ef43112c6543b88db2283a2e077278c315ae2c84719a
8b25f25cc88565fbea99f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0001043b3d4b0000000000ea30550000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef08a40bd5b3162bbbe3fb5d540dabc705bada65dd0a276381665c9ed73d4afa451bb28b302f397e60628e8e3a7d3ecb23a8a0bf20f606dd26b93b61511cb87673000000000000001f69207fd55ddaddad96457d633a97f216211989106548974158e1cf70d1f8e5e5262d674a40041040cf394ae3496bb3852b67e15d9c771dfb4a818f6166f590a90200d00700001d0101001f55be758d9f4e3d253e069c66875beafbffe405c897ee2da2dd4577b2953a3379758676fd8906c5c3eb7043a30dc8d939af3eef5e73bf7f57f253e854f803dd810000bd0107e10b5e0400a7e16ae000000000010000000000ea305500409e9a2264b89a010000000000ea305500000000a8ed32328a010000000000ea30550000000000855c3401000000010002bb30f6894f29bb6fca635b1df728ad77e48fdd6123ce5e4455b0f71e072e7df80100010000000000855c3400804a1401ea305501000001000000010003ebcf44b45a71d4f225768f602d1e2e2b25ef779ee9897fe744bf1a16e85423d50100010000000000855c3400804a1401ea30550100000000d0070000120101001f504584a3e50ad7d75a7ffd3254fbd363824a5269d57a1d3443644067e42117515242fb12efe17be14590b398489b951a8823f8b5aed4d7de11be3a971498ddb100006307e10b5e0400a7e16ae000000000010000000000ea30550040cbdaa86c52d5010000000000855c3400000000a8ed3232310000000000855c34000000008090b1ca00000000a8ed32320100000000010000000000ea305500000000a8ed3232010000000001 From 19d99f5913a7a8e3a6be861c8e216bbd708887fe Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 11 Jul 2023 10:03:47 -0500 Subject: [PATCH 069/180] GH-1357 Reduced number of generated trxs to reduce load on ci/cd --- tests/nodeos_contrl_c_test.py | 2 +- tests/nodeos_startup_catchup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/nodeos_contrl_c_test.py b/tests/nodeos_contrl_c_test.py index 3ba58b6561..0e9fc6fc70 100755 --- a/tests/nodeos_contrl_c_test.py +++ b/tests/nodeos_contrl_c_test.py @@ -97,7 +97,7 @@ testSuccessful=False Print("Configure and launch txn generators") - targetTpsPerGenerator = 100 + 
targetTpsPerGenerator = 10 testTrxGenDurationSec=60 trxGeneratorCnt=1 cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name,accounts[1].name], diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index 364d4948a5..94171e8927 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -111,7 +111,7 @@ def waitForNodeStarted(node): waitForBlock(node0, blockNum, blockType=BlockType.lib) Print("Configure and launch txn generators") - targetTpsPerGenerator = 100 + targetTpsPerGenerator = 10 testTrxGenDurationSec=60*60 cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[account1Name, account2Name], acctPrivKeysList=[account1PrivKey,account2PrivKey], nodeId=node0.nodeId, From 149f76b6c55587d098122bb47109c10221acfe48 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 11 Jul 2023 10:37:28 -0500 Subject: [PATCH 070/180] GH-1383 Fix ship test to wait on correct transaction --- tests/ship_streamer_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py index 9ee47c9338..8134e4dc50 100755 --- a/tests/ship_streamer_test.py +++ b/tests/ship_streamer_test.py @@ -137,7 +137,7 @@ def getLatestSnapshot(nodeId): nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) for account in accounts: Print(f"Transfer funds {transferAmount} from account {cluster.eosioAccount.name} to {account.name}") - nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False) + trans=nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False) nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) for account in accounts: trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True) From 
11f71f8273e3d422735adc3d27e5dd37a510fcf4 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:03:58 -0400 Subject: [PATCH 071/180] Update appbase to tip --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 02a08a374a..3492ca16e8 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 02a08a374a6018b9f9d067a7dfc35936d10a4c6d +Subproject commit 3492ca16e881d39ed20b0d0cdbe59156a699a10f From 244c7d3e52d7472d7a091f6774f41152dcff1438 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:15:04 -0400 Subject: [PATCH 072/180] point boost to AntelopeIO repo --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index f6cea8d706..d646d0340c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -33,4 +33,4 @@ url = https://github.com/AntelopeIO/CLI11.git [submodule "libraries/boost"] path = libraries/boost - url = https://github.com/boostorg/boost + url = https://github.com/AntelopeIO/boost From e2550cf9d9c40af5dedd570668a308796ac3a193 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:30:25 -0400 Subject: [PATCH 073/180] Update appbase to tip --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 3492ca16e8..6316189788 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 3492ca16e881d39ed20b0d0cdbe59156a699a10f +Subproject commit 63161897889248ebf8fb3bfae8cfe0936b373b6b From 8b4a134316d35720308c2763c420fbb052c9b79f Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:36:03 -0400 Subject: [PATCH 074/180] remove old boost submodule --- .gitmodules | 3 --- libraries/boost | 1 - 2 files changed, 4 deletions(-) delete mode 160000 libraries/boost diff --git a/.gitmodules b/.gitmodules index d646d0340c..ab01b3d5c0 100644 --- a/.gitmodules +++ b/.gitmodules @@ -31,6 +31,3 
@@ [submodule "libraries/cli11/cli11"] path = libraries/cli11/cli11 url = https://github.com/AntelopeIO/CLI11.git -[submodule "libraries/boost"] - path = libraries/boost - url = https://github.com/AntelopeIO/boost diff --git a/libraries/boost b/libraries/boost deleted file mode 160000 index b6928ae5c9..0000000000 --- a/libraries/boost +++ /dev/null @@ -1 +0,0 @@ -Subproject commit b6928ae5c92e21a04bbe17a558e6e066dbe632f6 From eae52736b9150143e6f6aa86551d9a6f95302985 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:40:06 -0400 Subject: [PATCH 075/180] Add boost submodule from AntelopeIO --- .gitmodules | 3 +++ libraries/boost | 1 + 2 files changed, 4 insertions(+) create mode 160000 libraries/boost diff --git a/.gitmodules b/.gitmodules index ab01b3d5c0..d646d0340c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -31,3 +31,6 @@ [submodule "libraries/cli11/cli11"] path = libraries/cli11/cli11 url = https://github.com/AntelopeIO/CLI11.git +[submodule "libraries/boost"] + path = libraries/boost + url = https://github.com/AntelopeIO/boost diff --git a/libraries/boost b/libraries/boost new file mode 160000 index 0000000000..25fd279875 --- /dev/null +++ b/libraries/boost @@ -0,0 +1 @@ +Subproject commit 25fd27987595c4936fe83e640b625b97ea0bebb9 From 127e0465e56e9795b24120e7539227e63e75ee26 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 14:38:22 -0400 Subject: [PATCH 076/180] Update boost to tip --- libraries/boost | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/boost b/libraries/boost index 25fd279875..41141acf3a 160000 --- a/libraries/boost +++ b/libraries/boost @@ -1 +1 @@ -Subproject commit 25fd27987595c4936fe83e640b625b97ea0bebb9 +Subproject commit 41141acf3a937c357bf50cacd03269833b35049e From 1ab33ebdd8f507b15d94973bb60dbdc4dfbbe48e Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 15:06:31 -0400 Subject: [PATCH 077/180] whitespace change --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/README.md b/README.md index 71582a1ce4..c036a07f07 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # Leap -Leap is a C++ implementation of the [Antelope](https://github.com/AntelopeIO) protocol. It contains blockchain node software and supporting tools for developers and node operators. +Leap is a C++ implementation of the [Antelope](https://github.com/AntelopeIO) protocol. It contains blockchain node software and supporting tools for developers and node operators. ## Branches The `main` branch is the development branch; do not use it for production. Refer to the [release page](https://github.com/AntelopeIO/leap/releases) for current information on releases, pre-releases, and obsolete releases, as well as the corresponding tags for those releases. From 4f158ded97d3fef70c16136fe98b237fb7514a4f Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 11 Jul 2023 14:31:15 -0500 Subject: [PATCH 078/180] Try breaking out build steps into reusable workflow. Make use of reusable workflow in ph_backward_compatibility workflow. 
--- .github/workflows/build_base.yaml | 86 +++++++++++++++++++ .../workflows/ph_backward_compatibility.yaml | 73 ++-------------- 2 files changed, 91 insertions(+), 68 deletions(-) create mode 100644 .github/workflows/build_base.yaml diff --git a/.github/workflows/build_base.yaml b/.github/workflows/build_base.yaml new file mode 100644 index 0000000000..8fa4de8bde --- /dev/null +++ b/.github/workflows/build_base.yaml @@ -0,0 +1,86 @@ +name: "Build leap" + +on: + workflow_dispatch: + workflow_call: + outputs: + p: + description: "Discovered Build Platforms" + value: ${{ jobs.d.outputs.p }} + +permissions: + packages: read + contents: read + +defaults: + run: + shell: bash + +jobs: + d: + name: Discover Platforms + runs-on: ubuntu-latest + outputs: + missing-platforms: ${{steps.discover.outputs.missing-platforms}} + p: ${{steps.discover.outputs.platforms}} + steps: + - name: Discover Platforms + id: discover + uses: AntelopeIO/discover-platforms-action@v1 + with: + platform-file: .cicd/platforms.json + password: ${{secrets.GITHUB_TOKEN}} + package-name: builders + + build-platforms: + name: Build Platforms + needs: d + if: needs.d.outputs.missing-platforms != '[]' + strategy: + fail-fast: false + matrix: + platform: ${{fromJSON(needs.d.outputs.missing-platforms)}} + runs-on: ["self-hosted", "enf-x86-beefy"] + permissions: + packages: write + contents: read + steps: + - name: Login to Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{github.repository_owner}} + password: ${{secrets.GITHUB_TOKEN}} + - name: Build and push + uses: docker/build-push-action@v3 + with: + push: true + tags: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + file: ${{fromJSON(needs.d.outputs.p)[matrix.platform].dockerfile}} + + Build: + needs: [d, build-platforms] + if: always() && needs.d.result == 'success' && (needs.build-platforms.result == 'success' || needs.build-platforms.result == 'skipped') + strategy: + fail-fast: false + matrix: + 
platform: [ubuntu20, ubuntu22] + runs-on: ["self-hosted", "enf-x86-beefy"] + container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Build + id: build + run: | + # https://github.com/actions/runner/issues/2033 + chown -R $(id -u):$(id -g) $PWD + cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja + cmake --build build + tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst + - name: Upload builddir + uses: AntelopeIO/upload-artifact-large-chunks-action@v1 + with: + name: ${{matrix.platform}}-build + path: build.tar.zst \ No newline at end of file diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 2b017a5e7b..dd239e4b77 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -12,76 +12,13 @@ defaults: shell: bash jobs: - d: - name: Discover Platforms - runs-on: ubuntu-latest - outputs: - missing-platforms: ${{steps.discover.outputs.missing-platforms}} - p: ${{steps.discover.outputs.platforms}} - steps: - - name: Discover Platforms - id: discover - uses: AntelopeIO/discover-platforms-action@v1 - with: - platform-file: .cicd/platforms.json - password: ${{secrets.GITHUB_TOKEN}} - package-name: builders - - build-platforms: - name: Build Platforms - needs: d - if: needs.d.outputs.missing-platforms != '[]' - strategy: - fail-fast: false - matrix: - platform: ${{fromJSON(needs.d.outputs.missing-platforms)}} - runs-on: ["self-hosted", "enf-x86-beefy"] - permissions: - packages: write - contents: read - steps: - - name: Login to Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{github.repository_owner}} - password: ${{secrets.GITHUB_TOKEN}} - - name: Build and push - uses: docker/build-push-action@v3 - with: - push: true - tags: 
${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - file: ${{fromJSON(needs.d.outputs.p)[matrix.platform].dockerfile}} - - Build: - needs: [d, build-platforms] - if: always() && needs.d.result == 'success' && (needs.build-platforms.result == 'success' || needs.build-platforms.result == 'skipped') - strategy: - fail-fast: false - matrix: - platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-beefy"] - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - - name: Build - id: build - run: | - cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja - cmake --build build - tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst - - name: Upload builddir - uses: AntelopeIO/upload-artifact-large-chunks-action@v1 - with: - name: ${{matrix.platform}}-build - path: build.tar.zst + build-base: + uses: AntelopeIO/leap/.github/workflows/build_base.yaml@main tests: name: Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' + needs: [build-base] + if: always() && needs.build-base.result == 'success' strategy: fail-fast: false matrix: @@ -89,7 +26,7 @@ jobs: release: [3.1, 3.2, 4.0] runs-on: ["self-hosted", "enf-x86-lowtier"] container: - image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + image: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} options: --security-opt seccomp=unconfined steps: - uses: actions/checkout@v3 From 8044aa16ba9fc422a6f5924ae7abb9d68977fb1b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 11 Jul 2023 14:33:07 -0500 Subject: [PATCH 079/180] Needs to use this branch's reusable workflow for now, not main --- .github/workflows/ph_backward_compatibility.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index dd239e4b77..c3c89b0e7c 100644 --- 
a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -13,7 +13,7 @@ defaults: jobs: build-base: - uses: AntelopeIO/leap/.github/workflows/build_base.yaml@main + uses: AntelopeIO/leap/.github/workflows/build_base.yaml@GH-1156-ph-cicd-nodeos-versions tests: name: Tests From de40f8a4bb456016cae2232728711b446d77206b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 11 Jul 2023 14:38:46 -0500 Subject: [PATCH 080/180] Inherit secrets. --- .github/workflows/ph_backward_compatibility.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index c3c89b0e7c..b93675457a 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -14,6 +14,7 @@ defaults: jobs: build-base: uses: AntelopeIO/leap/.github/workflows/build_base.yaml@GH-1156-ph-cicd-nodeos-versions + secrets: inherit tests: name: Tests From 7df3af711e1e611b23384b6c245d40175a48b8a9 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 11 Jul 2023 14:40:35 -0500 Subject: [PATCH 081/180] Provide packages write permission for Build Platforms job. --- .github/workflows/build_base.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_base.yaml b/.github/workflows/build_base.yaml index 8fa4de8bde..f5d7773fae 100644 --- a/.github/workflows/build_base.yaml +++ b/.github/workflows/build_base.yaml @@ -9,7 +9,7 @@ on: value: ${{ jobs.d.outputs.p }} permissions: - packages: read + packages: write contents: read defaults: From cf7ae29952238cd8c00f317e0ba18906efdb9fcc Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 11 Jul 2023 14:47:58 -0500 Subject: [PATCH 082/180] Give packages write permissions. 
--- .github/workflows/ph_backward_compatibility.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index b93675457a..c78d3365fe 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -4,7 +4,7 @@ on: workflow_dispatch: permissions: - packages: read + packages: write contents: read defaults: @@ -14,7 +14,6 @@ defaults: jobs: build-base: uses: AntelopeIO/leap/.github/workflows/build_base.yaml@GH-1156-ph-cicd-nodeos-versions - secrets: inherit tests: name: Tests From 7be77cd7450e0c4b64cf506500c2840c40426381 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 11 Jul 2023 15:24:50 -0500 Subject: [PATCH 083/180] Convert build.yaml to use reusable build-base workflow. --- .github/workflows/build.yaml | 99 +++++++----------------------------- 1 file changed, 18 insertions(+), 81 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 09e926b18d..b520ddb073 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -23,7 +23,7 @@ on: type: string permissions: - packages: read + packages: write contents: read defaults: @@ -31,20 +31,9 @@ defaults: shell: bash jobs: - d: - name: Discover Platforms - runs-on: ubuntu-latest - outputs: - missing-platforms: ${{steps.discover.outputs.missing-platforms}} - p: ${{steps.discover.outputs.platforms}} - steps: - - name: Discover Platforms - id: discover - uses: AntelopeIO/discover-platforms-action@v1 - with: - platform-file: .cicd/platforms.json - password: ${{secrets.GITHUB_TOKEN}} - package-name: builders + build-base: + uses: AntelopeIO/leap/.github/workflows/build_base.yaml@GH-1156-ph-cicd-nodeos-versions + v: name: Discover Versions runs-on: ubuntu-latest @@ -72,69 +61,17 @@ jobs: if [[ "${{inputs.override-eos-system-contracts}}" != "" ]]; then echo 
eos-system-contracts-ref=${{inputs.override-eos-system-contracts}} >> $GITHUB_OUTPUT fi - build-platforms: - name: Build Platforms - needs: d - if: needs.d.outputs.missing-platforms != '[]' - strategy: - fail-fast: false - matrix: - platform: ${{fromJSON(needs.d.outputs.missing-platforms)}} - runs-on: ["self-hosted", "enf-x86-beefy"] - permissions: - packages: write - contents: read - steps: - - name: Login to Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{github.repository_owner}} - password: ${{secrets.GITHUB_TOKEN}} - - name: Build and push - uses: docker/build-push-action@v3 - with: - push: true - tags: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - file: ${{fromJSON(needs.d.outputs.p)[matrix.platform].dockerfile}} - - Build: - needs: [d, build-platforms] - if: always() && needs.d.result == 'success' && (needs.build-platforms.result == 'success' || needs.build-platforms.result == 'skipped') - strategy: - fail-fast: false - matrix: - platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-beefy"] - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - - name: Build - id: build - run: | - # https://github.com/actions/runner/issues/2033 - chown -R $(id -u):$(id -g) $PWD - cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja - cmake --build build - tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst - - name: Upload builddir - uses: AntelopeIO/upload-artifact-large-chunks-action@v1 - with: - name: ${{matrix.platform}}-build - path: build.tar.zst dev-package: name: Build leap-dev package - needs: [d, Build] - if: always() && needs.Build.result == 'success' + needs: [build-base] + if: always() && needs.build-base.result == 'success' strategy: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] runs-on: ubuntu-latest - container: 
${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + container: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} steps: - uses: actions/checkout@v3 with: @@ -163,15 +100,15 @@ jobs: tests: name: Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' + needs: [build-base] + if: always() && needs.build-base.result == 'success' strategy: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] runs-on: ["self-hosted", "enf-x86-hightier"] container: - image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + image: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} options: --security-opt seccomp=unconfined steps: - uses: actions/checkout@v3 @@ -189,8 +126,8 @@ jobs: np-tests: name: NP Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' + needs: [build-base] + if: always() && needs.build-base.result == 'success' strategy: fail-fast: false matrix: @@ -205,7 +142,7 @@ jobs: - name: Run tests in parallel containers uses: ./.github/actions/parallel-ctest-containers with: - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + container: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} error-log-paths: '["build/etc", "build/var", "build/leap-ignition-wd", "build/TestLogs"]' log-tarball-prefix: ${{matrix.platform}} tests-label: nonparallelizable_tests @@ -219,8 +156,8 @@ jobs: lr-tests: name: LR Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' + needs: [build-base] + if: always() && needs.build-base.result == 'success' strategy: fail-fast: false matrix: @@ -235,7 +172,7 @@ jobs: - name: Run tests in parallel containers uses: ./.github/actions/parallel-ctest-containers with: - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + container: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} error-log-paths: '["build/etc", "build/var", "build/leap-ignition-wd", "build/TestLogs"]' log-tarball-prefix: 
${{matrix.platform}} tests-label: long_running_tests @@ -249,7 +186,7 @@ jobs: libtester-tests: name: libtester tests - needs: [d, v, Build, dev-package] + needs: [build-base, v, dev-package] if: always() && needs.v.result == 'success' && needs.dev-package.result == 'success' strategy: fail-fast: false @@ -257,7 +194,7 @@ jobs: platform: [ubuntu20, ubuntu22] test: [build-tree, make-dev-install, deb-install] runs-on: ["self-hosted", "enf-x86-midtier"] - container: ${{ matrix.test != 'deb-install' && fromJSON(needs.d.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} + container: ${{ matrix.test != 'deb-install' && fromJSON(needs.build-base.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: # LEAP - if: ${{ matrix.test != 'deb-install' }} From a93484ee9a9cce6a232d1599d97a0362e581d3a5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 17:22:12 -0400 Subject: [PATCH 084/180] move boost `add_subdirectory()` to the libraries `CMakeLists.txt` --- CMakeLists.txt | 5 ----- README.md | 2 +- libraries/CMakeLists.txt | 4 ++++ 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 35fc09243e..d983207680 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -101,11 +101,6 @@ else() set(no_whole_archive_flag "--no-whole-archive") endif() -set( Boost_USE_MULTITHREADED ON ) -set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) - -add_subdirectory( libraries/boost ) - if( APPLE AND UNIX ) # Apple Specific Options Here message( STATUS "Configuring Leap on macOS" ) diff --git a/README.md b/README.md index c036a07f07..71582a1ce4 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # Leap -Leap is a C++ implementation of the [Antelope](https://github.com/AntelopeIO) protocol. It contains blockchain node software and supporting tools for developers and node operators. 
+Leap is a C++ implementation of the [Antelope](https://github.com/AntelopeIO) protocol. It contains blockchain node software and supporting tools for developers and node operators. ## Branches The `main` branch is the development branch; do not use it for production. Refer to the [release page](https://github.com/AntelopeIO/leap/releases) for current information on releases, pre-releases, and obsolete releases, as well as the corresponding tags for those releases. diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 4b041dd047..462d73801c 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -5,6 +5,10 @@ set(SOFTFLOAT_INSTALL_COMPONENT "dev") set(EOSVM_INSTALL_COMPONENT "dev") set(BN256_INSTALL_COMPONENT "dev") +set( Boost_USE_MULTITHREADED ON ) +set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) +add_subdirectory( boost ) + add_subdirectory( libfc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) From 14c34413416c16daeabfdcd93ed708e169e1bec6 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 18:19:05 -0400 Subject: [PATCH 085/180] Remove outdated `libboost-all-dev` references. --- README.md | 1 - package.cmake | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 71582a1ce4..9d0dfa8530 100644 --- a/README.md +++ b/README.md @@ -132,7 +132,6 @@ sudo apt-get install -y \ build-essential \ cmake \ git \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ diff --git a/package.cmake b/package.cmake index ff3aebbd4b..c782938e54 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. 
llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libboost-all-dev, libssl-dev, libgmp-dev, python3-numpy") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-numpy") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 7bcebcaa0f9794ff0a531def5b69ccf08346127e Mon Sep 17 00:00:00 2001 From: Lin Huang Date: Sat, 1 Jul 2023 15:11:36 -0400 Subject: [PATCH 086/180] A companion Leap changes for fixing WASM memory exhaustion --- libraries/chain/webassembly/runtimes/eos-vm.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chain/webassembly/runtimes/eos-vm.cpp b/libraries/chain/webassembly/runtimes/eos-vm.cpp index 1d78a1a7fa..48610ee436 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm.cpp @@ -247,7 +247,7 @@ std::unique_ptr eos_vm_runtime::instan wasm_code_ptr code((uint8_t*)code_bytes, code_size); apply_options options = { .max_pages = 65536, .max_call_depth = 0 }; - std::unique_ptr bkend = std::make_unique(code, code_size, nullptr, options); + std::unique_ptr bkend = std::make_unique(code, code_size, nullptr, options, false); // uses 2-passes parsing eos_vm_host_functions_t::resolve(bkend->get_module()); return std::make_unique>(this, std::move(bkend)); } catch(eosio::vm::exception& e) { @@ -273,7 +273,7 @@ std::unique_ptr eos_vm_profile_runtime::inst wasm_code_ptr code((uint8_t*)code_bytes, code_size); apply_options options = { .max_pages = 65536, .max_call_depth = 0 }; - std::unique_ptr bkend = std::make_unique(code, code_size, nullptr, options); + std::unique_ptr bkend = std::make_unique(code, code_size, nullptr, options, false); // uses 2-passes parsing 
eos_vm_host_functions_t::resolve(bkend->get_module()); return std::make_unique(std::move(bkend), code_bytes, code_size); } catch(eosio::vm::exception& e) { From c8327b51bb164b6b81a2100464e215e3f87c85f1 Mon Sep 17 00:00:00 2001 From: Lin Huang Date: Wed, 12 Jul 2023 09:57:26 -0400 Subject: [PATCH 087/180] update eos-vm to point to release/3x --- libraries/eos-vm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/eos-vm b/libraries/eos-vm index 7db4b33fa9..4da112c820 160000 --- a/libraries/eos-vm +++ b/libraries/eos-vm @@ -1 +1 @@ -Subproject commit 7db4b33fa92df545c0bae36c77369afb2631dc9e +Subproject commit 4da112c8209aaadbe9b03faa1727579e8dca3e8a From 0821c7f7e6aad1bbfb0ff1605bbf7acc10600207 Mon Sep 17 00:00:00 2001 From: Lin Huang Date: Wed, 12 Jul 2023 10:59:33 -0400 Subject: [PATCH 088/180] Bump Leap version to release/3.1.5 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e49f000255..dc9e7f8115 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,7 +15,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 3) set(VERSION_MINOR 1) -set(VERSION_PATCH 4) +set(VERSION_PATCH 5) #set(VERSION_SUFFIX rc4) if(VERSION_SUFFIX) From 7080e1b78e2f19c5b435ada879c3e7e805038178 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 12 Jul 2023 10:36:24 -0500 Subject: [PATCH 089/180] GH-1119 Remove unneeded fast_shutdown --- .../chain/include/eosio/chain/wasm_interface_private.hpp | 9 +-------- .../eosio/chain/webassembly/runtime_interface.hpp | 1 - libraries/chain/wasm_interface.cpp | 4 ---- libraries/chain/webassembly/runtimes/eos-vm.cpp | 4 ---- plugins/chain_plugin/chain_plugin.cpp | 2 -- 5 files changed, 1 insertion(+), 19 deletions(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 1181976285..a05360b548 100644 --- 
a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -97,13 +97,7 @@ namespace eosio { namespace chain { #endif } - ~wasm_interface_impl() { - if(is_shutting_down) - for(wasm_cache_index::iterator it = wasm_instantiation_cache.begin(); it != wasm_instantiation_cache.end(); ++it) - wasm_instantiation_cache.modify(it, [](wasm_cache_entry& e) { - e.module.release()->fast_shutdown(); - }); - } + ~wasm_interface_impl() = default; bool is_code_cached(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) const { wasm_cache_index::iterator it = wasm_instantiation_cache.find( boost::make_tuple(code_hash, vm_type, vm_version) ); @@ -162,7 +156,6 @@ namespace eosio { namespace chain { return it->module; } - bool is_shutting_down = false; std::unique_ptr runtime_interface; typedef boost::multi_index_container< diff --git a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp index 09df1e023a..5cb0cd79ad 100644 --- a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp @@ -13,7 +13,6 @@ class apply_context; class wasm_instantiated_module_interface { public: virtual void apply(apply_context& context) = 0; - virtual void fast_shutdown() {} virtual ~wasm_instantiated_module_interface(); }; diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 668fdb81a4..9393bfb67a 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -74,10 +74,6 @@ namespace eosio { namespace chain { //Hard: Kick off instantiation in a separate thread at this location } - void wasm_interface::indicate_shutting_down() { - my->is_shutting_down = true; - } - void wasm_interface::code_block_num_last_used(const digest_type& code_hash, const uint8_t& 
vm_type, const uint8_t& vm_version, const uint32_t& block_num) { my->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); } diff --git a/libraries/chain/webassembly/runtimes/eos-vm.cpp b/libraries/chain/webassembly/runtimes/eos-vm.cpp index 522d39e13d..7e11a7ada9 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm.cpp @@ -201,10 +201,6 @@ class eos_vm_profiling_module : public wasm_instantiated_module_interface { } } - void fast_shutdown() override { - _prof.clear(); - } - profile_data* start(apply_context& context) { name account = context.get_receiver(); if(!context.control.is_profiling(account)) return nullptr; diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 5193df2d61..6073a8637d 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1177,8 +1177,6 @@ void chain_plugin_impl::plugin_shutdown() { accepted_transaction_connection.reset(); applied_transaction_connection.reset(); block_start_connection.reset(); - if(app().is_quiting()) - chain->get_wasm_interface().indicate_shutting_down(); chain.reset(); } From 037efde6163f17db0859886123f63eb4bb9c63b5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 12 Jul 2023 10:38:46 -0500 Subject: [PATCH 090/180] GH-1119 Switch controller wasm_interface to provide composite wasm_interface_collection --- libraries/chain/apply_context.cpp | 2 +- libraries/chain/controller.cpp | 6 +++--- libraries/chain/include/eosio/chain/controller.hpp | 3 ++- .../include/eosio/chain/wasm_interface_collection.hpp | 8 ++++++++ libraries/testing/tester.cpp | 1 + 5 files changed, 15 insertions(+), 5 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index b61ee77bbe..6dc0f9a1b6 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include 
#include #include diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2598b03a81..c3fa424eaf 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2692,8 +2692,8 @@ struct controller_impl { wasm_if_collect.init_thread_local_data(db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty()); } - wasm_interface& get_wasm_interface() { - return wasm_if_collect.get_wasm_interface(); + wasm_interface_collection& get_wasm_interface() { + return wasm_if_collect; } void code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num) { @@ -3394,7 +3394,7 @@ const apply_handler* controller::find_apply_handler( account_name receiver, acco } return nullptr; } -wasm_interface& controller::get_wasm_interface() { +wasm_interface_collection& controller::get_wasm_interface() { return my->get_wasm_interface(); } diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 1ffcc4e8b4..68da901025 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -40,6 +40,7 @@ namespace eosio { namespace chain { class account_object; class deep_mind_handler; class subjective_billing; + class wasm_interface_collection; using resource_limits::resource_limits_manager; using apply_handler = std::function; using forked_branch_callback = std::function; @@ -348,7 +349,7 @@ namespace eosio { namespace chain { */ const apply_handler* find_apply_handler( account_name contract, scope_name scope, action_name act )const; - wasm_interface& get_wasm_interface(); + wasm_interface_collection& get_wasm_interface(); static chain_id_type extract_chain_id(snapshot_reader& snapshot); diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp index 8245406290..01f120cfeb 100644 --- 
a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -27,6 +27,14 @@ namespace eosio::chain { return *threaded_wasmifs[std::this_thread::get_id()]; } + void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context) { + get_wasm_interface().apply(code_hash, vm_type, vm_version, context); + } + + bool is_code_cached(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) { + return get_wasm_interface().is_code_cached(code_hash, vm_type, vm_version); + } + // update current lib of all wasm interfaces void current_lib(const uint32_t lib) { // producer_plugin has already asserted irreversible_block signal is called in write window diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index c3608b1549..756516f173 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include From d3b775efc4ee296fad153e7b38c3bf6cbe977454 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 12 Jul 2023 11:46:32 -0500 Subject: [PATCH 091/180] GH-1119 Factor out get_wasm_interface --- .../eosio/chain/wasm_interface_collection.hpp | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp index 01f120cfeb..f3815053c5 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -17,22 +17,21 @@ namespace eosio::chain { , wasmif(vm, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile) {} - wasm_interface& get_wasm_interface() { + void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context) { if 
(is_on_main_thread() #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED || is_eos_vm_oc_enabled() #endif - ) - return wasmif; - return *threaded_wasmifs[std::this_thread::get_id()]; - } - - void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context) { - get_wasm_interface().apply(code_hash, vm_type, vm_version, context); + ) { + wasmif.apply(code_hash, vm_type, vm_version, context); + } + threaded_wasmifs[std::this_thread::get_id()]->apply(code_hash, vm_type, vm_version, context); } + // used for tests, only valid on main thread bool is_code_cached(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) { - return get_wasm_interface().is_code_cached(code_hash, vm_type, vm_version); + EOS_ASSERT(is_on_main_thread(), wasm_execution_error, "is_code_cached called off the main thread"); + return wasmif.is_code_cached(code_hash, vm_type, vm_version); } // update current lib of all wasm interfaces From 99a712632a461b59cb0c15d32aa16cdbe9937096 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 12 Jul 2023 13:22:30 -0500 Subject: [PATCH 092/180] GH-1119 Add missing return --- .../chain/include/eosio/chain/wasm_interface_collection.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp index f3815053c5..28cea2c979 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -23,7 +23,7 @@ namespace eosio::chain { || is_eos_vm_oc_enabled() #endif ) { - wasmif.apply(code_hash, vm_type, vm_version, context); + return wasmif.apply(code_hash, vm_type, vm_version, context); } threaded_wasmifs[std::this_thread::get_id()]->apply(code_hash, vm_type, vm_version, context); } From ca4de7c9a2dc377ef77041ae6e698254c5ae31ff Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: 
Wed, 12 Jul 2023 13:42:53 -0500 Subject: [PATCH 093/180] Update max_block_cpu_usage and max_transaction_cpu_usage properly. --- tests/TestHarness/launcher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index 570f6bfc8e..38935234a4 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -375,8 +375,8 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - genesis['max_block_cpu_usage'] = self.args.max_block_cpu_usage - genesis['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage + genesis['max_block_cpu_usage']['initial_configuration'] = self.args.max_block_cpu_usage + genesis['max_transaction_cpu_usage']['initial_configuration'] = self.args.max_transaction_cpu_usage return genesis def write_genesis_file(self, node, genesis): From 58b2e9e8a8478a34c8a7a92cf202ef670ea54743 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 12 Jul 2023 13:49:49 -0500 Subject: [PATCH 094/180] GH-1390 Use 475ms for max_transaction_cpu_usage to match value used in other integration tests. 
--- tests/performance_tests/genesis.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/genesis.json b/tests/performance_tests/genesis.json index a215407af4..868a3d6c12 100644 --- a/tests/performance_tests/genesis.json +++ b/tests/performance_tests/genesis.json @@ -11,7 +11,7 @@ "context_free_discount_net_usage_den": 100, "max_block_cpu_usage": 500000, "target_block_cpu_usage_pct": 500, - "max_transaction_cpu_usage": 150000, + "max_transaction_cpu_usage": 475000, "min_transaction_cpu_usage": 0, "max_transaction_lifetime": 3600, "deferred_trx_expiration_window": 600, From 477f62c85779a081bb4b5180ac55085f8cd5dfd4 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 12 Jul 2023 15:15:14 -0500 Subject: [PATCH 095/180] Remove hardcoded org and repo and ref. --- .github/workflows/build.yaml | 2 +- .github/workflows/ph_backward_compatibility.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index b520ddb073..593dce030a 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -32,7 +32,7 @@ defaults: jobs: build-base: - uses: AntelopeIO/leap/.github/workflows/build_base.yaml@GH-1156-ph-cicd-nodeos-versions + uses: .github/workflows/build_base.yaml v: name: Discover Versions diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index c78d3365fe..d9045f4013 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -13,7 +13,7 @@ defaults: jobs: build-base: - uses: AntelopeIO/leap/.github/workflows/build_base.yaml@GH-1156-ph-cicd-nodeos-versions + uses: .github/workflows/build_base.yaml tests: name: Tests From 495d6eab30fcde1b995c492a4d6bf4e698c299b1 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 12 Jul 2023 15:20:36 -0500 Subject: [PATCH 096/180] Try to rework permissions to leave top level 
read, but pass write to build-base. --- .github/workflows/build.yaml | 5 ++++- .github/workflows/ph_backward_compatibility.yaml | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 593dce030a..0ad6bbd794 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -23,7 +23,7 @@ on: type: string permissions: - packages: write + packages: read contents: read defaults: @@ -33,6 +33,9 @@ defaults: jobs: build-base: uses: .github/workflows/build_base.yaml + permissions: + packages: write + contents: read v: name: Discover Versions diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index d9045f4013..b3bf8efc5b 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -4,7 +4,7 @@ on: workflow_dispatch: permissions: - packages: write + packages: read contents: read defaults: @@ -14,6 +14,9 @@ defaults: jobs: build-base: uses: .github/workflows/build_base.yaml + permissions: + packages: read + contents: read tests: name: Tests From 0da3e7b36eb4a4fa52dcc2db099711a026c91e37 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 12 Jul 2023 15:24:44 -0500 Subject: [PATCH 097/180] Fix path to workflow. 
--- .github/workflows/build.yaml | 2 +- .github/workflows/ph_backward_compatibility.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 0ad6bbd794..48bc4ff07a 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -32,7 +32,7 @@ defaults: jobs: build-base: - uses: .github/workflows/build_base.yaml + uses: ./.github/workflows/build_base.yaml permissions: packages: write contents: read diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index b3bf8efc5b..4fea2faa00 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -13,7 +13,7 @@ defaults: jobs: build-base: - uses: .github/workflows/build_base.yaml + uses: ./.github/workflows/build_base.yaml permissions: packages: read contents: read From 4e19e3636237596aab66de2f6fceabcf89e0cc3f Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 12 Jul 2023 16:04:23 -0500 Subject: [PATCH 098/180] Somehow put the wrong permission here. --- .github/workflows/ph_backward_compatibility.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 4fea2faa00..7a0892eb5f 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -15,7 +15,7 @@ jobs: build-base: uses: ./.github/workflows/build_base.yaml permissions: - packages: read + packages: write contents: read tests: From a56efcdc1adcc8e4a5b458e4c6dbbe352d1c56a3 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Wed, 12 Jul 2023 16:40:24 -0500 Subject: [PATCH 099/180] In the right order. 
--- tests/TestHarness/launcher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index 38935234a4..665ed4fc13 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -375,8 +375,8 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - genesis['max_block_cpu_usage']['initial_configuration'] = self.args.max_block_cpu_usage - genesis['max_transaction_cpu_usage']['initial_configuration'] = self.args.max_transaction_cpu_usage + genesis['initial_configuration']['max_block_cpu_usage'] = self.args.max_block_cpu_usage + genesis['initial_configuration']['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage return genesis def write_genesis_file(self, node, genesis): From 056b0979f73627a618bf1fb4908fa5d1eec9cabb Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Jul 2023 10:34:24 -0500 Subject: [PATCH 100/180] GH-1119 Refactor to only have one eosvmoc which lives wasm_interface_collection --- libraries/chain/CMakeLists.txt | 1 + .../include/eosio/chain/wasm_interface.hpp | 11 +-- .../eosio/chain/wasm_interface_collection.hpp | 96 +++++++++++++++++-- .../eosio/chain/wasm_interface_private.hpp | 48 ++-------- libraries/chain/wasm_interface.cpp | 54 ++--------- libraries/chain/wasm_interface_collection.cpp | 10 ++ 6 files changed, 119 insertions(+), 101 deletions(-) create mode 100644 libraries/chain/wasm_interface_collection.cpp diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index bb92b43363..5fd6e1f572 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -100,6 +100,7 @@ add_library( eosio_chain wast_to_wasm.cpp wasm_interface.cpp + wasm_interface_collection.cpp wasm_eosio_validation.cpp wasm_eosio_injection.cpp wasm_config.cpp diff --git 
a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 4d3964dda0..7e67d28151 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -46,7 +46,7 @@ namespace eosio { namespace chain { oc_none }; - wasm_interface(vm_type vm, vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile); + wasm_interface(vm_type vm, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile); ~wasm_interface(); // initialize exec per thread @@ -61,19 +61,14 @@ namespace eosio { namespace chain { //indicate that a particular code probably won't be used after given block_num void code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num); - //indicate the current LIB. evicts old cache entries - void current_lib(const uint32_t lib); + //indicate the current LIB. evicts old cache entries, each evicted entry is provided to callback + void current_lib(const uint32_t lib, const std::function& callback); //Calls apply or error on a given code void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context); //Returns true if the code is cached bool is_code_cached(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) const; - - // If substitute_apply is set, then apply calls it before doing anything else. If substitute_apply returns true, - // then apply returns immediately. 
- std::function substitute_apply; private: unique_ptr my; }; diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp index 28cea2c979..1924620054 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -1,5 +1,10 @@ #pragma once #include +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED +#include +#else +#define _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) +#endif namespace eosio::chain { @@ -8,16 +13,77 @@ namespace eosio::chain { */ class wasm_interface_collection { public: - wasm_interface_collection(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + struct eosvmoc_tier { + eosvmoc_tier(const std::filesystem::path& d, const eosvmoc::config& c, const chainbase::database& db) + : cc(d, c, db) { + // construct exec for the main thread + init_thread_local_data(); + } + + // Support multi-threaded execution. + void init_thread_local_data() { + exec = std::make_unique(cc); + } + + eosvmoc::code_cache_async cc; + + // Each thread requires its own exec and mem. 
Defined in wasm_interface.cpp + thread_local static std::unique_ptr exec; + thread_local static eosvmoc::memory mem; + }; +#endif + + wasm_interface_collection(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, bool profile) : main_thread_id(std::this_thread::get_id()) , wasm_runtime(vm) , eosvmoc_tierup(eosvmoc_tierup) - , wasmif(vm, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile) - {} + , wasmif(vm, d, data_dir, eosvmoc_config, profile) + { +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if(eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) { + EOS_ASSERT(vm != wasm_interface::vm_type::eos_vm_oc, wasm_exception, "You can't use EOS VM OC as the base runtime when tier up is activated"); + eosvmoc.emplace(data_dir, eosvmoc_config, d); + } +#endif + } void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context) { + if(substitute_apply && substitute_apply(code_hash, vm_type, vm_version, context)) + return; +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if(eosvmoc && (eosvmoc_tierup == wasm_interface::vm_oc_enable::oc_all || context.should_use_eos_vm_oc())) { + const chain::eosvmoc::code_descriptor* cd = nullptr; + chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; + try { + const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name; + cd = eosvmoc->cc.get_descriptor_for_code(high_priority, code_hash, vm_version, context.control.is_write_window(), failure); + } + catch(...) 
{ + //swallow errors here, if EOS VM OC has gone in to the weeds we shouldn't bail: continue to try and run baseline + //In the future, consider moving bits of EOS VM that can fire exceptions and such out of this call path + static bool once_is_enough; + if(!once_is_enough) + elog("EOS VM OC has encountered an unexpected failure"); + once_is_enough = true; + } + if(cd) { + if (!context.is_applying_block()) // read_only_trx_test.py looks for this log statement + tlog("${a} speculatively executing ${h} with eos vm oc", ("a", context.get_receiver())("h", code_hash)); + eosvmoc->exec->execute(*cd, eosvmoc->mem, context); + return; + } + else if (context.trx_context.is_read_only()) { + if (failure == chain::eosvmoc::code_cache_base::get_cd_failure::temporary) { + EOS_ASSERT(false, ro_trx_vm_oc_compile_temporary_failure, "get_descriptor_for_code failed with temporary failure"); + } else { + EOS_ASSERT(false, ro_trx_vm_oc_compile_permanent_failure, "get_descriptor_for_code failed with permanent failure"); + } + } + } +#endif if (is_on_main_thread() #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED || is_eos_vm_oc_enabled() @@ -37,9 +103,17 @@ namespace eosio::chain { // update current lib of all wasm interfaces void current_lib(const uint32_t lib) { // producer_plugin has already asserted irreversible_block signal is called in write window - wasmif.current_lib(lib); + std::function cb{}; +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if(eosvmoc) { + cb = [&]( const digest_type& code_hash, uint8_t vm_version ) { + eosvmoc->cc.free_code( code_hash, vm_version ); + }; + } +#endif + wasmif.current_lib(lib, cb); for (auto& w: threaded_wasmifs) { - w.second->current_lib(lib); + w.second->current_lib(lib, cb); } } @@ -50,13 +124,15 @@ namespace eosio::chain { #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED if (is_eos_vm_oc_enabled()) { // EOSVMOC needs further initialization of its thread local data + if (eosvmoc) + eosvmoc->init_thread_local_data(); wasmif.init_thread_local_data(); } else #endif { 
std::lock_guard g(threaded_wasmifs_mtx); // Non-EOSVMOC needs a wasmif per thread - threaded_wasmifs[std::this_thread::get_id()] = std::make_unique(wasm_runtime, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile); + threaded_wasmifs[std::this_thread::get_id()] = std::make_unique(wasm_runtime, d, data_dir, eosvmoc_config, profile); } } @@ -76,6 +152,10 @@ namespace eosio::chain { } } + // If substitute_apply is set, then apply calls it before doing anything else. If substitute_apply returns true, + // then apply returns immediately. Provided function must be multi-thread safe. + std::function substitute_apply; + private: bool is_on_main_thread() { return main_thread_id == std::this_thread::get_id(); }; @@ -87,6 +167,10 @@ namespace eosio::chain { wasm_interface wasmif; // used by main thread and all threads for EOSVMOC std::mutex threaded_wasmifs_mtx; std::unordered_map> threaded_wasmifs; // one for each read-only thread, used by eos-vm and eos-vm-jit + +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + std::optional eosvmoc; +#endif }; } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index a05360b548..e7ea5776e6 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -43,32 +43,10 @@ namespace eosio { namespace chain { struct by_hash; struct by_last_block_num; -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - struct eosvmoc_tier { - eosvmoc_tier(const std::filesystem::path& d, const eosvmoc::config& c, const chainbase::database& db) - : cc(d, c, db) { - // construct exec for the main thread - init_thread_local_data(); - } - - // Support multi-threaded execution. - void init_thread_local_data() { - exec = std::make_unique(cc); - } - - eosvmoc::code_cache_async cc; - - // Each thread requires its own exec and mem. 
Defined in wasm_interface.cpp - thread_local static std::unique_ptr exec; - thread_local static eosvmoc::memory mem; - }; -#endif - - wasm_interface_impl(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, const chainbase::database& d, + wasm_interface_impl(wasm_interface::vm_type vm, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) : db(d) , wasm_runtime_time(vm) - , eosvmoc_tierup(eosvmoc_tierup) { #ifdef EOSIO_EOS_VM_RUNTIME_ENABLED if(vm == wasm_interface::vm_type::eos_vm) @@ -88,13 +66,6 @@ namespace eosio { namespace chain { #endif if(!runtime_interface) EOS_THROW(wasm_exception, "${r} wasm runtime not supported on this platform and/or configuration", ("r", vm)); - -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) { - EOS_ASSERT(vm != wasm_interface::vm_type::eos_vm_oc, wasm_exception, "You can't use EOS VM OC as the base runtime when tier up is activated"); - eosvmoc.emplace(data_dir, eosvmoc_config, d); - } -#endif } ~wasm_interface_impl() = default; @@ -112,14 +83,16 @@ namespace eosio { namespace chain { }); } - void current_lib(uint32_t lib) { + // reports each code_hash and vm_version that will be erased to callback + void current_lib(uint32_t lib, const std::function& callback) { //anything last used before or on the LIB can be evicted const auto first_it = wasm_instantiation_cache.get().begin(); const auto last_it = wasm_instantiation_cache.get().upper_bound(lib); -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(eosvmoc) for(auto it = first_it; it != last_it; it++) - eosvmoc->cc.free_code(it->code_hash, it->vm_version); -#endif + if (callback) { + for(auto it = first_it; it != last_it; it++) { + callback(it->code_hash, it->vm_version); + } + } wasm_instantiation_cache.get().erase(first_it, last_it); } @@ -175,11 +148,6 @@ namespace eosio { namespace chain { const chainbase::database& db; const 
wasm_interface::vm_type wasm_runtime_time; - const wasm_interface::vm_oc_enable eosvmoc_tierup; - -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - std::optional eosvmoc; -#endif }; } } // eosio::chain diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 9393bfb67a..d815330502 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -32,16 +32,14 @@ namespace eosio { namespace chain { - wasm_interface::wasm_interface(vm_type vm, vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) - : my( new wasm_interface_impl(vm, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile) ) {} + wasm_interface::wasm_interface(vm_type vm, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) + : my( new wasm_interface_impl(vm, d, data_dir, eosvmoc_config, profile) ) {} wasm_interface::~wasm_interface() {} #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED void wasm_interface::init_thread_local_data() { - if (my->eosvmoc) - my->eosvmoc->init_thread_local_data(); - else if (my->wasm_runtime_time == wasm_interface::vm_type::eos_vm_oc && my->runtime_interface) + if (my->wasm_runtime_time == wasm_interface::vm_type::eos_vm_oc && my->runtime_interface) my->runtime_interface->init_thread_local_data(); } #endif @@ -78,44 +76,11 @@ namespace eosio { namespace chain { my->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); } - void wasm_interface::current_lib(const uint32_t lib) { - my->current_lib(lib); + void wasm_interface::current_lib(const uint32_t lib, const std::function& callback) { + my->current_lib(lib, callback); } void wasm_interface::apply( const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context ) { - if(substitute_apply && substitute_apply(code_hash, vm_type, vm_version, context)) - return; -#ifdef 
EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(my->eosvmoc && (my->eosvmoc_tierup == wasm_interface::vm_oc_enable::oc_all || context.should_use_eos_vm_oc())) { - const chain::eosvmoc::code_descriptor* cd = nullptr; - chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; - try { - const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name; - cd = my->eosvmoc->cc.get_descriptor_for_code(high_priority, code_hash, vm_version, context.control.is_write_window(), failure); - } - catch(...) { - //swallow errors here, if EOS VM OC has gone in to the weeds we shouldn't bail: continue to try and run baseline - //In the future, consider moving bits of EOS VM that can fire exceptions and such out of this call path - static bool once_is_enough; - if(!once_is_enough) - elog("EOS VM OC has encountered an unexpected failure"); - once_is_enough = true; - } - if(cd) { - if (!context.is_applying_block()) // read_only_trx_test.py looks for this log statement - tlog("${a} speculatively executing ${h} with eos vm oc", ("a", context.get_receiver())("h", code_hash)); - my->eosvmoc->exec->execute(*cd, my->eosvmoc->mem, context); - return; - } - else if (context.trx_context.is_read_only()) { - if (failure == chain::eosvmoc::code_cache_base::get_cd_failure::temporary) { - EOS_ASSERT(false, ro_trx_vm_oc_compile_temporary_failure, "get_descriptor_for_code failed with temporary failure"); - } else { - EOS_ASSERT(false, ro_trx_vm_oc_compile_permanent_failure, "get_descriptor_for_code failed with permanent failure"); - } - } - } -#endif my->get_instantiated_module(code_hash, vm_type, vm_version, context.trx_context)->apply(context); } @@ -123,13 +88,8 @@ namespace eosio { namespace chain { return my->is_code_cached(code_hash, vm_type, vm_version); } - wasm_instantiated_module_interface::~wasm_instantiated_module_interface() {} - wasm_runtime_interface::~wasm_runtime_interface() {} - -#ifdef 
EOSIO_EOS_VM_OC_RUNTIME_ENABLED - thread_local std::unique_ptr wasm_interface_impl::eosvmoc_tier::exec {}; - thread_local eosvmoc::memory wasm_interface_impl::eosvmoc_tier::mem{ wasm_constraints::maximum_linear_memory/wasm_constraints::wasm_page_size }; -#endif + wasm_instantiated_module_interface::~wasm_instantiated_module_interface() = default; + wasm_runtime_interface::~wasm_runtime_interface() = default; std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime) { std::string s; diff --git a/libraries/chain/wasm_interface_collection.cpp b/libraries/chain/wasm_interface_collection.cpp new file mode 100644 index 0000000000..b057ddad7c --- /dev/null +++ b/libraries/chain/wasm_interface_collection.cpp @@ -0,0 +1,10 @@ +#include + +namespace eosio { namespace chain { + +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + thread_local std::unique_ptr wasm_interface_collection::eosvmoc_tier::exec {}; + thread_local eosvmoc::memory wasm_interface_collection::eosvmoc_tier::mem{ wasm_constraints::maximum_linear_memory/wasm_constraints::wasm_page_size }; +#endif + +} } /// eosio::chain From 13b2db936551a25aa7189e2d57c9395a9199b5d9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Jul 2023 14:00:36 -0500 Subject: [PATCH 101/180] GH-1119 Move impl into cpp file --- .../eosio/chain/wasm_interface_collection.hpp | 134 ++---------------- libraries/chain/wasm_interface_collection.cpp | 132 ++++++++++++++++- 2 files changed, 141 insertions(+), 125 deletions(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp index 1924620054..38cf417fab 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -5,6 +5,9 @@ #else #define _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) #endif +#include +#include +#include namespace eosio::chain { @@ -13,86 +16,13 @@ 
namespace eosio::chain { */ class wasm_interface_collection { public: -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - struct eosvmoc_tier { - eosvmoc_tier(const std::filesystem::path& d, const eosvmoc::config& c, const chainbase::database& db) - : cc(d, c, db) { - // construct exec for the main thread - init_thread_local_data(); - } - - // Support multi-threaded execution. - void init_thread_local_data() { - exec = std::make_unique(cc); - } - - eosvmoc::code_cache_async cc; - - // Each thread requires its own exec and mem. Defined in wasm_interface.cpp - thread_local static std::unique_ptr exec; - thread_local static eosvmoc::memory mem; - }; -#endif - - wasm_interface_collection(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, + wasm_interface_collection(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path& data_dir, - const eosvmoc::config& eosvmoc_config, bool profile) - : main_thread_id(std::this_thread::get_id()) - , wasm_runtime(vm) - , eosvmoc_tierup(eosvmoc_tierup) - , wasmif(vm, d, data_dir, eosvmoc_config, profile) - { -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) { - EOS_ASSERT(vm != wasm_interface::vm_type::eos_vm_oc, wasm_exception, "You can't use EOS VM OC as the base runtime when tier up is activated"); - eosvmoc.emplace(data_dir, eosvmoc_config, d); - } -#endif - } + const eosvmoc::config& eosvmoc_config, bool profile); - void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context) { - if(substitute_apply && substitute_apply(code_hash, vm_type, vm_version, context)) - return; -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(eosvmoc && (eosvmoc_tierup == wasm_interface::vm_oc_enable::oc_all || context.should_use_eos_vm_oc())) { - const chain::eosvmoc::code_descriptor* cd = nullptr; - chain::eosvmoc::code_cache_base::get_cd_failure failure = 
chain::eosvmoc::code_cache_base::get_cd_failure::temporary; - try { - const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name; - cd = eosvmoc->cc.get_descriptor_for_code(high_priority, code_hash, vm_version, context.control.is_write_window(), failure); - } - catch(...) { - //swallow errors here, if EOS VM OC has gone in to the weeds we shouldn't bail: continue to try and run baseline - //In the future, consider moving bits of EOS VM that can fire exceptions and such out of this call path - static bool once_is_enough; - if(!once_is_enough) - elog("EOS VM OC has encountered an unexpected failure"); - once_is_enough = true; - } - if(cd) { - if (!context.is_applying_block()) // read_only_trx_test.py looks for this log statement - tlog("${a} speculatively executing ${h} with eos vm oc", ("a", context.get_receiver())("h", code_hash)); - eosvmoc->exec->execute(*cd, eosvmoc->mem, context); - return; - } - else if (context.trx_context.is_read_only()) { - if (failure == chain::eosvmoc::code_cache_base::get_cd_failure::temporary) { - EOS_ASSERT(false, ro_trx_vm_oc_compile_temporary_failure, "get_descriptor_for_code failed with temporary failure"); - } else { - EOS_ASSERT(false, ro_trx_vm_oc_compile_permanent_failure, "get_descriptor_for_code failed with permanent failure"); - } - } - } -#endif - if (is_on_main_thread() -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - || is_eos_vm_oc_enabled() -#endif - ) { - return wasmif.apply(code_hash, vm_type, vm_version, context); - } - threaded_wasmifs[std::this_thread::get_id()]->apply(code_hash, vm_type, vm_version, context); - } + ~wasm_interface_collection(); + + void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context); // used for tests, only valid on main thread bool is_code_cached(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) { @@ -101,40 +31,10 @@ namespace eosio::chain { } // update current lib of all 
wasm interfaces - void current_lib(const uint32_t lib) { - // producer_plugin has already asserted irreversible_block signal is called in write window - std::function cb{}; -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(eosvmoc) { - cb = [&]( const digest_type& code_hash, uint8_t vm_version ) { - eosvmoc->cc.free_code( code_hash, vm_version ); - }; - } -#endif - wasmif.current_lib(lib, cb); - for (auto& w: threaded_wasmifs) { - w.second->current_lib(lib, cb); - } - } + void current_lib(const uint32_t lib); // only called from non-main threads (read-only trx execution threads) when producer_plugin starts them - void init_thread_local_data(const chainbase::database& d, const std::filesystem::path& data_dir, - const eosvmoc::config& eosvmoc_config, bool profile) { - EOS_ASSERT(!is_on_main_thread(), misc_exception, "init_thread_local_data called on the main thread"); -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if (is_eos_vm_oc_enabled()) { - // EOSVMOC needs further initialization of its thread local data - if (eosvmoc) - eosvmoc->init_thread_local_data(); - wasmif.init_thread_local_data(); - } else -#endif - { - std::lock_guard g(threaded_wasmifs_mtx); - // Non-EOSVMOC needs a wasmif per thread - threaded_wasmifs[std::this_thread::get_id()] = std::make_unique(wasm_runtime, d, data_dir, eosvmoc_config, profile); - } - } + void init_thread_local_data(const chainbase::database& d, const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, bool profile); #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED bool is_eos_vm_oc_enabled() const { @@ -142,15 +42,7 @@ namespace eosio::chain { } #endif - void code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num) { - // The caller of this function apply_eosio_setcode has already asserted that - // the transaction is not a read-only trx, which implies we are - // in write window. 
Safe to call threaded_wasmifs's code_block_num_last_used - wasmif.code_block_num_last_used(code_hash, vm_type, vm_version, block_num); - for (auto& w: threaded_wasmifs) { - w.second->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); - } - } + void code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num); // If substitute_apply is set, then apply calls it before doing anything else. If substitute_apply returns true, // then apply returns immediately. Provided function must be multi-thread safe. @@ -164,12 +56,12 @@ namespace eosio::chain { const wasm_interface::vm_type wasm_runtime; const wasm_interface::vm_oc_enable eosvmoc_tierup; - wasm_interface wasmif; // used by main thread and all threads for EOSVMOC + wasm_interface wasmif; // used by main thread std::mutex threaded_wasmifs_mtx; std::unordered_map> threaded_wasmifs; // one for each read-only thread, used by eos-vm and eos-vm-jit #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - std::optional eosvmoc; + std::unique_ptr eosvmoc; // used by all threads #endif }; diff --git a/libraries/chain/wasm_interface_collection.cpp b/libraries/chain/wasm_interface_collection.cpp index b057ddad7c..21a6279220 100644 --- a/libraries/chain/wasm_interface_collection.cpp +++ b/libraries/chain/wasm_interface_collection.cpp @@ -1,10 +1,134 @@ #include -namespace eosio { namespace chain { +namespace eosio::chain { #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - thread_local std::unique_ptr wasm_interface_collection::eosvmoc_tier::exec {}; - thread_local eosvmoc::memory wasm_interface_collection::eosvmoc_tier::mem{ wasm_constraints::maximum_linear_memory/wasm_constraints::wasm_page_size }; +struct eosvmoc_tier { + eosvmoc_tier(const std::filesystem::path& d, const eosvmoc::config& c, const chainbase::database& db) + : cc(d, c, db) { + // construct exec for the main thread + init_thread_local_data(); + } + + // Support multi-threaded execution. 
+ void init_thread_local_data() { + exec = std::make_unique(cc); + } + + eosvmoc::code_cache_async cc; + + // Each thread requires its own exec and mem. Defined in wasm_interface.cpp + thread_local static std::unique_ptr exec; + thread_local static eosvmoc::memory mem; +}; + +thread_local std::unique_ptr eosvmoc_tier::exec{}; +thread_local eosvmoc::memory eosvmoc_tier::mem{wasm_constraints::maximum_linear_memory / wasm_constraints::wasm_page_size}; +#endif + +wasm_interface_collection::wasm_interface_collection(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, + const chainbase::database& d, const std::filesystem::path& data_dir, + const eosvmoc::config& eosvmoc_config, bool profile) + : main_thread_id(std::this_thread::get_id()) + , wasm_runtime(vm) + , eosvmoc_tierup(eosvmoc_tierup) + , wasmif(vm, d, data_dir, eosvmoc_config, profile) { +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if (eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) { + EOS_ASSERT(vm != wasm_interface::vm_type::eos_vm_oc, wasm_exception, "You can't use EOS VM OC as the base runtime when tier up is activated"); + eosvmoc = std::make_unique(data_dir, eosvmoc_config, d); + } +#endif +} + +wasm_interface_collection::~wasm_interface_collection() = default; + +void wasm_interface_collection::apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context) { + if (substitute_apply && substitute_apply(code_hash, vm_type, vm_version, context)) + return; +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if (eosvmoc && (eosvmoc_tierup == wasm_interface::vm_oc_enable::oc_all || context.should_use_eos_vm_oc())) { + const chain::eosvmoc::code_descriptor* cd = nullptr; + chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; + try { + const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name; + cd = 
eosvmoc->cc.get_descriptor_for_code(high_priority, code_hash, vm_version, context.control.is_write_window(), failure); + } catch (...) { + // swallow errors here, if EOS VM OC has gone in to the weeds we shouldn't bail: continue to try and run baseline + // In the future, consider moving bits of EOS VM that can fire exceptions and such out of this call path + static bool once_is_enough; + if (!once_is_enough) + elog("EOS VM OC has encountered an unexpected failure"); + once_is_enough = true; + } + if (cd) { + if (!context.is_applying_block()) // read_only_trx_test.py looks for this log statement + tlog("${a} speculatively executing ${h} with eos vm oc", ("a", context.get_receiver())("h", code_hash)); + eosvmoc->exec->execute(*cd, eosvmoc->mem, context); + return; + } else if (context.trx_context.is_read_only()) { + if (failure == chain::eosvmoc::code_cache_base::get_cd_failure::temporary) { + EOS_ASSERT(false, ro_trx_vm_oc_compile_temporary_failure, "get_descriptor_for_code failed with temporary failure"); + } else { + EOS_ASSERT(false, ro_trx_vm_oc_compile_permanent_failure, "get_descriptor_for_code failed with permanent failure"); + } + } + } +#endif + if (is_on_main_thread() +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + || is_eos_vm_oc_enabled() +#endif + ) { + return wasmif.apply(code_hash, vm_type, vm_version, context); + } + threaded_wasmifs[std::this_thread::get_id()]->apply(code_hash, vm_type, vm_version, context); +} + +// update current lib of all wasm interfaces +void wasm_interface_collection::current_lib(const uint32_t lib) { + // producer_plugin has already asserted irreversible_block signal is called in write window + std::function cb{}; +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if (eosvmoc) { + cb = [&](const digest_type& code_hash, uint8_t vm_version) { + eosvmoc->cc.free_code(code_hash, vm_version); + }; + } #endif + wasmif.current_lib(lib, cb); + for (auto& w : threaded_wasmifs) { + w.second->current_lib(lib, cb); + } +} + +// only called from 
non-main threads (read-only trx execution threads) when producer_plugin starts them +void wasm_interface_collection::init_thread_local_data(const chainbase::database& d, const std::filesystem::path& data_dir, + const eosvmoc::config& eosvmoc_config, bool profile) { + EOS_ASSERT(!is_on_main_thread(), misc_exception, "init_thread_local_data called on the main thread"); +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if (is_eos_vm_oc_enabled()) { + // EOSVMOC needs further initialization of its thread local data + if (eosvmoc) + eosvmoc->init_thread_local_data(); + wasmif.init_thread_local_data(); + } else +#endif + { + std::lock_guard g(threaded_wasmifs_mtx); + // Non-EOSVMOC needs a wasmif per thread + threaded_wasmifs[std::this_thread::get_id()] = std::make_unique(wasm_runtime, d, data_dir, eosvmoc_config, profile); + } +} + +void wasm_interface_collection::code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num) { + // The caller of this function apply_eosio_setcode has already asserted that + // the transaction is not a read-only trx, which implies we are + // in write window. Safe to call threaded_wasmifs's code_block_num_last_used + wasmif.code_block_num_last_used(code_hash, vm_type, vm_version, block_num); + for (auto& w : threaded_wasmifs) { + w.second->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); + } +} -} } /// eosio::chain +} // namespace eosio::chain From b7de2af761df6f61730d78fcddc8e987f072c336 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Jul 2023 14:24:17 -0500 Subject: [PATCH 102/180] GH-1119 Fallback to non-oc when oc not ready for read-only. 
--- libraries/chain/wasm_interface_collection.cpp | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/libraries/chain/wasm_interface_collection.cpp b/libraries/chain/wasm_interface_collection.cpp index 21a6279220..b7378cc7da 100644 --- a/libraries/chain/wasm_interface_collection.cpp +++ b/libraries/chain/wasm_interface_collection.cpp @@ -48,7 +48,7 @@ void wasm_interface_collection::apply(const digest_type& code_hash, const uint8_ return; #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED if (eosvmoc && (eosvmoc_tierup == wasm_interface::vm_oc_enable::oc_all || context.should_use_eos_vm_oc())) { - const chain::eosvmoc::code_descriptor* cd = nullptr; + const chain::eosvmoc::code_descriptor* cd = nullptr; chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; try { const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name; @@ -66,12 +66,6 @@ void wasm_interface_collection::apply(const digest_type& code_hash, const uint8_ tlog("${a} speculatively executing ${h} with eos vm oc", ("a", context.get_receiver())("h", code_hash)); eosvmoc->exec->execute(*cd, eosvmoc->mem, context); return; - } else if (context.trx_context.is_read_only()) { - if (failure == chain::eosvmoc::code_cache_base::get_cd_failure::temporary) { - EOS_ASSERT(false, ro_trx_vm_oc_compile_temporary_failure, "get_descriptor_for_code failed with temporary failure"); - } else { - EOS_ASSERT(false, ro_trx_vm_oc_compile_permanent_failure, "get_descriptor_for_code failed with permanent failure"); - } } } #endif From 56c00275dbf59cc3451eea916a6a7b99a24bef26 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 13 Jul 2023 16:14:55 -0500 Subject: [PATCH 103/180] fix permission. 
--- .github/workflows/build_base.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_base.yaml b/.github/workflows/build_base.yaml index f5d7773fae..8fa4de8bde 100644 --- a/.github/workflows/build_base.yaml +++ b/.github/workflows/build_base.yaml @@ -9,7 +9,7 @@ on: value: ${{ jobs.d.outputs.p }} permissions: - packages: write + packages: read contents: read defaults: From bfa1685672c865cc28152865a26113cfb19df98b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 14 Jul 2023 13:46:12 -0500 Subject: [PATCH 104/180] Add names to jobs for better UI experience. --- .github/workflows/build.yaml | 1 + .github/workflows/build_base.yaml | 1 + .github/workflows/ph_backward_compatibility.yaml | 1 + 3 files changed, 3 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 48bc4ff07a..286d3de481 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -32,6 +32,7 @@ defaults: jobs: build-base: + name: Run Build Workflow uses: ./.github/workflows/build_base.yaml permissions: packages: write diff --git a/.github/workflows/build_base.yaml b/.github/workflows/build_base.yaml index 8fa4de8bde..5d47ba37cc 100644 --- a/.github/workflows/build_base.yaml +++ b/.github/workflows/build_base.yaml @@ -59,6 +59,7 @@ jobs: file: ${{fromJSON(needs.d.outputs.p)[matrix.platform].dockerfile}} Build: + name: Build leap needs: [d, build-platforms] if: always() && needs.d.result == 'success' && (needs.build-platforms.result == 'success' || needs.build-platforms.result == 'skipped') strategy: diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 7a0892eb5f..e166c92eff 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -13,6 +13,7 @@ defaults: jobs: build-base: + name: Run Build Workflow uses: ./.github/workflows/build_base.yaml permissions: packages: write 
From 70cd71cb29ed8f88df574da6284d1693eee26634 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 14 Jul 2023 13:50:21 -0500 Subject: [PATCH 105/180] GH-1119 Move include to source file --- .../chain/include/eosio/chain/wasm_interface_collection.hpp | 5 ----- libraries/chain/wasm_interface_collection.cpp | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp index 38cf417fab..36af0ce3ec 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -1,10 +1,5 @@ #pragma once #include -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED -#include -#else -#define _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) -#endif #include #include #include diff --git a/libraries/chain/wasm_interface_collection.cpp b/libraries/chain/wasm_interface_collection.cpp index b7378cc7da..d1276e82fd 100644 --- a/libraries/chain/wasm_interface_collection.cpp +++ b/libraries/chain/wasm_interface_collection.cpp @@ -1,4 +1,9 @@ #include +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED +#include +#else +#define _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) +#endif namespace eosio::chain { From 150d817f67f6bbd2f7224731b2b55294b3bacc05 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 14 Jul 2023 18:26:34 -0500 Subject: [PATCH 106/180] GH-1119 Fix spacing --- libraries/chain/wasm_interface.cpp | 2 +- libraries/chain/wasm_interface_collection.cpp | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index d815330502..abe014d946 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -70,7 +70,7 @@ namespace eosio { namespace chain { //there are a couple opportunties for improvement here-- //Easy: Cache the 
Module created here so it can be reused for instantiaion //Hard: Kick off instantiation in a separate thread at this location - } + } void wasm_interface::code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num) { my->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); diff --git a/libraries/chain/wasm_interface_collection.cpp b/libraries/chain/wasm_interface_collection.cpp index d1276e82fd..2b437a2794 100644 --- a/libraries/chain/wasm_interface_collection.cpp +++ b/libraries/chain/wasm_interface_collection.cpp @@ -79,7 +79,8 @@ void wasm_interface_collection::apply(const digest_type& code_hash, const uint8_ || is_eos_vm_oc_enabled() #endif ) { - return wasmif.apply(code_hash, vm_type, vm_version, context); + wasmif.apply(code_hash, vm_type, vm_version, context); + return; } threaded_wasmifs[std::this_thread::get_id()]->apply(code_hash, vm_type, vm_version, context); } From 3a7c072015d9fd848ea2c9d95c9897acd40ac9bd Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Sat, 15 Jul 2023 18:43:37 -0500 Subject: [PATCH 107/180] Listen on multiple addresses for net_plugin p2p. 
--- plugins/net_plugin/net_plugin.cpp | 118 ++++++++++++++++++++---------- 1 file changed, 79 insertions(+), 39 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 63242aa1b6..1222eb23b0 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -42,6 +42,21 @@ using namespace eosio::chain::plugin_interface; +namespace boost +{ + /// @brief Overload for boost::lexical_cast to convert vector of strings to string + /// + /// Used by boost::program_options to print the default value of an std::vector option + /// + /// @param v the vector to convert + /// @return the contents of the vector as a comma-separated string + template<> + inline std::string lexical_cast(const std::vector& v) + { + return boost::join(v, ","); + } +} + namespace eosio { static auto _net_plugin = application::register_plugin(); @@ -395,8 +410,8 @@ namespace eosio { * Thread safe, only updated in plugin initialize * @{ */ - string p2p_address; - string p2p_server_address; + vector p2p_addresses; + vector p2p_server_addresses; vector allowed_peers; ///< peer keys allowed to connect std::mapthread_pool.get_executor() ), socket( new tcp::socket( std::move(s) ) ), + p2p_address( address), connection_id( ++my_impl->current_connection_id ), response_expected_timer( my_impl->thread_pool.get_executor() ), last_handshake_recv(), @@ -2655,7 +2675,7 @@ namespace eosio { } - void net_plugin_impl::create_session(tcp::socket&& socket) { + void net_plugin_impl::create_session(tcp::socket&& socket, const string& p2p_address) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; @@ -2681,7 +2701,7 @@ namespace eosio { visitors < connections.get_max_client_count())) { fc_ilog(logger, "Accepted new connection: " + paddr_str); - connection_ptr new_connection = std::make_shared(std::move(socket)); + connection_ptr new_connection = std::make_shared(std::move(socket), p2p_address); 
new_connection->strand.post([new_connection, this]() { if (new_connection->start_session()) { connections.add(new_connection); @@ -3144,9 +3164,9 @@ namespace eosio { if (msg.time + c_time <= check_time) return false; } else if (net_version < proto_dup_node_id_goaway || msg.network_version < proto_dup_node_id_goaway) { - if (my_impl->p2p_address < msg.p2p_address) { - fc_dlog( logger, "my_impl->p2p_address '${lhs}' < msg.p2p_address '${rhs}'", - ("lhs", my_impl->p2p_address)( "rhs", msg.p2p_address ) ); + if (p2p_address < msg.p2p_address) { + fc_dlog( logger, "p2p_address '${lhs}' < msg.p2p_address '${rhs}'", + ("lhs", p2p_address)( "rhs", msg.p2p_address ) ); // only the connection from lower p2p_address to higher p2p_address will be considered as a duplicate, // so there is no chance for both connections to be closed return false; @@ -3828,7 +3848,7 @@ namespace eosio { // If we couldn't sign, don't send a token. if(hello.sig == chain::signature_type()) hello.token = sha256(); - hello.p2p_address = my_impl->p2p_address; + hello.p2p_address = p2p_address; if( is_transactions_only_connection() ) hello.p2p_address += ":trx"; // if we are not accepting transactions tell peer we are blocks only if( is_blocks_only_connection() || !my_impl->p2p_accept_transactions ) hello.p2p_address += ":blk"; @@ -3860,8 +3880,8 @@ namespace eosio { void net_plugin::set_program_options( options_description& /*cli*/, options_description& cfg ) { cfg.add_options() - ( "p2p-listen-endpoint", bpo::value()->default_value( "0.0.0.0:9876" ), "The actual host:port used to listen for incoming p2p connections.") - ( "p2p-server-address", bpo::value(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint.") + ( "p2p-listen-endpoint", bpo::value< vector >()->default_value( vector(1, string("0.0.0.0:9876")) ), "The actual host:port used to listen for incoming p2p connections. 
May be specified multiple times.") + ( "p2p-server-address", bpo::value< vector >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be specified as many times as p2p-listen-endpoint") ( "p2p-peer-address", bpo::value< vector >()->composing(), "The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network.\n" " Syntax: host:port[:|]\n" @@ -3882,7 +3902,7 @@ namespace eosio { ( "agent-name", bpo::value()->default_value("EOS Test Agent"), "The name supplied to identify this node amongst the peers.") ( "allowed-connection", bpo::value>()->multitoken()->default_value({"any"}, "any"), "Can be 'any' or 'producers' or 'specified' or 'none'. If 'specified', peer-key must be specified at least once. If only 'producers', peer-key is not required. 'producers' and 'specified' may be combined.") ( "peer-key", bpo::value>()->composing()->multitoken(), "Optional public key of peer allowed to connect. 
May be used multiple times.") - ( "peer-private-key", boost::program_options::value>()->composing()->multitoken(), + ( "peer-private-key", bpo::value>()->composing()->multitoken(), "Tuple of [PublicKey, WIF private key] (may specify multiple times)") ( "max-clients", bpo::value()->default_value(def_max_clients), "Maximum number of clients from which connections are accepted, use 0 for no limit") ( "connection-cleanup-period", bpo::value()->default_value(def_conn_retry_wait), "number of seconds to wait before cleaning up dead connections") @@ -3942,16 +3962,31 @@ namespace eosio { std::chrono::seconds( options.at("connection-cleanup-period").as() ), options.at("max-clients").as() ); - if( options.count( "p2p-listen-endpoint" ) && options.at("p2p-listen-endpoint").as().length()) { - p2p_address = options.at( "p2p-listen-endpoint" ).as(); - EOS_ASSERT( p2p_address.length() <= max_p2p_address_length, chain::plugin_config_exception, - "p2p-listen-endpoint too long, must be less than ${m}", ("m", max_p2p_address_length) ); + if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty()) { + p2p_addresses = options.at( "p2p-listen-endpoint" ).as>(); + auto addr_count = p2p_addresses.size(); + std::sort(p2p_addresses.begin(), p2p_addresses.end()); + std::unique(p2p_addresses.begin(), p2p_addresses.end()); + if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { + fc_ilog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); + } + for(auto& addr : p2p_addresses) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, + "p2p-listen-endpoint ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } } if( options.count( "p2p-server-address" ) ) { - p2p_server_address = options.at( "p2p-server-address" ).as(); - EOS_ASSERT( p2p_server_address.length() <= max_p2p_address_length, chain::plugin_config_exception, - 
"p2p_server_address too long, must be less than ${m}", ("m", max_p2p_address_length) ); + p2p_server_addresses = options.at( "p2p-server-address" ).as>(); + EOS_ASSERT( p2p_server_addresses.size() <= p2p_addresses.size(), chain::plugin_config_exception, + "p2p-server-address may not be specified more times than p2p-listen-endpoint" ); + for( auto& addr: p2p_server_addresses ) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, + "p2p-server-address ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } } + p2p_server_addresses.resize(p2p_addresses.size()); // extend with empty entries as needed thread_pool_size = options.at( "net-threads" ).as(); EOS_ASSERT( thread_pool_size > 0, chain::plugin_config_exception, @@ -4044,7 +4079,7 @@ namespace eosio { dispatcher = std::make_unique( my_impl->thread_pool.get_executor() ); - if( !p2p_accept_transactions && p2p_address.size() ) { + if( !p2p_accept_transactions && p2p_addresses.size() ) { fc_ilog( logger, "\n" "***********************************\n" "* p2p-accept-transactions = false *\n" @@ -4052,13 +4087,14 @@ namespace eosio { "***********************************\n" ); } - std::string listen_address = p2p_address; + std::vector listen_addresses = p2p_addresses; - if( !p2p_address.empty() ) { - auto [host, port] = fc::split_host_port(listen_address); + std::transform(p2p_addresses.begin(), p2p_addresses.end(), p2p_server_addresses.begin(), + p2p_addresses.begin(), [](const string& p2p_address, const string& p2p_server_address) { + auto [host, port] = fc::split_host_port(p2p_address); if( !p2p_server_address.empty() ) { - p2p_address = p2p_server_address; + return p2p_server_address; } else if( host.empty() || host == "0.0.0.0" || host == "[::]") { boost::system::error_code ec; auto hostname = host_name( ec ); @@ -4068,9 +4104,10 @@ namespace eosio { "Unable to retrieve host_name. 
${msg}", ("msg", ec.message())); } - p2p_address = hostname + ":" + port; + return hostname + ":" + port; } - } + return p2p_address; + }); { chain::controller& cc = chain_plug->chain(); @@ -4094,8 +4131,10 @@ namespace eosio { incoming_transaction_ack_subscription = app().get_channel().subscribe( [this](auto&& t) { transaction_ack(std::forward(t)); }); - app().executor().post(priority::highest, [my=shared_from_this(), address = std::move(listen_address)](){ - if (address.size()) { + for(auto listen_itr = listen_addresses.begin(), p2p_iter = p2p_addresses.begin(); + listen_itr != listen_addresses.end(); + ++listen_itr, ++p2p_iter) { + app().executor().post(priority::highest, [my=shared_from_this(), address = std::move(*listen_itr), p2p_addr = *p2p_iter](){ try { const boost::posix_time::milliseconds accept_timeout(100); @@ -4104,20 +4143,21 @@ namespace eosio { fc::create_listener( my->thread_pool.get_executor(), logger, accept_timeout, address, extra_listening_log_info, - [my = my](tcp::socket&& socket) { my->create_session(std::move(socket)); }); + [my = my, addr = p2p_addr](tcp::socket&& socket) { my->create_session(std::move(socket), addr); }); } catch (const std::exception& e) { fc_elog( logger, "net_plugin::plugin_startup failed to listen on ${addr}, ${what}", ("addr", address)("what", e.what()) ); app().quit(); return; } - } - - my->ticker(); - my->start_monitors(); - my->update_chain_info(); - my->connections.connect_supplied_peers(); - }); + }); + app().executor().post(priority::highest, [my=shared_from_this()](){ + my->ticker(); + my->start_monitors(); + my->update_chain_info(); + my->connections.connect_supplied_peers(); + }); + } } void net_plugin::plugin_initialize( const variables_map& options ) { From d5fdd64a0d0d0b24276bbdae3696b8cc70e4a4ea Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Mon, 17 Jul 2023 11:10:51 -0500 Subject: [PATCH 108/180] Move post of timers outside listener post loop. 
--- plugins/net_plugin/net_plugin.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 1222eb23b0..0f08cc947a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -4151,13 +4151,13 @@ namespace eosio { return; } }); - app().executor().post(priority::highest, [my=shared_from_this()](){ - my->ticker(); - my->start_monitors(); - my->update_chain_info(); - my->connections.connect_supplied_peers(); - }); } + app().executor().post(priority::highest, [my=shared_from_this()](){ + my->ticker(); + my->start_monitors(); + my->update_chain_info(); + my->connections.connect_supplied_peers(); + }); } void net_plugin::plugin_initialize( const variables_map& options ) { From 0d514c67a19123194303eae366880fbe5bfccfa0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 17 Jul 2023 11:55:42 -0500 Subject: [PATCH 109/180] GH-1279 Print non-default options --- libraries/appbase | 2 +- programs/nodeos/main.cpp | 48 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/libraries/appbase b/libraries/appbase index 54cc7fb4f9..0f959acba5 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 54cc7fb4f9a3dbbe2c71bdf23f9342c9c01b9673 +Subproject commit 0f959acba5664e0271d3ba6cf3dc1d99abe2663f diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 2deda685f6..dc9552e048 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -11,10 +11,16 @@ #include #include #include +#include #include #include +#include +#include +#include +#include + #include "config.hpp" using namespace appbase; @@ -22,6 +28,39 @@ using namespace eosio; namespace detail { +void log_non_default_options(const std::vector>& options) { + string result; + for (const auto& op : options) { + bool mask = false; + if (op.string_key == "signature-provider" + || op.string_key == 
"peer-private-key" + || op.string_key == "p2p-auto-bp-peer") { + mask = true; + } + std::string v; + for (auto i = op.value.cbegin(), b = op.value.cbegin(), e = op.value.cend(); i != e; ++i) { + if (i != b) + v += ", "; + if (mask) + v += "***"; + else + v += *i; + } + + if (!result.empty()) + result += ", "; + + if (v.empty()) { + result += op.string_key; + } else { + result += op.string_key; + result += " = "; + result += v; + } + } + ilog("Non-default options: ${v}", ("v", result)); +} + fc::logging_config& add_deep_mind_logger(fc::logging_config& config) { config.appenders.push_back( fc::appender_config( "deep-mind", "dmlog" ) @@ -109,6 +148,12 @@ int main(int argc, char** argv) { try { appbase::scoped_app app; + fc::scoped_exit> on_exit = [&]() { + ilog("${name} version ${ver} ${fv}", + ("name", nodeos::config::node_executable_name)("ver", app->version_string()) + ("fv", app->version_string() == app->full_version_string() ? "" : app->full_version_string()) ); + ::detail::log_non_default_options(app->get_parsed_options()); + }; uint32_t short_hash = 0; fc::from_hex(eosio::version::version_hash(), (char*)&short_hash, sizeof(short_hash)); @@ -137,11 +182,12 @@ int main(int argc, char** argv) elog("resource_monitor_plugin failed to initialize"); return INITIALIZE_FAIL; } - ilog( "${name} version ${ver} ${fv}", + ilog("${name} version ${ver} ${fv}", ("name", nodeos::config::node_executable_name)("ver", app->version_string()) ("fv", app->version_string() == app->full_version_string() ? 
"" : app->full_version_string()) ); ilog("${name} using configuration file ${c}", ("name", nodeos::config::node_executable_name)("c", app->full_config_file_path().string())); ilog("${name} data directory is ${d}", ("name", nodeos::config::node_executable_name)("d", app->data_dir().string())); + ::detail::log_non_default_options(app->get_parsed_options()); app->startup(); app->set_thread_priority_max(); app->exec(); From 73e6abea618f27ce921a6b61f1d5a48d85daa175 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 17 Jul 2023 18:00:12 -0500 Subject: [PATCH 110/180] GH-1279 Update appbase to main --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 0f959acba5..2da170ea8c 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 0f959acba5664e0271d3ba6cf3dc1d99abe2663f +Subproject commit 2da170ea8c39442c7d1374c3403e80d60338b34d From 81a3a773983b9b42bb739b89e1cfda9a0ababd5d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 17 Jul 2023 18:23:27 -0500 Subject: [PATCH 111/180] GH-1279 Use string literals --- programs/nodeos/main.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index dc9552e048..6c99a2fdd4 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -29,12 +29,13 @@ using namespace eosio; namespace detail { void log_non_default_options(const std::vector>& options) { + using namespace std::string_literals; string result; for (const auto& op : options) { bool mask = false; - if (op.string_key == "signature-provider" - || op.string_key == "peer-private-key" - || op.string_key == "p2p-auto-bp-peer") { + if (op.string_key == "signature-provider"s + || op.string_key == "peer-private-key"s + || op.string_key == "p2p-auto-bp-peer"s) { mask = true; } std::string v; From 1c81166570655840918aea5e8167a1dd3f7935a0 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak 
Date: Mon, 17 Jul 2023 19:05:47 -0500 Subject: [PATCH 112/180] Restore running nodeos with zero p2p listen ports. --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0f08cc947a..f8a4fb0596 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3962,7 +3962,7 @@ namespace eosio { std::chrono::seconds( options.at("connection-cleanup-period").as() ), options.at("max-clients").as() ); - if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty()) { + if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty() && options.at("p2p-listen-endpoint").as>()[0].length()) { p2p_addresses = options.at( "p2p-listen-endpoint" ).as>(); auto addr_count = p2p_addresses.size(); std::sort(p2p_addresses.begin(), p2p_addresses.end()); From ffcd97e6cf1aed6698e522fe1a359951249f466a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 11:16:29 -0400 Subject: [PATCH 113/180] Add boost to install --- CMakeLists.txt | 7 ++ scripts/MakeBoostDistro.py | 212 +++++++++++++++++++++++++++++++++++++ 2 files changed, 219 insertions(+) create mode 100755 scripts/MakeBoostDistro.py diff --git a/CMakeLists.txt b/CMakeLists.txt index d983207680..c66ee52cdf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -272,6 +272,13 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) +add_custom_target(boost_install ALL) +add_custom_command(TARGET boost_install + COMMAND ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py 
${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist + VERBATIM) + +install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) + add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_BINARY_DIR}" diff --git a/scripts/MakeBoostDistro.py b/scripts/MakeBoostDistro.py new file mode 100755 index 0000000000..e7e8f02fd9 --- /dev/null +++ b/scripts/MakeBoostDistro.py @@ -0,0 +1,212 @@ +#!/usr/bin/python3 +# + +# Prepare a boost checkout for release +# 1) Copy all the files at the root level to the dest folder ($DEST) +# 2) Copy all the "special" folders to the dest folder ($DEST) +# 3) copy all the files from $SOURCE/libs to $DEST/libs +# 4a) For each subproject, copy everything except "include" into $DEST/libs +# 4b) For each subproject, copy the contents of the "includes" folder into $DEST/boost +# +# Usage: %0 source dest + +from __future__ import print_function + +import os, sys +import shutil +import stat +import six +import datetime + +IgnoreFiles = shutil.ignore_patterns( + '[.]*', + '[.]gitattributes', + '[.]gitignore', + '[.]gitmodules', + '[.]travis[.]yml', + 'appveyor[.]yml', + 'circle[.]yml') + +def IgnoreFile(src, name): + return len(IgnoreFiles(src, [name])) > 0 + +## from +def MergeTree(src, dst, symlinks = False): + if not os.path.exists(dst): + os.makedirs(dst) + shutil.copystat(src, dst) + lst = os.listdir(src) + excl = IgnoreFiles(src, lst) + lst = [x for x in lst if x not in excl] + for item in lst: + s = os.path.join(src, item) + d = os.path.join(dst, item) + if symlinks and os.path.islink(s): + if os.path.lexists(d): + os.remove(d) + os.symlink(os.readlink(s), d) + try: + st = os.lstat(s) + mode = stat.S_IMODE(st.st_mode) + os.lchmod(d, mode) + except: + pass # lchmod not available + elif os.path.isdir(s): + MergeTree(s, d, symlinks) + else: + if os.path.exists(d): + print("## Overwriting file %s with %s" % (d, 
s)) + shutil.copy2(s, d) + + +def CopyFile (s, d, f): + if os.path.isfile(os.path.join(s,f)) and not IgnoreFile(s, f): + shutil.copy2(os.path.join(s,f), os.path.join(d,f)) + +def CopyDir (s, d, dd): + if os.path.isdir(os.path.join(s,dd)) and not IgnoreFile(s, dd): + shutil.copytree(os.path.join(s,dd), os.path.join(d,dd), symlinks=False, ignore=IgnoreFiles) + +def MergeIf(s, d, dd): +# if dd == 'detail': +# print "MergeIf %s -> %s" % (os.path.join(s, dd), os.path.join(d, dd)) + if os.path.exists(os.path.join(s, dd)): + MergeTree(os.path.join(s, dd), os.path.join(d, dd), symlinks=False) + +def CopyInclude(src, dst): + for item in os.listdir(src): + if IgnoreFile(src, item): + continue + if item == 'pending': + continue + if item == 'detail': + continue + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + MergeTree(s, d, symlinks=False) + else: + if os.path.exists(d): + print("## Overwriting file %s with %s" % (d, s)) + CopyFile(src, dst, item) + + +def CopySubProject(src, dst, headers, p): + # First, everything except the "include" directory + Source = os.path.join(src,p) + Dest = os.path.join(dst,p) + # print "CopySubProject %p" % p + os.makedirs(Dest) + for item in os.listdir(Source): + if os.path.isfile(os.path.join(Source, item)): + CopyFile(Source, Dest, item) + elif item != "include": + CopyDir(Source, Dest, item) + + #shutil.copytree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', "include")) + + # Now the includes + Source = os.path.join(src, "%s/include/boost" % p) + if os.path.exists(Source): + CopyInclude(Source, headers) +# MergeTree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', 'detail', 'pending')) + MergeIf(Source, headers, 'detail') + MergeIf(Source, headers, 'pending') + + +def CopyNestedProject(src, dst, headers, p): + # First, everything except the "include" directory + Source = os.path.join(src,p[1]) + Dest = os.path.join(dst,p[1]) + os.makedirs(Dest) + for item in 
os.listdir(Source): + if os.path.isfile(os.path.join(Source, item)): + CopyFile(Source, Dest, item) + elif item != "include": + CopyDir(Source, Dest, item) + # shutil.copytree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', "include")) + + Source = os.path.join(src, "%s/include/boost" % (p[1])) + # Dest = os.path.join(headers, p) + # print "Installing headers from %s to %s" % (Source, headers) + CopyInclude(Source, headers) + # # MergeTree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', 'detail', 'pending')) + # MergeIf(Source, headers, 'detail') + # MergeIf(Source, headers, 'pending') + +BoostHeaders = "boost" +BoostLibs = "libs" + +BoostSpecialFolders = [ "doc", "more", "status", "tools" ] + +SourceRoot = sys.argv[1] +DestRoot = sys.argv[2] + +print("Source = %s" % SourceRoot) +print("Dest = %s" % DestRoot) + +if not os.path.exists(SourceRoot): + print("## Error: %s does not exist" % SourceRoot) + exit(1) + +if os.path.exists(DestRoot): + print("The destination directory already exists. 
All good.\n") + exit(0) + #timestamp1 = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + #os.rename(DestRoot,DestRoot + "_bck_" + timestamp1) + +if not os.path.exists(DestRoot): + print("Creating destination directory %s" % DestRoot) + os.makedirs(DestRoot) + +DestHeaders = os.path.join(DestRoot, BoostHeaders) +DestLibs = os.path.join(DestRoot, BoostLibs) +os.makedirs(DestHeaders) +os.makedirs(DestLibs) + +## Step 1 +for f in os.listdir(SourceRoot): + if f != 'CMakeLists.txt': + CopyFile(SourceRoot, DestRoot, f) + +## Step 2 +for d in BoostSpecialFolders: + CopyDir(SourceRoot, DestRoot, d) + +## Step 3 +SourceLibs = os.path.join(SourceRoot, BoostLibs) +for f in os.listdir(SourceLibs): + CopyFile(SourceLibs, DestLibs, f) + +## Step 4 +BoostSubProjects = set() +for f in os.listdir(SourceLibs): + if os.path.isdir(os.path.join(SourceLibs,f)): + if os.path.isfile(os.path.join(SourceLibs,f,"meta","libraries.json")): + BoostSubProjects.add(f) + elif os.path.isdir(os.path.join(SourceLibs,f,"include")): + BoostSubProjects.add(f) + elif f == 'headers': + BoostSubProjects.add(f) + elif os.path.isfile(os.path.join(SourceLibs,f,"sublibs")): + for s in os.listdir(os.path.join(SourceLibs,f)): + if os.path.isdir(os.path.join(SourceLibs,f,s)): + if os.path.isfile(os.path.join(SourceLibs,f,s,"meta","libraries.json")): + BoostSubProjects.add((f,s)) + elif os.path.isdir(os.path.join(SourceLibs,f,s,"include")): + BoostSubProjects.add((f,s)) + +for p in BoostSubProjects: + if isinstance(p, six.string_types): + CopySubProject(SourceLibs, DestLibs, DestHeaders, p) + else: + NestedSource = os.path.join(SourceRoot,"libs",p[0]) + NestedDest = os.path.join(DestRoot,"libs",p[0]) + NestedHeaders = os.path.join(DestRoot,"boost") + if not os.path.exists(NestedDest): + os.makedirs(NestedDest) + if not os.path.exists(NestedHeaders): + os.makedirs(NestedHeaders) + for f in os.listdir(NestedSource): + CopyFile(NestedSource, NestedDest, f) + CopyNestedProject(NestedSource, NestedDest, 
NestedHeaders, p) From ea7131725813f2f3e2dc0d39f1608f67fe630c02 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 11:30:23 -0400 Subject: [PATCH 114/180] Update appbase --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 6316189788..fe1b3a6cd9 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 63161897889248ebf8fb3bfae8cfe0936b373b6b +Subproject commit fe1b3a6cd9b6f7529d6fb4beac0e880d136308a8 From 53228de8d2df05bd273bdf4fdf22ea8f653d8485 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 16:54:41 -0400 Subject: [PATCH 115/180] Add `python3` to command. --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c66ee52cdf..9b729ab331 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -274,7 +274,7 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM add_custom_target(boost_install ALL) add_custom_command(TARGET boost_install - COMMAND ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist + COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist VERBATIM) install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) From 9cdae1082cec699f3e76e2cd271b199e72aa65df Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 18 Jul 2023 16:15:34 -0500 Subject: [PATCH 116/180] GH-1119 Move test_read_only_trx from producer_plugin/test to tests and update to set bios contract so wasm is used. 
--- plugins/producer_plugin/test/CMakeLists.txt | 1 - tests/CMakeLists.txt | 2 +- tests/read_only_trx_test.py | 6 + .../test => tests}/test_read_only_trx.cpp | 72 ++++++--- unittests/test_utils.hpp | 143 ++++++++++++++++++ 5 files changed, 199 insertions(+), 25 deletions(-) rename {plugins/producer_plugin/test => tests}/test_read_only_trx.cpp (79%) create mode 100644 unittests/test_utils.hpp diff --git a/plugins/producer_plugin/test/CMakeLists.txt b/plugins/producer_plugin/test/CMakeLists.txt index 043d56791c..42c42596f8 100644 --- a/plugins/producer_plugin/test/CMakeLists.txt +++ b/plugins/producer_plugin/test/CMakeLists.txt @@ -1,7 +1,6 @@ add_executable( test_producer_plugin test_trx_full.cpp test_options.cpp - test_read_only_trx.cpp test_block_timing_util.cpp main.cpp ) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 0dbeb89358..a90031dec8 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -76,7 +76,7 @@ else() endif() #To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. 
plugin_test -- --verbose -add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output) +add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output --catch_system_errors=no) add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/read_only_trx_test.py b/tests/read_only_trx_test.py index 3ee600fe8c..752f9775b7 100755 --- a/tests/read_only_trx_test.py +++ b/tests/read_only_trx_test.py @@ -100,6 +100,12 @@ def startCluster(): specificExtraNodeosArgs={} # producer nodes will be mapped to 0 through pnodes-1, so the number pnodes is the no-producing API node specificExtraNodeosArgs[pnodes]=" --plugin eosio::net_api_plugin" + specificExtraNodeosArgs[pnodes]+=" --read-only-write-window-time-us " + specificExtraNodeosArgs[pnodes]+=" 10000 " + specificExtraNodeosArgs[pnodes]+=" --read-only-read-window-time-us " + specificExtraNodeosArgs[pnodes]+=" 490000 " + specificExtraNodeosArgs[pnodes]+=" --eos-vm-oc-cache-size-mb " + specificExtraNodeosArgs[pnodes]+=" 1 " # set small so there is churn specificExtraNodeosArgs[pnodes]+=" --read-only-threads " specificExtraNodeosArgs[pnodes]+=str(args.read_only_threads) if args.eos_vm_oc_enable: diff --git a/plugins/producer_plugin/test/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp similarity index 79% rename from plugins/producer_plugin/test/test_read_only_trx.cpp rename to tests/test_read_only_trx.cpp index 19c25d0d60..db36eea1e9 100644 --- a/plugins/producer_plugin/test/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -1,36 +1,33 @@ #include +#include #include #include +#include +#include +#include +#include #include #include +#include #include #include +#include #include #include -namespace eosio::test::detail { -using namespace eosio::chain::literals; -struct testit { - uint64_t id; - testit( 
uint64_t id = 0 ) - :id(id){} - static account_name get_account() { - return chain::config::system_account_name; - } - static action_name get_name() { - return "testit"_n; - } -}; -} -FC_REFLECT( eosio::test::detail::testit, (id) ) +#include + +#include +#include + namespace { using namespace eosio; using namespace eosio::chain; -using namespace eosio::test::detail; +using namespace eosio::test_utils; -auto make_unique_trx( const chain_id_type& chain_id ) { +auto make_unique_trx() { static uint64_t nextid = 0; ++nextid; account_name creator = config::system_account_name; @@ -90,7 +87,12 @@ BOOST_AUTO_TEST_CASE(not_check_configs_if_no_read_only_threads) { test_configs_common(specific_args, app_init_status::succeeded); } -void test_trxs_common(std::vector& specific_args) { +void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { + fc::scoped_exit> on_exit = []() { + chain::wasm_interface_collection::test_disable_tierup = false; + }; + chain::wasm_interface_collection::test_disable_tierup = test_disable_tierup; + using namespace std::chrono_literals; fc::temp_directory temp; appbase::scoped_app app; @@ -104,13 +106,15 @@ void test_trxs_common(std::vector& specific_args) { std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; argv.insert( argv.end(), specific_args.begin(), specific_args.end() ); app->initialize( argv.size(), (char**) &argv[0] ); + app->find_plugin()->chain(); app->startup(); plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); app->exec(); } ); auto[prod_plug, chain_plug] = plugin_fut.get(); - auto chain_id = chain_plug->get_chain_id(); + + activate_protocol_features_set_bios_contract(app, chain_plug); std::atomic next_calls = 0; std::atomic num_get_account_calls = 0; @@ -120,12 +124,12 @@ void test_trxs_common(std::vector& specific_args) { const size_t num_pushes = 4242; for( size_t i = 1; i <= num_pushes; ++i ) { - auto ptrx = make_unique_trx( chain_id ); 
+ auto ptrx = i % 3 == 0 ? make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); ++num_get_account_calls; }); - app->executor().post( priority::low, exec_queue::read_write, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { + app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { ++num_posts; bool return_failure_traces = true; app->get_method()(ptrx, @@ -198,6 +202,17 @@ BOOST_AUTO_TEST_CASE(with_3_read_only_threads) { test_trxs_common(specific_args); } +// test read-only trxs on 3 threads (with --read-only-threads) +BOOST_AUTO_TEST_CASE(with_3_read_only_threads_no_tierup) { + std::vector specific_args = { "-p", "eosio", "-e", + "--read-only-threads=3", + "--max-transaction-time=10", + "--abi-serializer-max-time-ms=999", + "--read-only-write-window-time-us=100000", + "--read-only-read-window-time-us=40000" }; + test_trxs_common(specific_args, true); +} + // test read-only trxs on 8 separate threads (with --read-only-threads) BOOST_AUTO_TEST_CASE(with_8_read_only_threads) { std::vector specific_args = { "-p", "eosio", "-e", @@ -205,10 +220,21 @@ BOOST_AUTO_TEST_CASE(with_8_read_only_threads) { "--eos-vm-oc-enable=none", "--max-transaction-time=10", "--abi-serializer-max-time-ms=999", - "--read-only-write-window-time-us=100000", - "--read-only-read-window-time-us=40000" }; + "--read-only-write-window-time-us=10000", + "--read-only-read-window-time-us=400000" }; test_trxs_common(specific_args); } +// test read-only trxs on 8 separate threads (with --read-only-threads) +BOOST_AUTO_TEST_CASE(with_8_read_only_threads_no_tierup) { + std::vector specific_args = { "-p", 
"eosio", "-e", + "--read-only-threads=8", + "--eos-vm-oc-enable=none", + "--max-transaction-time=10", + "--abi-serializer-max-time-ms=999", + "--read-only-write-window-time-us=10000", + "--read-only-read-window-time-us=400000" }; + test_trxs_common(specific_args, true); +} BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp new file mode 100644 index 0000000000..f7e8da9ea7 --- /dev/null +++ b/unittests/test_utils.hpp @@ -0,0 +1,143 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace eosio::test_utils { + +using namespace eosio::chain; +using namespace eosio::chain::literals; + +struct testit { + uint64_t id; + explicit testit(uint64_t id = 0) + :id(id){} + static account_name get_account() { + return chain::config::system_account_name; + } + static action_name get_name() { + return "testit"_n; + } +}; + +// Corresponds to the reqactivated action of the bios contract. 
+// See libraries/testing/contracts/eosio.bios/eosio.bios.hpp +struct reqactivated { + chain::digest_type feature_digest; + + explicit reqactivated(const chain::digest_type& fd) + :feature_digest(fd){}; + + static account_name get_account() { + return chain::config::system_account_name; + } + static action_name get_name() { + return "reqactivated"_n; + } +}; + +// Create a read-only trx that works with bios reqactivated action +auto make_bios_ro_trx(eosio::chain::controller& control) { + const auto& pfm = control.get_protocol_feature_manager(); + static auto feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::replace_deferred); + + signed_transaction trx; + trx.expiration = fc::time_point_sec{fc::time_point::now() + fc::seconds(30)}; + vector no_auth{}; + trx.actions.emplace_back( no_auth, reqactivated{*feature_digest} ); + return std::make_shared( std::move(trx) ); +} + +// Push an input transaction to controller and return trx trace +// If account is eosio then signs with the default private key +auto push_input_trx(eosio::chain::controller& control, account_name account, signed_transaction& trx) { + trx.expiration = fc::time_point_sec{fc::time_point::now() + fc::seconds(30)}; + trx.set_reference_block( control.head_block_id() ); + if (account == config::system_account_name) { + auto default_priv_key = private_key_type::regenerate(fc::sha256::hash(std::string("nathan"))); + trx.sign(default_priv_key, control.get_chain_id()); + } else { + trx.sign(testing::tester::get_private_key(account, "active"), control.get_chain_id()); + } + auto ptrx = std::make_shared( trx, packed_transaction::compression_type::zlib ); + auto fut = transaction_metadata::start_recover_keys( ptrx, control.get_thread_pool(), control.get_chain_id(), fc::microseconds::maximum(), transaction_metadata::trx_type::input ); + auto r = control.push_transaction( fut.get(), fc::time_point::maximum(), fc::microseconds::maximum(), 0, false, 0 ); + return r; +} + +// Push setcode trx to 
controller and return trx trace +auto set_code(eosio::chain::controller& control, account_name account, const vector& wasm) { + signed_transaction trx; + trx.actions.emplace_back(std::vector{{account, config::active_name}}, + chain::setcode{ + .account = account, + .vmtype = 0, + .vmversion = 0, + .code = bytes(wasm.begin(), wasm.end()) + }); + return push_input_trx(control, account, trx); +} + +void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chain_plugin* chain_plug) { + using namespace appbase; + + std::promise feature_promise; + std::future feature_future = feature_promise.get_future(); + app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &feature_promise](){ + const auto& pfm = chain_plug->chain().get_protocol_feature_manager(); + auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); + BOOST_CHECK( preactivate_feature_digest ); + chain_plug->chain().preactivate_feature( *preactivate_feature_digest, false ); + std::vector pfs{ + builtin_protocol_feature_t::only_link_to_existing_permission, + builtin_protocol_feature_t::replace_deferred, + builtin_protocol_feature_t::no_duplicate_deferred_id, + builtin_protocol_feature_t::fix_linkauth_restriction, + builtin_protocol_feature_t::disallow_empty_producer_schedule, + builtin_protocol_feature_t::restrict_action_to_self, + builtin_protocol_feature_t::only_bill_first_authorizer, + builtin_protocol_feature_t::forward_setcode, + builtin_protocol_feature_t::get_sender, + builtin_protocol_feature_t::ram_restrictions, + builtin_protocol_feature_t::webauthn_key, + builtin_protocol_feature_t::wtmsig_block_signatures }; + for (const auto t : pfs) { + auto feature_digest = pfm.get_builtin_digest(t); + BOOST_CHECK( feature_digest ); + chain_plug->chain().preactivate_feature( *feature_digest, false ); + } + feature_promise.set_value(); + }); + + // Wait for next block + std::this_thread::sleep_for( 
std::chrono::milliseconds(config::block_interval_ms) ); + + if (feature_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) + throw std::runtime_error("failed to preactivate features"); + + std::promise setcode_promise; + std::future setcode_future = setcode_promise.get_future(); + app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &setcode_promise](){ + auto r = set_code(chain_plug->chain(), config::system_account_name, testing::contracts::eosio_bios_wasm()); + BOOST_CHECK(r->receipt && r->receipt->status == transaction_receipt_header::executed); + setcode_promise.set_value(); + }); + + if (setcode_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) + throw std::runtime_error("failed to setcode"); +} + + +} // namespace eosio::test_utils + +FC_REFLECT( eosio::test_utils::testit, (id) ) +FC_REFLECT( eosio::test_utils::reqactivated, (feature_digest) ) From e8ca2472860d90a01a3a027ff744702588f6d8ed Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 18 Jul 2023 16:16:31 -0500 Subject: [PATCH 117/180] GH-1119 Fix initialization of wasm interfaces and usage in apply for multiple threads. Also add ability to disable tierup for testing. 
--- .../eosio/chain/wasm_interface_collection.hpp | 2 ++ libraries/chain/wasm_interface_collection.cpp | 21 ++++++++----------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp index 36af0ce3ec..4ee4ac7388 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -11,6 +11,8 @@ namespace eosio::chain { */ class wasm_interface_collection { public: + inline static bool test_disable_tierup = false; // set by unittests to test tierup failing + wasm_interface_collection(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, bool profile); diff --git a/libraries/chain/wasm_interface_collection.cpp b/libraries/chain/wasm_interface_collection.cpp index 2b437a2794..eace6f6517 100644 --- a/libraries/chain/wasm_interface_collection.cpp +++ b/libraries/chain/wasm_interface_collection.cpp @@ -22,7 +22,7 @@ struct eosvmoc_tier { eosvmoc::code_cache_async cc; - // Each thread requires its own exec and mem. Defined in wasm_interface.cpp + // Each thread requires its own exec and mem. thread_local static std::unique_ptr exec; thread_local static eosvmoc::memory mem; }; @@ -58,6 +58,8 @@ void wasm_interface_collection::apply(const digest_type& code_hash, const uint8_ try { const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name; cd = eosvmoc->cc.get_descriptor_for_code(high_priority, code_hash, vm_version, context.control.is_write_window(), failure); + if (test_disable_tierup) + cd = nullptr; } catch (...) 
{ // swallow errors here, if EOS VM OC has gone in to the weeds we shouldn't bail: continue to try and run baseline // In the future, consider moving bits of EOS VM that can fire exceptions and such out of this call path @@ -74,11 +76,7 @@ void wasm_interface_collection::apply(const digest_type& code_hash, const uint8_ } } #endif - if (is_on_main_thread() -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - || is_eos_vm_oc_enabled() -#endif - ) { + if (is_on_main_thread()) { wasmif.apply(code_hash, vm_type, vm_version, context); return; } @@ -112,13 +110,12 @@ void wasm_interface_collection::init_thread_local_data(const chainbase::database if (eosvmoc) eosvmoc->init_thread_local_data(); wasmif.init_thread_local_data(); - } else -#endif - { - std::lock_guard g(threaded_wasmifs_mtx); - // Non-EOSVMOC needs a wasmif per thread - threaded_wasmifs[std::this_thread::get_id()] = std::make_unique(wasm_runtime, d, data_dir, eosvmoc_config, profile); } +#endif + + std::lock_guard g(threaded_wasmifs_mtx); + // Non-EOSVMOC needs a wasmif per thread + threaded_wasmifs[std::this_thread::get_id()] = std::make_unique(wasm_runtime, d, data_dir, eosvmoc_config, profile); } void wasm_interface_collection::code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num) { From 12528894f4486dab148822698a798375c6781b03 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 18 Jul 2023 16:37:36 -0500 Subject: [PATCH 118/180] GH-1119 Add needed include --- unittests/test_utils.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp index f7e8da9ea7..d14815580e 100644 --- a/unittests/test_utils.hpp +++ b/unittests/test_utils.hpp @@ -11,6 +11,7 @@ #include #include #include +#include namespace eosio::test_utils { From 6ce21266aa9e9ef394c8cf81742616adc9a6d960 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 17:54:22 -0400 Subject: [PATCH 119/180] whitespace change --- CMakeLists.txt | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9b729ab331..0a34b59c18 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -277,7 +277,7 @@ add_custom_command(TARGET boost_install COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist VERBATIM) -install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) +install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From 3204110e5ad11ee0e76078a5cda416a8925dc409 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 18:00:33 -0400 Subject: [PATCH 120/180] Add `python3-all` to `.cicd/platforms/ubuntu22.Dockerfile` --- .cicd/platforms/ubuntu22.Dockerfile | 1 + CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index fd943f7043..0ef6f4c86d 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,6 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ + python3-all \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/CMakeLists.txt b/CMakeLists.txt index 0a34b59c18..9b729ab331 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -277,7 +277,7 @@ add_custom_command(TARGET boost_install COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist VERBATIM) -install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) +install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build 
"${CMAKE_BINARY_DIR}" From 158bd75cd03908f8396f9863cf5d6ab606b44eb9 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 18:02:35 -0400 Subject: [PATCH 121/180] Add `python3-distutils` to `.cicd/platforms/ubuntu22.Dockerfile` --- .cicd/platforms/ubuntu22.Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 0ef6f4c86d..fa3da263dd 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,6 +12,7 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-all \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From 28a47da37001003fe5cb76e57aca1a5a731a7c0f Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 18:46:42 -0400 Subject: [PATCH 122/180] Add `python3-all` to `.cicd/platforms/ubuntu20.Dockerfile` and remove `python3-distutils` from `.cicd/platforms/ubuntu22.Dockerfile` --- .cicd/platforms/ubuntu20.Dockerfile | 1 + .cicd/platforms/ubuntu22.Dockerfile | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index c60c53f5bb..3924f7ffe5 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,6 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ + python3-all \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index fa3da263dd..0ef6f4c86d 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,7 +12,6 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-all \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From fe1faf8672b8ace12b86c18f98caffc4d6431b6c Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 13:28:35 -0400 
Subject: [PATCH 123/180] Update `CMakeModules/EosioTesterBuild.cmake.in` to support installed boost --- CMakeLists.txt | 13 ++++++++++++ CMakeModules/EosioTesterBuild.cmake.in | 28 +++++++++++++++++++++++--- libraries/CMakeLists.txt | 2 +- 3 files changed, 39 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9b729ab331..7a87a9265e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -279,6 +279,19 @@ add_custom_command(TARGET boost_install install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_date_time COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_filesystem COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_system COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_chrono COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_multi_index COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_multiprecision COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_interprocess COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_asio COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_signals2 COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_iostreams COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_unit_test_framework COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_headers COMPONENT dev EXCLUDE_FROM_ALL) + add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_BINARY_DIR}" diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 331099f06a..dbb2cc85b5 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -33,7 +33,17 @@ else ( APPLE ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) +IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) + 
add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) +else() + find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS + date_time + filesystem + system + chrono + iostreams + unit_test_framework) +endif() find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) @@ -77,8 +87,10 @@ macro(add_eosio_test_executable test_name) ${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@ + @GMP_LIBRARY@ ) +IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) + target_link_libraries( ${test_name} Boost::date_time Boost::filesystem Boost::system @@ -90,8 +102,18 @@ macro(add_eosio_test_executable test_name) Boost::signals2 Boost::iostreams "-lz" # Needed by Boost iostreams - Boost::unit_test_framework + Boost::unit_test_framework) +else() + target_link_libraries( ${test_name} + ${Boost_FILESYSTEM_LIBRARY} + ${Boost_SYSTEM_LIBRARY} + ${Boost_CHRONO_LIBRARY} + ${Boost_IOSTREAMS_LIBRARY} + "-lz" # Needed by Boost iostreams + ${Boost_DATE_TIME_LIBRARY}) +endif() + target_link_libraries( ${test_name} ${LLVM_LIBS} ${PLATFORM_SPECIFIC_LIBS} diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 462d73801c..e7ad9b144e 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -7,7 +7,7 @@ set(BN256_INSTALL_COMPONENT "dev") set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -add_subdirectory( boost ) +add_subdirectory( boost EXCLUDE_FROM_ALL ) add_subdirectory( libfc ) add_subdirectory( builtins ) From db93c63a57cdcfd1e8f37de9e69528bbbbe0101d Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Wed, 19 Jul 2023 12:39:08 -0500 Subject: [PATCH 124/180] Set p2p_address for outgoing connections as well. Outbound connections are always attributed to the first listen address. 
--- .../eosio/net_plugin/auto_bp_peering.hpp | 2 +- plugins/net_plugin/net_plugin.cpp | 46 ++++++++++--------- .../tests/auto_bp_peering_unittest.cpp | 9 ++-- tests/CMakeLists.txt | 2 +- 4 files changed, 31 insertions(+), 28 deletions(-) diff --git a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp index a394312669..b5122f80aa 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp @@ -182,7 +182,7 @@ class bp_connection_manager { fc_dlog(self()->get_logger(), "pending_downstream_neighbors: ${pending_downstream_neighbors}", ("pending_downstream_neighbors", to_string(pending_downstream_neighbors))); - for (auto neighbor : pending_downstream_neighbors) { self()->connections.connect(config.bp_peer_addresses[neighbor]); } + for (auto neighbor : pending_downstream_neighbors) { self()->connections.connect(config.bp_peer_addresses[neighbor], *self()->p2p_addresses.begin() ); } pending_neighbors = std::move(pending_downstream_neighbors); finder.add_upstream_neighbors(pending_neighbors); diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index f8a4fb0596..b437580a44 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -350,7 +350,7 @@ namespace eosio { private: // must call with held mutex connection_ptr find_connection_i(const string& host) const; void add_i(connection_ptr&& c); - void connect_i(const string& peer); + void connect_i(const string& peer, const string& p2p_address); void connection_monitor(const std::weak_ptr& from_connection); @@ -370,14 +370,14 @@ namespace eosio { void register_update_p2p_connection_metrics(std::function&& fun); - void connect_supplied_peers(); + void connect_supplied_peers(const string& p2p_address); void start_conn_timer(); void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr 
from_connection); void stop_conn_timer(); void add(connection_ptr c); - string connect(const string& host); + string connect(const string& host, const string& p2p_address); string disconnect(const string& host); void close_all(); @@ -533,7 +533,7 @@ namespace eosio { bool in_sync() const; fc::logger& get_logger() { return logger; } - void create_session(tcp::socket&& socket, const string& p2p_address); + void create_session(tcp::socket&& socket, const string p2p_address); }; // peer_[x]log must be called from thread in connection strand @@ -765,7 +765,7 @@ namespace eosio { public: enum class connection_state { connecting, connected, closing, closed }; - explicit connection( const string& endpoint ); + explicit connection( const string& endpoint, const string& address ); /// @brief ctor /// @param socket created by boost::asio in fc::listener /// @param address identifier of listen socket which accepted this new connection @@ -808,6 +808,7 @@ namespace eosio { std::atomic conn_state{connection_state::connecting}; + string p2p_address; // address string used in handshake const string peer_addr; enum connection_types : char { both, @@ -828,7 +829,6 @@ namespace eosio { queued_buffer buffer_queue; - string p2p_address; // address string used in handshake fc::sha256 conn_node_id; string short_conn_node_id; string log_p2p_address; @@ -1146,8 +1146,9 @@ namespace eosio { //--------------------------------------------------------------------------- - connection::connection( const string& endpoint ) - : peer_addr( endpoint ), + connection::connection( const string& endpoint, const string& address ) + : p2p_address( address ), + peer_addr( endpoint ), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ), log_p2p_address( endpoint ), @@ -1161,16 +1162,17 @@ namespace eosio { } connection::connection(tcp::socket&& s, const string& address) - : peer_addr(), + : p2p_address( address), + peer_addr(), strand( 
my_impl->thread_pool.get_executor() ), socket( new tcp::socket( std::move(s) ) ), - p2p_address( address), connection_id( ++my_impl->current_connection_id ), response_expected_timer( my_impl->thread_pool.get_executor() ), last_handshake_recv(), last_handshake_sent() { - fc_dlog( logger, "new connection object created" ); + update_endpoints(); + fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", p2p_address) ); } // called from connection strand @@ -1244,7 +1246,6 @@ namespace eosio { bool connection::start_session() { verify_strand_in_this_thread( strand, __func__, __LINE__ ); - update_endpoints(); boost::asio::ip::tcp::no_delay nodelay( true ); boost::system::error_code ec; socket->set_option( nodelay, ec ); @@ -2675,7 +2676,7 @@ namespace eosio { } - void net_plugin_impl::create_session(tcp::socket&& socket, const string& p2p_address) { + void net_plugin_impl::create_session(tcp::socket&& socket, const string p2p_address) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; @@ -2700,7 +2701,7 @@ namespace eosio { (auto_bp_peering_enabled() || connections.get_max_client_count() == 0 || visitors < connections.get_max_client_count())) { fc_ilog(logger, "Accepted new connection: " + paddr_str); - +fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr", p2p_address)); connection_ptr new_connection = std::make_shared(std::move(socket), p2p_address); new_connection->strand.post([new_connection, this]() { if (new_connection->start_session()) { @@ -3848,6 +3849,7 @@ namespace eosio { // If we couldn't sign, don't send a token. 
if(hello.sig == chain::signature_type()) hello.token = sha256(); + peer_dlog( this, "populated handshake with address ${addr}", ("addr", p2p_address)); hello.p2p_address = p2p_address; if( is_transactions_only_connection() ) hello.p2p_address += ":trx"; // if we are not accepting transactions tell peer we are blocks only @@ -4156,7 +4158,7 @@ namespace eosio { my->ticker(); my->start_monitors(); my->update_chain_info(); - my->connections.connect_supplied_peers(); + my->connections.connect_supplied_peers(*my->p2p_addresses.begin()); // attribute every outbound connection to the first listen port }); } @@ -4193,7 +4195,7 @@ namespace eosio { /// RPC API string net_plugin::connect( const string& host ) { - return my->connections.connect( host ); + return my->connections.connect( host, *my->p2p_addresses.begin() ); } /// RPC API @@ -4267,10 +4269,10 @@ namespace eosio { update_p2p_connection_metrics = std::move(fun); } - void connections_manager::connect_supplied_peers() { + void connections_manager::connect_supplied_peers(const string& p2p_address) { std::lock_guard g(connections_mtx); for (const auto& peer : supplied_peers) { - connect_i(peer); + connect_i(peer, p2p_address); } } @@ -4280,12 +4282,12 @@ namespace eosio { } // called by API - string connections_manager::connect( const string& host ) { + string connections_manager::connect( const string& host, const string& p2p_address ) { std::lock_guard g( connections_mtx ); if( find_connection_i( host ) ) return "already connected"; - connect_i( host ); + connect_i( host, p2p_address ); supplied_peers.insert(host); return "added connection"; } @@ -4342,8 +4344,8 @@ namespace eosio { } // call with connections_mtx - void connections_manager::connect_i( const string& host ) { - connection_ptr c = std::make_shared( host ); + void connections_manager::connect_i( const string& host, const string& p2p_address ) { + connection_ptr c = std::make_shared( host, p2p_address ); fc_dlog( logger, "calling active connector: ${h}", 
("h", host) ); if( c->resolve_and_connect() ) { fc_dlog( logger, "adding new connection to the list: ${host} ${cid}", ("host", host)("cid", c->connection_id) ); diff --git a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp index 93ac898a5b..6aa7fbebd6 100644 --- a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp +++ b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp @@ -18,7 +18,7 @@ struct mock_connections_manager { uint32_t max_client_count = 0; std::vector connections; - std::function connect; + std::function connect; std::function disconnect; uint32_t get_max_client_count() const { return max_client_count; } @@ -36,6 +36,7 @@ struct mock_net_plugin : eosio::auto_bp_peering::bp_connection_manager p2p_addresses{"0.0.0.0:9876"}; bool in_sync() { return is_in_sync; } @@ -165,7 +166,7 @@ BOOST_AUTO_TEST_CASE(test_on_pending_schedule) { std::vector connected_hosts; - plugin.connections.connect = [&connected_hosts](std::string host) { connected_hosts.push_back(host); }; + plugin.connections.connect = [&connected_hosts](std::string host, std::string p2p_address) { connected_hosts.push_back(host); }; // make sure nothing happens when it is not in_sync plugin.is_in_sync = false; @@ -209,7 +210,7 @@ BOOST_AUTO_TEST_CASE(test_on_active_schedule1) { plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n }; plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n }; - plugin.connections.connect = [](std::string host) {}; + plugin.connections.connect = [](std::string host, std::string p2p_address) {}; std::vector disconnected_hosts; plugin.connections.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; @@ -245,7 +246,7 @@ BOOST_AUTO_TEST_CASE(test_on_active_schedule2) { plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n }; plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n }; - plugin.connections.connect = [](std::string host) {}; + 
plugin.connections.connect = [](std::string host, std::string p2p_address) {}; std::vector disconnected_hosts; plugin.connections.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 0dbeb89358..be1df6a7dd 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -263,7 +263,7 @@ set_property(TEST nodeos_repeat_transaction_lr_test PROPERTY LABELS long_running add_test(NAME light_validation_sync_test COMMAND tests/light_validation_sync_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST light_validation_sync_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME auto_bp_peering_test COMMAND tests/auto_bp_peering_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME auto_bp_peering_test COMMAND tests/auto_bp_peering_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST auto_bp_peering_test PROPERTY LABELS long_running_tests) add_test(NAME gelf_test COMMAND tests/gelf_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) From eb0282eb969394fa916519e935136b4f55bc035c Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 13:50:36 -0400 Subject: [PATCH 125/180] Update `EosioTester.cmake.in` --- CMakeModules/EosioTester.cmake.in | 29 +++++++++++++++++++++++--- CMakeModules/EosioTesterBuild.cmake.in | 2 +- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index f4b78322b7..163c4eee9c 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -36,7 +36,17 @@ else ( APPLE ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) +IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) + add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost 
${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) +else() + find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS + date_time + filesystem + system + chrono + iostreams + unit_test_framework) +endif() find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) @@ -80,8 +90,10 @@ macro(add_eosio_test_executable test_name) ${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@ + @GMP_LIBRARY@) +IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) + target_link_libraries( ${test_name} Boost::date_time Boost::filesystem Boost::system @@ -93,7 +105,18 @@ macro(add_eosio_test_executable test_name) Boost::signals2 Boost::iostreams "-lz" # Needed by Boost iostreams - Boost::unit_test_framework + Boost::unit_test_framework) +else() + target_link_libraries( ${test_name} + ${Boost_FILESYSTEM_LIBRARY} + ${Boost_SYSTEM_LIBRARY} + ${Boost_CHRONO_LIBRARY} + ${Boost_IOSTREAMS_LIBRARY} + "-lz" # Needed by Boost iostreams + ${Boost_DATE_TIME_LIBRARY}) +endif() + + target_link_libraries( ${test_name} ${LLVM_LIBS} diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index dbb2cc85b5..fb43776048 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -87,7 +87,7 @@ macro(add_eosio_test_executable test_name) ${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@ ) + @GMP_LIBRARY@) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) target_link_libraries( ${test_name} From b4bc63e67e48a0b218756a648fa2e3be70e554d5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 14:19:22 -0400 Subject: [PATCH 126/180] remove boost version specification --- CMakeModules/EosioTester.cmake.in | 2 +- CMakeModules/EosioTesterBuild.cmake.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in 
index 163c4eee9c..941422cb58 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -39,7 +39,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS + find_package(Boost REQUIRED COMPONENTS  date_time  filesystem  system diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index fb43776048..617319a200 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -36,7 +36,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS + find_package(Boost REQUIRED COMPONENTS  date_time  filesystem  system From 91b96a0b35c10059c1470af6b18daa3b30b0e998 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 14:58:16 -0400 Subject: [PATCH 127/180] Use boost submodule organization always. 
--- CMakeLists.txt | 38 +++++++++++++------------- CMakeModules/EosioTester.cmake.in | 25 ++--------------- CMakeModules/EosioTesterBuild.cmake.in | 24 ++-------------- 3 files changed, 25 insertions(+), 62 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7a87a9265e..0b44f388b3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -272,25 +272,25 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) -add_custom_target(boost_install ALL) -add_custom_command(TARGET boost_install - COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist - VERBATIM) - -install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) - -install(TARGETS boost_date_time COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_filesystem COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_system COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_chrono COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_multi_index COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_multiprecision COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_interprocess COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_asio COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_signals2 COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_iostreams COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_unit_test_framework COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_headers COMPONENT dev EXCLUDE_FROM_ALL) +#add_custom_target(boost_install ALL) +#add_custom_command(TARGET boost_install +# COMMAND python3 
${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist +# VERBATIM) + +install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL) + +#install(TARGETS boost_date_time COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_filesystem COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_system COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_chrono COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_multi_index COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_multiprecision COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_interprocess COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_asio COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_signals2 COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_iostreams COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_unit_test_framework COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_headers COMPONENT dev EXCLUDE_FROM_ALL) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 941422cb58..a708e27164 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -39,13 +39,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - find_package(Boost REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) + add_subdirectory( ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) endif() find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) @@ -90,10 +84,8 @@ macro(add_eosio_test_executable test_name) 
${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@) + @GMP_LIBRARY@ -IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) - target_link_libraries( ${test_name} Boost::date_time Boost::filesystem Boost::system @@ -105,18 +97,7 @@ IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) Boost::signals2 Boost::iostreams "-lz" # Needed by Boost iostreams - Boost::unit_test_framework) -else() - target_link_libraries( ${test_name} - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} - "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY}) -endif() - - target_link_libraries( ${test_name} + Boost::unit_test_framework ${LLVM_LIBS} diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 617319a200..599ca2d58d 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -36,13 +36,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - find_package(Boost REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) + add_subdirectory( ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) endif() find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) @@ -87,10 +81,8 @@ macro(add_eosio_test_executable test_name) ${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@) + @GMP_LIBRARY@ -IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) - target_link_libraries( ${test_name} Boost::date_time Boost::filesystem Boost::system @@ -102,18 +94,8 @@ IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) Boost::signals2 Boost::iostreams "-lz" # Needed by Boost iostreams - Boost::unit_test_framework) -else() - target_link_libraries( ${test_name} - ${Boost_FILESYSTEM_LIBRARY} - 
${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} - "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY}) -endif() + Boost::unit_test_framework - target_link_libraries( ${test_name} ${LLVM_LIBS} ${PLATFORM_SPECIFIC_LIBS} From 4d71d2aa0792420e9046331f62a0295081a2824b Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Wed, 19 Jul 2023 14:02:03 -0500 Subject: [PATCH 128/180] Rename connection's p2p_address to listen_address. Erase undefined elements from p2p_addresses vector after std::unique(). Add assert before std::transform of two vectors. Const correctness and whitespace cleanup. --- plugins/net_plugin/net_plugin.cpp | 39 ++++++++++++++++--------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b437580a44..4995feeb96 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -533,7 +533,7 @@ namespace eosio { bool in_sync() const; fc::logger& get_logger() { return logger; } - void create_session(tcp::socket&& socket, const string p2p_address); + void create_session(tcp::socket&& socket, const string listen_address); }; // peer_[x]log must be called from thread in connection strand @@ -808,7 +808,7 @@ namespace eosio { std::atomic conn_state{connection_state::connecting}; - string p2p_address; // address string used in handshake + string listen_address; // address sent to peer in handshake const string peer_addr; enum connection_types : char { both, @@ -1147,7 +1147,7 @@ namespace eosio { //--------------------------------------------------------------------------- connection::connection( const string& endpoint, const string& address ) - : p2p_address( address ), + : listen_address( address ), peer_addr( endpoint ), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ), @@ -1162,7 +1162,7 @@ namespace eosio { } 
connection::connection(tcp::socket&& s, const string& address) - : p2p_address( address), + : listen_address( address), peer_addr(), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( std::move(s) ) ), @@ -1172,7 +1172,7 @@ namespace eosio { last_handshake_sent() { update_endpoints(); - fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", p2p_address) ); + fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", listen_address) ); } // called from connection strand @@ -2676,7 +2676,7 @@ namespace eosio { } - void net_plugin_impl::create_session(tcp::socket&& socket, const string p2p_address) { + void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; @@ -2701,8 +2701,8 @@ namespace eosio { (auto_bp_peering_enabled() || connections.get_max_client_count() == 0 || visitors < connections.get_max_client_count())) { fc_ilog(logger, "Accepted new connection: " + paddr_str); -fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr", p2p_address)); - connection_ptr new_connection = std::make_shared(std::move(socket), p2p_address); + + connection_ptr new_connection = std::make_shared(std::move(socket), listen_address); new_connection->strand.post([new_connection, this]() { if (new_connection->start_session()) { connections.add(new_connection); @@ -3165,9 +3165,9 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr if (msg.time + c_time <= check_time) return false; } else if (net_version < proto_dup_node_id_goaway || msg.network_version < proto_dup_node_id_goaway) { - if (p2p_address < msg.p2p_address) { - fc_dlog( logger, "p2p_address '${lhs}' < 
msg.p2p_address '${rhs}'", - ("lhs", p2p_address)( "rhs", msg.p2p_address ) ); + if (listen_address < msg.p2p_address) { + fc_dlog( logger, "listen_address '${lhs}' < msg.p2p_address '${rhs}'", + ("lhs", listen_address)( "rhs", msg.p2p_address ) ); // only the connection from lower p2p_address to higher p2p_address will be considered as a duplicate, // so there is no chance for both connections to be closed return false; @@ -3849,8 +3849,7 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr // If we couldn't sign, don't send a token. if(hello.sig == chain::signature_type()) hello.token = sha256(); - peer_dlog( this, "populated handshake with address ${addr}", ("addr", p2p_address)); - hello.p2p_address = p2p_address; + hello.p2p_address = listen_address; if( is_transactions_only_connection() ) hello.p2p_address += ":trx"; // if we are not accepting transactions tell peer we are blocks only if( is_blocks_only_connection() || !my_impl->p2p_accept_transactions ) hello.p2p_address += ":blk"; @@ -3964,15 +3963,16 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr std::chrono::seconds( options.at("connection-cleanup-period").as() ), options.at("max-clients").as() ); - if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty() && options.at("p2p-listen-endpoint").as>()[0].length()) { + if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty() && !options.at("p2p-listen-endpoint").as>()[0].empty()) { p2p_addresses = options.at( "p2p-listen-endpoint" ).as>(); auto addr_count = p2p_addresses.size(); std::sort(p2p_addresses.begin(), p2p_addresses.end()); - std::unique(p2p_addresses.begin(), p2p_addresses.end()); + auto last = std::unique(p2p_addresses.begin(), p2p_addresses.end()); + p2p_addresses.erase(last, p2p_addresses.end()); if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { - fc_ilog( logger, "Removed 
${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); + fc_wlog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); } - for(auto& addr : p2p_addresses) { + for( const auto& addr : p2p_addresses ) { EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, "p2p-listen-endpoint ${a} too long, must be less than ${m}", ("a", addr)("m", max_p2p_address_length) ); @@ -3982,7 +3982,7 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr p2p_server_addresses = options.at( "p2p-server-address" ).as>(); EOS_ASSERT( p2p_server_addresses.size() <= p2p_addresses.size(), chain::plugin_config_exception, "p2p-server-address may not be specified more times than p2p-listen-endpoint" ); - for( auto& addr: p2p_server_addresses ) { + for( const auto& addr: p2p_server_addresses ) { EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, "p2p-server-address ${a} too long, must be less than ${m}", ("a", addr)("m", max_p2p_address_length) ); @@ -4091,6 +4091,7 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr std::vector listen_addresses = p2p_addresses; + EOS_ASSERT( p2p_addresses.size() == p2p_server_addresses.size(), chain::plugin_config_exception, "" ); std::transform(p2p_addresses.begin(), p2p_addresses.end(), p2p_server_addresses.begin(), p2p_addresses.begin(), [](const string& p2p_address, const string& p2p_server_address) { auto [host, port] = fc::split_host_port(p2p_address); @@ -4109,7 +4110,7 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr return hostname + ":" + port; } return p2p_address; - }); + }); { chain::controller& cc = chain_plug->chain(); From 0910fdc1147e9af473d8b9ab9a66ee2622a1e5ef Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 15:17:56 -0400 Subject: [PATCH 129/180] Fix names in `.in` files. 
--- CMakeModules/EosioTester.cmake.in | 2 +- CMakeModules/EosioTesterBuild.cmake.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index a708e27164..fe531a72be 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -39,7 +39,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - add_subdirectory( ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) + add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) endif() find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 599ca2d58d..649fd4e9e4 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -36,7 +36,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - add_subdirectory( ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) + add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) endif() find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) From 0a177a99359a1d27506542528a2128d7910a6235 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 15:44:45 -0400 Subject: [PATCH 130/180] Add missing packages in debian install. 
--- CMakeLists.txt | 18 ------------------ package.cmake | 2 +- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0b44f388b3..32c223a6d4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -272,26 +272,8 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) -#add_custom_target(boost_install ALL) -#add_custom_command(TARGET boost_install -# COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist -# VERBATIM) - install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_date_time COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_filesystem COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_system COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_chrono COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_multi_index COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_multiprecision COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_interprocess COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_asio COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_signals2 COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_iostreams COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_unit_test_framework COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_headers COMPONENT dev EXCLUDE_FROM_ALL) - add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_BINARY_DIR}" diff --git a/package.cmake 
b/package.cmake index c782938e54..930acf4456 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-numpy") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-all, python3-numpy, ubuntu-dev-tools, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 877799a998bc450a7e1d7f98ac712cdf73267f60 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 17:24:15 -0400 Subject: [PATCH 131/180] Cleanup changes and remove `.git` from boost install. 
--- CMakeLists.txt | 6 +- CMakeModules/EosioTester.cmake.in | 1 + CMakeModules/EosioTesterBuild.cmake.in | 1 + scripts/MakeBoostDistro.py | 212 ------------------------- 4 files changed, 7 insertions(+), 213 deletions(-) delete mode 100755 scripts/MakeBoostDistro.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 32c223a6d4..b0b2366ab0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -272,7 +272,11 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) -install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL) +# Add the boost submodule we used to build to our install package, so headers can be found for libtester +install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" + DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost + PATTERN ".git" EXCLUDE + COMPONENT dev EXCLUDE_FROM_ALL) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index fe531a72be..0fa2ef5469 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -35,6 +35,7 @@ else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 
649fd4e9e4..9a7774c97e 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -32,6 +32,7 @@ else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) diff --git a/scripts/MakeBoostDistro.py b/scripts/MakeBoostDistro.py deleted file mode 100755 index e7e8f02fd9..0000000000 --- a/scripts/MakeBoostDistro.py +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/python3 -# - -# Prepare a boost checkout for release -# 1) Copy all the files at the root level to the dest folder ($DEST) -# 2) Copy all the "special" folders to the dest folder ($DEST) -# 3) copy all the files from $SOURCE/libs to $DEST/libs -# 4a) For each subproject, copy everything except "include" into $DEST/libs -# 4b) For each subproject, copy the contents of the "includes" folder into $DEST/boost -# -# Usage: %0 source dest - -from __future__ import print_function - -import os, sys -import shutil -import stat -import six -import datetime - -IgnoreFiles = shutil.ignore_patterns( - '[.]*', - '[.]gitattributes', - '[.]gitignore', - '[.]gitmodules', - '[.]travis[.]yml', - 'appveyor[.]yml', - 'circle[.]yml') - -def IgnoreFile(src, name): - return len(IgnoreFiles(src, [name])) > 0 - -## from -def MergeTree(src, dst, symlinks = False): - if not os.path.exists(dst): - os.makedirs(dst) - shutil.copystat(src, dst) - lst = os.listdir(src) - excl = IgnoreFiles(src, lst) - lst = [x for x in lst if x not in excl] - for item in lst: - s = os.path.join(src, item) - d = os.path.join(dst, item) - if symlinks and os.path.islink(s): - if os.path.lexists(d): - os.remove(d) - os.symlink(os.readlink(s), d) - try: - st = os.lstat(s) - mode = stat.S_IMODE(st.st_mode) - os.lchmod(d, mode) - except: - pass # 
lchmod not available - elif os.path.isdir(s): - MergeTree(s, d, symlinks) - else: - if os.path.exists(d): - print("## Overwriting file %s with %s" % (d, s)) - shutil.copy2(s, d) - - -def CopyFile (s, d, f): - if os.path.isfile(os.path.join(s,f)) and not IgnoreFile(s, f): - shutil.copy2(os.path.join(s,f), os.path.join(d,f)) - -def CopyDir (s, d, dd): - if os.path.isdir(os.path.join(s,dd)) and not IgnoreFile(s, dd): - shutil.copytree(os.path.join(s,dd), os.path.join(d,dd), symlinks=False, ignore=IgnoreFiles) - -def MergeIf(s, d, dd): -# if dd == 'detail': -# print "MergeIf %s -> %s" % (os.path.join(s, dd), os.path.join(d, dd)) - if os.path.exists(os.path.join(s, dd)): - MergeTree(os.path.join(s, dd), os.path.join(d, dd), symlinks=False) - -def CopyInclude(src, dst): - for item in os.listdir(src): - if IgnoreFile(src, item): - continue - if item == 'pending': - continue - if item == 'detail': - continue - s = os.path.join(src, item) - d = os.path.join(dst, item) - if os.path.isdir(s): - MergeTree(s, d, symlinks=False) - else: - if os.path.exists(d): - print("## Overwriting file %s with %s" % (d, s)) - CopyFile(src, dst, item) - - -def CopySubProject(src, dst, headers, p): - # First, everything except the "include" directory - Source = os.path.join(src,p) - Dest = os.path.join(dst,p) - # print "CopySubProject %p" % p - os.makedirs(Dest) - for item in os.listdir(Source): - if os.path.isfile(os.path.join(Source, item)): - CopyFile(Source, Dest, item) - elif item != "include": - CopyDir(Source, Dest, item) - - #shutil.copytree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', "include")) - - # Now the includes - Source = os.path.join(src, "%s/include/boost" % p) - if os.path.exists(Source): - CopyInclude(Source, headers) -# MergeTree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', 'detail', 'pending')) - MergeIf(Source, headers, 'detail') - MergeIf(Source, headers, 'pending') - - -def CopyNestedProject(src, dst, headers, p): - # First, 
everything except the "include" directory - Source = os.path.join(src,p[1]) - Dest = os.path.join(dst,p[1]) - os.makedirs(Dest) - for item in os.listdir(Source): - if os.path.isfile(os.path.join(Source, item)): - CopyFile(Source, Dest, item) - elif item != "include": - CopyDir(Source, Dest, item) - # shutil.copytree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', "include")) - - Source = os.path.join(src, "%s/include/boost" % (p[1])) - # Dest = os.path.join(headers, p) - # print "Installing headers from %s to %s" % (Source, headers) - CopyInclude(Source, headers) - # # MergeTree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', 'detail', 'pending')) - # MergeIf(Source, headers, 'detail') - # MergeIf(Source, headers, 'pending') - -BoostHeaders = "boost" -BoostLibs = "libs" - -BoostSpecialFolders = [ "doc", "more", "status", "tools" ] - -SourceRoot = sys.argv[1] -DestRoot = sys.argv[2] - -print("Source = %s" % SourceRoot) -print("Dest = %s" % DestRoot) - -if not os.path.exists(SourceRoot): - print("## Error: %s does not exist" % SourceRoot) - exit(1) - -if os.path.exists(DestRoot): - print("The destination directory already exists. 
All good.\n") - exit(0) - #timestamp1 = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") - #os.rename(DestRoot,DestRoot + "_bck_" + timestamp1) - -if not os.path.exists(DestRoot): - print("Creating destination directory %s" % DestRoot) - os.makedirs(DestRoot) - -DestHeaders = os.path.join(DestRoot, BoostHeaders) -DestLibs = os.path.join(DestRoot, BoostLibs) -os.makedirs(DestHeaders) -os.makedirs(DestLibs) - -## Step 1 -for f in os.listdir(SourceRoot): - if f != 'CMakeLists.txt': - CopyFile(SourceRoot, DestRoot, f) - -## Step 2 -for d in BoostSpecialFolders: - CopyDir(SourceRoot, DestRoot, d) - -## Step 3 -SourceLibs = os.path.join(SourceRoot, BoostLibs) -for f in os.listdir(SourceLibs): - CopyFile(SourceLibs, DestLibs, f) - -## Step 4 -BoostSubProjects = set() -for f in os.listdir(SourceLibs): - if os.path.isdir(os.path.join(SourceLibs,f)): - if os.path.isfile(os.path.join(SourceLibs,f,"meta","libraries.json")): - BoostSubProjects.add(f) - elif os.path.isdir(os.path.join(SourceLibs,f,"include")): - BoostSubProjects.add(f) - elif f == 'headers': - BoostSubProjects.add(f) - elif os.path.isfile(os.path.join(SourceLibs,f,"sublibs")): - for s in os.listdir(os.path.join(SourceLibs,f)): - if os.path.isdir(os.path.join(SourceLibs,f,s)): - if os.path.isfile(os.path.join(SourceLibs,f,s,"meta","libraries.json")): - BoostSubProjects.add((f,s)) - elif os.path.isdir(os.path.join(SourceLibs,f,s,"include")): - BoostSubProjects.add((f,s)) - -for p in BoostSubProjects: - if isinstance(p, six.string_types): - CopySubProject(SourceLibs, DestLibs, DestHeaders, p) - else: - NestedSource = os.path.join(SourceRoot,"libs",p[0]) - NestedDest = os.path.join(DestRoot,"libs",p[0]) - NestedHeaders = os.path.join(DestRoot,"boost") - if not os.path.exists(NestedDest): - os.makedirs(NestedDest) - if not os.path.exists(NestedHeaders): - os.makedirs(NestedHeaders) - for f in os.listdir(NestedSource): - CopyFile(NestedSource, NestedDest, f) - CopyNestedProject(NestedSource, NestedDest, 
NestedHeaders, p) From 4483aedd305e62999c807b79f882fb737e32b649 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 19:28:00 -0400 Subject: [PATCH 132/180] Move `COMPONENT` before `PATTERN` in `install` command. --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b0b2366ab0..02fe5ce94c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -275,8 +275,8 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM # Add the boost submodule we used to build to our install package, so headers can be found for libtester install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost - PATTERN ".git" EXCLUDE - COMPONENT dev EXCLUDE_FROM_ALL) + COMPONENT dev EXCLUDE_FROM_ALL + PATTERN ".git" EXCLUDE) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From 3c10138c25d6797337e3039d1ad3d695c600c5f6 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Wed, 19 Jul 2023 19:50:56 -0500 Subject: [PATCH 133/180] Add p2p multiple listen port test and new fields to connections API --- .../include/eosio/net_plugin/net_plugin.hpp | 13 +++- plugins/net_plugin/net_plugin.cpp | 3 + tests/CMakeLists.txt | 4 +- tests/p2p_multiple_listen_test.py | 75 +++++++++++++++++++ 4 files changed, 90 insertions(+), 5 deletions(-) create mode 100755 tests/p2p_multiple_listen_test.py diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index 8eafaba2e5..5d5d12ef40 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -9,9 +9,14 @@ namespace eosio { struct connection_status { string peer; - bool connecting = false; - bool syncing = false; - bool is_bp_peer = false; + string remote_ip; + string remote_port; + bool connecting = false; + bool syncing = 
false; + bool is_bp_peer = false; + bool is_socket_open = false; + bool is_blocks_only = false; + bool is_transactions_only = false; handshake_message last_handshake; }; @@ -49,4 +54,4 @@ namespace eosio { } -FC_REFLECT( eosio::connection_status, (peer)(connecting)(syncing)(is_bp_peer)(last_handshake) ) +FC_REFLECT( eosio::connection_status, (peer)(remote_ip)(remote_port)(connecting)(syncing)(is_bp_peer)(is_socket_open)(is_blocks_only)(is_transactions_only)(last_handshake) ) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4995feeb96..936fa409c1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1234,9 +1234,12 @@ namespace eosio { connection_status connection::get_status()const { connection_status stat; stat.peer = peer_addr; + stat.remote_ip = log_remote_endpoint_ip; + stat.remote_port = log_remote_endpoint_port; stat.connecting = state() == connection_state::connecting; stat.syncing = peer_syncing_from_us; stat.is_bp_peer = is_bp_connection; + stat.is_socket_open = socket_is_open(); fc::lock_guard g( conn_mtx ); stat.last_handshake = last_handshake_recv; return stat; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index be1df6a7dd..d3ecc359f4 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -49,6 +49,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ship_streamer_test.py ${CMAKE_CURRENT configure_file(${CMAKE_CURRENT_SOURCE_DIR}/large-lib-test.py ${CMAKE_CURRENT_BINARY_DIR}/large-lib-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/http_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/http_plugin_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_multiple_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_multiple_listen_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compute_transaction_test.py 
${CMAKE_CURRENT_BINARY_DIR}/compute_transaction_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/subjective_billing_test.py ${CMAKE_CURRENT_BINARY_DIR}/subjective_billing_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/get_account_test.py ${CMAKE_CURRENT_BINARY_DIR}/get_account_test.py COPYONLY) @@ -183,7 +184,8 @@ set_property(TEST nested_container_multi_index_test PROPERTY LABELS nonparalleli add_test(NAME nodeos_run_check_test COMMAND tests/nodeos_run_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_test PROPERTY LABELS nonparallelizable_tests) - +add_test(NAME p2p_multiple_listen_test COMMAND tests/p2p_multiple_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_multiple_listen_test PROPERTY LABELS nonparallelizable_tests) # needs iproute-tc or iproute2 depending on platform #add_test(NAME p2p_high_latency_test COMMAND tests/p2p_high_latency_test.py -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/p2p_multiple_listen_test.py b/tests/p2p_multiple_listen_test.py new file mode 100755 index 0000000000..f21d07d46a --- /dev/null +++ b/tests/p2p_multiple_listen_test.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 + +import signal + +from TestHarness import Cluster, TestHelper, Utils, WalletMgr + +############################################################### +# p2p_multiple_listen_test +# +# Test nodeos ability to listen on multiple ports for p2p +# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +args=TestHelper.parse_args({"-p","-n","-d","--keep-logs" + ,"--dump-error-details","-v" + ,"--leave-running","--unshared"}) +pnodes=args.p +delay=args.d +debug=args.v +total_nodes=4 +dumpErrorDetails=args.dump_error_details + +Utils.Debug=debug +testSuccessful=False + +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) +walletMgr=WalletMgr(True) 
+ +try: + TestHelper.printSystemInfo("BEGIN") + + cluster.setWalletMgr(walletMgr) + + Print(f'producing nodes: {pnodes}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}') + + Print("Stand up cluster") + specificArgs = { + '0': '--agent-name node-00 --p2p-listen-endpoint 0.0.0.0:9779 --p2p-server-address localhost:9779 --plugin eosio::net_api_plugin', + '2': '--agent-name node-02 --p2p-peer-address localhost:9779 --plugin eosio::net_api_plugin', + } + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo='ring', delay=delay, + specificExtraNodeosArgs=specificArgs) is False: + errorExit("Failed to stand up eos cluster.") + + cluster.waitOnClusterSync(blockAdvancing=5) + cluster.biosNode.kill(signal.SIGTERM) + cluster.getNode(1).kill(signal.SIGTERM) + cluster.getNode(3).kill(signal.SIGTERM) + cluster.waitOnClusterSync(blockAdvancing=5) + connections = cluster.nodes[0].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-02', f'Connected node identifed as "{conn["last_handshake"]["agent"]}" instead of node-02' + assert conn['last_handshake']['p2p_address'][:14] == 'localhost:9878', 'Connected node is not listening on port 9878' + assert open_socket_count == 1, 'Node 0 is expected to have only one open socket' + connections = cluster.nodes[2].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-00', f'Connected node identifed as "{conn["last_handshake"]["agent"]}" instead of node-00' + assert conn['last_handshake']['p2p_address'][:14] == 'localhost:9779', 'Connected node is not listening on port 9779' + assert open_socket_count == 1, 'Node 2 is expected to have only one open socket' + + testSuccessful=True +finally: + 
TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) From ae0d542b5f7eab7fb08642b98b9b103336a0a816 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 16:45:38 -0400 Subject: [PATCH 134/180] Use `python3-distutils` instead of `python3-all` --- .cicd/platforms/ubuntu20.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index 3924f7ffe5..6d27a13fae 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,7 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-all \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From 82ba8aa055c01e0446b089b8e224d1b22b25e726 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:30:45 -0400 Subject: [PATCH 135/180] Install `python3-distutils` instead of `python3-all` in our containers. 
--- .cicd/platforms/ubuntu22.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 0ef6f4c86d..1d86365382 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,7 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-all \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From a3ce6d565941cf4ab7f823fb0c20e0fbd8ef055f Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:32:26 -0400 Subject: [PATCH 136/180] Update readme --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9d0dfa8530..e979c6d509 100644 --- a/README.md +++ b/README.md @@ -136,7 +136,9 @@ sudo apt-get install -y \ libgmp-dev \ libssl-dev \ llvm-11-dev \ - python3-numpy + python3-numpy \ + ubuntu-dev-tools \ + zlib1g-dev ``` To build, make sure you are in the root of the `leap` repo, then run the following command: ```bash From 4b7abd0b284b717a9b838ed223102026f09fd612 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:38:44 -0400 Subject: [PATCH 137/180] Remove unneeded `find_package` in `libfc` --- libraries/libfc/CMakeLists.txt | 4 ---- 1 file changed, 4 deletions(-) diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index 3b00430669..742501ca9f 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -78,10 +78,6 @@ if(APPLE) add_library(zstd INTERFACE) endif() -if(NOT boost_headers_SOURCE_DIR) - find_package(Boost REQUIRED COMPONENTS date_time chrono unit_test_framework iostreams) -endif() - find_path(GMP_INCLUDE_DIR NAMES gmp.h) find_library(GMP_LIBRARY gmp) if(NOT GMP_LIBRARY MATCHES ${CMAKE_SHARED_LIBRARY_SUFFIX}) From 344c4ff328d22fae16d6d95431e3d3b033fc0536 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:40:18 -0400 Subject: 
[PATCH 138/180] use `python3-distutils` instead of `python3-all` in `package.cmake` --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index 930acf4456..b2000e1ed5 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-all, python3-numpy, ubuntu-dev-tools, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, ubuntu-dev-tools, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 07c9402cc7f51961dd421749c0dafc14b2476caf Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:41:54 -0400 Subject: [PATCH 139/180] Remove unused `BOOST_VER` variable from `scripts/pinned_build.sh` --- scripts/pinned_build.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/pinned_build.sh b/scripts/pinned_build.sh index f29f1a410e..ebf37d29d7 100755 --- a/scripts/pinned_build.sh +++ b/scripts/pinned_build.sh @@ -30,7 +30,6 @@ DEP_DIR="$(realpath "$1")" LEAP_DIR="$2" JOBS="$3" CLANG_VER=11.0.1 -BOOST_VER=1.82.0 LLVM_VER=11.0.1 SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]:-$0}"; )" &> /dev/null && pwd 2> /dev/null; )"; START_DIR="$(pwd)" From 27a8f53fc8b75da9fe030ef9e2a7c35c1f0f96ae Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 18:37:23 -0400 Subject: [PATCH 140/180] Don't need 
the `IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost)` since the two `EosioTester` files are for install or build tree. --- CMakeModules/EosioTester.cmake.in | 7 ++----- CMakeModules/EosioTesterBuild.cmake.in | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 0fa2ef5469..8b1135bd40 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -37,11 +37,8 @@ endif ( APPLE ) set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) - add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) -else() - add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) -endif() + +add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 9a7774c97e..6beb37467b 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -34,11 +34,8 @@ endif ( APPLE ) set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) - add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) -else() - add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) -endif() + +add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing 
NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) From b2b0af01ad07eb5b5c9513f09c25825c2b47a640 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 18:41:12 -0400 Subject: [PATCH 141/180] Removed `python3-distutils` from docker images. --- .cicd/platforms/ubuntu20.Dockerfile | 1 - .cicd/platforms/ubuntu22.Dockerfile | 1 - 2 files changed, 2 deletions(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index 6d27a13fae..c60c53f5bb 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 1d86365382..fd943f7043 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From a4f939990ee55bd7f190f5b4c654f0fb23f0aa2f Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Thu, 20 Jul 2023 18:12:15 -0500 Subject: [PATCH 142/180] Expand description of p2p-server-address option and normalize language. 
--- plugins/net_plugin/net_plugin.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 936fa409c1..9504c555b2 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -765,11 +765,11 @@ namespace eosio { public: enum class connection_state { connecting, connected, closing, closed }; - explicit connection( const string& endpoint, const string& address ); + explicit connection( const string& endpoint, const string& listen_address ); /// @brief ctor /// @param socket created by boost::asio in fc::listener /// @param address identifier of listen socket which accepted this new connection - explicit connection( tcp::socket&& socket, const string& address ); + explicit connection( tcp::socket&& socket, const string& listen_address ); ~connection() = default; connection( const connection& ) = delete; @@ -1146,8 +1146,8 @@ namespace eosio { //--------------------------------------------------------------------------- - connection::connection( const string& endpoint, const string& address ) - : listen_address( address ), + connection::connection( const string& endpoint, const string& listen_address ) + : listen_address( listen_address ), peer_addr( endpoint ), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ), @@ -1161,8 +1161,8 @@ namespace eosio { fc_ilog( logger, "created connection ${c} to ${n}", ("c", connection_id)("n", endpoint) ); } - connection::connection(tcp::socket&& s, const string& address) - : listen_address( address), + connection::connection(tcp::socket&& s, const string& listen_address) + : listen_address( listen_address ), peer_addr(), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( std::move(s) ) ), @@ -3884,8 +3884,8 @@ namespace eosio { void net_plugin::set_program_options( options_description& /*cli*/, options_description& cfg 
) { cfg.add_options() - ( "p2p-listen-endpoint", bpo::value< vector >()->default_value( vector(1, string("0.0.0.0:9876")) ), "The actual host:port used to listen for incoming p2p connections. May be specified multiple times.") - ( "p2p-server-address", bpo::value< vector >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be specified as many times as p2p-listen-endpoint") + ( "p2p-listen-endpoint", bpo::value< vector >()->default_value( vector(1, string("0.0.0.0:9876")) ), "The actual host:port used to listen for incoming p2p connections. May be used multiple times.") + ( "p2p-server-address", bpo::value< vector >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be used as many times as p2p-listen-endpoint. If provided, the first address will be used in handshakes with other nodes. Otherwise the default is used.") ( "p2p-peer-address", bpo::value< vector >()->composing(), "The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network.\n" " Syntax: host:port[:|]\n" From 0de5ba1e705362ae0ac6ae01200548614530c657 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 20:16:40 -0400 Subject: [PATCH 143/180] Revert last commit. 
--- .cicd/platforms/ubuntu20.Dockerfile | 1 + .cicd/platforms/ubuntu22.Dockerfile | 1 + 2 files changed, 2 insertions(+) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index c60c53f5bb..6d27a13fae 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,6 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index fd943f7043..1d86365382 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,6 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From ffc80c53bc83bc1a165e46664f688792eff47c1d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 07:46:32 -0500 Subject: [PATCH 144/180] GH-1416 Change default max-transaction-cpu-usage for integration/performance tests to 475ms --- tests/TestHarness/launcher.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index 665ed4fc13..bc662a4612 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -200,8 +200,8 @@ def comma_separated(string): cfg.add_argument('--enable-gelf-logging', action='store_true', help='enable gelf logging appender in logging configuration file', default=False) cfg.add_argument('--gelf-endpoint', help='hostname:port or ip:port of GELF endpoint', default='128.0.0.1:12201') cfg.add_argument('--template', help='the startup script template', default='testnet.template') - cfg.add_argument('--max-block-cpu-usage', type=int, help='the "max-block-cpu-usage" value to use in the genesis.json file', default=200000) - 
cfg.add_argument('--max-transaction-cpu-usage', type=int, help='the "max-transaction-cpu-usage" value to use in the genesis.json file', default=150000) + cfg.add_argument('--max-block-cpu-usage', type=int, help='the "max-block-cpu-usage" value to use in the genesis.json file', default=None) + cfg.add_argument('--max-transaction-cpu-usage', type=int, help='the "max-transaction-cpu-usage" value to use in the genesis.json file', default=None) cfg.add_argument('--logging-level', type=fc_log_level, help='Provide the "level" value to use in the logging.json file') cfg.add_argument('--logging-level-map', type=json.loads, help='JSON string of a logging level dictionary to use in the logging.json file for specific nodes, matching based on node number. Ex: {"bios":"off","00":"info"}') cfg.add_argument('--is-nodeos-v2', action='store_true', help='Toggles old nodeos compatibility', default=False) @@ -359,9 +359,9 @@ def init_genesis(self): 'net_usage_leeway': 500, 'context_free_discount_net_usage_num': 20, 'context_free_discount_net_usage_den': 100, - 'max_block_cpu_usage': self.args.max_block_cpu_usage, + 'max_block_cpu_usage': 200000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage, 'target_block_cpu_usage_pct': 1000, - 'max_transaction_cpu_usage': self.args.max_transaction_cpu_usage, + 'max_transaction_cpu_usage': 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage, 'min_transaction_cpu_usage': 100, 'max_transaction_lifetime': 3600, 'deferred_trx_expiration_window': 600, @@ -375,8 +375,8 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - genesis['initial_configuration']['max_block_cpu_usage'] = self.args.max_block_cpu_usage - genesis['initial_configuration']['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage + genesis['initial_configuration']['max_block_cpu_usage'] = 200000 if 
self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage + genesis['initial_configuration']['max_transaction_cpu_usage'] = 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage return genesis def write_genesis_file(self, node, genesis): From feb9b29fe72ed001239bece41c66d1ad4d413704 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 09:09:16 -0400 Subject: [PATCH 145/180] Switch to using boost from `boostorg` instead of `AntelopeIO` --- .gitmodules | 2 +- libraries/boost | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index d646d0340c..022c13dfb4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -33,4 +33,4 @@ url = https://github.com/AntelopeIO/CLI11.git [submodule "libraries/boost"] path = libraries/boost - url = https://github.com/AntelopeIO/boost + url = https://github.com/boostorg/boost.git diff --git a/libraries/boost b/libraries/boost index 41141acf3a..c8b2c632fd 160000 --- a/libraries/boost +++ b/libraries/boost @@ -1 +1 @@ -Subproject commit 41141acf3a937c357bf50cacd03269833b35049e +Subproject commit c8b2c632fdea9560de4fecbbff202dcd05910c6c From 89ff1f49e174b35dd4698a18827eb7c9973ff336 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 09:15:17 -0400 Subject: [PATCH 146/180] Move boost to `boost-1.82.0` tag --- libraries/boost | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/boost b/libraries/boost index c8b2c632fd..b6928ae5c9 160000 --- a/libraries/boost +++ b/libraries/boost @@ -1 +1 @@ -Subproject commit c8b2c632fdea9560de4fecbbff202dcd05910c6c +Subproject commit b6928ae5c92e21a04bbe17a558e6e066dbe632f6 From 17f2b2ae923bac6778a48ced7423f74964acd6ad Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 08:25:58 -0500 Subject: [PATCH 147/180] GH-1416 Change default max-block-cpu-usage for integration/performance tests to 500ms --- tests/TestHarness/launcher.py | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index bc662a4612..1f420d5fa1 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -359,7 +359,7 @@ def init_genesis(self): 'net_usage_leeway': 500, 'context_free_discount_net_usage_num': 20, 'context_free_discount_net_usage_den': 100, - 'max_block_cpu_usage': 200000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage, + 'max_block_cpu_usage': 500000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage, 'target_block_cpu_usage_pct': 1000, 'max_transaction_cpu_usage': 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage, 'min_transaction_cpu_usage': 100, @@ -375,7 +375,7 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - genesis['initial_configuration']['max_block_cpu_usage'] = 200000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage + genesis['initial_configuration']['max_block_cpu_usage'] = 500000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage genesis['initial_configuration']['max_transaction_cpu_usage'] = 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage return genesis From 9ff0583b33259b965bd11f3bc228cd2a30e7c814 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 21 Jul 2023 09:51:56 -0500 Subject: [PATCH 148/180] Ubuntu documents apt-get install or dpkg -i as acceptable paths for .deb installs. Use apt-get instead of apt. 
--- .github/workflows/build.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 286d3de481..037bff97c4 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -91,8 +91,8 @@ jobs: cpack - name: Install dev package run: | - apt update && apt upgrade -y - apt install -y ./build/leap_*.deb ./build/leap-dev*.deb + apt-get update + apt-get install -y ./build/leap_*.deb ./build/leap-dev*.deb - name: Test using TestHarness run: | python3 -c "from TestHarness import Cluster" @@ -255,7 +255,8 @@ jobs: token: ${{github.token}} - name: Install cdt Packages run: | - apt install -y ./*.deb + apt-get update + apt-get install -y ./*.deb rm ./*.deb # Reference Contracts From 4c7fb728cf644eb4a84cfe1221d9cd6f69c60754 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 11:16:58 -0400 Subject: [PATCH 149/180] Change checkout action to `submodules: recursive` --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 286d3de481..be76e51c35 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -79,7 +79,7 @@ jobs: steps: - uses: actions/checkout@v3 with: - submodules: true + submodules: recursive - name: Download builddir uses: actions/download-artifact@v3 with: From 3b2227ec9dbea7209bf63d7ff2f0a15b7422c1d8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 10:39:47 -0500 Subject: [PATCH 150/180] GH-1435 Make sure app_thread of test is always joined to avoid terminate --- tests/test_read_only_trx.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp index db36eea1e9..e8f2574fd9 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -111,6 +111,10 @@ void test_trxs_common(std::vector& specific_args, bool test_disable 
plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); app->exec(); } ); + fc::scoped_exit> on_except = [&](){ + if (app_thread.joinable()) + app_thread.join(); + }; auto[prod_plug, chain_plug] = plugin_fut.get(); From 0e364bee8bae2c61f79a19b086f568bf2ae5b394 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 12:23:40 -0400 Subject: [PATCH 151/180] Exclude `testwave` (and other unnecessary files) from boost install --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 02fe5ce94c..e1dc1367fb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,7 +276,7 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN ".git" EXCLUDE) + PATTERN ".git|example|bench|testwave" EXCLUDE) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From be358ce3d5b0f1f222e3063e32e27c7dcd24e8b8 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 21 Jul 2023 11:32:24 -0500 Subject: [PATCH 152/180] Add upgrade step back in. Factor out update and upgrade into own step in libtester-tests job to simplify. 
--- .github/workflows/build.yaml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 037bff97c4..8275dea347 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -91,7 +91,7 @@ jobs: cpack - name: Install dev package run: | - apt-get update + apt-get update && apt-get upgrade -y apt-get install -y ./build/leap_*.deb ./build/leap-dev*.deb - name: Test using TestHarness run: | @@ -200,6 +200,11 @@ jobs: runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{ matrix.test != 'deb-install' && fromJSON(needs.build-base.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: + - name: Update Package Index & Upgrade Packages + run: | + apt-get update + apt-get upgrade -y + # LEAP - if: ${{ matrix.test != 'deb-install' }} name: Clone leap @@ -236,7 +241,6 @@ jobs: - if: ${{ matrix.test == 'deb-install' }} name: Install leap-dev Package run: | - apt-get update export DEBIAN_FRONTEND='noninteractive' export TZ='Etc/UTC' apt-get install -y ./*.deb @@ -255,7 +259,6 @@ jobs: token: ${{github.token}} - name: Install cdt Packages run: | - apt-get update apt-get install -y ./*.deb rm ./*.deb From 60ef3a9153f0f4f6a160c7d319cb88c7c99d43f6 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 13:03:56 -0400 Subject: [PATCH 153/180] try again to not add `testwave` to the deb install. 
--- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e1dc1367fb..78ae406d1b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,7 +276,7 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN ".git|example|bench|testwave" EXCLUDE) + PATTERN "(\\.git|example|bench|testwave)" EXCLUDE) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From 0323a206c04f620d9322501c48cded571c988a2b Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 13:28:35 -0400 Subject: [PATCH 154/180] Again remove `python3-distutils` from docker files --- .cicd/platforms/ubuntu20.Dockerfile | 1 - .cicd/platforms/ubuntu22.Dockerfile | 1 - 2 files changed, 2 deletions(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index 6d27a13fae..c60c53f5bb 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 1d86365382..fd943f7043 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From 6eb19aadb93371791b6c6adb971c03ff85b3cac8 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 14:19:56 -0400 Subject: [PATCH 155/180] Another attempt at filtering testwave --- CMakeLists.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/CMakeLists.txt b/CMakeLists.txt index 78ae406d1b..f4d30e3223 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,7 +276,11 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN "(\\.git|example|bench|testwave)" EXCLUDE) + PATTERN "\\.git" EXCLUDE + PATTERN "/example" EXCLUDE + PATTERN "/bench" EXCLUDE + PATTERN "/testvawe" EXCLUDE + ) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From 6c2040a289fe1b985bc5c3bfbbae56c0318abd5b Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 14:24:19 -0400 Subject: [PATCH 156/180] Some more exclude patterns to make the dev install smaller --- CMakeLists.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index f4d30e3223..b6bc2e1069 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -279,6 +279,13 @@ install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" PATTERN "\\.git" EXCLUDE PATTERN "/example" EXCLUDE PATTERN "/bench" EXCLUDE + PATTERN "math/test" EXCLUDE + PATTERN "json/test" EXCLUDE + PATTERN "graph/test" EXCLUDE + PATTERN "gil/test" EXCLUDE + PATTERN "geometry/test" EXCLUDE + PATTERN "beast/test" EXCLUDE + PATTERN "/doc" EXCLUDE PATTERN "/testvawe" EXCLUDE ) From 5780dfef1ad155704f120636acdeea5bf68c6a7d Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 14:44:03 -0400 Subject: [PATCH 157/180] Cleanup exclude patterns in CMakeLists.txt. 
--- CMakeLists.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b6bc2e1069..01d5c15013 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,17 +276,17 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN "\\.git" EXCLUDE - PATTERN "/example" EXCLUDE - PATTERN "/bench" EXCLUDE + PATTERN ".git" EXCLUDE + PATTERN "example" EXCLUDE + PATTERN "bench" EXCLUDE + PATTERN "doc" EXCLUDE + PATTERN "testvawe" EXCLUDE PATTERN "math/test" EXCLUDE PATTERN "json/test" EXCLUDE PATTERN "graph/test" EXCLUDE PATTERN "gil/test" EXCLUDE PATTERN "geometry/test" EXCLUDE PATTERN "beast/test" EXCLUDE - PATTERN "/doc" EXCLUDE - PATTERN "/testvawe" EXCLUDE ) add_custom_target(dev-install From bf6518b1cb6c33d7bf16a6c9e5f5fdac17d490a0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 15:05:02 -0500 Subject: [PATCH 158/180] GH-1435 Capture exception on app thread to avoid terminate call --- .../chain_plugin/test/test_trx_retry_db.cpp | 14 ++++++---- plugins/producer_plugin/test/test_options.cpp | 26 +++++++++++-------- .../producer_plugin/test/test_trx_full.cpp | 22 +++++++++------- tests/test_read_only_trx.cpp | 24 ++++++++++------- tests/test_snapshot_scheduler.cpp | 22 +++++++++------- 5 files changed, 64 insertions(+), 44 deletions(-) diff --git a/plugins/chain_plugin/test/test_trx_retry_db.cpp b/plugins/chain_plugin/test/test_trx_retry_db.cpp index 8c7a3925c9..cfad3ed512 100644 --- a/plugins/chain_plugin/test/test_trx_retry_db.cpp +++ b/plugins/chain_plugin/test/test_trx_retry_db.cpp @@ -224,11 +224,15 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { std::promise plugin_promise; std::future plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - std::vector argv = {"test"}; - app->initialize( 
argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value(app->find_plugin()); - app->exec(); + try { + std::vector argv = {"test"}; + app->initialize(argv.size(), (char**)&argv[0]); + app->startup(); + plugin_promise.set_value(app->find_plugin()); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); (void)plugin_fut.get(); // wait for app to be started diff --git a/plugins/producer_plugin/test/test_options.cpp b/plugins/producer_plugin/test/test_options.cpp index 23cdea785d..3fe429b6a9 100644 --- a/plugins/producer_plugin/test/test_options.cpp +++ b/plugins/producer_plugin/test/test_options.cpp @@ -30,17 +30,21 @@ BOOST_AUTO_TEST_CASE(state_dir) { std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", - "--data-dir", temp_dir_str.c_str(), - "--state-dir", custom_state_dir_str.c_str(), - "--config-dir", temp_dir_str.c_str(), - "-p", "eosio", "-e" }; - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", + "--data-dir", temp_dir_str.c_str(), + "--state-dir", custom_state_dir_str.c_str(), + "--config-dir", temp_dir_str.c_str(), + "-p", "eosio", "-e" }; + app->initialize( argv.size(), (char**) &argv[0] ); + app->startup(); + plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); auto[prod_plug, chain_plug] = plugin_fut.get(); diff --git a/plugins/producer_plugin/test/test_trx_full.cpp b/plugins/producer_plugin/test/test_trx_full.cpp index 34ddcc6ea9..129b135114 100644 --- 
a/plugins/producer_plugin/test/test_trx_full.cpp +++ b/plugins/producer_plugin/test/test_trx_full.cpp @@ -108,15 +108,19 @@ BOOST_AUTO_TEST_CASE(producer) { std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str(), - "-p", "eosio", "-e", "--disable-subjective-p2p-billing=true" }; - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value( - {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str(), + "-p", "eosio", "-e", "--disable-subjective-p2p-billing=true" }; + app->initialize( argv.size(), (char**) &argv[0] ); + app->startup(); + plugin_promise.set_value( + {app->find_plugin(), app->find_plugin()} ); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); auto[prod_plug, chain_plug] = plugin_fut.get(); diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp index e8f2574fd9..c55e3ae0aa 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -87,7 +87,7 @@ BOOST_AUTO_TEST_CASE(not_check_configs_if_no_read_only_threads) { test_configs_common(specific_args, app_init_status::succeeded); } -void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { +void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { try { fc::scoped_exit> on_exit = []() { chain::wasm_interface_collection::test_disable_tierup = false; }; @@ -102,14 +102,18 @@ void test_trxs_common(std::vector& specific_args, bool test_disable std::promise> plugin_promise; std::future> plugin_fut = 
plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; - argv.insert( argv.end(), specific_args.begin(), specific_args.end() ); - app->initialize( argv.size(), (char**) &argv[0] ); - app->find_plugin()->chain(); - app->startup(); - plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + argv.insert(argv.end(), specific_args.begin(), specific_args.end()); + app->initialize(argv.size(), (char**)&argv[0]); + app->find_plugin()->chain(); + app->startup(); + plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); fc::scoped_exit> on_except = [&](){ if (app_thread.joinable()) @@ -176,7 +180,7 @@ void test_trxs_common(std::vector& specific_args, bool test_disable BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); BOOST_CHECK_EQUAL( num_pushes, num_get_account_calls.load() ); BOOST_CHECK( trx_match.load() ); // trace should match the transaction -} +} FC_LOG_AND_RETHROW() } // test read-only trxs on main thread (no --read-only-threads) BOOST_AUTO_TEST_CASE(no_read_only_threads) { diff --git a/tests/test_snapshot_scheduler.cpp b/tests/test_snapshot_scheduler.cpp index bb8f6d8742..84c4410d5d 100644 --- a/tests/test_snapshot_scheduler.cpp +++ b/tests/test_snapshot_scheduler.cpp @@ -61,15 +61,19 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread([&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", "--data-dir", temp.c_str(), 
"--config-dir", temp.c_str(), - "-p", "eosio", "-e"}; - app->initialize(argv.size(), (char**) &argv[0]); - app->startup(); - plugin_promise.set_value( - {app->find_plugin(), app->find_plugin()}); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", "--data-dir", temp.c_str(), "--config-dir", temp.c_str(), + "-p", "eosio", "-e"}; + app->initialize(argv.size(), (char**) &argv[0]); + app->startup(); + plugin_promise.set_value( + {app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); }); auto [prod_plug, chain_plug] = plugin_fut.get(); From bc35fb5a1c2b68346ece0ab63d503f38574c236f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 16:28:37 -0500 Subject: [PATCH 159/180] GH-1435 Only activate protocol features and process trxs when building a block. --- unittests/test_utils.hpp | 125 +++++++++++++++++++++++++-------------- 1 file changed, 79 insertions(+), 46 deletions(-) diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp index d14815580e..e24d0759cb 100644 --- a/unittests/test_utils.hpp +++ b/unittests/test_utils.hpp @@ -4,11 +4,15 @@ #include #include #include +#include #include #include #include +#include + #include +#include #include #include #include @@ -60,7 +64,7 @@ auto make_bios_ro_trx(eosio::chain::controller& control) { // Push an input transaction to controller and return trx trace // If account is eosio then signs with the default private key -auto push_input_trx(eosio::chain::controller& control, account_name account, signed_transaction& trx) { +auto push_input_trx(appbase::scoped_app& app, eosio::chain::controller& control, account_name account, signed_transaction& trx) { trx.expiration = fc::time_point_sec{fc::time_point::now() + fc::seconds(30)}; trx.set_reference_block( control.head_block_id() ); if (account == config::system_account_name) { @@ -70,13 +74,42 @@ 
auto push_input_trx(eosio::chain::controller& control, account_name account, sig trx.sign(testing::tester::get_private_key(account, "active"), control.get_chain_id()); } auto ptrx = std::make_shared( trx, packed_transaction::compression_type::zlib ); - auto fut = transaction_metadata::start_recover_keys( ptrx, control.get_thread_pool(), control.get_chain_id(), fc::microseconds::maximum(), transaction_metadata::trx_type::input ); - auto r = control.push_transaction( fut.get(), fc::time_point::maximum(), fc::microseconds::maximum(), 0, false, 0 ); - return r; + + std::promise trx_promise; + std::future trx_future = trx_promise.get_future(); + + app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, &trx_promise]() { + app->get_method()(ptrx, + false, // api_trx + transaction_metadata::trx_type::input, // trx_type + true, // return_failure_traces + [&trx_promise](const next_function_variant& result) { + if( std::holds_alternative( result ) ) { + try { + std::get(result)->dynamic_rethrow_exception(); + } catch(...) { + trx_promise.set_exception(std::current_exception()); + } + } else if ( std::get( result )->except ) { + try { + std::get(result)->except->dynamic_rethrow_exception(); + } catch(...) 
{ + trx_promise.set_exception(std::current_exception()); + } + } else { + trx_promise.set_value(std::get(result)); + } + }); + }); + + if (trx_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) + throw std::runtime_error("failed to execute trx: " + ptrx->get_transaction().actions.at(0).name.to_string() + " to account: " + account.to_string()); + + return trx_future.get(); } // Push setcode trx to controller and return trx trace -auto set_code(eosio::chain::controller& control, account_name account, const vector& wasm) { +auto set_code(appbase::scoped_app& app, eosio::chain::controller& control, account_name account, const vector& wasm) { signed_transaction trx; trx.actions.emplace_back(std::vector{{account, config::active_name}}, chain::setcode{ @@ -85,56 +118,56 @@ auto set_code(eosio::chain::controller& control, account_name account, const vec .vmversion = 0, .code = bytes(wasm.begin(), wasm.end()) }); - return push_input_trx(control, account, trx); + return push_input_trx(app, control, account, trx); } void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chain_plugin* chain_plug) { using namespace appbase; - std::promise feature_promise; - std::future feature_future = feature_promise.get_future(); - app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &feature_promise](){ - const auto& pfm = chain_plug->chain().get_protocol_feature_manager(); - auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); - BOOST_CHECK( preactivate_feature_digest ); - chain_plug->chain().preactivate_feature( *preactivate_feature_digest, false ); - std::vector pfs{ - builtin_protocol_feature_t::only_link_to_existing_permission, - builtin_protocol_feature_t::replace_deferred, - builtin_protocol_feature_t::no_duplicate_deferred_id, - builtin_protocol_feature_t::fix_linkauth_restriction, - builtin_protocol_feature_t::disallow_empty_producer_schedule, - 
builtin_protocol_feature_t::restrict_action_to_self, - builtin_protocol_feature_t::only_bill_first_authorizer, - builtin_protocol_feature_t::forward_setcode, - builtin_protocol_feature_t::get_sender, - builtin_protocol_feature_t::ram_restrictions, - builtin_protocol_feature_t::webauthn_key, - builtin_protocol_feature_t::wtmsig_block_signatures }; - for (const auto t : pfs) { - auto feature_digest = pfm.get_builtin_digest(t); - BOOST_CHECK( feature_digest ); - chain_plug->chain().preactivate_feature( *feature_digest, false ); - } - feature_promise.set_value(); - }); + std::atomic feature_set = false; + // has to execute when pending block is not null + for (int tries = 0; tries < 100; ++tries) { + app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &feature_set](){ + try { + if (!chain_plug->chain().is_building_block() || feature_set) + return; + const auto& pfm = chain_plug->chain().get_protocol_feature_manager(); + auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); + BOOST_CHECK( preactivate_feature_digest ); + chain_plug->chain().preactivate_feature( *preactivate_feature_digest, false ); + std::vector pfs{ + builtin_protocol_feature_t::only_link_to_existing_permission, + builtin_protocol_feature_t::replace_deferred, + builtin_protocol_feature_t::no_duplicate_deferred_id, + builtin_protocol_feature_t::fix_linkauth_restriction, + builtin_protocol_feature_t::disallow_empty_producer_schedule, + builtin_protocol_feature_t::restrict_action_to_self, + builtin_protocol_feature_t::only_bill_first_authorizer, + builtin_protocol_feature_t::forward_setcode, + builtin_protocol_feature_t::get_sender, + builtin_protocol_feature_t::ram_restrictions, + builtin_protocol_feature_t::webauthn_key, + builtin_protocol_feature_t::wtmsig_block_signatures }; + for (const auto t : pfs) { + auto feature_digest = pfm.get_builtin_digest(t); + BOOST_CHECK( feature_digest ); + 
chain_plug->chain().preactivate_feature( *feature_digest, false ); + } + feature_set = true; + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"exception setting protocol features"); + }); + if (feature_set) + break; + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } // Wait for next block std::this_thread::sleep_for( std::chrono::milliseconds(config::block_interval_ms) ); - if (feature_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) - throw std::runtime_error("failed to preactivate features"); - - std::promise setcode_promise; - std::future setcode_future = setcode_promise.get_future(); - app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &setcode_promise](){ - auto r = set_code(chain_plug->chain(), config::system_account_name, testing::contracts::eosio_bios_wasm()); - BOOST_CHECK(r->receipt && r->receipt->status == transaction_receipt_header::executed); - setcode_promise.set_value(); - }); - - if (setcode_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) - throw std::runtime_error("failed to setcode"); + auto r = set_code(app, chain_plug->chain(), config::system_account_name, testing::contracts::eosio_bios_wasm()); + BOOST_CHECK(r->receipt && r->receipt->status == transaction_receipt_header::executed); } From b3c9eba384f8311dd2bf5517c673322b6a5b49f4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 16:56:02 -0500 Subject: [PATCH 160/180] GH-1416 Only update provided genesis.json if explicitly provided with values --- tests/TestHarness/launcher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index 1f420d5fa1..9fca7c85c4 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -375,8 +375,8 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - 
genesis['initial_configuration']['max_block_cpu_usage'] = 500000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage - genesis['initial_configuration']['max_transaction_cpu_usage'] = 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage + if self.args.max_block_cpu_usage is not None: genesis['initial_configuration']['max_block_cpu_usage'] = self.args.max_block_cpu_usage + if self.args.max_transaction_cpu_usage is not None: genesis['initial_configuration']['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage return genesis def write_genesis_file(self, node, genesis): From 2f55f154ca812fae5a2a2c697e014962c97ce620 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Fri, 21 Jul 2023 17:26:28 -0500 Subject: [PATCH 161/180] Revise net_plugin p2p-listen-endpoint parsing for readability. Exercise multiple listen endpoints, with overrides, in p2p test. --- plugins/net_plugin/net_plugin.cpp | 29 ++++++++++++++----------- tests/p2p_multiple_listen_test.py | 36 +++++++++++++++++++++++-------- 2 files changed, 43 insertions(+), 22 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 9504c555b2..1328e1195a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3966,19 +3966,22 @@ namespace eosio { std::chrono::seconds( options.at("connection-cleanup-period").as() ), options.at("max-clients").as() ); - if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty() && !options.at("p2p-listen-endpoint").as>()[0].empty()) { - p2p_addresses = options.at( "p2p-listen-endpoint" ).as>(); - auto addr_count = p2p_addresses.size(); - std::sort(p2p_addresses.begin(), p2p_addresses.end()); - auto last = std::unique(p2p_addresses.begin(), p2p_addresses.end()); - p2p_addresses.erase(last, p2p_addresses.end()); - if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { - fc_wlog( 
logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); - } - for( const auto& addr : p2p_addresses ) { - EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, - "p2p-listen-endpoint ${a} too long, must be less than ${m}", - ("a", addr)("m", max_p2p_address_length) ); + if( options.count( "p2p-listen-endpoint" )) { + auto p2ps = options.at("p2p-listen-endpoint").as>(); + if (!p2ps.front().empty()) { + p2p_addresses = p2ps; + auto addr_count = p2p_addresses.size(); + std::sort(p2p_addresses.begin(), p2p_addresses.end()); + auto last = std::unique(p2p_addresses.begin(), p2p_addresses.end()); + p2p_addresses.erase(last, p2p_addresses.end()); + if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { + fc_wlog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); + } + for( const auto& addr : p2p_addresses ) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, + "p2p-listen-endpoint ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } } } if( options.count( "p2p-server-address" ) ) { diff --git a/tests/p2p_multiple_listen_test.py b/tests/p2p_multiple_listen_test.py index f21d07d46a..1b2948c7ec 100755 --- a/tests/p2p_multiple_listen_test.py +++ b/tests/p2p_multiple_listen_test.py @@ -20,7 +20,7 @@ pnodes=args.p delay=args.d debug=args.v -total_nodes=4 +total_nodes=5 dumpErrorDetails=args.dump_error_details Utils.Debug=debug @@ -38,35 +38,53 @@ Print("Stand up cluster") specificArgs = { - '0': '--agent-name node-00 --p2p-listen-endpoint 0.0.0.0:9779 --p2p-server-address localhost:9779 --plugin eosio::net_api_plugin', + '0': '--agent-name node-00 --p2p-listen-endpoint 0.0.0.0:9876 --p2p-listen-endpoint 0.0.0.0:9779 --p2p-server-address ext-ip0:20000 --p2p-server-address ext-ip1:20001 --plugin eosio::net_api_plugin', '2': '--agent-name node-02 --p2p-peer-address localhost:9779 
--plugin eosio::net_api_plugin', + '4': '--agent-name node-04 --p2p-peer-address localhost:9876 --plugin eosio::net_api_plugin', } - if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo='ring', delay=delay, + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo='line', delay=delay, specificExtraNodeosArgs=specificArgs) is False: errorExit("Failed to stand up eos cluster.") - + + # Be sure all nodes start out connected cluster.waitOnClusterSync(blockAdvancing=5) + # Shut down bios node, which is connected to all other nodes in all topologies cluster.biosNode.kill(signal.SIGTERM) + # Shut down second node, interrupting the default connections between it and nodes 0 and 3 cluster.getNode(1).kill(signal.SIGTERM) + # Shut down the fourth node, interrupting the default connections between it and nodes 3 and 5 cluster.getNode(3).kill(signal.SIGTERM) + # Be sure all remaining nodes continue to sync via the two listen ports on node 0 cluster.waitOnClusterSync(blockAdvancing=5) connections = cluster.nodes[0].processUrllibRequest('net', 'connections') open_socket_count = 0 for conn in connections['payload']: if conn['is_socket_open']: open_socket_count += 1 - assert conn['last_handshake']['agent'] == 'node-02', f'Connected node identifed as "{conn["last_handshake"]["agent"]}" instead of node-02' - assert conn['last_handshake']['p2p_address'][:14] == 'localhost:9878', 'Connected node is not listening on port 9878' - assert open_socket_count == 1, 'Node 0 is expected to have only one open socket' + if conn['last_handshake']['agent'] == 'node-02': + assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9878', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 9878" + elif conn['last_handshake']['agent'] == 'node-04': + assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9880', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 
9880" + assert open_socket_count == 2, 'Node 0 is expected to have only two open sockets' + connections = cluster.nodes[2].processUrllibRequest('net', 'connections') open_socket_count = 0 for conn in connections['payload']: if conn['is_socket_open']: open_socket_count += 1 - assert conn['last_handshake']['agent'] == 'node-00', f'Connected node identifed as "{conn["last_handshake"]["agent"]}" instead of node-00' - assert conn['last_handshake']['p2p_address'][:14] == 'localhost:9779', 'Connected node is not listening on port 9779' + assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identifed as '{conn['last_handshake']['agent']}' instead of node-00" + assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip0:20000', f"Connected node is advertising '{conn['last_handshake']['p2p_address'].split()[0]}' instead of ext-ip0:20000" assert open_socket_count == 1, 'Node 2 is expected to have only one open socket' + connections = cluster.nodes[4].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identifed as '{conn['last_handshake']['agent']}' instead of node-00" + assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip1:20001', f"Connected node is advertising '{conn['last_handshake']['p2p_address'].split()[0]} 'instead of ext-ip1:20001" + assert open_socket_count == 1, 'Node 4 is expected to have only one open socket' + testSuccessful=True finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) From 8537bd3e718c8285e7aa6d75ca0598963983a2a3 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 19:06:34 -0400 Subject: [PATCH 162/180] Update exclude patterns in install() for dev package --- CMakeLists.txt | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git 
a/CMakeLists.txt b/CMakeLists.txt index 01d5c15013..c7a561ba9a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,17 +276,17 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN ".git" EXCLUDE - PATTERN "example" EXCLUDE - PATTERN "bench" EXCLUDE - PATTERN "doc" EXCLUDE - PATTERN "testvawe" EXCLUDE - PATTERN "math/test" EXCLUDE - PATTERN "json/test" EXCLUDE - PATTERN "graph/test" EXCLUDE - PATTERN "gil/test" EXCLUDE - PATTERN "geometry/test" EXCLUDE - PATTERN "beast/test" EXCLUDE + PATTERN ".git/*" EXCLUDE + PATTERN "example/*" EXCLUDE + PATTERN "bench/*" EXCLUDE + PATTERN "doc/*" EXCLUDE + PATTERN "testwave/*" EXCLUDE + PATTERN "math/test/*" EXCLUDE + PATTERN "json/test/*" EXCLUDE + PATTERN "graph/test/*" EXCLUDE + PATTERN "gil/test/*" EXCLUDE + PATTERN "geometry/test/*" EXCLUDE + PATTERN "beast/test/*" EXCLUDE ) add_custom_target(dev-install From 2d6aff43d1995b2828fa28ae8021207364665771 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 19:10:58 -0400 Subject: [PATCH 163/180] Use `file` as a dependency instead of `ubuntu-dev-tools` --- .cicd/platforms/ubuntu20.Dockerfile | 2 +- .cicd/platforms/ubuntu22.Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index c60c53f5bb..e9c3a1d4b6 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -12,6 +12,6 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ - ubuntu-dev-tools \ + file \ zlib1g-dev \ zstd diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index fd943f7043..57d49fe026 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,6 +12,6 @@ RUN apt-get update 
&& apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ - ubuntu-dev-tools \ + file \ zlib1g-dev \ zstd From 4a4875ecba277668e81f3397541d48cc1a38aeef Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Fri, 21 Jul 2023 21:32:18 -0500 Subject: [PATCH 164/180] Add test to verify nodeos can run with p2p disabled. --- tests/CMakeLists.txt | 3 ++ tests/p2p_no_listen_test.py | 76 +++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100755 tests/p2p_no_listen_test.py diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d57718db4f..6359798055 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -50,6 +50,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/large-lib-test.py ${CMAKE_CURRENT_BIN configure_file(${CMAKE_CURRENT_SOURCE_DIR}/http_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/http_plugin_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_multiple_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_multiple_listen_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_no_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_no_listen_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compute_transaction_test.py ${CMAKE_CURRENT_BINARY_DIR}/compute_transaction_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/subjective_billing_test.py ${CMAKE_CURRENT_BINARY_DIR}/subjective_billing_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/get_account_test.py ${CMAKE_CURRENT_BINARY_DIR}/get_account_test.py COPYONLY) @@ -186,6 +187,8 @@ set_property(TEST nodeos_run_check_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_multiple_listen_test COMMAND tests/p2p_multiple_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_multiple_listen_test PROPERTY LABELS nonparallelizable_tests) 
+add_test(NAME p2p_no_listen_test COMMAND tests/p2p_no_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_no_listen_test PROPERTY LABELS nonparallelizable_tests) # needs iproute-tc or iproute2 depending on platform #add_test(NAME p2p_high_latency_test COMMAND tests/p2p_high_latency_test.py -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/p2p_no_listen_test.py b/tests/p2p_no_listen_test.py new file mode 100755 index 0000000000..76b3c76886 --- /dev/null +++ b/tests/p2p_no_listen_test.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 + +import errno +import pathlib +import shutil +import signal +import socket +import time + +from TestHarness import Node, TestHelper, Utils + +############################################################### +# p2p_no_listen_test +# +# Test nodeos disabling p2p +# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +args=TestHelper.parse_args({"--keep-logs","-v","--leave-running","--unshared"}) +debug=args.v + +Utils.Debug=debug +testSuccessful=False + +try: + TestHelper.printSystemInfo("BEGIN") + + cmd = [ + Utils.EosServerPath, + '-e', + '-p', + 'eosio', + '--p2p-listen-endpoint', + '', + '--plugin', + 'eosio::chain_api_plugin', + '--config-dir', + Utils.ConfigDir, + '--data-dir', + Utils.DataDir, + '--http-server-address', + 'localhost:8888' + ] + node = Node('localhost', '8888', '00', data_dir=pathlib.Path(Utils.DataDir), + config_dir=pathlib.Path(Utils.ConfigDir), cmd=cmd) + + time.sleep(1) + if not node.verifyAlive(): + raise RuntimeError + time.sleep(10) + node.waitForBlock(5) + + s = socket.socket() + err = s.connect_ex(('localhost',9876)) + assert err == errno.ECONNREFUSED, 'Connection to port 9876 must be refused' + + testSuccessful=True +finally: + Utils.ShuttingDown=True + + if not args.leave_running: + node.kill(signal.SIGTERM) + + if not (args.leave_running or args.keep_logs or not testSuccessful): + 
shutil.rmtree(Utils.DataPath, ignore_errors=True) + + if testSuccessful: + Utils.Print("Test succeeded.") + else: + Utils.Print("Test failed.") + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) From 3b45daafc1f5043f7dce0a3df34605890d9aa130 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 23 Jul 2023 18:18:46 -0400 Subject: [PATCH 165/180] on cmake 3.22+ compress .deb packages via zstd --- package.cmake | 3 +++ 1 file changed, 3 insertions(+) diff --git a/package.cmake b/package.cmake index ff3aebbd4b..c61b380898 100644 --- a/package.cmake +++ b/package.cmake @@ -46,6 +46,9 @@ set(CPACK_PACKAGE_HOMEPAGE_URL "https://github.com/AntelopeIO/leap") set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) set(CPACK_DEBIAN_BASE_PACKAGE_SECTION "utils") +if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.22) + set(CPACK_DEBIAN_COMPRESSION_TYPE "zstd") +endif() set(CPACK_DEBIAN_PACKAGE_CONFLICTS "eosio, mandel") set(CPACK_RPM_PACKAGE_CONFLICTS "eosio, mandel") From 60eabad15e9a980b515c040baf107f85edf1f83b Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 08:46:59 -0400 Subject: [PATCH 166/180] Update package list in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e979c6d509..1ffaa053e1 100644 --- a/README.md +++ b/README.md @@ -137,7 +137,7 @@ sudo apt-get install -y \ libssl-dev \ llvm-11-dev \ python3-numpy \ - ubuntu-dev-tools \ + file \ zlib1g-dev ``` To build, make sure you are in the root of the `leap` repo, then run the following command: From 6a51290961f7656e17b0c9d768f60a5a4c230eae Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 08:50:15 -0400 Subject: [PATCH 167/180] Update package list in `package.cmake` --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index b2000e1ed5..63989e7535 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ 
set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, ubuntu-dev-tools, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, file, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From b114f3bf7d9d431b879aa4513f4f289ca643732c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 08:50:00 -0500 Subject: [PATCH 168/180] GH-1435 Let scope_exit always do the app_thread join. 
--- tests/test_read_only_trx.cpp | 133 ++++++++++++++++++----------------- 1 file changed, 67 insertions(+), 66 deletions(-) diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp index c55e3ae0aa..8bb3a83a9d 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -98,31 +98,6 @@ void test_trxs_common(std::vector& specific_args, bool test_disable appbase::scoped_app app; auto temp_dir_str = temp.path().string(); producer_plugin::set_test_mode(true); - - std::promise> plugin_promise; - std::future> plugin_fut = plugin_promise.get_future(); - std::thread app_thread( [&]() { - try { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; - argv.insert(argv.end(), specific_args.begin(), specific_args.end()); - app->initialize(argv.size(), (char**)&argv[0]); - app->find_plugin()->chain(); - app->startup(); - plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); - app->exec(); - return; - } FC_LOG_AND_DROP() - BOOST_CHECK(!"app threw exception see logged error"); - } ); - fc::scoped_exit> on_except = [&](){ - if (app_thread.joinable()) - app_thread.join(); - }; - - auto[prod_plug, chain_plug] = plugin_fut.get(); - - activate_protocol_features_set_bios_contract(app, chain_plug); std::atomic next_calls = 0; std::atomic num_get_account_calls = 0; @@ -131,50 +106,76 @@ void test_trxs_common(std::vector& specific_args, bool test_disable std::atomic trx_match = true; const size_t num_pushes = 4242; - for( size_t i = 1; i <= num_pushes; ++i ) { - auto ptrx = i % 3 == 0 ? 
make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); - ++num_get_account_calls; - }); - app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { - ++num_posts; - bool return_failure_traces = true; - app->get_method()(ptrx, - false, // api_trx - transaction_metadata::trx_type::read_only, // trx_type - return_failure_traces, - [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] - (const next_function_variant& result) { - if( !std::holds_alternative( result ) && !std::get( result )->except ) { - if( std::get( result )->id != ptrx->id() ) { - elog( "trace not for trx ${id}: ${t}", - ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); - trx_match = false; + { + std::promise> plugin_promise; + std::future> plugin_fut = plugin_promise.get_future(); + std::thread app_thread( [&]() { + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + argv.insert(argv.end(), specific_args.begin(), specific_args.end()); + app->initialize(argv.size(), (char**)&argv[0]); + app->find_plugin()->chain(); + app->startup(); + plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); + } ); + fc::scoped_exit> on_except = [&](){ + if (app_thread.joinable()) + app_thread.join(); + }; + + auto[prod_plug, chain_plug] = plugin_fut.get(); + + activate_protocol_features_set_bios_contract(app, chain_plug); + + for( size_t i = 1; i <= 
num_pushes; ++i ) { + auto ptrx = i % 3 == 0 ? make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); + ++num_get_account_calls; + }); + app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { + ++num_posts; + bool return_failure_traces = true; + app->get_method()(ptrx, + false, // api_trx + transaction_metadata::trx_type::read_only, // trx_type + return_failure_traces, + [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] + (const next_function_variant& result) { + if( !std::holds_alternative( result ) && !std::get( result )->except ) { + if( std::get( result )->id != ptrx->id() ) { + elog( "trace not for trx ${id}: ${t}", + ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); + trx_match = false; + } + } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { + elog( "trace with except ${e}", + ("e", fc::json::to_pretty_string( *std::get( result ) )) ); + ++trace_with_except; } - } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { - elog( "trace with except ${e}", - ("e", fc::json::to_pretty_string( *std::get( result ) )) ); - ++trace_with_except; - } - ++next_calls; - }); - }); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); - }); + ++next_calls; + }); + }); + app->executor().post( priority::low, exec_queue::read_only, 
[&chain_plug=chain_plug]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); + }); + } + + // Wait long enough such that all transactions are executed + auto start = fc::time_point::now(); + auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever + while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ + std::this_thread::sleep_for( 100ms ); + } + + app->quit(); } - // Wait long enough such that all transactions are executed - auto start = fc::time_point::now(); - auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever - while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ - std::this_thread::sleep_for( 100ms );; - } - - app->quit(); - app_thread.join(); - BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it BOOST_CHECK_EQUAL( num_pushes, num_posts ); BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); From 2f24964035d4015dd52c577baaec0070b2826c17 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 09:59:32 -0400 Subject: [PATCH 169/180] Add `ubuntu-dev-tools` back. otherwise install fails on ubuntu20 with: `debconf: falling back to frontend: Readline` --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index 63989e7535..b2000e1ed5 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. 
llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, file, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, ubuntu-dev-tools, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 4977b08add7c6155de7971487787dc405f5dcbd6 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 10:03:15 -0400 Subject: [PATCH 170/180] Simplify exclude patterns. --- CMakeLists.txt | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c7a561ba9a..d5ca041374 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -280,13 +280,7 @@ install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" PATTERN "example/*" EXCLUDE PATTERN "bench/*" EXCLUDE PATTERN "doc/*" EXCLUDE - PATTERN "testwave/*" EXCLUDE - PATTERN "math/test/*" EXCLUDE - PATTERN "json/test/*" EXCLUDE - PATTERN "graph/test/*" EXCLUDE - PATTERN "gil/test/*" EXCLUDE - PATTERN "geometry/test/*" EXCLUDE - PATTERN "beast/test/*" EXCLUDE + PATTERN "libs/*/test" EXCLUDE ) add_custom_target(dev-install From ee7829dd9f9d3584118f1c6295190704995fd944 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 09:04:33 -0500 Subject: [PATCH 171/180] GH-1435 Protect against accessing destroyed types --- unittests/test_utils.hpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp index e24d0759cb..5bf3b74675 100644 --- a/unittests/test_utils.hpp +++ b/unittests/test_utils.hpp @@ -75,29 +75,29 @@ auto push_input_trx(appbase::scoped_app& app, eosio::chain::controller& 
control, } auto ptrx = std::make_shared( trx, packed_transaction::compression_type::zlib ); - std::promise trx_promise; - std::future trx_future = trx_promise.get_future(); + std::shared_ptr> trx_promise = std::make_shared>(); + std::future trx_future = trx_promise->get_future(); - app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, &trx_promise]() { + app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, trx_promise]() { app->get_method()(ptrx, false, // api_trx transaction_metadata::trx_type::input, // trx_type true, // return_failure_traces - [&trx_promise](const next_function_variant& result) { + [trx_promise](const next_function_variant& result) { if( std::holds_alternative( result ) ) { try { std::get(result)->dynamic_rethrow_exception(); } catch(...) { - trx_promise.set_exception(std::current_exception()); + trx_promise->set_exception(std::current_exception()); } } else if ( std::get( result )->except ) { try { std::get(result)->except->dynamic_rethrow_exception(); } catch(...) 
{ - trx_promise.set_exception(std::current_exception()); + trx_promise->set_exception(std::current_exception()); } } else { - trx_promise.set_value(std::get(result)); + trx_promise->set_value(std::get(result)); } }); }); @@ -124,12 +124,12 @@ auto set_code(appbase::scoped_app& app, eosio::chain::controller& control, accou void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chain_plugin* chain_plug) { using namespace appbase; - std::atomic feature_set = false; + std::shared_ptr> feature_set = std::make_shared>(false); // has to execute when pending block is not null for (int tries = 0; tries < 100; ++tries) { - app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &feature_set](){ + app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, feature_set](){ try { - if (!chain_plug->chain().is_building_block() || feature_set) + if (!chain_plug->chain().is_building_block() || *feature_set) return; const auto& pfm = chain_plug->chain().get_protocol_feature_manager(); auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); @@ -153,12 +153,12 @@ void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chai BOOST_CHECK( feature_digest ); chain_plug->chain().preactivate_feature( *feature_digest, false ); } - feature_set = true; + *feature_set = true; return; } FC_LOG_AND_DROP() BOOST_CHECK(!"exception setting protocol features"); }); - if (feature_set) + if (*feature_set) break; std::this_thread::sleep_for(std::chrono::milliseconds(50)); } From a3c571d14dee13bb1032ad9c822b1083af9e6205 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 09:19:24 -0500 Subject: [PATCH 172/180] GH-1435 Use auto --- unittests/test_utils.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp index 5bf3b74675..974fee3927 100644 --- a/unittests/test_utils.hpp 
+++ b/unittests/test_utils.hpp @@ -75,7 +75,7 @@ auto push_input_trx(appbase::scoped_app& app, eosio::chain::controller& control, } auto ptrx = std::make_shared( trx, packed_transaction::compression_type::zlib ); - std::shared_ptr> trx_promise = std::make_shared>(); + auto trx_promise = std::make_shared>(); std::future trx_future = trx_promise->get_future(); app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, trx_promise]() { @@ -124,7 +124,7 @@ auto set_code(appbase::scoped_app& app, eosio::chain::controller& control, accou void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chain_plugin* chain_plug) { using namespace appbase; - std::shared_ptr> feature_set = std::make_shared>(false); + auto feature_set = std::make_shared>(false); // has to execute when pending block is not null for (int tries = 0; tries < 100; ++tries) { app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, feature_set](){ From 632e728d77604c0c643c29b36a4b0b24c310d151 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 09:19:40 -0500 Subject: [PATCH 173/180] GH-1435 Re-indent --- tests/test_read_only_trx.cpp | 184 ++++++++++++++++++----------------- 1 file changed, 93 insertions(+), 91 deletions(-) diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp index 8bb3a83a9d..49134a54a7 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -87,101 +87,103 @@ BOOST_AUTO_TEST_CASE(not_check_configs_if_no_read_only_threads) { test_configs_common(specific_args, app_init_status::succeeded); } -void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { try { - fc::scoped_exit> on_exit = []() { - chain::wasm_interface_collection::test_disable_tierup = false; - }; - chain::wasm_interface_collection::test_disable_tierup = test_disable_tierup; - - using namespace std::chrono_literals; - fc::temp_directory temp; - appbase::scoped_app app; - auto 
temp_dir_str = temp.path().string(); - producer_plugin::set_test_mode(true); - - std::atomic next_calls = 0; - std::atomic num_get_account_calls = 0; - std::atomic num_posts = 0; - std::atomic trace_with_except = 0; - std::atomic trx_match = true; - const size_t num_pushes = 4242; - - { - std::promise> plugin_promise; - std::future> plugin_fut = plugin_promise.get_future(); - std::thread app_thread( [&]() { - try { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; - argv.insert(argv.end(), specific_args.begin(), specific_args.end()); - app->initialize(argv.size(), (char**)&argv[0]); - app->find_plugin()->chain(); - app->startup(); - plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); - app->exec(); - return; - } FC_LOG_AND_DROP() - BOOST_CHECK(!"app threw exception see logged error"); - } ); - fc::scoped_exit> on_except = [&](){ - if (app_thread.joinable()) - app_thread.join(); +void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { + try { + fc::scoped_exit> on_exit = []() { + chain::wasm_interface_collection::test_disable_tierup = false; }; - - auto[prod_plug, chain_plug] = plugin_fut.get(); - - activate_protocol_features_set_bios_contract(app, chain_plug); - - for( size_t i = 1; i <= num_pushes; ++i ) { - auto ptrx = i % 3 == 0 ? 
make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); - ++num_get_account_calls; - }); - app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { - ++num_posts; - bool return_failure_traces = true; - app->get_method()(ptrx, - false, // api_trx - transaction_metadata::trx_type::read_only, // trx_type - return_failure_traces, - [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] - (const next_function_variant& result) { - if( !std::holds_alternative( result ) && !std::get( result )->except ) { - if( std::get( result )->id != ptrx->id() ) { - elog( "trace not for trx ${id}: ${t}", - ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); - trx_match = false; + chain::wasm_interface_collection::test_disable_tierup = test_disable_tierup; + + using namespace std::chrono_literals; + fc::temp_directory temp; + appbase::scoped_app app; + auto temp_dir_str = temp.path().string(); + producer_plugin::set_test_mode(true); + + std::atomic next_calls = 0; + std::atomic num_get_account_calls = 0; + std::atomic num_posts = 0; + std::atomic trace_with_except = 0; + std::atomic trx_match = true; + const size_t num_pushes = 4242; + + { + std::promise> plugin_promise; + std::future> plugin_fut = plugin_promise.get_future(); + std::thread app_thread( [&]() { + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + argv.insert(argv.end(), specific_args.begin(), specific_args.end()); + app->initialize(argv.size(), (char**)&argv[0]); + 
app->find_plugin()->chain(); + app->startup(); + plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); + } ); + fc::scoped_exit> on_except = [&](){ + if (app_thread.joinable()) + app_thread.join(); + }; + + auto[prod_plug, chain_plug] = plugin_fut.get(); + + activate_protocol_features_set_bios_contract(app, chain_plug); + + for( size_t i = 1; i <= num_pushes; ++i ) { + auto ptrx = i % 3 == 0 ? make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); + ++num_get_account_calls; + }); + app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { + ++num_posts; + bool return_failure_traces = true; + app->get_method()(ptrx, + false, // api_trx + transaction_metadata::trx_type::read_only, // trx_type + return_failure_traces, + [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] + (const next_function_variant& result) { + if( !std::holds_alternative( result ) && !std::get( result )->except ) { + if( std::get( result )->id != ptrx->id() ) { + elog( "trace not for trx ${id}: ${t}", + ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); + trx_match = false; + } + } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { + elog( "trace with except ${e}", + ("e", fc::json::to_pretty_string( *std::get( result ) )) ); + ++trace_with_except; } - } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { - elog( "trace with except ${e}", - ("e", 
fc::json::to_pretty_string( *std::get( result ) )) ); - ++trace_with_except; - } - ++next_calls; - }); - }); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); - }); + ++next_calls; + }); + }); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); + }); + } + + // Wait long enough such that all transactions are executed + auto start = fc::time_point::now(); + auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever + while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ + std::this_thread::sleep_for( 100ms ); + } + + app->quit(); } - // Wait long enough such that all transactions are executed - auto start = fc::time_point::now(); - auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever - while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ - std::this_thread::sleep_for( 100ms ); - } - - app->quit(); - } - - BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it - BOOST_CHECK_EQUAL( num_pushes, num_posts ); - BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); - BOOST_CHECK_EQUAL( num_pushes, num_get_account_calls.load() ); - BOOST_CHECK( trx_match.load() ); // trace should match the transaction -} FC_LOG_AND_RETHROW() } + BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it + BOOST_CHECK_EQUAL( num_pushes, num_posts ); + BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); + BOOST_CHECK_EQUAL( 
num_pushes, num_get_account_calls.load() ); + BOOST_CHECK( trx_match.load() ); // trace should match the transaction + } FC_LOG_AND_RETHROW() +} // test read-only trxs on main thread (no --read-only-threads) BOOST_AUTO_TEST_CASE(no_read_only_threads) { From b784e141d6846c5da817c93b5a7f54fea14e0803 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 24 Jul 2023 10:46:12 -0400 Subject: [PATCH 174/180] set DEBIAN_FRONTEND & TZ globally during libtester job --- .github/workflows/build.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8275dea347..7fa8aa236a 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -199,6 +199,9 @@ jobs: test: [build-tree, make-dev-install, deb-install] runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{ matrix.test != 'deb-install' && fromJSON(needs.build-base.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} + env: + DEBIAN_FRONTEND: noninteractive + TZ: Etc/UTC steps: - name: Update Package Index & Upgrade Packages run: | @@ -241,8 +244,6 @@ jobs: - if: ${{ matrix.test == 'deb-install' }} name: Install leap-dev Package run: | - export DEBIAN_FRONTEND='noninteractive' - export TZ='Etc/UTC' apt-get install -y ./*.deb rm ./*.deb From 8332a802f0a0fd6528e4cf0903afc2754773b3cc Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 10:57:46 -0400 Subject: [PATCH 175/180] Exclude test directories from `boost/tools` --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index d5ca041374..058a270774 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -281,6 +281,7 @@ install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" PATTERN "bench/*" EXCLUDE PATTERN "doc/*" EXCLUDE PATTERN "libs/*/test" EXCLUDE + PATTERN "tools/*/test" EXCLUDE ) 
add_custom_target(dev-install From a8b17a2b2ceb9ea43a0cd80f51b29f7cfb32b6f9 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 11:06:05 -0400 Subject: [PATCH 176/180] Remove again `ubuntu-dev-tools` from package.cmake --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index b2000e1ed5..63989e7535 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, ubuntu-dev-tools, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, file, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From f9e6188892e9e9b5b5a41c380378a4f117f296f9 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 14:38:08 -0400 Subject: [PATCH 177/180] remove `file` from `package.cmake` --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index bc567d3776..72e88c6c56 100644 --- a/package.cmake +++ b/package.cmake @@ -64,7 +64,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. 
llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, file, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 31f38898e752e30ccad0e8aaeacef2ccd94566a8 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Mon, 24 Jul 2023 19:40:15 -0500 Subject: [PATCH 178/180] Extend auto bp peering test to include p2p-server-address args. Add network topology diagrams to p2p_multiple_listen_test. --- tests/auto_bp_peering_test.py | 22 ++++++++++++++-------- tests/p2p_multiple_listen_test.py | 18 ++++++++++++++---- 2 files changed, 28 insertions(+), 12 deletions(-) diff --git a/tests/auto_bp_peering_test.py b/tests/auto_bp_peering_test.py index 912ab10e3c..a55bdd8807 100755 --- a/tests/auto_bp_peering_test.py +++ b/tests/auto_bp_peering_test.py @@ -1,10 +1,8 @@ #!/usr/bin/env python3 -import re -import signal -import time +import socket -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, ReturnType +from TestHarness import Cluster, TestHelper, Utils, WalletMgr ############################################################### # auto_bp_peering_test @@ -35,7 +33,7 @@ dumpErrorDetails = args.dump_error_details keepLogs = args.keep_logs -# Setup cluster and it's wallet manager +# Setup cluster and its wallet manager walletMgr = WalletMgr(True) cluster = Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) @@ -47,12 +45,17 @@ for nodeId in range(0, producerNodes): producer_name = "defproducer" + chr(ord('a') + nodeId) port = 
cluster.p2pBasePort + nodeId - hostname = "localhost:" + str(port) + if producer_name == 'defproducerf': + hostname = 'ext-ip0:9999' + elif producer_name == 'defproducerk': + hostname = socket.gethostname() + ':9886' + else: + hostname = "localhost:" + str(port) peer_names[hostname] = producer_name auto_bp_peer_args += (" --p2p-auto-bp-peer " + producer_name + "," + hostname) -def neigbors_in_schedule(name, schedule): +def neighbors_in_schedule(name, schedule): index = schedule.index(name) result = [] num = len(schedule) @@ -71,6 +74,9 @@ def neigbors_in_schedule(name, schedule): for nodeId in range(0, producerNodes): specificNodeosArgs[nodeId] = auto_bp_peer_args + specificNodeosArgs[5] = specificNodeosArgs[5] + ' --p2p-server-address ext-ip0:9999' + specificNodeosArgs[10] = specificNodeosArgs[10] + ' --p2p-server-address ""' + TestHelper.printSystemInfo("BEGIN") cluster.launch( prodCount=producerCountInEachNode, @@ -113,7 +119,7 @@ def neigbors_in_schedule(name, schedule): peers = peers.sort() name = "defproducer" + chr(ord('a') + nodeId) - expected_peers = neigbors_in_schedule(name, scheduled_producers) + expected_peers = neighbors_in_schedule(name, scheduled_producers) if peers != expected_peers: Utils.Print("ERROR: expect {} has connections to {}, got connections to {}".format( name, expected_peers, peers)) diff --git a/tests/p2p_multiple_listen_test.py b/tests/p2p_multiple_listen_test.py index 1b2948c7ec..62f1534c63 100755 --- a/tests/p2p_multiple_listen_test.py +++ b/tests/p2p_multiple_listen_test.py @@ -46,15 +46,25 @@ specificExtraNodeosArgs=specificArgs) is False: errorExit("Failed to stand up eos cluster.") - # Be sure all nodes start out connected + # Be sure all nodes start out connected (bios node omitted from diagram for brevity) + # node00 node01 node02 node03 node04 + # localhost:9876 -> localhost:9877 -> localhost:9878 -> localhost:9879 -> localhost:9880 + # localhost:9779 ^ | | + # ^ +---------------------------+ | + # 
+------------------------------------------------------------------------+ cluster.waitOnClusterSync(blockAdvancing=5) # Shut down bios node, which is connected to all other nodes in all topologies cluster.biosNode.kill(signal.SIGTERM) - # Shut down second node, interrupting the default connections between it and nodes 0 and 3 + # Shut down second node, interrupting the default connections between it and nodes 00 and 02 cluster.getNode(1).kill(signal.SIGTERM) - # Shut down the fourth node, interrupting the default connections between it and nodes 3 and 5 + # Shut down the fourth node, interrupting the default connections between it and nodes 02 and 04 cluster.getNode(3).kill(signal.SIGTERM) - # Be sure all remaining nodes continue to sync via the two listen ports on node 0 + # Be sure all remaining nodes continue to sync via the two listen ports on node 00 + # node00 node01 node02 node03 node04 + # localhost:9876 offline localhost:9878 offline localhost:9880 + # localhost:9779 ^ | | + # ^ +---------------------------+ | + # +------------------------------------------------------------------------+ cluster.waitOnClusterSync(blockAdvancing=5) connections = cluster.nodes[0].processUrllibRequest('net', 'connections') open_socket_count = 0 From 553a09d905f1e9d00fe8b614ceb9cae248d3ad16 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 25 Jul 2023 14:30:00 -0400 Subject: [PATCH 179/180] don't build leap-dev .deb package by default --- .github/workflows/build_base.yaml | 2 +- CMakeLists.txt | 2 ++ package.cmake | 7 ++++--- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_base.yaml b/.github/workflows/build_base.yaml index 5d47ba37cc..b1771fbfbf 100644 --- a/.github/workflows/build_base.yaml +++ b/.github/workflows/build_base.yaml @@ -77,7 +77,7 @@ jobs: run: | # https://github.com/actions/runner/issues/2033 chown -R $(id -u):$(id -g) $PWD - cmake -B build 
-DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja + cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DENABLE_LEAP_DEV_DEB=On -GNinja cmake --build build tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst - name: Upload builddir diff --git a/CMakeLists.txt b/CMakeLists.txt index 058a270774..479cd1ea81 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -293,5 +293,7 @@ add_custom_target(dev-install include(doxygen) +option(ENABLE_LEAP_DEV_DEB "Enable building the leap-dev .deb package" OFF) + include(package.cmake) include(CPack) diff --git a/package.cmake b/package.cmake index 72e88c6c56..dd1c1b8e57 100644 --- a/package.cmake +++ b/package.cmake @@ -53,9 +53,10 @@ endif() set(CPACK_DEBIAN_PACKAGE_CONFLICTS "eosio, mandel") set(CPACK_RPM_PACKAGE_CONFLICTS "eosio, mandel") -#only consider "base" and "dev" components for per-component packages -get_cmake_property(CPACK_COMPONENTS_ALL COMPONENTS) -list(REMOVE_ITEM CPACK_COMPONENTS_ALL "Unspecified") +set(CPACK_COMPONENTS_ALL "base") +if(ENABLE_LEAP_DEV_DEB) + list(APPEND CPACK_COMPONENTS_ALL "dev") +endif() #enable per component packages for .deb; ensure main package is just "leap", not "leap-base", and make the dev package have "leap-dev" at the front not the back set(CPACK_DEB_COMPONENT_INSTALL ON) From cf2b9049adbc9a889a9646a6a0926b4955043d38 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 25 Jul 2023 16:00:44 -0400 Subject: [PATCH 180/180] Update `appbase` and `chainbase` to tip of `main` --- libraries/appbase | 2 +- libraries/chainbase | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/appbase b/libraries/appbase index fe1b3a6cd9..b75b31e14f 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit fe1b3a6cd9b6f7529d6fb4beac0e880d136308a8 +Subproject commit b75b31e14f966fa3de6246e120dcba36c6ce5264 diff --git a/libraries/chainbase b/libraries/chainbase index 148aac7461..bffb7ebde6 160000 --- 
a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 148aac7461fffbe8730ba0b55367dde6fdaa0e08 +Subproject commit bffb7ebde635be15d406d74d6fef46f4c744d441