diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 6383f57c392..c242d219b0e 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -1,6 +1,5 @@ steps: - - - command: | + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -17,11 +16,11 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -38,11 +37,11 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 - - command: | + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -59,32 +58,11 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 - - command: | + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -101,32 +79,11 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 - - command: | + - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" @@ -140,20 +97,6 @@ steps: artifact_paths: "build.tar.gz" timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - 
"role=builder-v2-1" - - "os=high-sierra" - artifact_paths: "build.tar.gz" - timeout: 60 - - wait - command: | # Ubuntu 16.04 Tests @@ -172,7 +115,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 90 @@ -192,7 +135,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 90 @@ -212,27 +155,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" - workdir: /data/job - timeout: 90 - - - command: | # Amazon AWS-1 Linux Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":aws: 1 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 90 @@ -252,41 +175,10 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 90 - - command: | # Fedora Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":fedora: 27 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" - workdir: /data/job - timeout: 90 - - - command: | # High Sierra Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running LR Tests" - ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh - label: ":darwin: High Sierra LR Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 90 - - command: | # Mojave Tests echo "--- :arrow_down: Downloading Build Directory" buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 57ce31e5a6c..19bbdf114ff 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,6 +1,5 @@ steps: - - - command: | + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -17,11 +16,11 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -38,11 +37,11 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 - - command: | + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -59,32 +58,11 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -101,32 +79,11 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" @@ -140,20 +97,6 @@ steps: artifact_paths: "build.tar.gz" timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - "role=builder-v2-1" - - 
"os=high-sierra" - artifact_paths: "build.tar.gz" - timeout: 60 - - wait # Ubuntu 16.04 Tests @@ -173,7 +116,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -193,7 +136,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -214,7 +157,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -234,7 +177,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -255,7 +198,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 @@ -275,48 +218,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" - workdir: /data/job - timeout: 60 - - # Amazon AWS-1 Linux Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":aws: 1 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":aws: 1 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 @@ -337,7 +239,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 @@ -357,75 +259,10 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 - - # Fedora Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":fedora: 27 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":fedora: 27 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" - workdir: /data/job - timeout: 60 - - # High Sierra Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job - ./scripts/parallel-test.sh - label: ":darwin: High Sierra Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job && ./scripts/serial-test.sh - label: ":darwin: High Sierra NP Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 60 - # Mojave Tests - command: | echo "--- :arrow_down: Downloading Build Directory" @@ -452,37 +289,7 @@ steps: - wait - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: High Sierra Package Builder" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | + - command: | # Ubuntu 16.04 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 16.04 Build" tar -zxf build.tar.gz @@ -501,14 +308,14 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job env: OS: "ubuntu-16.04" PKGTYPE: "deb" timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" tar -zxf build.tar.gz @@ -527,16 +334,16 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job env: OS: "ubuntu-18.04" PKGTYPE: "deb" timeout: 60 - - command: | + - command: | # CentOS 7 Package Builder echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" tar -zxf build.tar.gz echo "+++ :microscope: Starting package build" yum install -y rpm-build @@ -547,7 +354,7 @@ steps: mkdir -p /root/rpmbuild/SPECS mkdir -p /root/rpmbuild/SRPMS cd /data/job/build/packages && bash generate_package.sh rpm - label: ":fedora: 27 Package builder" + label: ":centos: 7 Package builder" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -560,57 +367,44 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job env: - OS: "fc27" + OS: "el7" PKGTYPE: "rpm" timeout: 60 - - command: | + - command: | # macOS Mojave Package Builder echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" tar -zxf build.tar.gz echo "+++ :microscope: Starting package build" - yum install -y rpm-build - mkdir -p /root/rpmbuild/BUILD - mkdir -p /root/rpmbuild/BUILDROOT - mkdir -p /root/rpmbuild/RPMS - mkdir -p /root/rpmbuild/SOURCES - mkdir -p /root/rpmbuild/SPECS - mkdir -p /root/rpmbuild/SRPMS - cd /data/job/build/packages && bash generate_package.sh rpm - label: ":centos: 7 Package builder" + ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew + label: ":darwin: Mojave Package Builder" agents: - queue: "automation-large-builder-fleet" + - "role=builder-v2-1" + - "os=mojave" artifact_paths: - - "build/packages/*.rpm" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" - workdir: /data/job - env: - OS: "el7" - PKGTYPE: "rpm" + - "build/packages/*.tar.gz" + - "build/packages/*.rb" timeout: 60 - wait - command: | echo "--- :arrow_down: Downloading brew files" - buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" - mv build/packages/eosio.rb build/packages/eosio_highsierra.rb buildkite-agent artifact download "build/packages/eosio.rb" . 
--step ":darwin: Mojave Package Builder" label: ":darwin: Brew Updater" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" timeout: 60 + + - command: | + echo "+++ :microscope: Running git submodule regression check" && \ + ./scripts/submodule_check.sh + label: "Git submodule regression check" + agents: + queue: "automation-large-builder-fleet" + timeout: 240 diff --git a/CMakeLists.txt b/CMakeLists.txt index f9375f0f8b9..17c3df72451 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,14 +14,8 @@ endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules") -if (UNIX) - if (APPLE) - execute_process(COMMAND xcrun --show-sdk-path - OUTPUT_VARIABLE CMAKE_OSX_SYSROOT - OUTPUT_STRIP_TRAILING_WHITESPACE) - list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4") - list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/gettext") - endif() +if (UNIX AND APPLE) + list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4" "/usr/local/opt/gettext") endif() include( GNUInstallDirs ) @@ -110,18 +104,23 @@ IF( WIN32 ) set(BOOST_ALL_DYN_LINK OFF) # force dynamic linking for all libraries ENDIF(WIN32) FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS - thread date_time filesystem system program_options - serialization chrono unit_test_framework - context - locale iostreams) +# Some new stdlibc++s will #error on ; a problem for boost pre-1.69 +if( APPLE AND UNIX ) + add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) +endif() + +set(THREADS_PREFER_PTHREAD_FLAG 1) +find_package(Threads) +link_libraries(Threads::Threads) + if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000000..40ecbf9cea8 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,148 @@ +# Contributing to eos + +Interested in contributing? That's awesome! Here are some guidelines to get started quickly and easily: + +- [Reporting An Issue](#reporting-an-issue) + - [Bug Reports](#bug-reports) + - [Feature Requests](#feature-requests) + - [Change Requests](#change-requests) +- [Working on eos](#working-on-eos) + - [Feature Branches](#feature-branches) + - [Submitting Pull Requests](#submitting-pull-requests) + - [Testing and Quality Assurance](#testing-and-quality-assurance) +- [Conduct](#conduct) +- [Contributor License & Acknowledgments](#contributor-license--acknowledgments) +- [References](#references) + +## Reporting An Issue + +If you're about to raise an issue because you think you've found a problem with eos, or you'd like to make a request for a new feature in the codebase, or any other reason… please read this first. + +The GitHub issue tracker is the preferred channel for [bug reports](#bug-reports), [feature requests](#feature-requests), and [submitting pull requests](#submitting-pull-requests), but please respect the following restrictions: + +* Please **search for existing issues**. Help us keep duplicate issues to a minimum by checking to see if someone has already reported your problem or requested your idea. + +* Please **be civil**. Keep the discussion on topic and respect the opinions of others. See also our [Contributor Code of Conduct](#conduct). + +### Bug Reports + +A bug is a _demonstrable problem_ that is caused by the code in the repository. Good bug reports are extremely helpful - thank you! + +Guidelines for bug reports: + +1. 
**Use the GitHub issue search** — check if the issue has already been + reported. + +1. **Check if the issue has been fixed** — look for [closed issues in the + current milestone](https://github.com/EOSIO/eos/issues?q=is%3Aissue+is%3Aclosed) or try to reproduce it + using the latest `develop` branch. + +A good bug report shouldn't leave others needing to chase you up for more information. Be sure to include the details of your environment and relevant tests that demonstrate the failure. + +[Report a bug](https://github.com/EOSIO/eos/issues/new?title=Bug%3A) + +### Feature Requests + +Feature requests are welcome. Before you submit one be sure to have: + +1. **Use the GitHub search** and check the feature hasn't already been requested. +1. Take a moment to think about whether your idea fits with the scope and aims of the project. +1. Remember, it's up to *you* to make a strong case to convince the project's leaders of the merits of this feature. Please provide as much detail and context as possible, this means explaining the use case and why it is likely to be common. + +### Change Requests + +Change requests cover both architectural and functional changes to how eos works. If you have an idea for a new or different dependency, a refactor, or an improvement to a feature, etc - please be sure to: + +1. **Use the GitHub search** and check someone else didn't get there first +1. Take a moment to think about the best way to make a case for, and explain what you're thinking. Are you sure this shouldn't really be + a [bug report](#bug-reports) or a [feature request](#feature-requests)? Is it really one idea or is it many? What's the context? What problem are you solving? Why is what you are suggesting better than what's already there? + +## Working on eos + +Code contributions are welcome and encouraged! If you are looking for a good place to start, check out the [good first issue](https://github.com/EOSIO/eos/labels/good%20first%20issue) label in GitHub issues. + +Also, please follow these guidelines when submitting code: + +### Feature Branches + +To get it out of the way: + +- **[develop](https://github.com/EOSIO/eos/tree/develop)** is the development branch. All work on the next release happens here so you should generally branch off `develop`. Do **NOT** use this branch for a production site. +- **[master](https://github.com/EOSIO/eos/tree/master)** contains the latest release of eos. This branch may be used in production. Do **NOT** use this branch to work on eos's source. + +### Submitting Pull Requests + +Pull requests are awesome. If you're looking to raise a PR for something which doesn't have an open issue, please think carefully about [raising an issue](#reporting-an-issue) which your PR can close, especially if you're fixing a bug. This makes it more likely that there will be enough information available for your PR to be properly tested and merged. + +### Testing and Quality Assurance + +Never underestimate just how useful quality assurance is. If you're looking to get involved with the code base and don't know where to start, checking out and testing a pull request is one of the most useful things you could do. + +Essentially, [check out the latest develop branch](#working-on-eos), take it for a spin, and if you find anything odd, please follow the [bug report guidelines](#bug-reports) and let us know! + +## Conduct + +While contributing, please be respectful and constructive, so that participation in our project is a positive experience for everyone. 
+ +Examples of behavior that contributes to creating a positive environment include: +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior include: +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others’ private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Contributor License & Acknowledgments + +Whenever you make a contribution to this project, you license your contribution under the same terms as set out in LICENSE, and you represent and warrant that you have the right to license your contribution under those terms. Whenever you make a contribution to this project, you also certify in the terms of the Developer’s Certificate of Origin set out below: + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +## References + +* Overall CONTRIB adapted from https://github.com/mathjax/MathJax/blob/master/CONTRIBUTING.md +* Conduct section adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff --git a/Docker/README.md b/Docker/README.md index 1aa0513cca9..6eade280f9b 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -133,45 +133,7 @@ docker volume rm keosd-data-volume ### Docker Hub -Docker Hub image available from [docker hub](https://hub.docker.com/r/eosio/eos/). 
-Create a new `docker-compose.yaml` file with the content below - -```bash -version: "3" - -services: - nodeosd: - image: eosio/eos:latest - command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 - hostname: nodeosd - ports: - - 8888:8888 - - 9876:9876 - expose: - - "8888" - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - - keosd: - image: eosio/eos:latest - command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=localhost:8900 --http-alias=keosd:8900 - hostname: keosd - links: - - nodeosd - volumes: - - keosd-data-volume:/opt/eosio/bin/data-dir - -volumes: - nodeos-data-volume: - keosd-data-volume: - -``` - -*NOTE:* the default version is the latest, you can change it to what you want - -run `docker pull eosio/eos:latest` - -run `docker-compose up` +Docker Hub images are now deprecated. New build images were discontinued on January 1st, 2019. The existing old images will be removed on June 1st, 2019. ### EOSIO Testnet diff --git a/LICENSE b/LICENSE index 1516b96cbdf..22d36d65db1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 Respective Authors all rights reserved. +Copyright (c) 2017-2019 block.one and its contributors. All rights reserved. The MIT License diff --git a/README.md b/README.md index e22a2b2cebc..e4ec6e0b69d 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ $ sudo yum install ./eosio-1.7.0-rc1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh -$ sudo yum remove eosio.cdt +$ sudo yum remove eosio ``` #### Fedora RPM Package Install ```sh @@ -105,3 +105,17 @@ EOSIO currently supports the following operating systems: ## Getting Started Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in [Getting Started](https://developers.eos.io/eosio-home/docs) on the [EOSIO Developer Portal](https://developers.eos.io). + +## Contributing + +[Contributing Guide](./CONTRIBUTING.md) + +[Code of Conduct](./CONTRIBUTING.md#conduct) + +## License + +[MIT](./LICENSE) + +## Important + +See LICENSE for copyright and license terms. Block.one makes its contribution on a voluntary basis as a member of the EOSIO community and is not responsible for ensuring the overall performance of the software or any related applications. We make no representation, warranty, guarantee or undertaking in respect of the software or any related documentation, whether expressed or implied, including but not limited to the warranties or merchantability, fitness for a particular purpose and noninfringement. In no event shall we be liable for any claim, damages or other liability, whether in an action of contract, tort or otherwise, arising from, out of or in connection with the software or documentation or the use or other dealings in the software or documentation. Any test results or performance figures are indicative and will not reflect performance under all conditions. Any reference to any third party or third-party product, service or other resource is not an endorsement or recommendation by Block.one. We are not responsible, and disclaim any and all responsibility and liability, for your use of or reliance on any of these resources. 
Third-party resources may be updated, changed or terminated at any time, so the information here may be out of date or inaccurate.
diff --git a/libraries/appbase b/libraries/appbase
index 013246f52f1..b6b55f5ff99 160000
--- a/libraries/appbase
+++ b/libraries/appbase
@@ -1 +1 @@
-Subproject commit 013246f52f13a7bc129193c3a64e6cd0cea44ac0
+Subproject commit b6b55f5ff993f4be954d2aa556538636fbdaabb4
diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt
index f12bdabe70e..254d462c5ed 100644
--- a/libraries/chain/CMakeLists.txt
+++ b/libraries/chain/CMakeLists.txt
@@ -51,6 +51,7 @@ add_library( eosio_chain
              protocol_feature_manager.cpp
              genesis_intrinsics.cpp
              whitelisted_intrinsics.cpp
+             thread_utils.cpp
              ${HEADERS}
              )
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 779f936a614..c3ee723308b 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -25,6 +25,7 @@
 #include
 #include
+#include <eosio/chain/thread_utils.hpp>
 #include
 #include
@@ -223,7 +224,7 @@ struct controller_impl {
    optional<fc::microseconds>      subjective_cpu_leeway;
    bool                            trusted_producer_light_validation = false;
    uint32_t                        snapshot_head_block = 0;
-   boost::asio::thread_pool        thread_pool;
+   named_thread_pool               thread_pool;
 
    typedef pair<scope_name,action_name> handler_key;
    map< account_name, map<handler_key, apply_handler> > apply_handlers;
@@ -297,7 +298,7 @@ struct controller_impl {
    conf( cfg ),
    chain_id( cfg.genesis.compute_chain_id() ),
    read_mode( cfg.read_mode ),
-   thread_pool( cfg.thread_pool_size )
+   thread_pool( "chain", cfg.thread_pool_size )
 {
    fork_db.open( [this]( block_timestamp_type timestamp,
@@ -668,6 +669,7 @@ struct controller_impl {
    }
 
    ~controller_impl() {
+      thread_pool.stop();
       pending.reset();
    }
@@ -1226,9 +1228,10 @@ struct controller_impl {
       auto start = fc::time_point::now();
       const bool check_auth = !self.skip_auth_check() && !trx->implicit;
       // call recover keys so that trx->sig_cpu_usage is set correctly
-      const flat_set<public_key_type>& recovered_keys = check_auth ? trx->recover_keys( chain_id ) : flat_set<public_key_type>();
+      const fc::microseconds sig_cpu_usage = check_auth ? std::get<0>( trx->recover_keys( chain_id ) ) : fc::microseconds();
+      const flat_set<public_key_type>& recovered_keys = check_auth ? std::get<1>( trx->recover_keys( chain_id ) ) : flat_set<public_key_type>();
       if( !explicit_billed_cpu_time ) {
-         fc::microseconds already_consumed_time( EOS_PERCENT(trx->sig_cpu_usage.count(), conf.sig_cpu_bill_pct) );
+         fc::microseconds already_consumed_time( EOS_PERCENT(sig_cpu_usage.count(), conf.sig_cpu_bill_pct) );
 
          if( start.time_since_epoch() < already_consumed_time ) {
            start = fc::time_point();
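Reviewer note: the hunk above changes which number feeds `EOS_PERCENT`, so it is worth spelling out the arithmetic. A self-contained sketch, assuming percentages are expressed in hundredths of a percent (`percent_100 == 10000`) and using a hypothetical 50% `sig_cpu_bill_pct`; `eos_percent` below is a stand-in for the real macro, not the literal source:

```cpp
#include <cstdint>
#include <iostream>

// Stand-in for eos' EOS_PERCENT: scales a value by a percentage expressed
// in hundredths of a percent (10'000 == 100.00%). Assumption, not the
// literal macro from the source tree.
constexpr int64_t percent_100 = 10'000;
constexpr int64_t eos_percent( int64_t value, uint32_t percentage ) {
   return ( value * percentage ) / percent_100;
}

int main() {
   const int64_t  sig_cpu_usage_us = 300;   // key-recovery cpu time, from recover_keys()
   const uint32_t sig_cpu_bill_pct = 5'000; // hypothetical 50.00% billing rate
   // corresponds to already_consumed_time in the hunk above
   std::cout << eos_percent( sig_cpu_usage_us, sig_cpu_bill_pct ) << " us billed\n"; // 150
}
```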
@@ -1655,7 +1658,7 @@ struct controller_impl {
             auto& pt = receipt.trx.get<packed_transaction>();
             auto mtrx = std::make_shared<transaction_metadata>( std::make_shared<packed_transaction>( pt ) );
             if( !self.skip_auth_check() ) {
-               transaction_metadata::create_signing_keys_future( mtrx, thread_pool, chain_id, microseconds::maximum() );
+               transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), chain_id, microseconds::maximum() );
             }
             packed_transactions.emplace_back( std::move( mtrx ) );
          }
@@ -1739,7 +1742,7 @@
    EOS_ASSERT( prev, unlinkable_block_exception,
                "unlinkable block ${id}", ("id", id)("previous", b->previous) );
 
-   return async_thread_pool( thread_pool, [b, prev, control=this]() {
+   return async_thread_pool( thread_pool.get_executor(), [b, prev, control=this]() {
      const bool skip_validate_signee = false;
      return std::make_shared<block_state>( *prev,
@@ -2415,8 +2418,8 @@ void controller::abort_block() {
    my->abort_block();
 }
 
-boost::asio::thread_pool& controller::get_thread_pool() {
-   return my->thread_pool;
+boost::asio::io_context& controller::get_thread_pool() {
+   return my->thread_pool.get_executor();
 }
 
 std::future<block_state_ptr> controller::create_block_state_future( const signed_block_ptr& b ) {
diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp
index c3d20bb4ea0..fc6d28132f5 100644
--- a/libraries/chain/include/eosio/chain/controller.hpp
+++ b/libraries/chain/include/eosio/chain/controller.hpp
@@ -159,7 +159,7 @@ namespace eosio { namespace chain {
          std::future<block_state_ptr> create_block_state_future( const signed_block_ptr& b );
          void push_block( std::future<block_state_ptr>& block_state_future );
 
-         boost::asio::thread_pool& get_thread_pool();
+         boost::asio::io_context& get_thread_pool();
 
          const chainbase::database& db()const;
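Reviewer note: `controller::get_thread_pool()` now hands out the pool's `io_context` instead of a raw `boost::asio::thread_pool`, so callers submit work through `async_thread_pool` (declared in the `thread_utils.hpp` hunk below) and wait on the returned `std::future`. A minimal sketch of the calling pattern, using only interfaces introduced by this patch:

```cpp
#include <eosio/chain/thread_utils.hpp>
#include <iostream>

int main() {
   // Four threads named "chain-0" .. "chain-3", mirroring
   // controller_impl's thread_pool( "chain", cfg.thread_pool_size ).
   eosio::chain::named_thread_pool pool( "chain", 4 );

   // async_thread_pool posts the lambda to the pool's io_context and
   // returns a std::future carrying the lambda's result.
   auto fut = eosio::chain::async_thread_pool( pool.get_executor(), []() {
      return 21 * 2; // stand-in for real work such as block_state creation
   } );
   std::cout << fut.get() << "\n"; // prints 42

   pool.stop(); // effectively idempotent; also run by ~named_thread_pool()
}
```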
diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp
index 31b32cbd91f..b3aea3085f5 100644
--- a/libraries/chain/include/eosio/chain/thread_utils.hpp
+++ b/libraries/chain/include/eosio/chain/thread_utils.hpp
@@ -4,6 +4,8 @@
  */
 #pragma once
+#include <fc/optional.hpp>
+#include <boost/asio/io_context.hpp>
 #include
 #include
 #include
@@ -11,9 +13,36 @@
 namespace eosio { namespace chain {
 
+   /**
+    * Wrapper class for boost asio thread pool and io_context run.
+    * Also names threads so that tools like htop can see thread name.
+    */
+   class named_thread_pool {
+   public:
+      // name_prefix is name appended with -## of thread.
+      // short name_prefix (6 chars or under) is recommended as console_appender uses 9 chars for thread name
+      named_thread_pool( std::string name_prefix, size_t num_threads );
+
+      // calls stop()
+      ~named_thread_pool();
+
+      boost::asio::io_context& get_executor() { return _ioc; }
+
+      // destroy work guard, stop io_context, join thread_pool, and stop thread_pool
+      void stop();
+
+   private:
+      using ioc_work_t = boost::asio::executor_work_guard<boost::asio::io_context::executor_type>;
+
+      boost::asio::thread_pool _thread_pool;
+      boost::asio::io_context  _ioc;
+      fc::optional<ioc_work_t> _ioc_work;
+   };
+
+   // async on thread_pool and return future
    template<typename F>
-   auto async_thread_pool( boost::asio::thread_pool& thread_pool, F&& f ) {
+   auto async_thread_pool( boost::asio::io_context& thread_pool, F&& f ) {
      auto task = std::make_shared<std::packaged_task<decltype( f() )()>>( std::forward<F>( f ) );
      boost::asio::post( thread_pool, [task]() { (*task)(); } );
      return task->get_future();
diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp
index 6136580fa44..e61ddc854f7 100644
--- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp
@@ -15,6 +15,10 @@ namespace eosio { namespace chain {
 class transaction_metadata;
 using transaction_metadata_ptr = std::shared_ptr<transaction_metadata>;
 
+using signing_keys_future_value_type = std::tuple<chain_id_type, fc::microseconds, flat_set<public_key_type>>;
+using signing_keys_future_type = std::shared_future<signing_keys_future_value_type>;
+using recovery_keys_type = std::pair<fc::microseconds, const flat_set<public_key_type>&>;
+
 /**
  *  This data structure should store context-free cached data about a transaction such as
  *  packed/unpacked/compressed and recovered keys
 */
@@ -24,10 +28,7 @@ class transaction_metadata {
   public:
      transaction_id_type                                        id;
      transaction_id_type                                        signed_id;
      packed_transaction_ptr                                     packed_trx;
-     fc::microseconds                                           sig_cpu_usage;
-     optional<pair<chain_id_type, flat_set<public_key_type>>>   signing_keys;
-     std::future<std::tuple<chain_id_type, fc::microseconds, flat_set<public_key_type>>>
-                                                                signing_keys_future;
+     signing_keys_future_type                                   signing_keys_future;
      bool                                                       accepted = false;
      bool                                                       implicit = false;
      bool                                                       scheduled = false;
@@ -50,11 +51,13 @@ class transaction_metadata {
          signed_id = digest_type::hash(*packed_trx);
      }
 
-      const flat_set<public_key_type>& recover_keys( const chain_id_type& chain_id );
-
-      static void create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool,
-                                              const chain_id_type& chain_id, fc::microseconds time_limit );
+      // must be called from main application thread
+      static signing_keys_future_type
+      start_recover_keys( const transaction_metadata_ptr& mtrx, boost::asio::io_context& thread_pool,
+                          const chain_id_type& chain_id, fc::microseconds time_limit );
 
+      // start_recover_keys must be called first
+      recovery_keys_type recover_keys( const chain_id_type& chain_id );
 };
 
 } } // eosio::chain
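Reviewer note: key recovery is now a two-step handshake. `start_recover_keys` must run on the main thread and schedules recovery on the chain thread pool; a later `recover_keys` call consumes the shared future, or recovers synchronously when no future for that `chain_id` exists. A hypothetical caller following the same pattern `controller_impl` uses above — the helper name and surrounding plumbing are illustrative only:

```cpp
#include <eosio/chain/transaction_metadata.hpp>

using namespace eosio::chain;

// Hypothetical helper: schedule recovery early, consume the result later.
void queue_and_execute( const transaction_metadata_ptr& mtrx,
                        boost::asio::io_context& pool_ioc,
                        const chain_id_type& chain_id ) {
   // 1) Main thread: kick off signature recovery on the pool. Returns
   //    immediately with the shared_future the pool thread will fulfill.
   transaction_metadata::start_recover_keys( mtrx, pool_ioc, chain_id,
                                             fc::microseconds::maximum() );

   // ... later, when the transaction is actually executed ...

   // 2) Waits on the future if it matches chain_id, otherwise recovers
   //    synchronously; yields { sig cpu usage, recovered public keys }.
   recovery_keys_type keys = mtrx->recover_keys( chain_id );
   fc::microseconds                 sig_cpu  = keys.first;
   const flat_set<public_key_type>& pub_keys = keys.second;
   (void)sig_cpu; (void)pub_keys;
}
```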
diff --git a/libraries/chain/thread_utils.cpp b/libraries/chain/thread_utils.cpp
new file mode 100644
index 00000000000..1d8a2707c14
--- /dev/null
+++ b/libraries/chain/thread_utils.cpp
@@ -0,0 +1,40 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE.txt
+ */
+
+#include <eosio/chain/thread_utils.hpp>
+#include <fc/log/logger_config.hpp>
+
+namespace eosio { namespace chain {
+
+
+//
+// named_thread_pool
+//
+named_thread_pool::named_thread_pool( std::string name_prefix, size_t num_threads )
+: _thread_pool( num_threads )
+{
+   _ioc_work.emplace( boost::asio::make_work_guard( _ioc ) );
+   for( size_t i = 0; i < num_threads; ++i ) {
+      boost::asio::post( _thread_pool, [&ioc = _ioc, name_prefix, i]() {
+         std::string tn = name_prefix + "-" + std::to_string( i );
+         fc::set_os_thread_name( tn );
+         ioc.run();
+      } );
+   }
+}
+
+named_thread_pool::~named_thread_pool() {
+   stop();
+}
+
+void named_thread_pool::stop() {
+   _ioc_work.reset();
+   _ioc.stop();
+   _thread_pool.join();
+   _thread_pool.stop();
+}
+
+
+} } // eosio::chain
\ No newline at end of file
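Reviewer note: the order of operations in `stop()` matters — dropping the work guard lets `io_context::run()` return once queued work drains, `io_context::stop()` then forces out anything still pending, and only afterwards are the pool threads joined. The same idiom in isolation, in plain Boost.Asio with no EOSIO types:

```cpp
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <iostream>
#include <thread>

int main() {
   boost::asio::io_context ioc;
   // The guard keeps run() from returning while the queue is momentarily empty.
   auto work = boost::asio::make_work_guard( ioc );

   std::thread t( [&ioc]{ ioc.run(); } );
   boost::asio::post( ioc, []{ std::cout << "task ran\n"; } );

   work.reset(); // run() may now return once the queue drains
   ioc.stop();   // force return even if handlers remain (they are abandoned)
   t.join();     // finally reclaim the thread
}
```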
diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp
index 482b3c488f7..ddcbd2d934e 100644
--- a/libraries/chain/transaction_metadata.cpp
+++ b/libraries/chain/transaction_metadata.cpp
@@ -4,34 +4,39 @@
 namespace eosio { namespace chain {
 
-
-const flat_set<public_key_type>& transaction_metadata::recover_keys( const chain_id_type& chain_id ) {
+recovery_keys_type transaction_metadata::recover_keys( const chain_id_type& chain_id ) {
    // Unlikely for more than one chain_id to be used in one nodeos instance
-   if( !signing_keys || signing_keys->first != chain_id ) {
-      if( signing_keys_future.valid() ) {
-         std::tuple<chain_id_type, fc::microseconds, flat_set<public_key_type>> sig_keys = signing_keys_future.get();
-         if( std::get<0>( sig_keys ) == chain_id ) {
-            sig_cpu_usage = std::get<1>( sig_keys );
-            signing_keys.emplace( std::get<0>( sig_keys ), std::move( std::get<2>( sig_keys )));
-            return signing_keys->second;
-         }
+   if( signing_keys_future.valid() ) {
+      const std::tuple<chain_id_type, fc::microseconds, flat_set<public_key_type>>& sig_keys = signing_keys_future.get();
+      if( std::get<0>( sig_keys ) == chain_id ) {
+         return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) );
       }
-      flat_set<public_key_type> recovered_pub_keys;
-      sig_cpu_usage = packed_trx->get_signed_transaction().get_signature_keys( chain_id, fc::time_point::maximum(), recovered_pub_keys );
-      signing_keys.emplace( chain_id, std::move( recovered_pub_keys ));
    }
-   return signing_keys->second;
+
+   // shared_keys_future not created or different chain_id
+   std::promise<signing_keys_future_value_type> p;
+   flat_set<public_key_type> recovered_pub_keys;
+   const signed_transaction& trn = packed_trx->get_signed_transaction();
+   fc::microseconds cpu_usage = trn.get_signature_keys( chain_id, fc::time_point::maximum(), recovered_pub_keys );
+   p.set_value( std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys ) ) );
+   signing_keys_future = p.get_future().share();
+
+   const std::tuple<chain_id_type, fc::microseconds, flat_set<public_key_type>>& sig_keys = signing_keys_future.get();
+   return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) );
 }
 
-void transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx,
-      boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) {
-   if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created
-      return;
+signing_keys_future_type transaction_metadata::start_recover_keys( const transaction_metadata_ptr& mtrx,
+                                                                   boost::asio::io_context& thread_pool,
+                                                                   const chain_id_type& chain_id,
+                                                                   fc::microseconds time_limit )
+{
+   if( mtrx->signing_keys_future.valid() && std::get<0>( mtrx->signing_keys_future.get() ) == chain_id ) // already created
+      return mtrx->signing_keys_future;
 
    std::weak_ptr<transaction_metadata> mtrx_wp = mtrx;
    mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() {
      fc::time_point deadline = time_limit == fc::microseconds::maximum() ?
-                                     fc::time_point::maximum() : fc::time_point::now() + time_limit;
+                                  fc::time_point::maximum() : fc::time_point::now() + time_limit;
      auto mtrx = mtrx_wp.lock();
      fc::microseconds cpu_usage;
      flat_set<public_key_type> recovered_pub_keys;
@@ -41,6 +46,8 @@ void transaction_metadata::create_signing_keys_future( const transaction_metadat
      }
      return std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys ));
    } );
+
+   return mtrx->signing_keys_future;
 }
diff --git a/libraries/chainbase b/libraries/chainbase
index a2563660f08..eb2d0c28bc1 160000
--- a/libraries/chainbase
+++ b/libraries/chainbase
@@ -1 +1 @@
-Subproject commit a2563660f082622ab7a18778f5b91cc91f51c0c3
+Subproject commit eb2d0c28bc1f1328e8a5fc899291336ad487b084
diff --git a/libraries/fc b/libraries/fc
index 4fd24eaabd8..ae6ec564f0d 160000
--- a/libraries/fc
+++ b/libraries/fc
@@ -1 +1 @@
-Subproject commit 4fd24eaabd88eeaf5cf4ac1125bb48815d6e58df
+Subproject commit ae6ec564f0db6d3378348ef6b475042e332e612a
diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp
index 0bccbfe8034..cda857ff2c2 100644
--- a/libraries/testing/include/eosio/testing/tester.hpp
+++ b/libraries/testing/include/eosio/testing/tester.hpp
@@ -104,8 +104,8 @@ namespace eosio { namespace testing {
         void              open( const snapshot_reader_ptr& snapshot);
         bool              is_same_chain( base_tester& other );
 
-        virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0;
-        virtual signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0;
+        virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0;
+        virtual signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0;
         virtual signed_block_ptr finish_block() = 0;
         void              produce_blocks( uint32_t n = 1, bool empty = false );
         void              produce_blocks_until_end_of_round();
@@ -301,7 +301,7 @@ namespace eosio { namespace testing {
      void preactivate_all_builtin_protocol_features();
 
   protected:
-     signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false, uint32_t skip_flag = 0 );
+     signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false );
      void             _start_block(fc::time_point block_time);
      signed_block_ptr _finish_block();
@@ -334,13 +334,13 @@ namespace eosio { namespace testing {
        init(config, std::move(pfs));
      }
 
-     signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override {
-        return _produce_block(skip_time, false, skip_flag);
+     signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override {
+        return _produce_block(skip_time, false);
      }
 
-     signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override {
+     signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override {
        control->abort_block();
-        return _produce_block(skip_time, true, skip_flag);
+        return _produce_block(skip_time, true);
      }
 
      signed_block_ptr finish_block()override {
@@ -418,16 +418,16 @@ namespace eosio { namespace 
testing { init(config); } - signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override { - auto sb = _produce_block(skip_time, false, skip_flag | 2); + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + auto sb = _produce_block(skip_time, false); auto bs = validating_node->create_block_state_future( sb ); validating_node->push_block( bs ); return sb; } - signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ ) { - return _produce_block(skip_time, false, skip_flag | 2); + signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) { + return _produce_block(skip_time, false); } void validate_push_block(const signed_block_ptr& sb) { @@ -435,9 +435,9 @@ namespace eosio { namespace testing { validating_node->push_block( bs ); } - signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - auto sb = _produce_block(skip_time, true, skip_flag | 2); + auto sb = _produce_block(skip_time, true); auto bs = validating_node->create_block_state_future( sb ); validating_node->push_block( bs ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 82f91b90576..5d56814a7a5 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -236,7 +236,7 @@ namespace eosio { namespace testing { return b; } - signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs, uint32_t skip_flag) { + signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs) { auto head = control->head_block_state(); auto head_time = control->head_block_time(); auto next_time = head_time + skip_time; @@ -446,7 +446,13 @@ namespace eosio { namespace testing { { try { if( !control->is_building_block() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); - auto r = control->push_transaction( std::make_shared(std::make_shared(trx)), deadline, billed_cpu_time_us ); + + auto mtrx = std::make_shared( std::make_shared(trx) ); + auto time_limit = deadline == fc::time_point::maximum() ? + fc::microseconds::maximum() : + fc::microseconds( deadline - fc::time_point::now() ); + transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except ) throw *r->except; return r; @@ -466,7 +472,12 @@ namespace eosio { namespace testing { c = packed_transaction::zlib; } - auto r = control->push_transaction( std::make_shared(trx,c), deadline, billed_cpu_time_us ); + auto time_limit = deadline == fc::time_point::maximum() ? 
+ fc::microseconds::maximum() : + fc::microseconds( deadline - fc::time_point::now() ); + auto mtrx = std::make_shared(trx, c); + transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); if (no_throw) return r; if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except) throw *r->except; diff --git a/plugins/bnet_plugin/bnet_plugin.cpp b/plugins/bnet_plugin/bnet_plugin.cpp index d7a105b9835..76851e2b9db 100644 --- a/plugins/bnet_plugin/bnet_plugin.cpp +++ b/plugins/bnet_plugin/bnet_plugin.cpp @@ -51,6 +51,7 @@ #include #include +#include #include #include @@ -1398,7 +1399,13 @@ namespace eosio { my->_socket_threads.reserve( my->_num_threads ); for( auto i = 0; i < my->_num_threads; ++i ) { - my->_socket_threads.emplace_back( [&ioc]{ wlog( "start thread" ); ioc.run(); wlog( "end thread" ); } ); + my->_socket_threads.emplace_back( [&ioc, i]{ + std::string tn = "bnet-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + wlog( "start thread" ); + ioc.run(); + wlog( "end thread" ); + } ); } for( const auto& peer : my->_connect_to_peers ) { diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index e367d0b85d2..e245e3017fd 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -987,8 +987,6 @@ void chain_plugin::plugin_shutdown() { my->irreversible_block_connection.reset(); my->accepted_transaction_connection.reset(); my->applied_transaction_connection.reset(); - my->chain->get_thread_pool().stop(); - my->chain->get_thread_pool().join(); my->chain.reset(); } diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 7e205736874..3345fcdb68c 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -123,7 +124,6 @@ namespace eosio { using websocket_local_server_type = websocketpp::server; using websocket_server_tls_type = websocketpp::server>; using ssl_context_ptr = websocketpp::lib::shared_ptr; - using io_work_t = boost::asio::executor_work_guard; static bool verbose_http_errors = false; @@ -140,9 +140,7 @@ namespace eosio { websocket_server_type server; uint16_t thread_pool_size = 2; - optional thread_pool; - std::shared_ptr server_ioc; - optional server_ioc_work; + optional thread_pool; std::atomic bytes_in_flight{0}; size_t max_bytes_in_flight = 0; @@ -301,12 +299,12 @@ namespace eosio { con->defer_http_response(); bytes_in_flight += body.size(); app().post( appbase::priority::low, - [ioc = this->server_ioc, &bytes_in_flight = this->bytes_in_flight, handler_itr, + [&ioc = thread_pool->get_executor(), &bytes_in_flight = this->bytes_in_flight, handler_itr, resource{std::move( resource )}, body{std::move( body )}, con]() { try { handler_itr->second( resource, body, - [ioc{std::move(ioc)}, &bytes_in_flight, con]( int code, fc::variant response_body ) { - boost::asio::post( *ioc, [ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { + [&ioc, &bytes_in_flight, con]( int code, fc::variant response_body ) { + boost::asio::post( ioc, [response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { std::string json = fc::json::to_string( response_body ); response_body.clear(); const size_t json_size = json.size(); @@ -340,11 +338,11 @@ namespace eosio { void 
create_server_for_endpoint(const tcp::endpoint& ep, websocketpp::server>& ws) { try { ws.clear_access_channels(websocketpp::log::alevel::all); - ws.init_asio(&(*server_ioc)); + ws.init_asio( &thread_pool->get_executor() ); ws.set_reuse_addr(true); ws.set_max_http_body_size(max_body_size); // capture server_ioc shared_ptr in http handler to keep it alive while in use - ws.set_http_handler([&, ioc = this->server_ioc](connection_hdl hdl) { + ws.set_http_handler([&](connection_hdl hdl) { handle_http_request>(ws.get_con_from_hdl(hdl)); }); } catch ( const fc::exception& e ){ @@ -518,12 +516,7 @@ namespace eosio { void http_plugin::plugin_startup() { - my->thread_pool.emplace( my->thread_pool_size ); - my->server_ioc = std::make_shared(); - my->server_ioc_work.emplace( boost::asio::make_work_guard(*my->server_ioc) ); - for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); - } + my->thread_pool.emplace( "http", my->thread_pool_size ); if(my->listen_endpoint) { try { @@ -547,10 +540,10 @@ namespace eosio { if(my->unix_endpoint) { try { my->unix_server.clear_access_channels(websocketpp::log::alevel::all); - my->unix_server.init_asio(&(*my->server_ioc)); + my->unix_server.init_asio( &my->thread_pool->get_executor() ); my->unix_server.set_max_http_body_size(my->max_body_size); my->unix_server.listen(*my->unix_endpoint); - my->unix_server.set_http_handler([&, ioc = my->server_ioc](connection_hdl hdl) { + my->unix_server.set_http_handler([&, &ioc = my->thread_pool->get_executor()](connection_hdl hdl) { my->handle_http_request( my->unix_server.get_con_from_hdl(hdl)); }); my->unix_server.start_accept(); @@ -610,12 +603,7 @@ namespace eosio { if(my->unix_server.is_listening()) my->unix_server.stop_listening(); - if( my->server_ioc_work ) - my->server_ioc_work->reset(); - if( my->server_ioc ) - my->server_ioc->stop(); if( my->thread_pool ) { - my->thread_pool->join(); my->thread_pool->stop(); } } diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 3b3b39f4f84..bf66e570730 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -10,17 +10,17 @@ #include #include +#include #include #include #include #include #include -#include -#include -#include #include +#include +#include #include #include @@ -164,9 +164,9 @@ class mongo_db_plugin_impl { std::deque block_state_process_queue; std::deque irreversible_block_state_queue; std::deque irreversible_block_state_process_queue; - boost::mutex mtx; - boost::condition_variable condition; - boost::thread consume_thread; + std::mutex mtx; + std::condition_variable condition; + std::thread consume_thread; std::atomic_bool done{false}; std::atomic_bool startup{true}; fc::optional chain_id; @@ -292,7 +292,7 @@ bool mongo_db_plugin_impl::filter_include( const transaction& trx ) const template void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { - boost::mutex::scoped_lock lock( mtx ); + std::unique_lock lock( mtx ); auto queue_size = queue.size(); if( queue_size > max_queue_size ) { lock.unlock(); @@ -300,7 +300,7 @@ void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { queue_sleep_time += 10; if( queue_sleep_time > 1000 ) wlog("queue size: ${q}", ("q", queue_size)); - boost::this_thread::sleep_for( boost::chrono::milliseconds( queue_sleep_time )); + std::this_thread::sleep_for( std::chrono::milliseconds( queue_sleep_time )); lock.lock(); } else { 
queue_sleep_time -= 10; @@ -408,7 +408,7 @@ void mongo_db_plugin_impl::consume_blocks() { _account_controls = mongo_conn[db_name][account_controls_col]; while (true) { - boost::mutex::scoped_lock lock(mtx); + std::unique_lock lock(mtx); while ( transaction_metadata_queue.empty() && transaction_trace_queue.empty() && block_state_queue.empty() && @@ -781,8 +781,8 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti } string signing_keys_json; - if( t->signing_keys.valid() ) { - signing_keys_json = fc::json::to_string( t->signing_keys->second ); + if( t->signing_keys_future.valid() ) { + signing_keys_json = fc::json::to_string( std::get<2>( t->signing_keys_future.get() ) ); } else { flat_set keys; trx.get_signature_keys( *chain_id, fc::time_point::maximum(), keys, false ); @@ -1456,39 +1456,44 @@ void mongo_db_plugin_impl::init() { } try { + // MongoDB administrators (to enable sharding) : + // 1. enableSharding database (default to EOS) + // 2. shardCollection: blocks, action_traces, transaction_traces, especially action_traces + // 3. Compound index with shard key (default to _id below), to improve query performance. + // blocks indexes auto blocks = mongo_conn[db_name][blocks_col]; - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" )); auto block_states = mongo_conn[db_name][block_states_col]; - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" )); // accounts indexes - accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" )); + accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1, "_id" : 1 })xxx" )); // transactions indexes auto trans = mongo_conn[db_name][trans_col]; - trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" )); + trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1, "_id" : 1 })xxx" )); auto trans_trace = mongo_conn[db_name][trans_traces_col]; - trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1 })xxx" )); + trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1, "_id" : 1 })xxx" )); // action traces indexes auto action_traces = mongo_conn[db_name][action_traces_col]; - action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); + action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); // pub_keys indexes auto pub_keys = mongo_conn[db_name][pub_keys_col]; - pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1 })xxx" )); - pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1 })xxx" )); + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1, "_id" : 1 })xxx" )); + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1, "_id" : 1 })xxx" )); // account_controls indexes auto account_controls = mongo_conn[db_name][account_controls_col]; account_controls.create_index( - bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1 })xxx" )); - 
account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1 })xxx" )); + bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1, "_id" : 1 })xxx" )); + account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1, "_id" : 1 })xxx" )); } catch (...) { handle_mongo_exception( "create indexes", __LINE__ ); @@ -1517,7 +1522,10 @@ void mongo_db_plugin_impl::init() { ilog("starting db plugin thread"); - consume_thread = boost::thread([this] { consume_blocks(); }); + consume_thread = std::thread( [this] { + fc::set_os_thread_name( "mongodb" ); + consume_blocks(); + } ); startup = false; } diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 43de70330e3..94a9848ce7e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -18,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -150,11 +152,8 @@ namespace eosio { channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 1; // currently used by server_ioc - optional thread_pool; - std::shared_ptr server_ioc; - optional server_ioc_work; - + uint16_t thread_pool_size = 1; + optional thread_pool; void connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); @@ -489,7 +488,7 @@ namespace eosio { peer_block_state_index blk_state; transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us - std::shared_ptr server_ioc; // keep ioc alive + boost::asio::io_context& server_ioc; boost::asio::io_context::strand strand; socket_ptr socket; @@ -723,9 +722,9 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), - server_ioc( my_impl->server_ioc ), + server_ioc( my_impl->thread_pool->get_executor() ), strand( app().get_io_service() ), - socket( std::make_shared( std::ref( *my_impl->server_ioc ))), + socket( std::make_shared( my_impl->thread_pool->get_executor() ) ), node_id(), last_handshake_recv(), last_handshake_sent(), @@ -749,7 +748,7 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), - server_ioc( my_impl->server_ioc ), + server_ioc( my_impl->thread_pool->get_executor() ), strand( app().get_io_service() ), socket( s ), node_id(), @@ -776,8 +775,8 @@ namespace eosio { void connection::initialize() { auto *rnd = node_id.data(); rnd[0] = 0; - response_expected.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); - read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); + response_expected.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + read_delay_timer.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); } bool connection::connected() { @@ -819,7 +818,6 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); if( read_delay_timer ) read_delay_timer->cancel(); - pending_message_buffer.reset(); } void connection::txn_send_pending(const vector& ids) { @@ -1879,6 +1877,7 @@ namespace eosio { auto current_endpoint = *endpoint_itr; ++endpoint_itr; c->connecting = true; + c->pending_message_buffer.reset(); connection_wptr weak_conn = c; c->socket->async_connect( current_endpoint, boost::asio::bind_executor( c->strand, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { @@ -1925,9 +1924,9 @@ namespace 
eosio { void net_plugin_impl::start_listen_loop() { - auto socket = std::make_shared( std::ref( *server_ioc ) ); - acceptor->async_accept( *socket, [socket, this, ioc = server_ioc]( boost::system::error_code ec ) { - app().post( priority::low, [socket, this, ec, ioc{std::move(ioc)}]() { + auto socket = std::make_shared( my_impl->thread_pool->get_executor() ); + acceptor->async_accept( *socket, [socket, this]( boost::system::error_code ec ) { + app().post( priority::low, [socket, this, ec]() { if( !ec ) { uint32_t visitors = 0; uint32_t from_addr = 0; @@ -2054,7 +2053,7 @@ namespace eosio { [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); - if (!conn) { + if (!conn || !conn->socket || !conn->socket->is_open()) { return; } @@ -2654,8 +2653,8 @@ namespace eosio { } void net_plugin_impl::start_monitors() { - connector_check.reset(new boost::asio::steady_timer( *server_ioc )); - transaction_check.reset(new boost::asio::steady_timer( *server_ioc )); + connector_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + transaction_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); start_conn_timer(connector_period, std::weak_ptr()); start_txn_timer(); } @@ -3004,15 +3003,10 @@ namespace eosio { void net_plugin::plugin_startup() { my->producer_plug = app().find_plugin(); - my->thread_pool.emplace( my->thread_pool_size ); - my->server_ioc = std::make_shared(); - my->server_ioc_work.emplace( boost::asio::make_work_guard( *my->server_ioc ) ); // currently thread_pool only used for server_ioc - for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); - } + my->thread_pool.emplace( "net", my->thread_pool_size ); - my->resolver = std::make_shared( std::ref( *my->server_ioc )); + my->resolver = std::make_shared( my->thread_pool->get_executor() ); if( my->p2p_address.size() > 0 ) { auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); @@ -3021,7 +3015,7 @@ namespace eosio { my->listen_endpoint = *my->resolver->resolve( query ); - my->acceptor.reset( new tcp::acceptor( *my->server_ioc ) ); + my->acceptor.reset( new tcp::acceptor( my_impl->thread_pool->get_executor() ) ); if( !my->p2p_server_address.empty() ) { my->p2p_address = my->p2p_server_address; @@ -3041,7 +3035,7 @@ namespace eosio { } } - my->keepalive_timer.reset( new boost::asio::steady_timer( *my->server_ioc ) ); + my->keepalive_timer.reset( new boost::asio::steady_timer( my->thread_pool->get_executor() ) ); my->ticker(); if( my->acceptor ) { @@ -3086,9 +3080,6 @@ namespace eosio { void net_plugin::plugin_shutdown() { try { fc_ilog( logger, "shutdown.." 
); - if( my->server_ioc_work ) - my->server_ioc_work->reset(); - if( my->connector_check ) my->connector_check->cancel(); if( my->transaction_check ) @@ -3110,10 +3101,7 @@ namespace eosio { my->connections.clear(); } - if( my->server_ioc ) - my->server_ioc->stop(); if( my->thread_pool ) { - my->thread_pool->join(); my->thread_pool->stop(); } fc_ilog( logger, "exit shutdown" ); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index cf39d0bb8df..053064f2ce1 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -8,9 +8,11 @@ #include #include #include +#include #include #include +#include #include #include @@ -132,7 +134,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _producer_watermarks; pending_block_mode _pending_block_mode; transaction_id_with_expiry_index _persistent_transactions; - fc::optional _thread_pool; + fc::optional _thread_pool; int32_t _max_transaction_time_ms; fc::microseconds _max_irreversible_block_age_us; @@ -351,10 +353,11 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, trx, persist_until_expired, next]() { - if( trx->signing_keys_future.valid() ) - trx->signing_keys_future.wait(); + signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, _thread_pool->get_executor(), + chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); + boost::asio::post( _thread_pool->get_executor(), [self = this, future, trx, persist_until_expired, next]() { + if( future.valid() ) + future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { self->process_incoming_transaction_async( trx, persist_until_expired, next ); }); @@ -688,7 +691,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ auto thread_pool_size = options.at( "producer-threads" ).as(); EOS_ASSERT( thread_pool_size > 0, plugin_config_exception, "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); - my->_thread_pool.emplace( thread_pool_size ); + my->_thread_pool.emplace( "prod", thread_pool_size ); if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); @@ -785,7 +788,6 @@ void producer_plugin::plugin_shutdown() { } if( my->_thread_pool ) { - my->_thread_pool->join(); my->_thread_pool->stop(); } my->_accepted_block_connection.reset(); diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 60383175387..670114ea85c 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -28,9 +28,13 @@ using namespace eosio::testing; namespace eosio { namespace detail { struct txn_test_gen_empty {}; + struct txn_test_gen_status { + string status; + }; }} FC_REFLECT(eosio::detail::txn_test_gen_empty, ); +FC_REFLECT(eosio::detail::txn_test_gen_status, (status)); namespace eosio { @@ -53,8 +57,8 @@ using io_work_t = boost::asio::executor_work_guard(); \ - api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ - eosio::detail::txn_test_gen_empty result; + auto status = 
api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ + eosio::detail::txn_test_gen_status result = { status }; #define INVOKE_V_R_R(api_handle, call_name, in_param0, in_param1) \ const auto& vs = fc::json::json::from_string(body).as(); \ @@ -101,6 +105,9 @@ struct txn_test_gen_plugin_impl { uint16_t thread_pool_size; optional thread_pool; std::shared_ptr timer; + name newaccountA; + name newaccountB; + name newaccountT; void push_next_transaction(const std::shared_ptr>& trxs, const std::function& next ) { chain_plugin& cp = app().get_plugin(); @@ -127,13 +134,11 @@ struct txn_test_gen_plugin_impl { } void create_test_accounts(const std::string& init_name, const std::string& init_priv_key, const std::function& next) { + ilog("create_test_accounts"); std::vector trxs; trxs.reserve(2); try { - name newaccountA("txn.test.a"); - name newaccountB("txn.test.b"); - name newaccountC("txn.test.t"); name creator(init_name); abi_def currency_abi_def = fc::json::from_string(contracts::eosio_token_abi().data()).as(); @@ -170,73 +175,85 @@ struct txn_test_gen_plugin_impl { trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountB, owner_auth, active_auth}); } - //create "txn.test.t" account + //create "T" account { auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountC, owner_auth, active_auth}); + trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountT, owner_auth, active_auth}); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.sign(creator_priv_key, chainid); trxs.emplace_back(std::move(trx)); } - //set txn.test.t contract to eosio.token & initialize it + //set newaccountT contract to eosio.token & initialize it { signed_transaction trx; vector wasm = contracts::eosio_token_wasm(); setcode handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.code.assign(wasm.begin(), wasm.end()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); { setabi handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.abi = fc::raw::pack(json::from_string(contracts::eosio_token_abi().data()).as()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(create); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("create", fc::json::from_string("{\"issuer\":\"txn.test.t\",\"maximum_supply\":\"1000000000.0000 CUR\"}}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("create", + fc::json::from_string(fc::format_string("{\"issuer\":\"${issuer}\",\"maximum_supply\":\"1000000000.0000 CUR\"}}", + fc::mutable_variant_object()("issuer",newaccountT.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(issue); - act.authorization = 
vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("issue", fc::json::from_string("{\"to\":\"txn.test.t\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("issue", + fc::json::from_string(fc::format_string("{\"to\":\"${to}\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("to",newaccountT.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.a\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountA.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.b\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountB.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.max_net_usage_words = 5000; trx.sign(txn_test_receiver_C_priv_key, chainid); @@ -250,15 +267,17 @@ struct txn_test_gen_plugin_impl { push_transactions(std::move(trxs), next); } - void start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + string start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + ilog("Starting transaction test plugin"); if(running) - throw fc::exception(fc::invalid_operation_exception_code); + return "start_generation already running"; if(period < 1 || period > 2500) - throw fc::exception(fc::invalid_operation_exception_code); + return "period must be between 1 and 2500"; if(batch_size < 1 || batch_size > 250) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be between 1 and 250"; if(batch_size & 1) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be even"; + ilog("Starting transaction test plugin valid"); running = true; @@ -266,20 +285,20 @@ struct txn_test_gen_plugin_impl { auto abi_serializer_max_time = app().get_plugin().get_abi_serializer_max_time(); abi_serializer 
eosio_token_serializer{fc::json::from_string(contracts::eosio_token_abi().data()).as(), abi_serializer_max_time}; //create the actions here - act_a_to_b.account = N(txn.test.t); + act_a_to_b.account = newaccountT; act_a_to_b.name = N(transfer); - act_a_to_b.authorization = vector{{name("txn.test.a"),config::active_name}}; - act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.a\",\"to\":\"txn.test.b\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), + act_a_to_b.authorization = vector{{newaccountA,config::active_name}}; + act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("from",newaccountA.to_string())("to",newaccountB.to_string())("l", salt))), abi_serializer_max_time); - act_b_to_a.account = N(txn.test.t); + act_b_to_a.account = newaccountT; act_b_to_a.name = N(transfer); - act_b_to_a.authorization = vector{{name("txn.test.b"),config::active_name}}; - act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.b\",\"to\":\"txn.test.a\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), + act_b_to_a.authorization = vector{{newaccountB,config::active_name}}; + act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("from",newaccountB.to_string())("to",newaccountA.to_string())("l", salt))), abi_serializer_max_time); timer_timeout = period; @@ -299,6 +318,7 @@ struct txn_test_gen_plugin_impl { boost::asio::post( *gen_ioc, [this]() { arm_timer(boost::asio::high_resolution_timer::clock_type::now()); }); + return "success"; } void arm_timer(boost::asio::high_resolution_timer::time_point s) { @@ -371,6 +391,7 @@ struct txn_test_gen_plugin_impl { next(e.dynamic_copy_exception()); } + ilog("send ${c} transactions", ("c",trxs.size())); push_transactions(std::move(trxs), next); } @@ -414,6 +435,7 @@ void txn_test_gen_plugin::set_program_options(options_description&, options_desc cfg.add_options() ("txn-reference-block-lag", bpo::value()->default_value(0), "Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block)") ("txn-test-gen-threads", bpo::value()->default_value(2), "Number of worker threads in txn_test_gen thread pool") + ("txn-test-gen-account-prefix", bpo::value()->default_value("txn.test."), "Prefix to use for accounts generated and used by this plugin") ; } @@ -422,6 +444,10 @@ void txn_test_gen_plugin::plugin_initialize(const variables_map& options) { my.reset( new txn_test_gen_plugin_impl ); my->txn_reference_block_lag = options.at( "txn-reference-block-lag" ).as(); my->thread_pool_size = options.at( "txn-test-gen-threads" ).as(); + const std::string thread_pool_account_prefix = options.at( "txn-test-gen-account-prefix" ).as(); + my->newaccountA = thread_pool_account_prefix + "a"; + my->newaccountB = thread_pool_account_prefix + "b"; + my->newaccountT = thread_pool_account_prefix + "t"; EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception, "txn-test-gen-threads ${num} must be greater than 0", ("num", 
my->thread_pool_size) ); } FC_LOG_AND_RETHROW() diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 8a3a75a721b..7e6bfbaf7b3 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -247,6 +247,7 @@ class tn_node_def { vector producers; eosd_def* instance; string gelf_endpoint; + bool dont_start = false; }; void @@ -390,6 +391,7 @@ string producer_names::producer_name(unsigned int producer_number) { struct launcher_def { bool force_overwrite; size_t total_nodes; + size_t unstarted_nodes; size_t prod_nodes; size_t producers; size_t next_node; @@ -481,6 +483,7 @@ launcher_def::set_options (bpo::options_description &cfg) { cfg.add_options() ("force,f", bpo::bool_switch(&force_overwrite)->default_value(false), "Force overwrite of existing configuration files and erase blockchain") ("nodes,n",bpo::value(&total_nodes)->default_value(1),"total number of nodes to configure and launch") + ("unstarted-nodes",bpo::value(&unstarted_nodes)->default_value(0),"total number of nodes to configure, but not launch") ("pnodes,p",bpo::value(&prod_nodes)->default_value(1),"number of nodes that contain one or more producers") ("producers",bpo::value(&producers)->default_value(21),"total number of non-bios producer instances in this network") ("mode,m",bpo::value>()->multitoken()->default_value({"any"}, "any"),"connection mode, combination of \"any\", \"producers\", \"specified\", \"none\"") @@ -634,7 +637,31 @@ launcher_def::initialize (const variables_map &vmap) { if (prod_nodes > (producers + 1)) prod_nodes = producers; if (prod_nodes > total_nodes) - total_nodes = prod_nodes; + total_nodes = prod_nodes + unstarted_nodes; + else if (total_nodes < prod_nodes + unstarted_nodes) { + cerr << "ERROR: if provided, \"--nodes\" must be equal to or greater than the number of nodes indicated by \"--pnodes\" and \"--unstarted-nodes\"."
<< endl; + exit (-1); + } + + if (vmap.count("specific-num")) { + const auto specific_nums = vmap["specific-num"].as>(); + const auto specific_args = vmap["specific-nodeos"].as>(); + if (specific_nums.size() != specific_args.size()) { + cerr << "ERROR: every specific-num argument must be paired with a specific-nodeos argument" << endl; + exit (-1); + } + // don't include bios + const auto allowed_nums = total_nodes - 1; + for(uint i = 0; i < specific_nums.size(); ++i) + { + const auto& num = specific_nums[i]; + if (num >= allowed_nums) { + cerr << "\"--specific-num\" provided value=" << num << " must be less than the number of non-bios nodes (" << allowed_nums << ") implied by \"--nodes\"" << endl; + exit (-1); + } + specific_nodeos_args[num] = specific_args[i]; + } + } char* erd_env_var = getenv ("EOSIO_HOME"); if (erd_env_var == nullptr || std::string(erd_env_var).empty()) { @@ -733,7 +760,7 @@ launcher_def::generate () { write_dot_file (); if (!output.empty()) { - bfs::path savefile = output; + bfs::path savefile = output; { bfs::ofstream sf (savefile); sf << fc::json::to_pretty_string (network) << endl; @@ -754,6 +781,7 @@ launcher_def::generate () { } return false; } + return true; } @@ -864,6 +892,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; + const auto to_not_start_node = total_nodes - unstarted_nodes - 1; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; @@ -894,6 +923,7 @@ ++producer_number; } } + node.dont_start = i >= to_not_start_node; } node.gelf_endpoint = gelf_endpoint; network.nodes[node.name] = move(node); @@ -1564,6 +1594,10 @@ launcher_def::launch (eosd_def &instance, string &gts) { } if (!host->is_local()) { + if (instance.node->dont_start) { + cerr << "Unable to use \"unstarted-nodes\" with a remote host" << endl; + exit (-1); + } string cmdl ("cd "); cmdl += host->eosio_home + "; nohup " + eosdcmd + " > " + reout.string() + " 2> " + reerr.string() + "& echo $!
> " + pidf.string() @@ -1578,7 +1612,7 @@ launcher_def::launch (eosd_def &instance, string &gts) { string cmd = "cd " + host->eosio_home + "; kill -15 $(cat " + pidf.string() + ")"; format_ssh (cmd, host->host_name, info.kill_cmd); } - else { + else if (!instance.node->dont_start) { cerr << "spawning child, " << eosdcmd << endl; bp::child c(eosdcmd, bp::std_out > reout, bp::std_err > reerr ); @@ -1600,6 +1634,16 @@ } c.detach(); } + else { + cerr << "not spawning child, " << eosdcmd << endl; + + const bfs::path dd = instance.data_dir_name; + const bfs::path start_file = dd / "start.cmd"; + bfs::ofstream sf (start_file); + + sf << eosdcmd << endl; + sf.close(); + } last_run.running_nodes.emplace_back (move(info)); } @@ -1629,20 +1673,35 @@ launcher_def::kill (launch_modes mode, string sig_opt) { case LM_LOCAL: case LM_REMOTE : { bfs::path source = "last_run.json"; - fc::json::from_file(source).as(last_run); - for (auto &info : last_run.running_nodes) { - if (mode == LM_ALL || (info.remote && mode == LM_REMOTE) || - (!info.remote && mode == LM_LOCAL)) { - if (info.pid_file.length()) { - string pid; - fc::json::from_file(info.pid_file).as(pid); - string kill_cmd = "kill " + sig_opt + " " + pid; - boost::process::system (kill_cmd); - } - else { - boost::process::system (info.kill_cmd); - } - } + try { + fc::json::from_file( source ).as( last_run ); + for( auto& info : last_run.running_nodes ) { + if( mode == LM_ALL || (info.remote && mode == LM_REMOTE) || + (!info.remote && mode == LM_LOCAL) ) { + try { + if( info.pid_file.length() ) { + string pid; + fc::json::from_file( info.pid_file ).as( pid ); + string kill_cmd = "kill " + sig_opt + " " + pid; + boost::process::system( kill_cmd ); + } else { + boost::process::system( info.kill_cmd ); + } + } catch( fc::exception& fce ) { + cerr << "unable to kill fc::exception=" << fce.to_detail_string() << endl; + } catch( std::exception& stde ) { + cerr << "unable to kill std::exception=" << stde.what() << endl; + } catch( ... ) { + cerr << "Unable to kill" << endl; + } + } + } + } catch( fc::exception& fce ) { + cerr << "unable to open " << source << " fc::exception=" << fce.to_detail_string() << endl; + } catch( std::exception& stde ) { + cerr << "unable to open " << source << " std::exception=" << stde.what() << endl; + } catch( ...
) { + cerr << "Unable to open " << source << endl; } } } @@ -2046,7 +2105,7 @@ FC_REFLECT( eosd_def, (p2p_endpoint) ) // @ignore instance, gelf_endpoint -FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers) ) +FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers)(dont_start) ) FC_REFLECT( testnet_def, (name)(ssh_helper)(nodes) ) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index d3128903097..a97ceaa5058 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -30,9 +30,8 @@ # https://github.com/EOSIO/eos/blob/master/LICENSE ########################################################################## -VERSION=2.1 # Build script version +VERSION=2.2 # Build script version CMAKE_BUILD_TYPE=Release -export DISK_MIN=20 DOXYGEN=false ENABLE_COVERAGE_TESTING=false CORE_SYMBOL_NAME="SYS" @@ -75,6 +74,7 @@ export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm export DOXYGEN_VERSION=1_8_14 export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION} export TINI_VERSION=0.18.0 +export DISK_MIN=5 # Setup directories mkdir -p $SRC_LOCATION diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 7a16e4486e9..ff655496a7b 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -8,13 +8,13 @@ DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) -if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then +if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1 DEP_ARRAY=( sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ - bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python34 python34-devel \ + bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python36 python36-devel \ libedit-devel ncurses-devel swig wget file libcurl-devel libusb1-devel ) -else +else # Amazonlinux2 DEP_ARRAY=( git procps-ng util-linux gcc gcc-c++ autoconf automake libtool make bzip2 \ bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel libusbx-devel \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index e779f2957fd..98ae1a31605 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -121,7 +121,7 @@ printf "\\n" DEP_ARRAY=( git autoconf automake libtool make bzip2 doxygen graphviz \ bzip2-devel openssl-devel gmp-devel \ - ocaml libicu-devel python python-devel python33 \ + ocaml libicu-devel python python-devel rh-python36 \ gettext-devel file sudo libusbx-devel libcurl-devel ) COUNT=1 @@ -160,10 +160,11 @@ else printf " - No required YUM dependencies to install.\\n\\n" fi -if [ -d /opt/rh/python33 ]; then - printf "Enabling python33...\\n" - source /opt/rh/python33/enable || exit 1 - printf " - Python33 successfully enabled!\\n" +export PYTHON3PATH="/opt/rh/rh-python36" +if [ -d $PYTHON3PATH ]; then + printf "Enabling python36...\\n" + source $PYTHON3PATH/enable || exit 1 + printf " - Python36 successfully enabled!\\n" fi printf "\\n" @@ -190,7 +191,7 @@ if [ $? 
-ne 0 ]; then exit -1; fi printf "\\n" -export CPATH="$CPATH:/opt/rh/python33/root/usr/include/python3.3m" # m on the end causes problems with boost finding python3 +export CPATH="${CPATH}:${PYTHON3PATH}/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then @@ -299,7 +300,7 @@ cd .. printf "\\n" function print_instructions() { - printf "source /opt/rh/python33/enable\\n" + printf "source ${PYTHON3PATH}/enable\\n" printf "source /opt/rh/devtoolset-7/enable\\n" return 0 } diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index e418be9a717..224b0839f1d 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -171,7 +171,8 @@ if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ && cd $BOOST_ROOT \ && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) install \ + && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) --with-iostreams --with-date_time --with-filesystem \ + --with-system --with-program_options --with-chrono --with-test install \ && cd .. \ && rm -f boost_$BOOST_VERSION.tar.bz2 \ && rm -rf $BOOST_LINK_LOCATION \ diff --git a/scripts/submodule_check.sh b/scripts/submodule_check.sh new file mode 100755 index 00000000000..b9ec13204fa --- /dev/null +++ b/scripts/submodule_check.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +REPO_DIR=`mktemp -d` +git clone "$BUILDKITE_REPO" "$REPO_DIR" +cd "$REPO_DIR" +git submodule update --init --recursive + +declare -A PR_MAP +declare -A BASE_MAP + +echo "getting submodule info for $BUILDKITE_BRANCH" +git checkout "$BUILDKITE_BRANCH" &> /dev/null +git submodule update --init &> /dev/null +while read -r a b; do + PR_MAP[$a]=$b +done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`') + +echo "getting submodule info for $BUILDKITE_PULL_REQUEST_BASE_BRANCH" +git checkout "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" &> /dev/null +git submodule update --init &> /dev/null +while read -r a b; do + BASE_MAP[$a]=$b +done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`') + +for k in "${!BASE_MAP[@]}"; do + base_ts=${BASE_MAP[$k]} + pr_ts=${PR_MAP[$k]} + echo "submodule $k" + echo "  timestamp on $BUILDKITE_BRANCH: $pr_ts" + echo "  timestamp on $BUILDKITE_PULL_REQUEST_BASE_BRANCH: $base_ts" + if (( $pr_ts < $base_ts)); then + echo "$k is older on $BUILDKITE_BRANCH than $BUILDKITE_PULL_REQUEST_BASE_BRANCH; investigating..."
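+    # NOTE (explanatory comment, not part of the original script): an older
+    # timestamp alone is not proof of a regression; the check below lists the
+    # commits unique to $BUILDKITE_BRANCH, prints the files each one touched,
+    # and greps for this submodule's path. Only when a PR commit actually moved
+    # the submodule pointer backwards does the script fail the build.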
+ + if for c in `git log $BUILDKITE_BRANCH ^$BUILDKITE_PULL_REQUEST_BASE_BRANCH --pretty=format:"%H"`; do git show --pretty="" --name-only $c; done | grep -q "^$k$"; then + echo "ERROR: $k has regressed" + exit 1 + else + echo "$k was not in the diff; no regression detected" + fi + fi +done diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 96d51701655..ba87969bb80 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -35,6 +35,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CM configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_startup_catchup.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_startup_catchup.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_forked_chain_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_forked_chain_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY) @@ -54,12 +55,8 @@ add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_ou add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_sanity_bnet_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_run_bnet_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_run_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) @@ -75,8 +72,6 @@ set_property(TEST nodeos_protocol_feature_test PROPERTY LABELS nonparallelizable add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME distributed-transactions-bnet-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 --p2p-plugin bnet -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST distributed-transactions-bnet-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -93,8 +88,6 @@ add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh WORKING_DIRECTORY ${C # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_sanity_bnet_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_remote_lr_test COMMAND tests/nodeos_run_remote_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -109,15 +102,16 @@ set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_voting_bnet_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) - add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_tests_properties(nodeos_startup_catchup_lr_test PROPERTIES TIMEOUT 3000) +set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) + set(ALTERNATE_VERSION_LABELS_FILE "${CMAKE_BINARY_DIR}/tests/multiversion_paths.conf") add_test(NAME nodeos_multiple_version_protocol_feature_mv_test COMMAND tests/nodeos_multiple_version_protocol_feature_test.py @@ -141,7 +135,7 @@ if(ENABLE_COVERAGE_TESTING) endif() # NOT GENHTML_PATH # no spaces allowed within tests list - set(ctest_tests 'plugin_test|p2p_dawn515_test|nodeos_run_test|bnet_nodeos_run_test|distributed-transactions-test|restart-scenarios-test_resync') + set(ctest_tests 'plugin_test|p2p_dawn515_test|nodeos_run_test|distributed-transactions-test|restart-scenarios-test_resync') set(ctest_exclude_tests 'nodeos_run_remote_test|nodeos_run_test-mongodb|distributed-transactions-remote-test|restart-scenarios-test_replay') # Setup target diff --git a/tests/Cluster.py 
b/tests/Cluster.py index 3476bf9de03..3ccbabe43aa 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -45,8 +45,6 @@ class Cluster(object): __BiosPort=8788 __LauncherCmdArr=[] __bootlog="eosio-ignition-wd/bootlog.txt" - __configDir="etc/eosio/" - __dataDir="var/lib/" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. If not load the wallet plugin @@ -67,6 +65,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 """ self.accounts={} self.nodes={} + self.unstartedNodes=[] self.localCluster=localCluster self.wallet=None self.walletd=walletd @@ -143,12 +142,13 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, + def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, onlySetProds=False, - pfSetupPolicy=PFSetupPolicy.FULL, alternateVersionLabelsFile=None, associatedNodeLabels=None): + pfSetupPolicy=PFSetupPolicy.FULL, alternateVersionLabelsFile=None, associatedNodeLabels=None, loadSystemContract=True): """Launch cluster. pnodes: producer nodes count - totalNodes: producer + non-producer nodes count + unstartedNodes: non-producer nodes that are configured into the launch, but not started. Should be included in totalNodes. + totalNodes: producer + non-producer nodes + unstarted non-producer nodes count prodCount: producers per producer node count topo: cluster topology (as defined by launcher, and "bridge" shape that is specific to this launch method) delay: delay between individual nodes launch (as defined by launcher) @@ -165,6 +165,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne pfSetupPolicy: determine the protocol feature setup policy (none, preactivate_feature_only, or full) alternateVersionLabelsFile: Supply an alternate version labels file to use with associatedNodeLabels. associatedNodeLabels: Supply a dictionary of node numbers to use an alternate label for a specific node. + loadSystemContract: indicate whether the eosio.system contract should be loaded (setting this to False causes useBiosBootFile to be treated as False) """ assert(isinstance(topo, str)) assert PFSetupPolicy.isValid(pfSetupPolicy) @@ -187,6 +188,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if pnodes > totalNodes: raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d)." % (totalNodes, pnodes)) + if pnodes + unstartedNodes > totalNodes: + raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d) + unstartedNodes(%d)." 
% (totalNodes, pnodes, unstartedNodes)) if self.walletMgr is None: self.walletMgr=WalletMgr(True) @@ -206,14 +209,14 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne tries = tries - 1 time.sleep(2) - cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s" % ( + cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s --unstarted-nodes %s" % ( Utils.EosLauncherPath, pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], - p2pPlugin, producerFlag) + p2pPlugin, producerFlag, unstartedNodes) cmdArr=cmd.split() if self.staging: cmdArr.append("--nogen") - nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) + nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on \"*\" --p2p-max-nodes-per-host %d" % (totalNodes) if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if self.enableMongo: @@ -281,7 +284,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne # of two entries - [ , ] with first being the name and second being the node definition shapeFileNodes = shapeFileObject["nodes"] - numProducers=totalProducers if totalProducers is not None else totalNodes + numProducers=totalProducers if totalProducers is not None else (totalNodes - unstartedNodes) maxProducers=ord('z')-ord('a')+1 assert numProducers 0: + self.unstartedNodes=self.discoverUnstartedLocalNodes(unstartedNodes, totalNodes) + biosNode=self.discoverBiosNode() if not biosNode or not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") @@ -420,11 +427,19 @@ def connectGroup(group, producerNodes, bridgeNodes) : return True Utils.Print("Bootstrap cluster.") + if not loadSystemContract: + useBiosBootFile=False #ensure we use Cluster.bootstrap if onlyBios or not useBiosBootFile: - self.biosNode=self.bootstrap(biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios, onlySetProds) + self.biosNode=self.bootstrap(biosNode, startedNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract) + if self.biosNode is None: + Utils.Print("ERROR: Bootstrap failed.") + return False else: self.useBiosBootFile=True - self.biosNode=self.bios_bootstrap(biosNode, totalNodes, pfSetupPolicy) + self.biosNode=self.bios_bootstrap(biosNode, startedNodes, pfSetupPolicy) + if self.biosNode is None: + Utils.Print("ERROR: Bootstrap failed.") + return False if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") @@ -529,18 +544,28 @@ def waitOnClusterBlockNumSync(self, targetBlockNum, timeout=None, blockType=Bloc """Wait for all nodes to have targetBlockNum finalized.""" assert(self.nodes) - def doNodesHaveBlockNum(nodes, targetBlockNum, blockType): + def doNodesHaveBlockNum(nodes, targetBlockNum, blockType, printCount): + ret=True for node in nodes: try: if (not node.killed) and (not node.isBlockPresent(targetBlockNum, blockType=blockType)): - return False + ret=False + break except (TypeError) as _: # This can happen if client connects before server is listening - return False + ret=False + break - return True + printCount[0]+=1 + if Utils.Debug and not ret and printCount[0]%5==0: + blockNums=[] + for i in range(0, len(nodes)): + blockNums.append(str(nodes[i].getBlockNum())) + Utils.Print("Cluster still not in sync, head blocks for nodes: [ %s ]" % (", ".join(blockNums))) + return ret - lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType) +
printCount=[0] # one-element list so the count persists across the lambda's calls + lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType, printCount) ret=Utils.waitForBool(lam, timeout) return ret @@ -661,6 +686,16 @@ def getNode(self, nodeId=0, exitOnError=True): def getNodes(self): return self.nodes + def launchUnstarted(self, numToLaunch=1, cachePopen=False): + assert(isinstance(numToLaunch, int)) + assert(numToLaunch>0) + launchList=self.unstartedNodes[:numToLaunch] + del self.unstartedNodes[:numToLaunch] + for node in launchList: + # the node number is indexed off of the started nodes list + node.launchUnstarted(len(self.nodes), cachePopen=cachePopen) + self.nodes.append(node) + # Spread funds across accounts with transactions spread through cluster nodes. # Validate transactions are synchronized on root node def spreadFunds(self, source, accounts, amount=1): @@ -827,15 +862,6 @@ def nodeNameToId(name): m=re.search(r"node_([\d]+)", name) return int(m.group(1)) - @staticmethod - def nodeExtensionToName(ext): - r"""Convert node extension (bios, 0, 1, etc) to node name. """ - prefix="node_" - if ext == "bios": - return prefix + ext - - return "node_%02d" % (ext) - @staticmethod def parseProducerKeys(configFile, nodeName): """Parse node config file for producer keys. Returns dictionary. (Keys: account name; Values: dictionary objects (Keys: ["name", "node", "private","public"]; Values: account name, node id returned by nodeNameToId(nodeName), private key(string) and public key(string)))""" @@ -873,7 +899,7 @@ def parseProducerKeys(configFile, nodeName): def parseProducers(nodeNum): """Parse node config file for producers.""" - configFile=Cluster.__configDir + Cluster.nodeExtensionToName(nodeNum) + "/config.ini" + configFile=Utils.getNodeConfigDir(nodeNum, "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) configStr=None with open(configFile, 'r') as f: @@ -891,19 +917,19 @@ def parseProducers(nodeNum): def parseClusterKeys(totalNodes): """Parse cluster config file. Updates producer keys data members.""" - nodeName=Cluster.nodeExtensionToName("bios") - configFile=Cluster.__configDir + nodeName + "/config.ini" + configFile=Utils.getNodeConfigDir("bios", "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + nodeName=Utils.nodeExtensionToName("bios") producerKeys=Cluster.parseProducerKeys(configFile, nodeName) if producerKeys is None: Utils.Print("ERROR: Failed to parse eosio private keys from cluster config files.") return None for i in range(0, totalNodes): - nodeName=Cluster.nodeExtensionToName(i) - configFile=Cluster.__configDir + nodeName + "/config.ini" + configFile=Utils.getNodeConfigDir(i, "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + nodeName=Utils.nodeExtensionToName(i) keys=Cluster.parseProducerKeys(configFile, nodeName) if keys is not None: producerKeys.update(keys) @@ -999,7 +1025,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): return biosNode - def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios=False, onlySetProds=False): + def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios=False, onlySetProds=False, loadSystemContract=True): """Create 'prodCount' init accounts and deposit 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call.
One way to validate this will be to check if every node has block 1.""" @@ -1226,17 +1252,18 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli (expectedAmount, actualAmount)) return None - contract="eosio.system" - contractDir="unittests/contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) - return None + if loadSystemContract: + contract="eosio.system" + contractDir="unittests/contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." % (contract)) + return None - Node.validateTransaction(trans) + Node.validateTransaction(trans) initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) Utils.Print("Transfer initial funds %s to individual accounts." % (initialFunds)) @@ -1286,8 +1313,8 @@ def myFunc(): @staticmethod def pgrepEosServerPattern(nodeInstance): - dataLocation=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeInstance) - return r"[\n]?(\d+) (.* --data-dir %s.*)\n" % (dataLocation) + dataLocation=Utils.getNodeDataDir(nodeInstance) + return r"[\n]?(\d+) (.* --data-dir %s .*)\n" % (dataLocation) # Populates list of EosInstanceInfo objects, matched to actual running instances def discoverLocalNodes(self, totalNodes, timeout=None): @@ -1304,18 +1331,30 @@ def discoverLocalNodes(self, totalNodes, timeout=None): psOutDisplay=psOut[:6660]+"..." if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay) for i in range(0, totalNodes): - pattern=Cluster.pgrepEosServerPattern(i) - m=re.search(pattern, psOut, re.MULTILINE) - if m is None: - Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) + instance=self.discoverLocalNode(i, psOut) + if instance is None: break - instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) - if Utils.Debug: Utils.Print("Node>", instance) nodes.append(instance) if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) return nodes + # Populate a node matched to actual running instance + def discoverLocalNode(self, nodeNum, psOut=None, timeout=None): + if psOut is None: + psOut=Cluster.pgrepEosServers(timeout=timeout) + if psOut is None: + Utils.Print("ERROR: No nodes discovered.") + return None + pattern=Cluster.pgrepEosServerPattern(nodeNum) + m=re.search(pattern, psOut, re.MULTILINE) + if m is None: + Utils.Print("ERROR: Failed to find %s pid.
Pattern %s" % (Utils.EosServerName, pattern)) + return None + instance=Node(self.host, self.port + nodeNum, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Node>", instance) + return instance + def discoverBiosNode(self, timeout=None): psOut=Cluster.pgrepEosServers(timeout=timeout) pattern=Cluster.pgrepEosServerPattern("bios") @@ -1381,20 +1420,20 @@ def __findFiles(path): return files def dumpErrorDetails(self): - fileName=os.path.join(Cluster.__configDir + Cluster.nodeExtensionToName("bios"), "config.ini") + fileName=Utils.getNodeConfigDir("bios", "config.ini") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + path=Utils.getNodeDataDir("bios") fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - configLocation=Cluster.__configDir + Cluster.nodeExtensionToName(i) + configLocation=Utils.getNodeConfigDir(i) fileName=os.path.join(configLocation, "config.ini") Cluster.dumpErrorDetailImpl(fileName) fileName=os.path.join(configLocation, "genesis.json") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + path=Utils.getNodeDataDir(i) fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) @@ -1468,9 +1507,9 @@ def waitForNextBlock(self, timeout=None): return node.waitForNextBlock(timeout) def cleanup(self): - for f in glob.glob(Cluster.__dataDir + "node_*"): + for f in glob.glob(Utils.DataDir + "node_*"): shutil.rmtree(f) - for f in glob.glob(Cluster.__configDir + "node_*"): + for f in glob.glob(Utils.ConfigDir + "node_*"): shutil.rmtree(f) for f in self.filesToCleanup: @@ -1509,6 +1548,23 @@ def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000): return True + def discoverUnstartedLocalNodes(self, unstartedNodes, totalNodes): + unstarted=[] + firstUnstartedNode=totalNodes-unstartedNodes + for nodeId in range(firstUnstartedNode, totalNodes): + unstarted.append(self.discoverUnstartedLocalNode(nodeId)) + return unstarted + + def discoverUnstartedLocalNode(self, nodeId): + startFile=Node.unstartedFile(nodeId) + with open(startFile, 'r') as file: + cmd=file.read() + Utils.Print("unstarted local node cmd: %s" % (cmd)) + p=re.compile(r'^\s*(\w+)\s*=\s*([^\s](?:.*[^\s])?)\s*$') + instance=Node(self.host, port=self.port+nodeId, pid=None, cmd=cmd, walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Unstarted Node>", instance) + return instance + def getInfos(self, silentErrors=False, exitOnError=False): infos=[] for node in self.nodes: @@ -1543,7 +1599,7 @@ def printBlockLogIfNeeded(self): self.printBlockLog() def getBlockLog(self, nodeExtension): - blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeExtension) + "/blocks/" + blockLogDir=Utils.getNodeDataDir(nodeExtension, "blocks") return Utils.getBlockLog(blockLogDir, exitOnError=False) def printBlockLog(self): @@ -1633,8 +1689,8 @@ def compareCommon(blockLogs, blockNameExtensions, first, last): if Utils.Debug: Utils.Print("context=%s" % (context)) ret=Utils.compare(commonBlockLogs[0], commonBlockLogs[i], context) if ret is not None: - blockLogDir1=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" - 
diff --git a/tests/Node.py b/tests/Node.py
index 9621186c9f9..334d9d2e7d5 100644
--- a/tests/Node.py
+++ b/tests/Node.py
@@ -7,10 +7,6 @@
 import datetime
 import json
 import signal
-import urllib.request
-import urllib.parse
-from urllib.error import HTTPError
-import tempfile

 from core_symbol import CORE_SYMBOL
 from testUtils import Utils
@@ -66,7 +62,7 @@ def eosClientArgs(self):
     def __str__(self):
         #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd)
-        return "Host: %s, Port:%d" % (self.host, self.port)
+        return "Host: %s, Port:%d, Pid:%s" % (self.host, self.port, self.pid)

     @staticmethod
     def validateTransaction(trans):
@@ -1079,8 +1075,12 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head
         assert(isinstance(blockType, BlockType))
         assert(isinstance(returnType, ReturnType))
         basedOnLib="true" if blockType==BlockType.lib else "false"
-        cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \
-            (self.endpointHttp, producer, whereInSequence, basedOnLib)
+        payload="{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }" % (producer, whereInSequence, basedOnLib)
+        return self.processCurlCmd("test_control", "kill_node_on_producer", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType)
+
+    def processCurlCmd(self, resource, command, payload, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json):
+        cmd="curl %s/v1/%s/%s -d '%s' -X POST -H \"Content-Type: application/json\"" % \
+            (self.endpointHttp, resource, command, payload)
         if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
         rtn=None
         start=time.perf_counter()
@@ -1095,6 +1095,8 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head
             if Utils.Debug:
                 end=time.perf_counter()
                 Utils.Print("cmd Duration: %.3f sec" % (end-start))
+                printReturn=json.dumps(rtn) if returnType==ReturnType.json else rtn
+                Utils.Print("cmd returned: %s" % (printReturn))
         except subprocess.CalledProcessError as ex:
             if not silentErrors:
                 end=time.perf_counter()
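Aside: this refactor extracts the inline curl invocation into a reusable processCurlCmd helper, so every nodeos HTTP endpoint the tests hit shares one error-handling and timing path. A hedged sketch of the request shape it standardizes; the endpoint address and payload values here are illustrative, not from the diff:

    # Sketch of the request processCurlCmd builds; not the helper itself.
    import json

    endpointHttp = "http://127.0.0.1:8888"  # hypothetical nodeos HTTP endpoint
    resource, command = "test_control", "kill_node_on_producer"
    payload = json.dumps({"producer": "defproducera", "where_in_sequence": 1, "based_on_lib": "false"})
    cmd = "curl %s/v1/%s/%s -d '%s' -X POST -H \"Content-Type: application/json\"" % (
        endpointHttp, resource, command, payload)
    # The helper runs this via subprocess, parses JSON by default (ReturnType.json),
    # and logs duration plus the returned body when Utils.Debug is set.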
@@ -1117,6 +1119,23 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head

         return rtn

+    def txnGenCreateTestAccounts(self, genAccount, genKey, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json):
+        assert(isinstance(genAccount, str))
+        assert(isinstance(genKey, str))
+        assert(isinstance(returnType, ReturnType))
+
+        payload="[ \"%s\", \"%s\" ]" % (genAccount, genKey)
+        return self.processCurlCmd("txn_test_gen", "create_test_accounts", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType)
+
+    def txnGenStart(self, salt, period, batchSize, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json):
+        assert(isinstance(salt, str))
+        assert(isinstance(period, int))
+        assert(isinstance(batchSize, int))
+        assert(isinstance(returnType, ReturnType))
+
+        payload="[ \"%s\", %d, %d ]" % (salt, period, batchSize)
+        return self.processCurlCmd("txn_test_gen", "start_generation", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType)
+
     def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False):
         if not waitForTransBlock:
             return trans
@@ -1228,16 +1247,20 @@ def myFunc():
         self.killed=True
         return True

-    def interruptAndVerifyExitStatus(self):
+    def interruptAndVerifyExitStatus(self, timeout=15):
         if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd))
         assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." % (self.cmd)
         self.popenProc.send_signal(signal.SIGINT)
         try:
-            outs, _ = self.popenProc.communicate(timeout=15)
+            outs, _ = self.popenProc.communicate(timeout=timeout)
             assert self.popenProc.returncode == 0, "Expected terminating \"%s\" to have an exit status of 0, but got %d" % (self.cmd, self.popenProc.returncode)
         except subprocess.TimeoutExpired:
             Utils.errorExit("Terminate call failed on node: %s" % (self.cmd))

+        # mark node as killed
+        self.pid=None
+        self.killed=True
+
     def verifyAlive(self, silent=False):
         if not silent and Utils.Debug: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd))
         if self.killed or self.pid is None:
@@ -1304,7 +1327,7 @@ def getNextCleanProductionCycle(self, trans):
     # TBD: make nodeId an internal property
     # pylint: disable=too-many-locals
     # If nodeosPath is equal to None, it will use the existing nodeos path
-    def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False, nodeosPath=None):
+    def relaunch(self, nodeId, chainArg=None, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False, nodeosPath=None):
         assert(self.pid is None)
         assert(self.killed)
@@ -1342,23 +1365,9 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim
             cmdArr.append(k)
             cmdArr.append(v)
         myCmd=" ".join(cmdArr)
-        if nodeId == "bios":
-            dataDir="var/lib/node_bios"
-        else:
-            dataDir="var/lib/node_%02d" % (nodeId)
-        dt = datetime.datetime.now()
-        dateStr="%d_%02d_%02d_%02d_%02d_%02d" % (
-            dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
-        stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr)
-        stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr)
-        with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr:
-            cmd=myCmd + ("" if chainArg is None else (" " + chainArg))
-            Utils.Print("cmd: %s" % (cmd))
-            popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr)
-            if cachePopen:
-                self.popenProc=popen
-            self.pid=popen.pid
-            if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd))
+
+        cmd=myCmd + ("" if chainArg is None else (" " + chainArg))
+        self.launchCmd(cmd, nodeId, cachePopen)

         def isNodeAlive():
             """wait for node to be responsive."""
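Aside: the two txn_test_gen wrappers added above are thin front-ends over processCurlCmd for the txn_test_gen_plugin endpoints. A sketch of how a test drives them, assuming a cluster wired up as in these tests; the account name and key are placeholders:

    # Hypothetical usage; parameter values are illustrative, not from the diff.
    node = cluster.getNode(1)                        # a node running txn_test_gen_plugin
    node.txnGenCreateTestAccounts("eosio", "5K...")  # creator account and its private key (placeholder)
    # Generate a batch of 150 transactions every 1500 ms; the salt keeps
    # multiple generator nodes from colliding on transaction ids.
    node.txnGenStart(salt="0", period=1500, batchSize=150)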
@@ -1384,6 +1393,32 @@ def isNodeAlive():
         self.killed=False
         return True

+    @staticmethod
+    def unstartedFile(nodeId):
+        assert(isinstance(nodeId, int))
+        startFile=Utils.getNodeDataDir(nodeId, "start.cmd")
+        if not os.path.exists(startFile):
+            Utils.errorExit("Cannot find unstarted node since %s file does not exist" % startFile)
+        return startFile
+
+    def launchUnstarted(self, nodeId, cachePopen=False):
+        Utils.Print("launchUnstarted cmd: %s" % (self.cmd))
+        self.launchCmd(self.cmd, nodeId, cachePopen)
+
+    def launchCmd(self, cmd, nodeId, cachePopen=False):
+        dataDir=Utils.getNodeDataDir(nodeId)
+        dt = datetime.datetime.now()
+        dateStr=Utils.getDateString(dt)
+        stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr)
+        stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr)
+        with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr:
+            Utils.Print("cmd: %s" % (cmd))
+            popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr)
+            if cachePopen:
+                self.popenProc=popen
+            self.pid=popen.pid
+            if Utils.Debug: Utils.Print("start Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd))
+
     def trackCmdTransaction(self, trans, ignoreNonTrans=False):
         if trans is None:
             if Utils.Debug: Utils.Print("  cmd returned transaction: %s" % (trans))
@@ -1416,26 +1451,10 @@ def reportStatus(self):
         Utils.Print(" hbn : %s (%s)" % (self.lastRetrievedHeadBlockNum, status))
         Utils.Print(" lib : %s (%s)" % (self.lastRetrievedLIB, status))

-    def sendRpcApi(self, relativeUrl, data={}):
-        url = urllib.parse.urljoin(self.endpointHttp, relativeUrl)
-        req = urllib.request.Request(url)
-        req.add_header('Content-Type', 'application/json; charset=utf-8')
-        reqData = json.dumps(data).encode("utf-8")
-        rpcApiResult = None
-        try:
-            response = urllib.request.urlopen(req, reqData)
-            rpcApiResult = json.loads(response.read().decode("utf-8"))
-        except HTTPError as e:
-            Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e.read()))
-            raise e
-        except Exception as e:
-            Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e))
-            raise e
-        return rpcApiResult
-
     # Require producer_api_plugin
     def scheduleProtocolFeatureActivations(self, featureDigests=[]):
-        self.sendRpcApi("v1/producer/schedule_protocol_feature_activations", {"protocol_features_to_activate": featureDigests})
+        param = { "protocol_features_to_activate": featureDigests }
+        self.processCurlCmd("producer", "schedule_protocol_feature_activations", json.dumps(param))

     # Require producer_api_plugin
     def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatable=False):
@@ -1443,7 +1462,7 @@ def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatab
             "exclude_disabled": excludeDisabled,
             "exclude_unactivatable": excludeUnactivatable
         }
-        res = self.sendRpcApi("v1/producer/get_supported_protocol_features", param)
+        res = self.processCurlCmd("producer", "get_supported_protocol_features", json.dumps(param))
         return res

     # This will return supported protocol features in a dict (feature codename as the key), i.e.
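Aside: launchCmd now carries the start/restart logic once, launchUnstarted reuses it for nodes the cluster prepared but never started, and unstartedFile looks up the saved command line. A sketch of the lifecycle these pieces imply, assuming the start.cmd convention this diff introduces; the Node constructor call is abbreviated for illustration:

    # Sketch of the unstarted-node flow under the start.cmd convention.
    # The cluster writes the full nodeos command line to var/lib/node_XX/start.cmd
    # instead of launching the node; a test brings the node up on demand later.
    startFile = Node.unstartedFile(3)   # errors out if var/lib/node_03/start.cmd is missing
    with open(startFile) as f:
        cmd = f.read()                  # the saved nodeos command line
    node = Node(host, port + 3, pid=None, cmd=cmd)   # abbreviated; real call passes wallet/mongo args
    node.launchUnstarted(3, cachePopen=True)         # Popen with stdout/stderr redirected into the data dir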
@@ -1525,8 +1544,7 @@ def getActivatedProtocolFeatures(self):
         return latestBlockHeaderState["activated_protocol_features"]["protocol_features"]

     def modifyBuiltinPFSubjRestrictions(self, nodeId, featureCodename, subjectiveRestriction={}):
-        from Cluster import Cluster
-        jsonPath = os.path.join(Cluster.getConfigDir(nodeId),
+        jsonPath = os.path.join(Utils.getNodeConfigDir(nodeId),
                                 "protocol_features",
                                 "BUILTIN-{}.json".format(featureCodename))
         protocolFeatureJson = []
diff --git a/tests/TestHelper.py b/tests/TestHelper.py
index 2c606263420..5bcdbafbcbe 100644
--- a/tests/TestHelper.py
+++ b/tests/TestHelper.py
@@ -22,6 +22,11 @@ def add(self, flag, type, help, default, choices=None):
         arg=self.AppArg(flag, type, help, default, choices)
         self.args.append(arg)

+    def add_bool(self, flag, help, action='store_true'):
+        arg=self.AppArg(flag=flag, help=help, action=action)
+        self.args.append(arg)
+
 # pylint: disable=too-many-instance-attributes
 class TestHelper(object):
     LOCAL_HOST="localhost"
diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py
index 971228854d9..6a3ac94d511 100755
--- a/tests/consensus-validation-malicious-producers.py
+++ b/tests/consensus-validation-malicious-producers.py
@@ -246,7 +246,7 @@ def myTest(transWillEnterBlock):
     topo="mesh"
     delay=0
     Print("Stand up cluster")
-    if cluster.launch(pnodes, total_nodes, topo, delay) is False:
+    if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False:
         error("Failed to stand up eos cluster.")
         return False
diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py
index 5b302dcf141..2ea4edfe462 100755
--- a/tests/distributed-transactions-test.py
+++ b/tests/distributed-transactions-test.py
@@ -63,7 +63,7 @@
     (pnodes, total_nodes-pnodes, topo, delay))

 Print("Stand up cluster")
-if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False:
+if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False:
     errorExit("Failed to stand up eos cluster.")

 Print ("Wait for Cluster stabilization")
diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py
index 77b1f96e28d..b77208115fb 100755
--- a/tests/nodeos_forked_chain_test.py
+++ b/tests/nodeos_forked_chain_test.py
@@ -7,7 +7,6 @@
 from WalletMgr import WalletMgr
 from Node import BlockType
 from Node import Node
-from TestHelper import AppArgs
 from TestHelper import TestHelper

 import decimal
diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py
index 4468173e0a2..934c5fcdaaa 100755
--- a/tests/nodeos_irreversible_mode_test.py
+++ b/tests/nodeos_irreversible_mode_test.py
@@ -51,13 +51,13 @@ def makeSnapshot(nodeId):
     urllib.request.urlopen(req)

 def backupBlksDir(nodeId):
-    dataDir = Cluster.getDataDir(nodeId)
+    dataDir = Utils.getNodeDataDir(nodeId)
     sourceDir = os.path.join(dataDir, "blocks")
     destinationDir = os.path.join(os.path.dirname(dataDir), os.path.basename(dataDir) + "-backup", "blocks")
     shutil.copytree(sourceDir, destinationDir)

 def recoverBackedupBlksDir(nodeId):
-    dataDir = Cluster.getDataDir(nodeId)
+    dataDir = Utils.getNodeDataDir(nodeId)
     # Delete existing one and copy backed up one
     existingBlocksDir = os.path.join(dataDir, "blocks")
     backedupBlocksDir = os.path.join(os.path.dirname(dataDir), os.path.basename(dataDir) + "-backup", "blocks")
@@ -65,7 +65,7 @@ def recoverBackedupBlksDir(nodeId):
     shutil.copytree(backedupBlocksDir, existingBlocksDir)

 def getLatestSnapshot(nodeId):
-    snapshotDir = os.path.join(Cluster.getDataDir(nodeId), "snapshots")
+    snapshotDir = os.path.join(Utils.getNodeDataDir(nodeId), "snapshots")
     snapshotDirContents = os.listdir(snapshotDir)
     assert len(snapshotDirContents) > 0
     snapshotDirContents.sort()
@@ -73,12 +73,12 @@ def getLatestSnapshot(nodeId):

 def removeReversibleBlks(nodeId):
-    dataDir = Cluster.getDataDir(nodeId)
+    dataDir = Utils.getNodeDataDir(nodeId)
     reversibleBlks = os.path.join(dataDir, "blocks", "reversible")
     shutil.rmtree(reversibleBlks, ignore_errors=True)

 def removeState(nodeId):
-    dataDir = Cluster.getDataDir(nodeId)
+    dataDir = Utils.getNodeDataDir(nodeId)
     state = os.path.join(dataDir, "state")
     shutil.rmtree(state, ignore_errors=True)
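Aside: add_bool gives AppArgs a shorthand for flag-style options that need no type or default, complementing the existing add. A brief sketch of how a test script might use it; the --skip-validate flag is hypothetical, invented here for illustration:

    # Hypothetical usage of the new AppArgs.add_bool helper.
    from TestHelper import AppArgs, TestHelper

    appArgs = AppArgs()
    appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10)
    appArgs.add_bool(flag="--skip-validate", help="Skip post-run validation")  # hypothetical flag

    args = TestHelper.parse_args({"-v", "--dump-error-details"}, applicationSpecificArgs=appArgs)
    # With argparse's store_true action, args.skip_validate should be False
    # unless --skip-validate is passed on the command line.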
diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py
index 2b8523aa1bd..be3324f969e 100755
--- a/tests/nodeos_multiple_version_protocol_feature_test.py
+++ b/tests/nodeos_multiple_version_protocol_feature_test.py
@@ -82,6 +82,7 @@ def hasBlockBecomeIrr():
     associatedNodeLabels = {
         "3": "170"
     }
+    Utils.Print("Alternate Version Labels File is {}".format(alternateVersionLabelsFile))
     assert exists(alternateVersionLabelsFile), "Alternate version labels file does not exist"

     assert cluster.launch(pnodes=4, totalNodes=4, prodCount=1, totalProducers=4,
                           extraNodeosArgs=" --plugin eosio::producer_api_plugin ",
@@ -91,31 +92,31 @@ def hasBlockBecomeIrr():
                           alternateVersionLabelsFile=alternateVersionLabelsFile,
                           associatedNodeLabels=associatedNodeLabels), "Unable to launch cluster"

-    def pauseBlockProduction(nodes:[Node]):
-        for node in nodes:
-            node.sendRpcApi("v1/producer/pause")
+    newNodeIds = [0, 1, 2]
+    oldNodeId = 3
+    newNodes = list(map(lambda id: cluster.getNode(id), newNodeIds))
+    oldNode = cluster.getNode(oldNodeId)
+    allNodes = [*newNodes, oldNode]

-    def resumeBlockProduction(nodes:[Node]):
-        for node in nodes:
-            node.sendRpcApi("v1/producer/resume")
+    def pauseBlockProductions():
+        for node in allNodes:
+            if not node.killed: node.processCurlCmd("producer", "pause", "")
+
+    def resumeBlockProductions():
+        for node in allNodes:
+            if not node.killed: node.processCurlCmd("producer", "resume", "")

     def shouldNodesBeInSync(nodes:[Node]):
         # Pause all block production to ensure the head is not moving
-        pauseBlockProduction(nodes)
+        pauseBlockProductions()
         time.sleep(1) # Wait for some time to ensure all blocks are propagated
         headBlockIds = []
         for node in nodes:
             headBlockId = node.getInfo()["head_block_id"]
             headBlockIds.append(headBlockId)
-        resumeBlockProduction(nodes)
+        resumeBlockProductions()
         return len(set(headBlockIds)) == 1

-    newNodeIds = [0, 1, 2]
-    oldNodeId = 3
-    newNodes = list(map(lambda id: cluster.getNode(id), newNodeIds))
-    oldNode = cluster.getNode(oldNodeId)
-    allNodes = [*newNodes, oldNode]
-
     # Before everything starts, all nodes (new version and old version) should be in sync
     assert shouldNodesBeInSync(allNodes), "Nodes are not in sync before preactivation"
@@ -173,7 +174,7 @@ def shouldNodesBeInSync(nodes:[Node]):
     # and --import-reversible-blocks with the new version to ensure the compatibility of the reversible blocks
     # Finally, when we restart the 4th node with the version of nodeos that supports protocol feature,
     # all nodes should be in sync, and the 4th node will also contain PREACTIVATE_FEATURE
-    portableRevBlkPath = join(Cluster.getDataDir(oldNodeId), "rev_blk_portable_format")
+    portableRevBlkPath = os.path.join(Utils.getNodeDataDir(oldNodeId), "rev_blk_portable_format")
     oldNode.kill(signal.SIGTERM)
     # Note, for the following relaunch, these will fail to relaunch immediately (expected behavior of export/import), so the chainArg will not replace the old cmd
     oldNode.relaunch(oldNodeId, chainArg="--export-reversible-blocks {}".format(portableRevBlkPath), timeout=1)
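Aside: the relaunch above leans on the new chainArg=None default: a one-shot export run is expected to exit immediately, so the flag never sticks to the node's saved command and a later plain relaunch works unchanged. A hedged sketch of the export/import round trip this test performs; the nodeosPath value is an assumption for illustration:

    # Sketch of the reversible-blocks round trip; timeout=1 because the
    # export/import invocations are expected to exit rather than stay up.
    oldNode.kill(signal.SIGTERM)
    oldNode.relaunch(oldNodeId, chainArg="--export-reversible-blocks {}".format(portableRevBlkPath), timeout=1)
    oldNode.relaunch(oldNodeId, chainArg="--import-reversible-blocks {}".format(portableRevBlkPath), timeout=1,
                     nodeosPath="programs/nodeos/nodeos")  # new-version binary path; assumed value
    oldNode.relaunch(oldNodeId)  # finally bring the node up for real on the new version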
diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py
new file mode 100755
index 00000000000..03a55936385
--- /dev/null
+++ b/tests/nodeos_startup_catchup.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+
+from testUtils import Utils
+import testUtils
+import time
+from Cluster import Cluster
+from WalletMgr import WalletMgr
+from Node import BlockType
+from Node import Node
+import signal
+from TestHelper import AppArgs
+from TestHelper import TestHelper
+
+import decimal
+import math
+import re
+
+###############################################################
+# nodeos_startup_catchup
+# Test configures a producing node and <--txn-gen-nodes count> non-producing nodes with the
+# txn_test_gen_plugin. Each non-producing node starts generating transactions and sending them
+# to the producing node.
+# 1) After 10 seconds a new node is started.
+# 2) The node is allowed to catch up to the producing node.
+# 3) That node is killed.
+# 4) The node is restarted.
+# 5) The node is allowed to catch up to the producing node.
+# 6) Repeat steps 2-5, <--catchup-count - 1> more times.
+###############################################################
+
+Print=Utils.Print
+errorExit=Utils.errorExit
+
+from core_symbol import CORE_SYMBOL
+
+appArgs=AppArgs()
+extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10)
+extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=2)
+args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
+                              "-p","--p2p-plugin","--wallet-port"}, applicationSpecificArgs=appArgs)
+Utils.Debug=args.v
+pnodes=args.p if args.p > 0 else 1
+startedNonProdNodes = args.txn_gen_nodes if args.txn_gen_nodes >= 2 else 2
+cluster=Cluster(walletd=True)
+dumpErrorDetails=args.dump_error_details
+keepLogs=args.keep_logs
+dontKill=args.leave_running
+prodCount=args.prod_count if args.prod_count > 1 else 2
+killAll=args.clean_run
+p2pPlugin=args.p2p_plugin
+walletPort=args.wallet_port
+catchupCount=args.catchup_count if args.catchup_count > 0 else 1
+totalNodes=startedNonProdNodes+pnodes+catchupCount
+
+walletMgr=WalletMgr(True, port=walletPort)
+testSuccessful=False
+killEosInstances=not dontKill
+killWallet=not dontKill
+
+WalletdName=Utils.EosWalletName
+ClientName="cleos"
+
+try:
+    TestHelper.printSystemInfo("BEGIN")
+    cluster.setWalletMgr(walletMgr)
+
+    cluster.killall(allInstances=killAll)
+    cluster.cleanup()
+    specificExtraNodeosArgs={}
+    txnGenNodeNum=pnodes  # next node after producer nodes
+    for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes):
+        specificExtraNodeosArgs[nodeNum]="--plugin eosio::txn_test_gen_plugin --txn-test-gen-account-prefix txntestacct"
+    Print("Stand up cluster")
+    if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, p2pPlugin=p2pPlugin,
+                      useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=False) is False:
+        Utils.errorExit("Failed to stand up eos cluster.")
+
Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + Print("Create txn generate nodes") + txnGenNodes=[] + for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): + txnGenNodes.append(cluster.getNode(nodeNum)) + + Print("Create accounts for generated txns") + txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey) + + def lib(node): + return node.getBlockNum(BlockType.lib) + + def head(node): + return node.getBlockNum(BlockType.head) + + node0=cluster.getNode(0) + + Print("Wait for account creation to be irreversible") + blockNum=head(node0) + node0.waitForBlock(blockNum, blockType=BlockType.lib) + + Print("Startup txn generation") + period=1500 + transPerPeriod=150 + for genNum in range(0, len(txnGenNodes)): + salt="%d" % genNum + txnGenNodes[genNum].txnGenStart(salt, period, transPerPeriod) + time.sleep(1) + + blockNum=head(node0) + timePerBlock=500 + blocksPerPeriod=period/timePerBlock + transactionsPerBlock=transPerPeriod/blocksPerPeriod + steadyStateWait=20 + startBlockNum=blockNum+steadyStateWait + numBlocks=20 + endBlockNum=startBlockNum+numBlocks + node0.waitForBlock(endBlockNum) + transactions=0 + avg=0 + for blockNum in range(startBlockNum, endBlockNum): + block=node0.getBlock(blockNum) + transactions+=len(block["transactions"]) + + avg=transactions / (blockNum - startBlockNum + 1) + + Print("Validate transactions are generating") + minRequiredTransactions=transactionsPerBlock + assert avg>minRequiredTransactions, "Expected to at least receive %s transactions per block, but only getting %s" % (minRequiredTransactions, avg) + + Print("Cycle through catchup scenarios") + twoRounds=21*2*12 + for catchup_num in range(0, catchupCount): + Print("Start catchup node") + cluster.launchUnstarted(cachePopen=True) + lastLibNum=lib(node0) + time.sleep(2) + # verify producer lib is still advancing + node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + catchupNode=cluster.getNodes()[-1] + catchupNodeNum=cluster.getNodes().index(catchupNode) + lastCatchupLibNum=lib(catchupNode) + + Print("Verify catchup node %s's LIB is advancing" % (catchupNodeNum)) + # verify lib is advancing (before we wait for it to have to catchup with producer) + catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + Print("Verify catchup node is advancing to producer") + numBlocksToCatchup=(lastLibNum-lastCatchupLibNum-1)+twoRounds + catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + + Print("Shutdown catchup node and validate exit code") + catchupNode.interruptAndVerifyExitStatus(60) + + Print("Restart catchup node") + catchupNode.relaunch(catchupNodeNum) + lastCatchupLibNum=lib(catchupNode) + + Print("Verify catchup node is advancing") + # verify catchup node is advancing to producer + catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + Print("Verify producer is still advancing LIB") + lastLibNum=lib(node0) + # verify producer lib is still advancing + node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + Print("Verify catchup node is advancing to producer") + # verify catchup node is advancing to producer + catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + catchupNode.kill(signal.SIGTERM) + catchupNode.popenProc=None + + testSuccessful=True + +finally: + TestHelper.shutdown(cluster, 
diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py
index 6b3c217d75d..894a7d0d271 100755
--- a/tests/restart-scenarios-test.py
+++ b/tests/restart-scenarios-test.py
@@ -66,7 +66,7 @@
     pnodes, topo, delay, chainSyncStrategyStr))

 Print("Stand up cluster")
-if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False:
+if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False:
     errorExit("Failed to stand up eos cluster.")

 Print ("Wait for Cluster stabilization")
diff --git a/tests/testUtils.py b/tests/testUtils.py
index 351b56537b1..f633f9bed5f 100755
--- a/tests/testUtils.py
+++ b/tests/testUtils.py
@@ -36,6 +36,8 @@ class Utils:
     EosBlockLogPath="programs/eosio-blocklog/eosio-blocklog"

     FileDivider="================================================================="
+    DataDir="var/lib/"
+    ConfigDir="etc/eosio/"

     @staticmethod
     def Print(*args, **kwargs):
@@ -65,6 +67,38 @@ def setIrreversibleTimeout(timeout):
     def setSystemWaitTimeout(timeout):
         Utils.systemWaitTimeout=timeout

+    @staticmethod
+    def getDateString(dt):
+        return "%d_%02d_%02d_%02d_%02d_%02d" % (
+            dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
+
+    @staticmethod
+    def nodeExtensionToName(ext):
+        r"""Convert node extension (bios, 0, 1, etc) to node name. """
+        prefix="node_"
+        if ext == "bios":
+            return prefix + ext
+
+        return "node_%02d" % (ext)
+
+    @staticmethod
+    def getNodeDataDir(ext, relativeDir=None, trailingSlash=False):
+        path=os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext))
+        if relativeDir is not None:
+            path=os.path.join(path, relativeDir)
+        if trailingSlash:
+            path=os.path.join(path, "")
+        return path
+
+    @staticmethod
+    def getNodeConfigDir(ext, relativeDir=None, trailingSlash=False):
+        path=os.path.join(Utils.ConfigDir, Utils.nodeExtensionToName(ext))
+        if relativeDir is not None:
+            path=os.path.join(path, relativeDir)
+        if trailingSlash:
+            path=os.path.join(path, "")
+        return path
+
     @staticmethod
     def getChainStrategies():
         chainSyncStrategies={}
@@ -179,7 +213,8 @@ def runCmdArrReturnJson(cmdArr, trace=False, silentErrors=True):

     @staticmethod
     def runCmdReturnStr(cmd, trace=False):
-        retStr=Utils.checkOutput(cmd.split())
+        cmdArr=shlex.split(cmd)
+        retStr=Utils.checkOutput(cmdArr)
         if trace: Utils.Print ("RAW > %s" % (retStr))
         return retStr
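Aside: switching runCmdReturnStr from cmd.split() to shlex.split matters whenever the command carries quoted arguments: naive whitespace splitting tears a quoted value into fragments, while shlex honors shell quoting rules. A small comparison (the URL is a placeholder):

    import shlex

    cmd = "curl http://127.0.0.1:8888/v1/chain/get_info -H 'Content-Type: application/json'"
    print(cmd.split())
    # [..., '-H', "'Content-Type:", "application/json'"]   <- quoted argument torn apart
    print(shlex.split(cmd))
    # [..., '-H', 'Content-Type: application/json']        <- quoting respected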
diff --git a/tests/validate-dirty-db.py b/tests/validate-dirty-db.py
index ac7520bc353..afcf2767b73 100755
--- a/tests/validate-dirty-db.py
+++ b/tests/validate-dirty-db.py
@@ -74,7 +74,7 @@ def runNodeosAndGetOutput(myTimeout=3):
     pnodes, topo, delay, chainSyncStrategyStr))

 Print("Stand up cluster")
-if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False:
+if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False:
     errorExit("Failed to stand up eos cluster.")

 node=cluster.getNode(0)
diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp
index bfaeca76727..611d9f1f40e 100644
--- a/unittests/misc_tests.cpp
+++ b/unittests/misc_tests.cpp
@@ -7,12 +7,13 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
 #include
-#include
 #include

 #ifdef NON_VALIDATING_TEST
@@ -829,34 +830,47 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try {
   BOOST_CHECK_EQUAL(trx.id(), mtrx->id);
   BOOST_CHECK_EQUAL(trx.id(), mtrx2->id);

-  boost::asio::thread_pool thread_pool(5);
+  named_thread_pool thread_pool( "misc", 5 );

   BOOST_CHECK( !mtrx->signing_keys_future.valid() );
   BOOST_CHECK( !mtrx2->signing_keys_future.valid() );

-  transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() );
-  transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() );
+  transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() );
+  transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() );

   BOOST_CHECK( mtrx->signing_keys_future.valid() );
   BOOST_CHECK( mtrx2->signing_keys_future.valid() );

   // no-op
-  transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() );
-  transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() );
+  transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() );
+  transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() );

   auto keys = mtrx->recover_keys( test.control->get_chain_id() );
-  BOOST_CHECK_EQUAL(1u, keys.size());
-  BOOST_CHECK_EQUAL(public_key, *keys.begin());
+  BOOST_CHECK_EQUAL(1u, keys.second.size());
+  BOOST_CHECK_EQUAL(public_key, *keys.second.begin());

   // again
-  keys = mtrx->recover_keys( test.control->get_chain_id() );
-  BOOST_CHECK_EQUAL(1u, keys.size());
-  BOOST_CHECK_EQUAL(public_key, *keys.begin());
+  auto keys2 = mtrx->recover_keys( test.control->get_chain_id() );
+  BOOST_CHECK_EQUAL(1u, keys2.second.size());
+  BOOST_CHECK_EQUAL(public_key, *keys2.second.begin());

-  auto keys2 = mtrx2->recover_keys( test.control->get_chain_id() );
-  BOOST_CHECK_EQUAL(1u, keys.size());
-  BOOST_CHECK_EQUAL(public_key, *keys.begin());
+  auto keys3 = mtrx2->recover_keys( test.control->get_chain_id() );
+  BOOST_CHECK_EQUAL(1u, keys3.second.size());
+  BOOST_CHECK_EQUAL(public_key, *keys3.second.begin());

+  // recover keys without first calling start_recover_keys
+  transaction_metadata_ptr mtrx4 = std::make_shared<transaction_metadata>( std::make_shared<packed_transaction>( trx, packed_transaction::none) );
+  transaction_metadata_ptr mtrx5 = std::make_shared<transaction_metadata>( std::make_shared<packed_transaction>( trx, packed_transaction::zlib) );
+
+  auto keys4 = mtrx4->recover_keys( test.control->get_chain_id() );
+  BOOST_CHECK_EQUAL(1u, keys4.second.size());
+  BOOST_CHECK_EQUAL(public_key, *keys4.second.begin());
+
+  auto keys5 = mtrx5->recover_keys( test.control->get_chain_id() );
+  BOOST_CHECK_EQUAL(1u, keys5.second.size());
+  BOOST_CHECK_EQUAL(public_key, *keys5.second.begin());
+
+  thread_pool.stop();
+
 } FC_LOG_AND_RETHROW() }
diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp
index c3578e15750..a3749f9656a 100644
--- a/unittests/snapshot_tests.cpp
+++ b/unittests/snapshot_tests.cpp
@@ -49,13 +49,13 @@ class snapshotted_tester : public base_tester {
       init(copied_config, snapshot);
    }

-   signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override {
-      return _produce_block(skip_time, false, skip_flag);
+   signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override {
+      return _produce_block(skip_time, false);
    }

-   signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override {
+   signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override {
       control->abort_block();
-      return _produce_block(skip_time, true, skip_flag);
+      return _produce_block(skip_time, true);
    }

    signed_block_ptr finish_block()override {