diff --git a/.buildkite/coverage.yml b/.buildkite/coverage.yml deleted file mode 100644 index c5a50bc64f4..00000000000 --- a/.buildkite/coverage.yml +++ /dev/null @@ -1,29 +0,0 @@ -steps: - - command: | - echo "--- :hammer: Building" && \ - /usr/bin/cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT="${BOOST_ROOT}" -DWASM_ROOT="${WASM_ROOT}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true -DENABLE_COVERAGE_TESTING=true -DBUILD_DOXYGEN=false && \ - /usr/bin/ninja - echo "--- :spiral_note_pad: Generating Code Coverage Report" && \ - /usr/bin/ninja EOSIO_ut_coverage && \ - echo "--- :arrow_up: Publishing Code Coverage Report" && \ - buildkite-agent artifact upload "EOSIO_ut_coverage/**/*" s3://eos-coverage/$BUILDKITE_JOB_ID && \ - cp /config/.coveralls.yml . && \ - /usr/local/bin/coveralls-lcov EOSIO_ut_coverage_filtered.info && \ - echo "+++ View Report" && \ - printf "\033]1339;url=https://eos-coverage.s3-us-west-2.amazonaws.com/$BUILDKITE_JOB_ID/EOSIO_ut_coverage/index.html;content=View Full Coverage Report\a\n" - label: ":spiral_note_pad: Generate Report" - agents: - queue: "automation-large-builder-fleet" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - timeout: 60 diff --git a/.buildkite/debug.yml b/.buildkite/debug.yml deleted file mode 100644 index 28576d56195..00000000000 --- a/.buildkite/debug.yml +++ /dev/null @@ -1,230 +0,0 @@ -steps: - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Build" - agents: - - "role=macos-builder" - artifact_paths: "build.tar.gz" - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":fedora: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:fedora" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":centos: 
Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:centos" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":darwin: Tests" - agents: - - "role=macos-tester" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":ubuntu: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":ubuntu: 18.04 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":fedora: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:fedora" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":centos: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:centos" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":aws: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job - timeout: 60 diff --git a/.buildkite/docker.yml b/.buildkite/docker.yml deleted file mode 100644 index f8f6d8e0a12..00000000000 --- a/.buildkite/docker.yml +++ /dev/null @@ -1,74 +0,0 @@ -steps: - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING BUILD IMAGE" && \ - cd Docker/builder && \ - docker build -t eosio/builder:latest -t eosio/builder:$BUILDKITE_COMMIT . --build-arg branch=$BUILDKITE_COMMIT && \ - docker tag eosio/builder:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker tag eosio/builder:latest gcr.io/b1-automation-dev/eosio/builder:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/builder:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/builder:$BUILDKITE_COMMIT && \ - docker rmi eosio/builder:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:latest - label: "Docker build builder" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - wait - - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING EOS IMAGE" && \ - docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - cd Docker && \ - docker build -t eosio/eos:latest -t eosio/eos:$BUILDKITE_COMMIT . 
--build-arg branch=$BUILDKITE_BRANCH && \ - docker tag eosio/eos:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker tag eosio/eos:latest gcr.io/b1-automation-dev/eosio/eos:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/eos:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/eos:$BUILDKITE_COMMIT && \ - docker rmi eosio/eos:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT - label: "Docker build eos" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING EOS DEV IMAGE" && \ - docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - cd Docker/dev && \ - docker build -t eosio/eos-dev:latest -t eosio/eos-dev:$BUILDKITE_COMMIT . --build-arg branch=$BUILDKITE_BRANCH && \ - docker tag eosio/eos-dev:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker tag eosio/eos-dev:latest gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker rmi eosio/eos-dev:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT - label: "Docker build eos-dev" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - wait diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml deleted file mode 100644 index e22016c4de4..00000000000 --- a/.buildkite/long_running_tests.yml +++ /dev/null @@ -1,212 +0,0 @@ -steps: - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Build" - agents: - - "role=macos-builder" - artifact_paths: "build.tar.gz" - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - timeout: 60 - - - command: | - echo 
"+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":fedora: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":centos: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:centos" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":darwin: Tests" - agents: - - "role=macos-tester" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 100 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":ubuntu: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - timeout: 100 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":ubuntu: 18.04 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - timeout: 100 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":fedora: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" - workdir: /data/job - timeout: 100 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":centos: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:centos" - workdir: /data/job - timeout: 100 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":aws: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job - timeout: 100 diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml deleted file mode 100644 index 893a19f9ff9..00000000000 --- a/.buildkite/pipeline.yml +++ /dev/null @@ -1,526 +0,0 @@ -steps: - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - "role=macos-builder" - artifact_paths: "build.tar.gz" - timeout: 60 - - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Mojave Build" - agents: - - "role=builder" - - "os=mojave" - artifact_paths: "build.tar.gz" - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: 
"build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":fedora: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":centos: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:centos" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":darwin: High Sierra Tests" - agents: - - "role=macos-tester" - - "os=high-sierra" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":darwin: High Sierra NP Tests" - agents: - - "role=macos-tester" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":darwin: Mojave Tests" - agents: - - "role=tester" - - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":darwin: Mojave NP Tests" - agents: - - "role=tester" - - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":ubuntu: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":ubuntu: NP Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":ubuntu: 18.04 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":ubuntu: 18.04 NP Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":fedora: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":fedora: NP Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":centos: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:centos" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":centos: NP Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:centos" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":aws: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":aws: NP Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: High Sierra Package Builder" - agents: - - "role=macos-builder" - - "os=high-sierra" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - cd /data/job/build/packages && bash generate_package.sh deb - label: ":ubuntu: Package builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.deb" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - env: - OS: "ubuntu-16.04" - PKGTYPE: "deb" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - cd /data/job/build/packages && bash generate_package.sh deb - label: ":ubuntu: 18.04 Package builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.deb" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - env: - OS: "ubuntu-18.04" - PKGTYPE: "deb" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - yum install -y rpm-build && \ - mkdir -p /root/rpmbuild/BUILD && \ - mkdir -p /root/rpmbuild/BUILDROOT && \ - mkdir -p /root/rpmbuild/RPMS && \ - mkdir -p /root/rpmbuild/SOURCES && \ - mkdir -p /root/rpmbuild/SPECS && \ - mkdir -p /root/rpmbuild/SRPMS && \ - cd /data/job/build/packages && bash generate_package.sh rpm - label: ":fedora: Package builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/x86_64/*.rpm" - plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" - workdir: /data/job - env: - OS: "fc27" - PKGTYPE: "rpm" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - yum install -y rpm-build && \ - mkdir -p /root/rpmbuild/BUILD && \ - mkdir -p /root/rpmbuild/BUILDROOT && \ - mkdir -p /root/rpmbuild/RPMS && \ - mkdir -p /root/rpmbuild/SOURCES && \ - mkdir -p /root/rpmbuild/SPECS && \ - mkdir -p /root/rpmbuild/SRPMS && \ - cd /data/job/build/packages && bash generate_package.sh rpm - label: ":centos: Package builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/x86_64/*.rpm" - plugins: - docker#v1.4.0: - image: "eosio/ci:centos" - workdir: /data/job - env: - OS: "el7" - PKGTYPE: "rpm" - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading brew files" && \ - buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" && \ - mv build/packages/eosio.rb build/packages/eosio_highsierra.rb && \ - buildkite-agent artifact download "build/packages/eosio.rb" . 
--step ":darwin: Mojave Package Builder" - label: ":darwin: Brew Updater" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/eosio_highsierra.rb" - - "build/packages/eosio.rb" - timeout: 60 diff --git a/.buildkite/sanitizers.yml b/.buildkite/sanitizers.yml deleted file mode 100644 index b8588135610..00000000000 --- a/.buildkite/sanitizers.yml +++ /dev/null @@ -1,131 +0,0 @@ -steps: - - command: | - echo "--- :hammer: Building with Undefined Sanitizer" && \ - /usr/bin/cmake -GNinja \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_CXX_COMPILER=clang++-4.0 \ - -DCMAKE_C_COMPILER=clang-4.0 \ - -DBOOST_ROOT="${BOOST_ROOT}" \ - -DWASM_ROOT="${WASM_ROOT}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ - -DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING=true\ - -DBUILD_DOXYGEN=false -DCMAKE_CXX_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ - -DCMAKE_C_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ - -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" \ - -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" && \ - echo "--- :shinto_shrine: Running ninja" && \ - /usr/bin/ninja | tee ninja.log && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz * - echo "--- :beers: Done" - label: ":_: Undefined Sanitizer" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build.tar.gz" - - "ninja.log" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - command: ["--privileged"] - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - - UBSAN_OPTIONS=print_stacktrace=1 - timeout: 60 - - - command: | - echo "--- :hammer: Building with Address Sanitizer" && \ - /usr/bin/cmake -GNinja \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_CXX_COMPILER=clang++-4.0 \ - -DCMAKE_C_COMPILER=clang-4.0 \ - -DBOOST_ROOT="${BOOST_ROOT}" \ - -DWASM_ROOT="${WASM_ROOT}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ - -DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING=true \ - -DBUILD_DOXYGEN=false \ - -DCMAKE_CXX_FLAGS="-fsanitize=address -fsanitize-recover=all -O1 -g -fno-omit-frame-pointer" \ - -DCMAKE_C_FLAGS="-fsanitize=address -fsanitize-recover=all -O1 -g -fno-omit-frame-pointer" \ - -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=address -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s" \ - -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=address -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s" - echo "--- :shinto_shrine: Running ninja" && \ - /usr/bin/ninja | tee ninja.log && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz * - echo "--- :beers: Done" - label: ":_: Address Sanitizer" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build.tar.gz" - - "ninja.log" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - command: ["--privileged"] - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - - 
ASAN_OPTIONS=fast_unwind_on_malloc=0:halt_on_error=0:detect_odr_violation=0:detect_leaks=0:symbolize=1:verbosity=1 - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":_: Undefined Sanitizer" && \ - tar -zxf build.tar.gz --no-same-owner && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ctest -j8 -LE _tests -V -O sanitizer.log || true - label: ":_: Undefined Sanitizer Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "sanitizer.log" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - timeout: 120 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":_: Address Sanitizer" && \ - tar -zxf build.tar.gz --no-same-owner && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ctest -j8 -LE _tests -V -O sanitizer.log || true - label: ":_: Address Sanitizer Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "sanitizer.log" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - timeout: 120 \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index e49f6dd0907..6ec500711d7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -33,9 +33,9 @@ set( CMAKE_CXX_STANDARD 14 ) set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) -set(VERSION_MAJOR 2) +set(VERSION_MAJOR 3) set(VERSION_MINOR 0) -set(VERSION_PATCH 3) +set(VERSION_PATCH 0) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/Docker/README.md b/Docker/README.md index b5228e17bbc..02eabc07b1c 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd bos/Docker docker build . -t boscore/bos -s BOS ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v2.0.3 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v3.0.0 tag, you could do the following: ```bash -docker build -t boscore/bos:v2.0.3 --build-arg branch=v2.0.3 . +docker build -t boscore/bos:v3.0.0 --build-arg branch=v3.0.0 . ``` diff --git a/README.md b/README.md index 0b90a01c333..db7f7e0f82c 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # BOSCore - Born for DApps. Born for Usability. -## BOSCore Version: v2.0.3 -### Basic EOSIO Version: v1.6.4 (support REX) +## BOSCore Version: v3.0.0 +### Basic EOSIO Version: v1.6.6 (support REX) # Background The emergence of EOS has brought new imagination to the blockchain. In just a few months since the main network was launched, the version has undergone dozens of upgrades, not only the stability has been greatly improved, but also the new functions have been gradually realized. 
The node team is also actively involved in building the EOSIO ecosystem. What is even more exciting is that EOS has attracted more and more development teams. There are already hundreds of DApp running on the EOS main network. The transaction volume and circulation market value far exceed Ethereum, and the space for development is growing broader. @@ -39,16 +39,6 @@ Attention: 3. Treat update of eosio/eos code as new feature. 4. Emergent issues must repaired by adopting hotfixes mode. -## BOSCore Workflow -BOSCore encourage community developer actively participate in contributing the code, members should follow the workflow below. -![BOSCore Workflow](./images/bos-workflow.png) - -Attention: -1. Only allow Feature Branch or bug fix to submit PR to Develop Branch. -2. Rebase is required before submitting PR to Develop Branch. -3. Treat update of eosio/eos code as new feature. -4. Emergent issues must repaired by adopting hotfixes mode. - BOSCore bases on EOSIO, so you can also referer: [Getting Started](https://developers.eos.io/eosio-nodeos/docs/overview-1) diff --git a/README_CN.md b/README_CN.md index 23b9199068f..718c2ace476 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,7 +1,7 @@ # BOSCore - 更可用的链,为DApp而生。 -## BOSCore Version: v2.0.3 -### Basic EOSIO Version: v1.6.4 (support REX) +## BOSCore Version: v3.0.0 +### Basic EOSIO Version: v1.6.6 (support REX) # 背景 EOS的出现给区块链带来了新的想象力,主网启动短短几个月以来,版本经历了几十次升级,不仅稳定性得到了很大提高,并且新功能也逐步实现,各个节点团队也积极参与建设EOSIO生态。让人更加兴奋的是,EOS已经吸引了越来越多的开发团队,当前已经有数百个DApp在EOS主网上面运行,其交易量和流通市值远超以太坊,可发展的空间愈来愈广阔。 @@ -39,16 +39,6 @@ BOSCore 鼓励社区开发者参与代码贡献,社区成员应当遵循以下 3. EOSIO 主网版本作为一个 Feature Branch 来对待 4. 紧急问题修复采用 hotfixes 模式 -## BOSCore 开发流程 -BOSCore 鼓励社区开发者参与代码贡献,社区成员应当遵循以下工作流: -![BOSCore Workflow](./images/bos-workflow.png) - -注意: -1. 只有待发布的 Feature Branch 或者Bug修复才应该向 Develop Branch 提交 -2. 向 Develop Branch 提交 PR 之前需要现在本地执行 rebase 操作 -3. EOSIO 主网版本作为一个 Feature Branch 来对待 -4. 
紧急问题修复采用 hotfixes 模式 - BOSCore是基于EOSIO技术的扩展,所以EOSIO的相关资料也可以参考: [EOSIO 开始](https://developers.eos.io/eosio-nodeos/docs/overview-1) diff --git a/contracts/eosio.system/delegate_bandwidth.cpp b/contracts/eosio.system/delegate_bandwidth.cpp index a2920d70295..a5e9ad14efe 100644 --- a/contracts/eosio.system/delegate_bandwidth.cpp +++ b/contracts/eosio.system/delegate_bandwidth.cpp @@ -193,7 +193,7 @@ namespace eosiosystem { auto fee = ( tokens_out.amount + 199 ) / 200; /// .5% fee (round up) // since tokens_out.amount was asserted to be at least 2 earlier, fee.amount < tokens_out.amount - + if( fee > 0 ) { INLINE_ACTION_SENDER(eosio::token, transfer)( N(eosio.token), {account,N(active)}, { account, N(eosio.ramfee), asset(fee), std::string("sell ram fee") } ); diff --git a/contracts/eosio.system/eosio.system.abi b/contracts/eosio.system/eosio.system.abi index 87937c787f9..f2f9f394cf9 100644 --- a/contracts/eosio.system/eosio.system.abi +++ b/contracts/eosio.system/eosio.system.abi @@ -575,4 +575,4 @@ ], "ricardian_clauses": [], "abi_extensions": [] -} +} \ No newline at end of file diff --git a/contracts/eosio.system/exchange_state.hpp b/contracts/eosio.system/exchange_state.hpp index 3705a9b8b98..e6434e32b49 100644 --- a/contracts/eosio.system/exchange_state.hpp +++ b/contracts/eosio.system/exchange_state.hpp @@ -28,7 +28,7 @@ namespace eosiosystem { uint64_t primary_key()const { return supply.symbol; } - asset convert_to_exchange( connector& c, asset in ); + asset convert_to_exchange( connector& c, asset in ); asset convert_from_exchange( connector& c, asset in ); asset convert( asset from, symbol_type to ); diff --git a/contracts/eosiolib/eosiolib.cpp b/contracts/eosiolib/eosiolib.cpp index 48d80b1037b..35bc7460c70 100644 --- a/contracts/eosiolib/eosiolib.cpp +++ b/contracts/eosiolib/eosiolib.cpp @@ -55,6 +55,13 @@ namespace eosio { ds >> params; } + void set_upgrade_parameters(const eosio::upgrade_parameters& params) { + char buf[sizeof(eosio::upgrade_parameters)]; + eosio::datastream ds( buf, sizeof(buf) ); + ds << params; + set_upgrade_parameters_packed( buf, ds.tellp() ); + } + using ::memset; using ::memcpy; diff --git a/contracts/eosiolib/privileged.h b/contracts/eosiolib/privileged.h index 8943a09db23..d6d6761e9e3 100644 --- a/contracts/eosiolib/privileged.h +++ b/contracts/eosiolib/privileged.h @@ -92,6 +92,7 @@ extern "C" { */ uint32_t get_blockchain_parameters_packed(char* data, uint32_t datalen); + void set_upgrade_parameters_packed(char* data, uint32_t datalen); /** * @brief Activate new feature * Activate new feature diff --git a/contracts/eosiolib/privileged.hpp b/contracts/eosiolib/privileged.hpp index 3091acf8b3b..e6541f31eb6 100644 --- a/contracts/eosiolib/privileged.hpp +++ b/contracts/eosiolib/privileged.hpp @@ -108,6 +108,14 @@ namespace eosio { ) }; + struct upgrade_parameters { + uint32_t target_block_num; + + EOSLIB_SERIALIZE(upgrade_parameters, + (target_block_num) + ) + }; + /** * @brief Set the blockchain parameters * Set the blockchain parameters @@ -122,6 +130,8 @@ namespace eosio { */ void get_blockchain_parameters(eosio::blockchain_parameters& params); + void set_upgrade_parameters(const eosio::upgrade_parameters& params); + ///@} priviledgedcppapi /** diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 2c430fecea0..8f765f91ec1 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -14,6 +14,8 @@ add_library( eosio_chain block_header_state.cpp block_state.cpp fork_database.cpp + pbft_database.cpp + 
pbft.cpp controller.cpp authorization_manager.cpp resource_limits.cpp diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 70bcfa3a236..690b10a947e 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -33,7 +33,7 @@ namespace eosio { namespace chain { * contain a transaction mroot, action mroot, or new_producers as those components * are derived from chain state. */ - block_header_state block_header_state::generate_next( block_timestamp_type when )const { + block_header_state block_header_state::generate_next( block_timestamp_type when, bool pbft_enabled )const { block_header_state result; if( when != block_timestamp_type() ) { @@ -62,9 +62,17 @@ namespace eosio { namespace chain { result.pending_schedule = pending_schedule; result.dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; result.bft_irreversible_blocknum = bft_irreversible_blocknum; + result.pbft_stable_checkpoint_blocknum = pbft_stable_checkpoint_blocknum; + + + if (pbft_enabled) { + result.dpos_irreversible_blocknum = dpos_irreversible_blocknum; + } else { + result.producer_to_last_implied_irb[prokey.producer_name] = result.dpos_proposed_irreversible_blocknum; + result.dpos_irreversible_blocknum = result.calc_dpos_last_irreversible(); + } + - result.producer_to_last_implied_irb[prokey.producer_name] = result.dpos_proposed_irreversible_blocknum; - result.dpos_irreversible_blocknum = result.calc_dpos_last_irreversible(); /// grow the confirmed count static_assert(std::numeric_limits::max() >= (config::max_producers * 2 / 3) + 1, "8bit confirmations may not be able to hold all of the needed confirmations"); @@ -73,23 +81,30 @@ namespace eosio { namespace chain { auto num_active_producers = active_schedule.producers.size(); uint32_t required_confs = (uint32_t)(num_active_producers * 2 / 3) + 1; - if( confirm_count.size() < config::maximum_tracked_dpos_confirmations ) { - result.confirm_count.reserve( confirm_count.size() + 1 ); - result.confirm_count = confirm_count; - result.confirm_count.resize( confirm_count.size() + 1 ); - result.confirm_count.back() = (uint8_t)required_confs; - } else { - result.confirm_count.resize( confirm_count.size() ); - memcpy( &result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1 ); - result.confirm_count.back() = (uint8_t)required_confs; + if (!pbft_enabled) { + if (confirm_count.size() < config::maximum_tracked_dpos_confirmations) { + result.confirm_count.reserve(confirm_count.size() + 1); + result.confirm_count = confirm_count; + result.confirm_count.resize(confirm_count.size() + 1); + result.confirm_count.back() = (uint8_t) required_confs; + } else { + result.confirm_count.resize(confirm_count.size()); + memcpy(&result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1); + result.confirm_count.back() = (uint8_t) required_confs; + } } return result; } /// generate_next - bool block_header_state::maybe_promote_pending() { - if( pending_schedule.producers.size() && - dpos_irreversible_blocknum >= pending_schedule_lib_num ) + bool block_header_state::maybe_promote_pending( bool pbft_enabled ) { + + bool should_promote_pending = pending_schedule.producers.size(); + if ( !pbft_enabled ) { + should_promote_pending = should_promote_pending && dpos_irreversible_blocknum >= pending_schedule_lib_num; + } + + if (should_promote_pending) { active_schedule = move( pending_schedule ); @@ -99,7 +114,13 @@ namespace eosio { namespace chain { if( existing != 
producer_to_last_produced.end() ) { new_producer_to_last_produced[pro.producer_name] = existing->second; } else { - new_producer_to_last_produced[pro.producer_name] = dpos_irreversible_blocknum; + //TODO: max of bft and dpos lib + if (pbft_enabled) { + new_producer_to_last_produced[pro.producer_name] = bft_irreversible_blocknum; + } else { + new_producer_to_last_produced[pro.producer_name] = dpos_irreversible_blocknum; + } + } } @@ -109,7 +130,13 @@ namespace eosio { namespace chain { if( existing != producer_to_last_implied_irb.end() ) { new_producer_to_last_implied_irb[pro.producer_name] = existing->second; } else { - new_producer_to_last_implied_irb[pro.producer_name] = dpos_irreversible_blocknum; + //TODO: max of bft and dpos lib + if (pbft_enabled) { + new_producer_to_last_implied_irb[pro.producer_name] = bft_irreversible_blocknum; + } else { + new_producer_to_last_implied_irb[pro.producer_name] = dpos_irreversible_blocknum; + } + } } @@ -141,13 +168,13 @@ namespace eosio { namespace chain { * * If the header specifies new_producers then apply them accordingly. */ - block_header_state block_header_state::next( const signed_block_header& h, bool skip_validate_signee )const { + block_header_state block_header_state::next( const signed_block_header& h, bool skip_validate_signee, bool pbft_enabled )const { EOS_ASSERT( h.timestamp != block_timestamp_type(), block_validate_exception, "", ("h",h) ); //EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" ); EOS_ASSERT( h.timestamp > header.timestamp, block_validate_exception, "block must be later in time" ); EOS_ASSERT( h.previous == id, unlinkable_block_exception, "block must link to current state" ); - auto result = generate_next( h.timestamp ); + auto result = generate_next( h.timestamp, pbft_enabled); EOS_ASSERT( result.header.producer == h.producer, wrong_producer, "wrong producer specified" ); EOS_ASSERT( result.header.schedule_version == h.schedule_version, producer_schedule_exception, "schedule_version in signed block is corrupted" ); @@ -161,9 +188,11 @@ namespace eosio { namespace chain { /// below this point is state changes that cannot be validated with headers alone, but never-the-less, /// must result in header state changes - result.set_confirmed( h.confirmed ); - auto was_pending_promoted = result.maybe_promote_pending(); + result.set_confirmed(h.confirmed, pbft_enabled); + + + auto was_pending_promoted = result.maybe_promote_pending(pbft_enabled); if( h.new_producers ) { EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); @@ -185,7 +214,7 @@ namespace eosio { namespace chain { return result; } /// next - void block_header_state::set_confirmed( uint16_t num_prev_blocks ) { + void block_header_state::set_confirmed( uint16_t num_prev_blocks, bool pbft_enabled ) { /* idump((num_prev_blocks)(confirm_count.size())); @@ -193,6 +222,10 @@ namespace eosio { namespace chain { std::cerr << "confirm_count["<block_file.generic_string())); + block_stream.open(block_file.generic_string().c_str(), LOG_WRITE); + index_stream.open(index_file.generic_string().c_str(), LOG_WRITE); + + close(); + + block_stream.open(block_file.generic_string().c_str(), LOG_RW); + index_stream.open(index_file.generic_string().c_str(), LOG_RW); + + open_files = true; + } } block_log::block_log(const fc::path& data_dir) @@ -88,26 +84,21 @@ namespace eosio { namespace chain { block_log::~block_log() { if (my) { 
flush(); + my->close(); my.reset(); } } void block_log::open(const fc::path& data_dir) { - if (my->block_stream.is_open()) - my->block_stream.close(); - if (my->index_stream.is_open()) - my->index_stream.close(); + my->close(); if (!fc::is_directory(data_dir)) fc::create_directories(data_dir); + my->block_file = data_dir / "blocks.log"; my->index_file = data_dir / "blocks.index"; - //ilog("Opening block log at ${path}", ("path", my->block_file.generic_string())); - my->block_stream.open(my->block_file.generic_string().c_str(), LOG_WRITE); - my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); - my->block_write = true; - my->index_write = true; + my->reopen(); /* On startup of the block log, there are several states the log file and the index file can be * in relation to each other. @@ -132,7 +123,6 @@ namespace eosio { namespace chain { if (log_size) { ilog("Log is nonempty"); - my->check_block_read(); my->block_stream.seekg( 0 ); my->version = 0; my->block_stream.read( (char*)&my->version, sizeof(my->version) ); @@ -152,12 +142,13 @@ namespace eosio { namespace chain { } my->head = read_head(); - my->head_id = my->head->id(); + if( my->head ) { + my->head_id = my->head->id(); + } else { + my->head_id = {}; + } if (index_size) { - my->check_block_read(); - my->check_index_read(); - ilog("Index is nonempty"); uint64_t block_pos; my->block_stream.seekg(-sizeof(uint64_t), std::ios::end); @@ -180,10 +171,9 @@ namespace eosio { namespace chain { } } else if (index_size) { ilog("Index is nonempty, remove and recreate it"); - my->index_stream.close(); + my->close(); fc::remove_all(my->index_file); - my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); - my->index_write = true; + my->reopen(); } } @@ -191,9 +181,10 @@ namespace eosio { namespace chain { try { EOS_ASSERT( my->genesis_written_to_block_log, block_log_append_fail, "Cannot append to block log until the genesis is first written" ); - my->check_block_write(); - my->check_index_write(); + my->check_open_files(); + my->block_stream.seekp(0, std::ios::end); + my->index_stream.seekp(0, std::ios::end); uint64_t pos = my->block_stream.tellp(); EOS_ASSERT(my->index_stream.tellp() == sizeof(uint64_t) * (b->block_num() - my->first_block_num), block_log_append_fail, @@ -220,22 +211,17 @@ namespace eosio { namespace chain { } void block_log::reset( const genesis_state& gs, const signed_block_ptr& first_block, uint32_t first_block_num ) { - if (my->block_stream.is_open()) - my->block_stream.close(); - if (my->index_stream.is_open()) - my->index_stream.close(); + my->close(); fc::remove_all(my->block_file); fc::remove_all(my->index_file); - my->block_stream.open(my->block_file.generic_string().c_str(), LOG_WRITE); - my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); - my->block_write = true; - my->index_write = true; + my->reopen(); auto data = fc::raw::pack(gs); my->version = 0; // version of 0 is invalid; it indicates that the genesis was not properly written to the block log my->first_block_num = first_block_num; + my->block_stream.seekp(0, std::ios::end); my->block_stream.write((char*)&my->version, sizeof(my->version)); my->block_stream.write((char*)&my->first_block_num, sizeof(my->first_block_num)); my->block_stream.write(data.data(), data.size()); @@ -251,22 +237,16 @@ namespace eosio { namespace chain { auto pos = my->block_stream.tellp(); - my->block_stream.close(); - my->block_stream.open(my->block_file.generic_string().c_str(), std::ios::in | std::ios::out | 
std::ios::binary ); // Bypass append-only writing just once - static_assert( block_log::max_supported_version > 0, "a version number of zero is not supported" ); my->version = block_log::max_supported_version; my->block_stream.seekp( 0 ); my->block_stream.write( (char*)&my->version, sizeof(my->version) ); my->block_stream.seekp( pos ); flush(); - - my->block_write = false; - my->check_block_write(); // Reset to append-only writing. } std::pair block_log::read_block(uint64_t pos)const { - my->check_block_read(); + my->check_open_files(); my->block_stream.seekg(pos); std::pair result; @@ -290,7 +270,7 @@ namespace eosio { namespace chain { } uint64_t block_log::get_block_pos(uint32_t block_num) const { - my->check_index_read(); + my->check_open_files(); if (!(my->head && block_num <= block_header::num_from_id(my->head_id) && block_num >= my->first_block_num)) return npos; my->index_stream.seekg(sizeof(uint64_t) * (block_num - my->first_block_num)); @@ -300,7 +280,7 @@ namespace eosio { namespace chain { } signed_block_ptr block_log::read_head()const { - my->check_block_read(); + my->check_open_files(); uint64_t pos; @@ -328,16 +308,22 @@ namespace eosio { namespace chain { void block_log::construct_index() { ilog("Reconstructing Block Log Index..."); - my->index_stream.close(); + my->close(); + fc::remove_all(my->index_file); - my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); - my->index_write = true; + + my->reopen(); uint64_t end_pos; - my->check_block_read(); my->block_stream.seekg(-sizeof( uint64_t), std::ios::end); my->block_stream.read((char*)&end_pos, sizeof(end_pos)); + + if( end_pos == npos ) { + ilog( "Block log contains no blocks. No need to construct index." ); + return; + } + signed_block tmp; uint64_t pos = 0; @@ -357,6 +343,7 @@ namespace eosio { namespace chain { my->block_stream.read((char*) &totem, sizeof(totem)); } + my->index_stream.seekp(0, std::ios::end); while( pos < end_pos ) { fc::raw::unpack(my->block_stream, tmp); my->block_stream.read((char*)&pos, sizeof(pos)); diff --git a/libraries/chain/block_state.cpp b/libraries/chain/block_state.cpp index b4834775951..77080583b1e 100644 --- a/libraries/chain/block_state.cpp +++ b/libraries/chain/block_state.cpp @@ -3,15 +3,15 @@ namespace eosio { namespace chain { - block_state::block_state( const block_header_state& prev, block_timestamp_type when ) - :block_header_state( prev.generate_next( when ) ), + block_state::block_state( const block_header_state& prev, block_timestamp_type when, bool pbft_enabled ) + :block_header_state( prev.generate_next( when, pbft_enabled) ), block( std::make_shared() ) { static_cast(*block) = header; } - block_state::block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee ) - :block_header_state( prev.next( *b, skip_validate_signee )), block( move(b) ) + block_state::block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee, bool pbft_enabled ) + :block_header_state( prev.next( *b, skip_validate_signee, pbft_enabled)), block( move(b) ) { } diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2a5581a1c20..2aa4644afb4 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -35,6 +35,7 @@ using controller_index_set = index_set< global_property_multi_index, global_property2_multi_index, dynamic_global_property_multi_index, + upgrade_property_multi_index, block_summary_multi_index, transaction_multi_index, generated_transaction_multi_index, @@ -122,6 
+123,12 @@ struct controller_impl { chainbase::database reversible_blocks; ///< a special database to persist blocks that have successfully been applied but are still reversible block_log blog; optional pending; + bool pbft_enabled = false; + bool pbft_upgrading = false; + optional pending_pbft_lib; + optional pending_pbft_checkpoint; + block_state_ptr pbft_prepared; + block_state_ptr my_prepare; block_state_ptr head; fork_database fork_db; wasm_interface wasmif; @@ -344,6 +351,7 @@ struct controller_impl { void init(std::function shutdown, const snapshot_reader_ptr& snapshot) { + bool report_integrity_hash = !!snapshot; if (snapshot) { EOS_ASSERT( !head, fork_database_exception, "" ); @@ -351,9 +359,14 @@ struct controller_impl { read_from_snapshot( snapshot ); + //do upgrade migration if necessary; + update_pbft_status(); //compatiable for snapshot integrity test + auto end = blog.read_head(); if( !end ) { - blog.reset( conf.genesis, signed_block_ptr(), head->block_num + 1 ); + auto reset_block_num = head->block_num + 1; + if (pbft_enabled) reset_block_num = head->pbft_stable_checkpoint_blocknum; + blog.reset( conf.genesis, signed_block_ptr(), reset_block_num ); } else if( end->block_num() > head->block_num ) { replay( shutdown ); } else { @@ -361,6 +374,8 @@ struct controller_impl { "Block log is provided with snapshot but does not contain the head block from the snapshot" ); } } else { + //do upgrade migration if necessary; + update_pbft_status(); //compatiable for snapshot integrity test if( !head ) { initialize_fork_db(); // set head to genesis state } @@ -373,7 +388,7 @@ struct controller_impl { report_integrity_hash = true; } } - + if( shutdown() ) return; const auto& ubi = reversible_blocks.get_index(); @@ -411,6 +426,37 @@ struct controller_impl { //*bos end* } + void update_pbft_status() { + try { + auto utb = optional{}; + auto& upo = db.get(); + if (upo.upgrade_target_block_num > 0) utb = upo.upgrade_target_block_num; + + auto ucb = optional{}; + if (upo.upgrade_complete_block_num > 0) ucb = upo.upgrade_complete_block_num; + + if (utb && !ucb && head->dpos_irreversible_blocknum >= *utb) { + db.modify( upo, [&]( auto& up ) { + up.upgrade_complete_block_num = head->block_num; + }); + if (!replaying) wlog("pbft will be working after the block ${b}", ("b", head->block_num)); + } + + if ( !pbft_enabled && utb && head->block_num >= *utb) { + if (!pbft_upgrading) pbft_upgrading = true; + + // new version starts from the next block of ucb, this is to avoid inconsistency after pre calculation inside schedule loop. 
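+                // Upgrade flow summary, as implied by the checks above and below (utb = upgrade_target_block_num,
+                // ucb = upgrade_complete_block_num, both read from the upgrade_property_object):
+                //   1. once the head block's dpos LIB reaches utb, ucb is recorded as the current head block number;
+                //   2. while head->block_num >= utb the node stays flagged as pbft_upgrading;
+                //   3. pbft_enabled is only switched on for blocks strictly after ucb, as checked below.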
+ if (ucb && head->block_num > *ucb) { + if (pbft_upgrading) pbft_upgrading = false; + pbft_enabled = true; + } + } + } catch( const boost::exception& e) { + wlog("no upo found, generating..."); + db.create([](auto&){}); + } + } + ~controller_impl() { pending.reset(); @@ -497,9 +543,21 @@ struct controller_impl { section.add_row(conf.genesis, db); }); - snapshot->write_section([this]( auto §ion ){ - section.template add_row(*fork_db.head(), db); - }); + snapshot->write_section([]( auto §ion ){}); + + auto lscb = fork_db.get_block_in_current_chain_by_num(fork_db.head()->pbft_stable_checkpoint_blocknum); + if (pbft_enabled && lscb) { + snapshot->write_section([]( auto §ion ) {}); + + snapshot->write_section([this, &lscb](auto §ion) { + auto bss = fork_db.fetch_branch_from(fork_db.head()->id, lscb->id).first; + section.template add_row(bss, db); + }); + } else { + snapshot->write_section([this]( auto §ion ) { + section.template add_row(*fork_db.head(), db); + }); + } controller_index_set::walk_indices([this, &snapshot]( auto utils ){ using value_t = typename decltype(utils)::index_t::value_type; @@ -529,18 +587,45 @@ struct controller_impl { header.validate(); }); + bool migrated = snapshot->has_section(); + auto upgraded = snapshot->has_section(); + if (migrated && upgraded) { + snapshot->read_section([this](auto §ion) { + branch_type bss; + section.template read_row(bss, db); + if (bss.empty()) elog( "no last stable checkpoint block found in the snapshot, perhaps corrupted"); + + ilog("${n} fork_db blocks found in the snapshot", ("n", bss.size())); + + for (auto i = bss.rbegin(); i != bss.rend(); ++i ) { + if (i == bss.rbegin()) { + fork_db.set(*i); + snapshot_head_block = (*i)->block_num; + } else { + fork_db.add((*i), true, true); + } + fork_db.set_validity((*i), true); + fork_db.mark_in_current_chain((*i), true); + } + head = fork_db.head(); + }); + } else { + snapshot->read_section([this, &migrated](snapshot_reader::section_reader §ion) { + block_header_state head_header_state; + if (migrated) { + section.read_row(head_header_state, db); + } else { + section.read_pbft_migrate_row(head_header_state, db); + } + auto head_state = std::make_shared(head_header_state); + fork_db.set(head_state); + fork_db.set_validity(head_state, true); + fork_db.mark_in_current_chain(head_state, true); + head = head_state; + snapshot_head_block = head->block_num; + }); - snapshot->read_section([this]( auto §ion ){ - block_header_state head_header_state; - section.read_row(head_header_state, db); - - auto head_state = std::make_shared(head_header_state); - fork_db.set(head_state); - fork_db.set_validity(head_state, true); - fork_db.mark_in_current_chain(head_state, true); - head = head_state; - snapshot_head_block = head->block_num; - }); + } controller_index_set::walk_indices([this, &snapshot]( auto utils ){ using value_t = typename decltype(utils)::index_t::value_type; @@ -550,14 +635,16 @@ struct controller_impl { return; } - snapshot->read_section([this]( auto& section ) { - bool more = !section.empty(); - while(more) { - decltype(utils)::create(db, [this, §ion, &more]( auto &row ) { - more = section.read_row(row, db); - }); - } - }); + if(snapshot->has_section()){ + snapshot->read_section([this]( auto& section ) { + bool more = !section.empty(); + while(more) { + decltype(utils)::create(db, [this, §ion, &more]( auto &row ) { + more = section.read_row(row, db); + }); + } + }); + } }); read_contract_tables_from_snapshot(snapshot); @@ -659,6 +746,7 @@ struct controller_impl { // *bos end* + 
authorization.initialize_database(); resource_limits.initialize_database(); @@ -691,7 +779,7 @@ struct controller_impl { // remove action db_list U msig_list -> msig_list db_list U conf_list -> conf_list // msig_list-name_list -> msig_list conf_list - name_list -> conf_list msig_list->db_list // producer api set_whitelist_blacklist - // blacklst -> conf.xxx_blacklist conf_list U msig_list -> conf_list + // blacklst -> conf.xxx_blacklist conf_list U msig_list -> conf_list // remove_grey_list // check if remove acount in msig_list then assert fail could not remove account in msig blacklist void set_name_list(list_type list, list_action_type action, std::vector name_list) @@ -774,7 +862,7 @@ struct controller_impl { void check_msig_blacklist(list_type blacklist_type,account_name account) { auto check_blacklist = [&](const flat_set& msig_blacklist){ - EOS_ASSERT(msig_blacklist.find(account) == msig_blacklist.end(), transaction_exception, + EOS_ASSERT(msig_blacklist.find(account) == msig_blacklist.end(), transaction_exception, " do not remove account in multisig blacklist , account: ${account}", ("account", account)); }; @@ -799,7 +887,7 @@ struct controller_impl { { try{ auto merge_blacklist = [&](const shared_vector& msig_blacklist_in_db,flat_set& conf_blacklist){ - + for (auto& a : msig_blacklist_in_db) { conf_blacklist.insert(a); @@ -824,13 +912,17 @@ struct controller_impl { void commit_block( bool add_to_fork_db ) { auto reset_pending_on_exit = fc::make_scoped_exit([this]{ pending.reset(); + }); try { + if (add_to_fork_db) { pending->_pending_block_state->validated = true; - auto new_bsp = fork_db.add(pending->_pending_block_state, true); + + auto new_bsp = fork_db.add(pending->_pending_block_state, true, pbft_enabled); emit(self.accepted_block_header, pending->_pending_block_state); + head = fork_db.head(); EOS_ASSERT(new_bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); } @@ -842,6 +934,10 @@ struct controller_impl { }); } + if (pbft_enabled && pending->_pending_block_state->pbft_watermark) { + if (auto bs = fork_db.get_block(pending->_pending_block_state->id)) fork_db.mark_as_pbft_watermark(bs); + } + emit( self.accepted_block, pending->_pending_block_state ); } catch (...) 
{ // dont bother resetting pending, instead abort the block @@ -1250,6 +1346,9 @@ struct controller_impl { { EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" ); + set_pbft_lib(); + set_pbft_lscb(); + auto guard_pending = fc::make_scoped_exit([this](){ pending.reset(); }); @@ -1263,39 +1362,68 @@ struct controller_impl { pending.emplace(maybe_session()); } + update_pbft_status(); + pending->_block_status = s; pending->_producer_block_id = producer_block_id; pending->_signer = signer; - pending->_pending_block_state = std::make_shared( *head, when ); // promotes pending schedule (if any) to active + pending->_pending_block_state = std::make_shared( *head, when, pbft_enabled); // promotes pending schedule (if any) to active pending->_pending_block_state->in_current_chain = true; - pending->_pending_block_state->set_confirmed(confirm_block_count); + pending->_pending_block_state->set_confirmed(confirm_block_count, pbft_enabled); + - auto was_pending_promoted = pending->_pending_block_state->maybe_promote_pending(); + auto was_pending_promoted = pending->_pending_block_state->maybe_promote_pending(pbft_enabled); //modify state in speculative block only if we are speculative reads mode (other wise we need clean state for head or irreversible reads) if ( read_mode == db_read_mode::SPECULATIVE || pending->_block_status != controller::block_status::incomplete ) { const auto& gpo = db.get(); - if( gpo.proposed_schedule_block_num.valid() && // if there is a proposed schedule that was proposed in a block ... - ( *gpo.proposed_schedule_block_num <= pending->_pending_block_state->dpos_irreversible_blocknum ) && // ... that has now become irreversible ... - pending->_pending_block_state->pending_schedule.producers.size() == 0 && // ... and there is room for a new pending schedule ... - !was_pending_promoted // ... and not just because it was promoted to active at the start of this block, then: - ) - { - // Promote proposed schedule to pending schedule. - if( !replaying ) { - ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", - ("proposed_num", *gpo.proposed_schedule_block_num)("n", pending->_pending_block_state->block_num) - ("lib", pending->_pending_block_state->dpos_irreversible_blocknum) - ("schedule", static_cast(gpo.proposed_schedule) ) ); - } - pending->_pending_block_state->set_new_producers( gpo.proposed_schedule ); - db.modify( gpo, [&]( auto& gp ) { - gp.proposed_schedule_block_num = optional(); - gp.proposed_schedule.clear(); - }); - } + + auto lib_num = std::max(pending->_pending_block_state->dpos_irreversible_blocknum, pending->_pending_block_state->bft_irreversible_blocknum); + + if (pbft_enabled && gpo.proposed_schedule_block_num) { + auto bs = fork_db.get_block_in_current_chain_by_num(*gpo.proposed_schedule_block_num); + if (bs) fork_db.mark_as_pbft_watermark(bs); + } + + bool should_promote_pending_schedule = false; + + should_promote_pending_schedule = gpo.proposed_schedule_block_num.valid() // if there is a proposed schedule that was proposed in a block ... + && pending->_pending_block_state->pending_schedule.producers.size() == 0 // ... and there is room for a new pending schedule ... + && !was_pending_promoted; // ... 
and not just because it was promoted to active at the start of this block, then: + + if (pbft_enabled) { + should_promote_pending_schedule = should_promote_pending_schedule + && pending->_pending_block_state->block_num > *gpo.proposed_schedule_block_num; + } else { + should_promote_pending_schedule = should_promote_pending_schedule + && ( *gpo.proposed_schedule_block_num <= pending->_pending_block_state->dpos_irreversible_blocknum ); + } + + if ( pbft_upgrading && !replaying) wlog("system is upgrading, no producer schedule promotion will happen until fully upgraded."); + + if ( should_promote_pending_schedule ) + { + if (!pbft_upgrading) { + // Promote proposed schedule to pending schedule. + if (!replaying) { + ilog("promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", + ("proposed_num", *gpo.proposed_schedule_block_num)("n", pending->_pending_block_state->block_num) + ("lib", lib_num) + ("schedule", static_cast(gpo.proposed_schedule))); + } + pending->_pending_block_state->set_new_producers(gpo.proposed_schedule); + + if (pbft_enabled) { + pending->_pending_block_state->pbft_watermark = true; + } + } + db.modify( gpo, [&]( auto& gp ) { + gp.proposed_schedule_block_num = optional(); + gp.proposed_schedule.clear(); + }); + } try { auto onbtrx = std::make_shared( get_on_block_transaction() ); @@ -1333,7 +1461,7 @@ struct controller_impl { void apply_block( const signed_block_ptr& b, controller::block_status s ) { try { try { - //EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); +// EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); auto producer_block_id = b->id(); start_block( b->timestamp, b->confirmed, s , producer_block_id); @@ -1351,7 +1479,15 @@ struct controller_impl { } pending->_pending_block_state->block->header_extensions = b->header_extensions; - pending->_pending_block_state->block->block_extensions = b->block_extensions; + + extensions_type pending_block_extensions; + for ( const auto& extn: b->block_extensions) { + if (extn.first != static_cast(block_extension_type::pbft_stable_checkpoint)) { + pending_block_extensions.emplace_back(extn); + } + } + + pending->_pending_block_state->block->block_extensions = pending_block_extensions; transaction_trace_ptr trace; @@ -1425,9 +1561,11 @@ struct controller_impl { auto prev = fork_db.get_block( b->previous ); EOS_ASSERT( prev, unlinkable_block_exception, "unlinkable block ${id}", ("id", id)("previous", b->previous) ); - return async_thread_pool( thread_pool, [b, prev]() { + auto pbft = pbft_enabled; + + return async_thread_pool( thread_pool, [b, prev, pbft]() { const bool skip_validate_signee = false; - return std::make_shared( *prev, move( b ), skip_validate_signee ); + return std::make_shared( *prev, move( b ), skip_validate_signee, pbft); } ); } @@ -1442,17 +1580,20 @@ struct controller_impl { auto& b = new_header_state->block; emit( self.pre_accepted_block, b ); - fork_db.add( new_header_state, false ); + fork_db.add( new_header_state, false, pbft_enabled); if (conf.trusted_producers.count(b->producer)) { trusted_producer_light_validation = true; - }; + } emit( self.accepted_block_header, new_header_state ); + set_pbft_lib(); + if ( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( s ); + maybe_switch_forks( s ); } + set_pbft_lscb(); } FC_LOG_AND_RETHROW( ) } @@ -1462,13 +1603,17 @@ struct controller_impl { EOS_ASSERT(!pending, 
block_validate_exception, "it is not valid to push a block when there is a pending block"); + set_pbft_lib(); + set_pbft_lscb(); + try { EOS_ASSERT( b, block_validate_exception, "trying to push empty block" ); EOS_ASSERT( (s == controller::block_status::irreversible || s == controller::block_status::validated), block_validate_exception, "invalid block status for replay" ); emit( self.pre_accepted_block, b ); const bool skip_validate_signee = !conf.force_all_checks; - auto new_header_state = fork_db.add( b, skip_validate_signee ); + + auto new_header_state = fork_db.add( b, skip_validate_signee, pbft_enabled); emit( self.accepted_block_header, new_header_state ); @@ -1476,6 +1621,15 @@ struct controller_impl { maybe_switch_forks( s ); } + // apply stable checkpoint when there is one + // TODO: verify required one more time? + for (const auto &extn: b->block_extensions) { + if (extn.first == static_cast(block_extension_type::pbft_stable_checkpoint)) { + pbft_commit_local(b->id()); + set_pbft_latest_checkpoint(b->id()); + break; + } + } // on replay irreversible is not emitted by fork database, so emit it explicitly here if( s == controller::block_status::irreversible ) emit( self.irreversible_block, new_header_state ); @@ -1483,6 +1637,52 @@ struct controller_impl { } FC_LOG_AND_RETHROW( ) } + void pbft_commit_local( const block_id_type& id ) { + pending_pbft_lib.reset(); + pending_pbft_lib.emplace(id); + } + + void set_pbft_lib() { + + if (!pbft_enabled) return; + + if ( pending_pbft_lib ) { + fork_db.set_bft_irreversible(*pending_pbft_lib); + pending_pbft_lib.reset(); + + if (!pending && read_mode != db_read_mode::IRREVERSIBLE) { + maybe_switch_forks(controller::block_status::complete); + } + } + } + + void set_pbft_latest_checkpoint( const block_id_type& id ) { + pending_pbft_checkpoint.reset(); + pending_pbft_checkpoint.emplace(id); + } + + void set_pbft_lscb() { + + if (!pbft_enabled) return; + + if ( pending_pbft_checkpoint ) { + + auto checkpoint_block_state = fork_db.get_block(*pending_pbft_checkpoint); + if (checkpoint_block_state) { + fork_db.set_latest_checkpoint(*pending_pbft_checkpoint); + auto checkpoint_num = checkpoint_block_state->block_num; + if (pbft_prepared && pbft_prepared->block_num < checkpoint_num) { + pbft_prepared.reset(); + } + if (my_prepare && my_prepare->block_num < checkpoint_num) { + my_prepare.reset(); + } + } + pending_pbft_checkpoint.reset(); + + } + } + void maybe_switch_forks( controller::block_status s ) { auto new_head = fork_db.head(); @@ -1583,7 +1783,14 @@ struct controller_impl { void set_ext_merkle() { vector ext_digests; - const auto& exts = pending->_pending_block_state->block->block_extensions; + extensions_type exts; + for ( const auto& extn: pending->_pending_block_state->block->block_extensions) { + if (extn.first != static_cast(block_extension_type::pbft_stable_checkpoint)) + { + exts.emplace_back(extn); + } + } + ext_digests.reserve( exts.size()); for( const auto& a : exts ) ext_digests.emplace_back( digest_type::hash(a) ); @@ -1912,6 +2119,13 @@ chainbase::database& controller::mutable_db()const { return my->db; } const fork_database& controller::fork_db()const { return my->fork_db; } +std::map controller:: my_signature_providers()const{ + return my->conf.my_signature_providers; +} + +void controller::set_my_signature_providers(std::map msp){ + my->conf.my_signature_providers = msp; +} void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count, std::function signer) { validate_db_available_size(); @@ -1952,6 
+2166,20 @@ void controller::push_block( std::future& block_state_future ) my->push_block( block_state_future ); } +void controller::pbft_commit_local( const block_id_type& id ) { + validate_db_available_size(); + my->pbft_commit_local(id); +} + +bool controller::pending_pbft_lib() { + if (my->pending_pbft_lib) return true; + return false; +} + +void controller::set_pbft_latest_checkpoint( const block_id_type& id ) { + my->set_pbft_latest_checkpoint(id); +} + transaction_trace_ptr controller::push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, uint32_t billed_cpu_time_us ) { validate_db_available_size(); EOS_ASSERT( get_read_mode() != chain::db_read_mode::READ_ONLY, transaction_type_exception, "push transaction not allowed in read-only mode" ); @@ -2079,6 +2307,30 @@ block_id_type controller::last_irreversible_block_id() const { } +uint32_t controller::last_stable_checkpoint_block_num() const { + return my->head->pbft_stable_checkpoint_blocknum; +} + +block_id_type controller::last_stable_checkpoint_block_id() const { + auto lscb_num = last_stable_checkpoint_block_num(); + const auto& tapos_block_summary = db().get((uint16_t)lscb_num); + + if( block_header::num_from_id(tapos_block_summary.block_id) == lscb_num ) + return tapos_block_summary.block_id; + + auto b = fetch_block_by_number(lscb_num); + if (b) return b->id(); + return block_id_type{}; +} + +vector controller::get_watermarks() const { + return my->fork_db.get_watermarks_in_forkdb(); +} + +bool controller::is_replaying() const { + return my->replaying; +} + const dynamic_global_property_object& controller::get_dynamic_global_properties()const { return my->db.get(); } @@ -2253,6 +2505,48 @@ chain_id_type controller::get_chain_id()const { return my->chain_id; } +void controller::set_pbft_prepared(const block_id_type& id) { + my->pbft_prepared.reset(); + auto bs = fetch_block_state_by_id(id); + if (bs) { + my->pbft_prepared = bs; + my->fork_db.mark_pbft_prepared_fork(bs); + maybe_switch_forks(); + } +} + +void controller::set_pbft_my_prepare(const block_id_type& id) { + my->my_prepare.reset(); + auto bs = fetch_block_state_by_id(id); + if (bs) { + my->my_prepare = bs; + my->fork_db.mark_pbft_my_prepare_fork(bs); + maybe_switch_forks(); + } +} + +block_id_type controller::get_pbft_prepared() const { + if (my->pbft_prepared) return my->pbft_prepared->id; + return block_id_type{}; +} + +block_id_type controller::get_pbft_my_prepare() const { + if (my->my_prepare) return my->my_prepare->id; + return block_id_type{}; +} + +void controller::reset_pbft_my_prepare() { + my->fork_db.remove_pbft_my_prepare_fork(); + maybe_switch_forks(); + if (my->my_prepare) my->my_prepare.reset(); +} + +void controller::reset_pbft_prepared() { + my->fork_db.remove_pbft_prepared_fork(); + maybe_switch_forks(); + if (my->pbft_prepared) my->pbft_prepared.reset(); +} + db_read_mode controller::get_read_mode()const { return my->read_mode; } @@ -2355,6 +2649,18 @@ void controller::validate_reversible_available_size() const { EOS_ASSERT(free >= guard, reversible_guard_exception, "reversible free: ${f}, guard size: ${g}", ("f", free)("g",guard)); } +path controller::state_dir() const { + return my->conf.state_dir; +} + +path controller::blocks_dir() const { + return my->conf.blocks_dir; +} + +producer_schedule_type controller::initial_schedule() const { + return producer_schedule_type{ 0, {{eosio::chain::config::system_account_name, my->conf.genesis.initial_key}} }; +} + bool controller::is_known_unexpired_transaction( const 
transaction_id_type& id) const { return db().find(id); } @@ -2391,5 +2697,34 @@ void controller::set_name_list(int64_t list, int64_t action, std::vectordb.get(); +} + +bool controller::is_pbft_enabled() const { + return my->pbft_enabled; +} + +bool controller::under_maintenance() const { + return my->pbft_upgrading; +} + +void controller::maybe_switch_forks() { + if (!pending_block_state() && my->read_mode != db_read_mode::IRREVERSIBLE) { + my->maybe_switch_forks(controller::block_status::complete); + } +} + +// this will be used in unit_test only, should not be called anywhere else. +void controller::set_upo(uint32_t target_block_num) { + try { + const auto& upo = my->db.get(); + my->db.modify( upo, [&]( auto& up ) { up.upgrade_target_block_num = (block_num_type)target_block_num;}); + } catch( const boost::exception& e) { + my->db.create([&](auto& up){ + up.upgrade_target_block_num = (block_num_type)target_block_num; + }); + } +} } } /// eosio::chain diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 441677bb8a8..2163a5a5960 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -17,6 +17,7 @@ namespace eosio { namespace chain { struct by_block_id; struct by_block_num; struct by_lib_block_num; + struct by_watermark; struct by_prev; typedef multi_index_container< block_state_ptr, @@ -31,12 +32,21 @@ namespace eosio { namespace chain { composite_key_compare< std::less, std::greater > >, ordered_non_unique< tag, - composite_key< block_header_state, - member, - member, - member + composite_key< block_state, + member, + member, + member, + member, + member >, - composite_key_compare< std::greater, std::greater, std::greater > + composite_key_compare< std::greater, std::greater, std::greater, std::greater, std::greater > + >, + ordered_non_unique< tag, + composite_key< block_state, + member, + member + >, + composite_key_compare< std::greater<>, std::less<> > > > > fork_multi_index_type; @@ -59,19 +69,72 @@ namespace eosio { namespace chain { if( fc::exists( fork_db_dat ) ) { string content; fc::read_file_contents( fork_db_dat, content ); - fc::datastream ds( content.data(), content.size() ); - unsigned_int size; fc::raw::unpack( ds, size ); - for( uint32_t i = 0, n = size.value; i < n; ++i ) { - block_state s; - fc::raw::unpack( ds, s ); - set( std::make_shared( move( s ) ) ); - } - block_id_type head_id; - fc::raw::unpack( ds, head_id ); - - my->head = get_block( head_id ); + string version_label = content.substr(1,7);//start from position 1 because fc pack type in pos 0 + bool is_version_1 = version_label != "version"; + if(is_version_1){ + /*start upgrade migration and this is a hack and ineffecient, but lucky we only need to do it once */ + wlog("doing LIB upgrade migration"); + auto start = ds.pos(); + unsigned_int size; fc::raw::unpack( ds, size ); + auto skipped_size_pos = ds.pos(); + + vector data(content.begin()+(skipped_size_pos - start), content.end()); + + data.insert(data.end(),{0,0,0,0});//append 4 bytes for the very last block state, avoid underflow in case + fc::datastream tmp_ds(data.data(), data.size()); + + for( uint32_t i = 0, n = size.value; i < n; ++i ) { + wlog("processing block state in fork database ${i} of ${size}", ("i",i+1)("size",n)); + block_header_state h; + fc::raw::unpack( tmp_ds, h ); + h.pbft_stable_checkpoint_blocknum = 0; + + //move pos backward 4 bytes for pbft_stable_checkpoint_blocknum + auto tmp_accumulated_data_length = tmp_ds.tellp() - 4; + 
tmp_ds.seekp(tmp_accumulated_data_length); + + signed_block_ptr b; + fc::raw::unpack( tmp_ds, b ); + bool validated; + fc::raw::unpack( tmp_ds, validated ); + bool in_current_chain; + fc::raw::unpack( tmp_ds, in_current_chain ); + block_state s{h}; + s.block = b; + s.validated = validated; + s.in_current_chain = in_current_chain; + + s.pbft_prepared = false; + s.pbft_my_prepare = false; + set( std::make_shared( move( s ) ) ); + } + block_id_type head_id; + fc::raw::unpack( tmp_ds, head_id ); + + my->head = get_block( head_id ); + /*end upgrade migration*/ + }else{ + //get version number + fc::raw::unpack( ds, version_label ); + EOS_ASSERT(version_label=="version", fork_database_exception, "invalid version label in forkdb.dat"); + uint8_t version_num; + fc::raw::unpack( ds, version_num ); + + EOS_ASSERT(version_num==2, fork_database_exception, "invalid version num in forkdb.dat"); + + unsigned_int size; fc::raw::unpack( ds, size ); + for( uint32_t i = 0, n = size.value; i < n; ++i ) { + block_state s; + fc::raw::unpack( ds, s ); + set( std::make_shared( move( s ) ) ); + } + block_id_type head_id; + fc::raw::unpack( ds, head_id ); + + my->head = get_block( head_id ); + } fc::remove( fork_db_dat ); } } @@ -81,6 +144,12 @@ namespace eosio { namespace chain { auto fork_db_dat = my->datadir / config::forkdb_filename; std::ofstream out( fork_db_dat.generic_string().c_str(), std::ios::out | std::ios::binary | std::ofstream::trunc ); + + string version_label = "version"; + fc::raw::pack( out, version_label ); + uint8_t version_num = 2; + fc::raw::pack( out, version_num ); + uint32_t num_blocks_in_fork_db = my->index.size(); fc::raw::pack( out, unsigned_int{num_blocks_in_fork_db} ); for( const auto& s : my->index ) { @@ -95,9 +164,10 @@ namespace eosio { namespace chain { /// we cannot normally prune the lib if it is the head block because /// the next block needs to build off of the head block. We are exiting /// now so we can prune this block as irreversible before exiting. - auto lib = my->head->dpos_irreversible_blocknum; + auto lib = std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum); + auto checkpoint = my->head->pbft_stable_checkpoint_blocknum; auto oldest = *my->index.get().begin(); - if( oldest->block_num <= lib ) { + if( oldest->block_num < lib && oldest->block_num < checkpoint ) { prune( oldest ); } @@ -123,7 +193,7 @@ namespace eosio { namespace chain { } } - block_state_ptr fork_database::add( const block_state_ptr& n, bool skip_validate_previous ) { + block_state_ptr fork_database::add( const block_state_ptr& n, bool skip_validate_previous, bool pbft_enabled ) { EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" ); EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" ); @@ -136,19 +206,34 @@ namespace eosio { namespace chain { auto inserted = my->index.insert(n); EOS_ASSERT( inserted.second, fork_database_exception, "duplicate block added?" 
); + auto prior = my->index.find( n->block->previous ); + + if (prior != my->index.end()) { + if ((*prior)->pbft_prepared) mark_pbft_prepared_fork(*prior); + if ((*prior)->pbft_my_prepare) mark_pbft_my_prepare_fork(*prior); + } + my->head = *my->index.get().begin(); - auto lib = my->head->dpos_irreversible_blocknum; + auto lib = std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum); + auto checkpoint = my->head->pbft_stable_checkpoint_blocknum; + auto oldest = *my->index.get().begin(); - if( oldest->block_num < lib ) { - prune( oldest ); + if (!pbft_enabled && oldest->block_num < lib) { + prune( oldest ); + } else { + // prune all blocks below lscb + while (oldest->block_num < lib && oldest->block_num < checkpoint ) { + prune( oldest ); + oldest = *my->index.get().begin(); + } } return n; } - block_state_ptr fork_database::add( signed_block_ptr b, bool skip_validate_signee ) { + block_state_ptr fork_database::add( signed_block_ptr b, bool skip_validate_signee, bool pbft_enabled ) { EOS_ASSERT( b, fork_database_exception, "attempt to add null block" ); EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" ); const auto& by_id_idx = my->index.get(); @@ -158,9 +243,9 @@ namespace eosio { namespace chain { auto prior = by_id_idx.find( b->previous ); EOS_ASSERT( prior != by_id_idx.end(), unlinkable_block_exception, "unlinkable block", ("id", string(b->id()))("previous", string(b->previous)) ); - auto result = std::make_shared( **prior, move(b), skip_validate_signee ); + auto result = std::make_shared( **prior, move(b), skip_validate_signee, pbft_enabled); EOS_ASSERT( result, fork_database_exception , "fail to add new block state" ); - return add(result, true); + return add(result, true, pbft_enabled); } const block_state_ptr& fork_database::head()const { return my->head; } @@ -275,16 +360,143 @@ namespace eosio { namespace chain { } } - block_state_ptr fork_database::get_block(const block_id_type& id)const { + block_state_ptr fork_database::get_block(const block_id_type& id) const { auto itr = my->index.find( id ); if( itr != my->index.end() ) return *itr; return block_state_ptr(); } + void fork_database::mark_pbft_prepared_fork(const block_state_ptr& h) { + auto& by_id_idx = my->index.get(); + auto itr = by_id_idx.find( h->id ); + EOS_ASSERT( itr != by_id_idx.end(), fork_db_block_not_found, "could not find block in fork database" ); + by_id_idx.modify( itr, [&]( auto& bsp ) { bsp->pbft_prepared = true; }); + + auto update = [&]( const vector& in ) { + vector updated; + + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + bsp->pbft_prepared = true; + updated.push_back( bsp->id ); + }); + ++pitr; + } + } + return updated; + }; + + vector queue{ h->id }; + while(!queue.empty()) { + queue = update( queue ); + } + my->head = *my->index.get().begin(); + } + + void fork_database::mark_pbft_my_prepare_fork(const block_state_ptr& h) { + auto& by_id_idx = my->index.get(); + auto itr = by_id_idx.find( h->id ); + EOS_ASSERT( itr != by_id_idx.end(), fork_db_block_not_found, "could not find block in fork database" ); + by_id_idx.modify( itr, [&]( auto& bsp ) { bsp->pbft_my_prepare = true; }); + + auto update = [&]( const vector& in ) { + vector updated; + + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) 
{ + pidx.modify( pitr, [&]( auto& bsp ) { + bsp->pbft_my_prepare = true; + updated.push_back( bsp->id ); + }); + ++pitr; + } + } + return updated; + }; + + vector queue{ h->id }; + while(!queue.empty()) { + queue = update( queue ); + } + my->head = *my->index.get().begin(); + } + + void fork_database::remove_pbft_my_prepare_fork() { + auto oldest = *my->index.get().begin(); + + auto& by_id_idx = my->index.get(); + auto itr = by_id_idx.find( oldest->id ); + by_id_idx.modify( itr, [&]( auto& bsp ) { bsp->pbft_my_prepare = false; }); + + auto update = [&]( const vector& in ) { + vector updated; + + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + bsp->pbft_my_prepare = false; + updated.push_back( bsp->id ); + }); + ++pitr; + } + } + return updated; + }; + + vector queue{ oldest->id }; + while(!queue.empty()) { + queue = update( queue ); + } + my->head = *my->index.get().begin(); + } + + void fork_database::remove_pbft_prepared_fork() { + auto oldest = *my->index.get().begin(); + + auto& by_id_idx = my->index.get(); + auto itr = by_id_idx.find( oldest->id ); + by_id_idx.modify( itr, [&]( auto& bsp ) { bsp->pbft_prepared = false; }); + + auto update = [&]( const vector& in ) { + vector updated; + + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + bsp->pbft_prepared = false; + updated.push_back( bsp->id ); + }); + ++pitr; + } + } + return updated; + }; + + vector queue{ oldest->id }; + while(!queue.empty()) { + queue = update( queue ); + } + my->head = *my->index.get().begin(); + } + block_state_ptr fork_database::get_block_in_current_chain_by_num( uint32_t n )const { const auto& numidx = my->index.get(); auto nitr = numidx.lower_bound( n ); + // following asserts removed so null can be returned //FC_ASSERT( nitr != numidx.end() && (*nitr)->block_num == n, // "could not find block in fork database with block number ${block_num}", ("block_num", n) ); @@ -314,10 +526,13 @@ namespace eosio { namespace chain { * This will require a search over all forks */ void fork_database::set_bft_irreversible( block_id_type id ) { - auto& idx = my->index.get(); - auto itr = idx.find(id); - uint32_t block_num = (*itr)->block_num; - idx.modify( itr, [&]( auto& bsp ) { + auto b = get_block( id ); + EOS_ASSERT( b, fork_db_block_not_found, "unable to find block id ${id}", ("id",id)); + + auto& idx = my->index.get(); + auto itr = idx.find(id); + uint32_t block_num = (*itr)->block_num; + idx.modify( itr, [&]( auto& bsp ) { bsp->bft_irreversible_blocknum = bsp->block_num; }); @@ -330,27 +545,85 @@ namespace eosio { namespace chain { auto update = [&]( const vector& in ) { vector updated; - for( const auto& i : in ) { - auto& pidx = my->index.get(); - auto pitr = pidx.lower_bound( i ); - auto epitr = pidx.upper_bound( i ); - while( pitr != epitr ) { - pidx.modify( pitr, [&]( auto& bsp ) { - if( bsp->bft_irreversible_blocknum < block_num ) { - bsp->bft_irreversible_blocknum = block_num; - updated.push_back( bsp->id ); - } - }); - ++pitr; - } - } - return updated; - }; + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + if( bsp->bft_irreversible_blocknum < block_num ) 
{ + bsp->bft_irreversible_blocknum = block_num; + updated.push_back( bsp->id ); + } + }); + ++pitr; + } + } + return updated; + }; + + vector queue{id}; + while( queue.size() ) { + queue = update( queue ); + } + my->head = *my->index.get().begin(); + } - vector queue{id}; - while( queue.size() ) { - queue = update( queue ); - } + void fork_database::set_latest_checkpoint( block_id_type id) { + auto b = get_block( id ); + EOS_ASSERT( b, fork_db_block_not_found, "unable to find block id ${id}", ("id",id)); + + auto& idx = my->index.get(); + auto itr = idx.find(id); + uint32_t block_num = (*itr)->block_num; + idx.modify( itr, [&]( auto& bsp ) { + bsp->pbft_stable_checkpoint_blocknum = bsp->block_num; + }); + + auto update = [&]( const vector& in ) { + vector updated; + + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + if( bsp->pbft_stable_checkpoint_blocknum < block_num ) { + bsp->pbft_stable_checkpoint_blocknum = block_num; + updated.push_back( bsp->id ); + } + }); + ++pitr; + } + } + return updated; + }; + + vector queue{id}; + while( queue.size() ) { + queue = update( queue ); + } + } + + vector fork_database::get_watermarks_in_forkdb() { + vector watermarks; + auto& pidx = my->index.get(); + auto pitr = pidx.begin(); + while (pitr != pidx.end() && (*pitr)->pbft_watermark) { + watermarks.emplace_back((*pitr)->block_num); //should consider only current_chain? + ++pitr; + } + return watermarks; } -} } /// eosio::chain + void fork_database::mark_as_pbft_watermark( const block_state_ptr& h) { + auto& by_id_idx = my->index.get(); + auto itr = by_id_idx.find( h->id ); + EOS_ASSERT( itr != by_id_idx.end(), fork_db_block_not_found, "could not find block in fork database" ); + by_id_idx.modify( itr, [&]( auto& bsp ) { bsp->pbft_watermark = true; }); + } + + + + } } /// eosio::chain diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 8f8fca4cdeb..06b9d2bb761 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -680,7 +680,7 @@ void abi_serializer::to_variant( const T& o, variant& vo, Resolver resolver, con impl::abi_traverse_context ctx(max_serialization_time); impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); vo = std::move(mvo["_"]); -} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize type", ("object",o)) +} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize: ${type}", ("type", boost::core::demangle( typeid(o).name() ) )) template void abi_serializer::from_variant( const variant& v, T& o, Resolver resolver, const fc::microseconds& max_serialization_time ) try { diff --git a/libraries/chain/include/eosio/chain/block.hpp b/libraries/chain/include/eosio/chain/block.hpp index 9cd942026cf..246515e1200 100644 --- a/libraries/chain/include/eosio/chain/block.hpp +++ b/libraries/chain/include/eosio/chain/block.hpp @@ -52,7 +52,8 @@ namespace eosio { namespace chain { }; enum class block_extension_type : uint16_t { - bpsig_action_time_seed + bpsig_action_time_seed, + pbft_stable_checkpoint }; /** diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp index 723824b5310..e1943741bc9 100644 --- a/libraries/chain/include/eosio/chain/block_header.hpp +++ b/libraries/chain/include/eosio/chain/block_header.hpp @@ -24,7 +24,7 @@ 
namespace eosio { namespace chain { * behavior. When producing a block a producer is always confirming at least the block he * is building off of. A producer cannot confirm "this" block, only prior blocks. */ - uint16_t confirmed = 1; + uint16_t confirmed = 1; block_id_type previous; diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index c318843d5df..56d234cf10b 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -16,6 +16,7 @@ struct block_header_state { uint32_t dpos_proposed_irreversible_blocknum = 0; uint32_t dpos_irreversible_blocknum = 0; uint32_t bft_irreversible_blocknum = 0; + uint32_t pending_schedule_lib_num = 0; /// last irr block num digest_type pending_schedule_hash; producer_schedule_type pending_schedule; @@ -26,14 +27,15 @@ struct block_header_state { public_key_type block_signing_key; vector confirm_count; vector confirmations; + uint32_t pbft_stable_checkpoint_blocknum = 0; - block_header_state next( const signed_block_header& h, bool trust = false )const; - block_header_state generate_next( block_timestamp_type when )const; + block_header_state next( const signed_block_header& h, bool trust = false, bool pbft_enabled = false )const; + block_header_state generate_next( block_timestamp_type when, bool pbft_enabled = false )const; void set_new_producers( producer_schedule_type next_pending ); - void set_confirmed( uint16_t num_prev_blocks ); + void set_confirmed( uint16_t num_prev_blocks, bool pbft_enabled = false ); void add_confirmation( const header_confirmation& c ); - bool maybe_promote_pending(); + bool maybe_promote_pending( bool pbft_enabled = false ); bool has_pending_producers()const { return pending_schedule.producers.size(); } @@ -61,7 +63,9 @@ struct block_header_state { FC_REFLECT( eosio::chain::block_header_state, (id)(block_num)(header)(dpos_proposed_irreversible_blocknum)(dpos_irreversible_blocknum)(bft_irreversible_blocknum) + (pending_schedule_lib_num)(pending_schedule_hash) (pending_schedule)(active_schedule)(blockroot_merkle) (producer_to_last_produced)(producer_to_last_implied_irb)(block_signing_key) - (confirm_count)(confirmations) ) + (confirm_count)(confirmations) + (pbft_stable_checkpoint_blocknum)) diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index 2292392ade4..ef42af36bab 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -13,14 +13,17 @@ namespace eosio { namespace chain { struct block_state : public block_header_state { explicit block_state( const block_header_state& cur ):block_header_state(cur){} - block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee ); - block_state( const block_header_state& prev, block_timestamp_type when ); + block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee, bool pbft_enabled ); + block_state( const block_header_state& prev, block_timestamp_type when, bool pbft_enabled ); block_state() = default; /// weak_ptr prev_block_state.... 
signed_block_ptr block; bool validated = false; bool in_current_chain = false; + bool pbft_prepared = false; + bool pbft_my_prepare = false; + bool pbft_watermark = false; /// this data is redundant with the data stored in block, but facilitates /// recapturing transactions when we pop a block @@ -31,4 +34,4 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain -FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated)(in_current_chain) ) +FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated)(in_current_chain)(pbft_prepared)(pbft_my_prepare)(pbft_watermark) ) diff --git a/libraries/chain/include/eosio/chain/chain_snapshot.hpp b/libraries/chain/include/eosio/chain/chain_snapshot.hpp index 3b3e64f264f..58ea0d45725 100644 --- a/libraries/chain/include/eosio/chain/chain_snapshot.hpp +++ b/libraries/chain/include/eosio/chain/chain_snapshot.hpp @@ -29,6 +29,11 @@ struct chain_snapshot_header { } }; +struct batch_pbft_snapshot_migrated{}; + +struct batch_pbft_enabled{}; + +struct batch_pbft_lscb_branch{}; } } FC_REFLECT(eosio::chain::chain_snapshot_header,(version)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp index f34b7702095..cb294eecc7f 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -20,9 +20,12 @@ const static auto default_reversible_guard_size = 2*1024*1024ll;/// 1MB * 340 bl const static auto default_state_dir_name = "state"; const static auto forkdb_filename = "forkdb.dat"; +const static auto pbftdb_filename = "pbftdb.dat"; const static auto default_state_size = 1*1024*1024*1024ll; const static auto default_state_guard_size = 128*1024*1024ll; +const static auto checkpoints_filename = "checkpoints.dat"; + const static uint64_t system_account_name = N(eosio); const static uint64_t null_account_name = N(eosio.null); diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 2257fa56210..7a90125da6e 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -30,6 +30,7 @@ namespace eosio { namespace chain { class dynamic_global_property_object; class global_property_object; class global_property2_object; // *bos* + class upgrade_property_object; class permission_object; class account_object; using resource_limits::resource_limits_manager; @@ -65,6 +66,8 @@ namespace eosio { namespace chain { // *bos end* + using signature_provider_type = std::function; + class controller { public: @@ -82,6 +85,8 @@ namespace eosio { namespace chain { uint64_t state_guard_size = chain::config::default_state_guard_size; uint64_t reversible_cache_size = chain::config::default_reversible_cache_size; uint64_t reversible_guard_size = chain::config::default_reversible_guard_size; + path checkpoints_dir = blocks_dir; + uint32_t sig_cpu_bill_pct = chain::config::default_sig_cpu_bill_pct; uint16_t thread_pool_size = chain::config::default_controller_thread_pool_size; bool read_only = false; @@ -98,6 +103,10 @@ namespace eosio { namespace chain { flat_set resource_greylist; flat_set trusted_producers; + + + std::map my_signature_providers; + std::set my_producers; }; enum class block_status { @@ -155,8 +164,23 @@ namespace eosio { namespace chain { const chainbase::database& db()const; + void pbft_commit_local( const 
block_id_type& id ); + + bool pending_pbft_lib(); + + vector get_watermarks() const; + + void set_pbft_latest_checkpoint( const block_id_type& id ); + uint32_t last_stable_checkpoint_block_num()const; + block_id_type last_stable_checkpoint_block_id()const; + + const fork_database& fork_db()const; + std::map my_signature_providers()const; + void set_my_signature_providers(std::map msp); + + const account_object& get_account( account_name n )const; const global_property_object& get_global_properties()const; const dynamic_global_property_object& get_dynamic_global_properties()const; @@ -229,7 +253,7 @@ namespace eosio { namespace chain { // *bos begin* const global_property2_object& get_global_properties2()const; // *bos* void set_name_list(int64_t list, int64_t action, std::vector name_list); - + // *bos end* bool is_resource_greylisted(const account_name &name) const; @@ -259,6 +283,19 @@ namespace eosio { namespace chain { void set_subjective_cpu_leeway(fc::microseconds leeway); + path state_dir()const; + path blocks_dir()const; + producer_schedule_type initial_schedule()const; + bool is_replaying()const; + + void set_pbft_prepared(const block_id_type& id); + void set_pbft_my_prepare(const block_id_type& id); + block_id_type get_pbft_prepared()const; + block_id_type get_pbft_my_prepare()const; + void reset_pbft_my_prepare(); + void reset_pbft_prepared(); + void maybe_switch_forks(); + signal pre_accepted_block; signal accepted_block_header; signal accepted_block; @@ -268,6 +305,11 @@ namespace eosio { namespace chain { signal accepted_confirmation; signal bad_alloc; + const upgrade_property_object& get_upgrade_properties()const; + bool is_pbft_enabled()const; + bool under_maintenance()const; + void set_upo(uint32_t target_block_num); + /* signal pre_apply_block; signal post_apply_block; @@ -324,6 +366,7 @@ FC_REFLECT( eosio::chain::controller::config, (state_dir) (state_size) (reversible_cache_size) + (checkpoints_dir) (read_only) (force_all_checks) (disable_replay_opts) diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 6c3e504d349..65d9e8c0ce6 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -136,6 +136,8 @@ namespace eosio { namespace chain { FC_DECLARE_DERIVED_EXCEPTION( fork_db_block_not_found, fork_database_exception, 3020001, "Block can not be found" ) + FC_DECLARE_DERIVED_EXCEPTION( pbft_exception, chain_exception, + 4010000, "PBFT exception" ) FC_DECLARE_DERIVED_EXCEPTION( block_validate_exception, chain_exception, 3030000, "Block exception" ) diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 998157ab41a..823c65c5b92 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -40,8 +40,8 @@ namespace eosio { namespace chain { * block_state and will return a pointer to the new block state or * throw on error. 
*/ - block_state_ptr add( signed_block_ptr b, bool skip_validate_signee ); - block_state_ptr add( const block_state_ptr& next_block, bool skip_validate_previous ); + block_state_ptr add( signed_block_ptr b, bool skip_validate_signee, bool pbft_enabled ); + block_state_ptr add( const block_state_ptr& next_block, bool skip_validate_previous, bool pbft_enabled ); void remove( const block_id_type& id ); void add( const header_confirmation& c ); @@ -69,9 +69,24 @@ namespace eosio { namespace chain { * it is removed unless it is the head block. */ signal irreversible; - - private: + void set_bft_irreversible( block_id_type id ); + + void set_latest_checkpoint( block_id_type id); + + void mark_pbft_prepared_fork(const block_state_ptr& h); + + void mark_pbft_my_prepare_fork(const block_state_ptr& h); + + void remove_pbft_my_prepare_fork(); + + void remove_pbft_prepared_fork(); + + vector get_watermarks_in_forkdb(); + + void mark_as_pbft_watermark( const block_state_ptr& h); + + private: unique_ptr my; }; diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index bdb49d3ce06..98f86939ad6 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -44,6 +44,16 @@ namespace eosio { namespace chain { guaranteed_minimum_resources gmr;//guaranteed_minimum_resources }; + class upgrade_property_object : public chainbase::object + { + OBJECT_CTOR(upgrade_property_object) + //TODO: should use a more complicated struct to include id, digest and status of every single upgrade. + + id_type id; + block_num_type upgrade_target_block_num = 0; + block_num_type upgrade_complete_block_num = 0; + }; + /** * @class dynamic_global_property_object @@ -89,6 +99,15 @@ namespace eosio { namespace chain { > > >; + + using upgrade_property_multi_index = chainbase::shared_multi_index_container< + upgrade_property_object, + indexed_by< + ordered_unique, + BOOST_MULTI_INDEX_MEMBER(upgrade_property_object, upgrade_property_object::id_type, id) + > + > + >; }} CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property_object, eosio::chain::global_property_multi_index) @@ -96,6 +115,7 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::dynamic_global_property_object, eosio::chain::dynamic_global_property_multi_index) // *bos* CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property2_object, eosio::chain::global_property2_multi_index) +CHAINBASE_SET_INDEX_TYPE(eosio::chain::upgrade_property_object, eosio::chain::upgrade_property_multi_index) FC_REFLECT(eosio::chain::dynamic_global_property_object, (global_action_sequence) @@ -107,4 +127,7 @@ FC_REFLECT(eosio::chain::global_property_object, // *bos* FC_REFLECT(eosio::chain::global_property2_object, (cfg)(gmr) + ) +FC_REFLECT(eosio::chain::upgrade_property_object, + (upgrade_target_block_num)(upgrade_complete_block_num) ) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/pbft.hpp b/libraries/chain/include/eosio/chain/pbft.hpp new file mode 100644 index 00000000000..ba78d3ab1ea --- /dev/null +++ b/libraries/chain/include/eosio/chain/pbft.hpp @@ -0,0 +1,199 @@ +#include + +#pragma once + +#include +#include +#include + +namespace eosio { + namespace chain { + using namespace std; + using namespace fc; + + struct psm_cache { + pbft_prepare prepares_cache; + pbft_commit commits_cache; + pbft_view_change view_changes_cache; + pbft_prepared_certificate prepared_certificate; + vector committed_certificate; 
+ pbft_view_changed_certificate view_changed_certificate; + }; + + class psm_state; + using psm_state_ptr = std::shared_ptr; + + class psm_machine : public std::enable_shared_from_this { + + public: + explicit psm_machine(pbft_database& pbft_db); + ~psm_machine(); + + void set_current(psm_state_ptr s) { + current = std::move(s); + } + + psm_state_ptr get_current() { + return current; + } + + void on_prepare(pbft_metadata_ptr e); + void on_commit(pbft_metadata_ptr e); + void on_view_change(pbft_metadata_ptr e); + void on_new_view(const pbft_metadata_ptr &e); + + void send_prepare(); + void send_commit(); + void send_view_change(); + + void transit_to_committed_state(psm_state_ptr s, bool to_new_view); + void transit_to_prepared_state(psm_state_ptr s); + void transit_to_view_change_state(psm_state_ptr s); + void transit_to_new_view(const pbft_metadata_ptr& e, const psm_state_ptr& s); + + void do_send_view_change(); + bool maybe_new_view(const psm_state_ptr& s); + + const pbft_prepare& get_prepares_cache() const; + void set_prepares_cache(const pbft_prepare &pcache); + + const pbft_commit& get_commits_cache() const; + void set_commits_cache(const pbft_commit &ccache); + + const pbft_view_change& get_view_changes_cache() const; + void set_view_changes_cache(const pbft_view_change &vc_cache); + + const uint32_t &get_current_view() const; + void set_current_view(const uint32_t &cv); + + const pbft_prepared_certificate& get_prepared_certificate() const; + void set_prepared_certificate(const pbft_prepared_certificate &pcert); + + const vector& get_committed_certificate() const; + void set_committed_certificate(const vector &ccert); + + const pbft_view_changed_certificate& get_view_changed_certificate() const; + void set_view_changed_certificate(const pbft_view_changed_certificate &vc_cert); + + const uint32_t& get_target_view_retries() const; + void set_target_view_retries(const uint32_t &tv_reties); + + const uint32_t& get_target_view() const; + void set_target_view(const uint32_t &tv); + + const uint32_t& get_view_change_timer() const; + void set_view_change_timer(const uint32_t &vc_timer); + + void manually_set_current_view(const uint32_t &cv); + + protected: + psm_cache cache; + uint32_t current_view; + uint32_t target_view_retries; + uint32_t target_view; + uint32_t view_change_timer; + + private: + psm_state_ptr current; + pbft_database &pbft_db; + }; + + using psm_machine_ptr = std::shared_ptr; + + class psm_state : public std::enable_shared_from_this { + + public: + psm_state(); + ~psm_state(); + + virtual void on_prepare(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) = 0; + virtual void on_commit(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) = 0; + virtual void on_view_change(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) = 0; + + virtual void send_prepare(psm_machine_ptr m, pbft_database &pbft_db) = 0; + virtual void send_commit(psm_machine_ptr m, pbft_database &pbft_db) = 0; + virtual void send_view_change(psm_machine_ptr m, pbft_database &pbft_db) = 0; + + virtual const char* get_name() = 0; + std::shared_ptr get_self() { return shared_from_this(); }; + }; + + class psm_prepared_state final: public psm_state { + + public: + psm_prepared_state(); + ~psm_prepared_state(); + + void on_prepare(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) override; + void on_commit(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) override; + void on_view_change(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database 
&pbft_db) override; + + void send_prepare(psm_machine_ptr m, pbft_database &pbft_db) override; + void send_commit(psm_machine_ptr m, pbft_database &pbft_db) override; + void send_view_change(psm_machine_ptr m, pbft_database &pbft_db) override; + + bool pending_commit_local; + + const char* get_name() override { return "{==== PREPARED ====}"; } + }; + + class psm_committed_state final: public psm_state { + public: + psm_committed_state(); + ~psm_committed_state(); + + void on_prepare(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) override; + void on_commit(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) override; + void on_view_change(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) override; + + void send_prepare(psm_machine_ptr m, pbft_database &pbft_db) override; + void send_commit(psm_machine_ptr m, pbft_database &pbft_db) override; + void send_view_change(psm_machine_ptr m, pbft_database &pbft_db) override; + + const char* get_name() override { return "{==== COMMITTED ====}"; } + }; + + class psm_view_change_state final: public psm_state { + public: + psm_view_change_state(); + ~psm_view_change_state(); + + void on_prepare(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) override; + void on_commit(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) override; + void on_view_change(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) override; + + void send_prepare(psm_machine_ptr m, pbft_database &pbft_db) override; + void send_commit(psm_machine_ptr m, pbft_database &pbft_db) override; + void send_view_change(psm_machine_ptr m, pbft_database &pbft_db) override; + + const char* get_name() override { return "{==== VIEW CHANGE ====}"; } + }; + + class pbft_controller { + public: + explicit pbft_controller(controller& ctrl); + ~pbft_controller(); + + const uint16_t view_change_timeout = 6; + + pbft_database pbft_db; + std::shared_ptr state_machine; + + void maybe_pbft_prepare(); + void maybe_pbft_commit(); + void maybe_pbft_view_change(); + void maybe_pbft_checkpoint(); + + void on_pbft_prepare(pbft_metadata_ptr p); + void on_pbft_commit(pbft_metadata_ptr c); + void on_pbft_view_change(pbft_metadata_ptr vc); + void on_pbft_new_view(const pbft_metadata_ptr &nv); + void on_pbft_checkpoint(const pbft_metadata_ptr &cp); + + private: + fc::path datadir; + }; + } +} /// namespace eosio::chain + +FC_REFLECT(eosio::chain::pbft_controller, (pbft_db)(state_machine)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/pbft_database.hpp b/libraries/chain/include/eosio/chain/pbft_database.hpp new file mode 100644 index 00000000000..7fd03ebb3e7 --- /dev/null +++ b/libraries/chain/include/eosio/chain/pbft_database.hpp @@ -0,0 +1,556 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace eosio { + namespace chain { + using boost::multi_index_container; + using namespace boost::multi_index; + using namespace std; + + using pbft_view_type = uint32_t; + + constexpr uint16_t pbft_checkpoint_granularity = 100; + constexpr uint16_t oldest_stable_checkpoint = 10000; + + enum class pbft_message_type : uint8_t { + prepare, + commit, + checkpoint, + view_change, + new_view + }; + + struct block_info_type { + block_id_type block_id; + + block_num_type block_num() const { + return fc::endian_reverse_u32(block_id._hash[0]); + } + + bool operator==(const block_info_type &rhs) const { + return block_id == rhs.block_id; + } + + bool 
operator!=(const block_info_type &rhs) const { + return !(*this == rhs); + } + + bool empty() const { + return block_id == block_id_type(); + } + }; + + struct pbft_message_common { + explicit pbft_message_common(pbft_message_type t): type{t} {}; + + pbft_message_type type; + time_point timestamp = time_point::now(); + + ~pbft_message_common() = default; + }; + + template + struct pbft_message_metadata { + explicit pbft_message_metadata(pbft_message_body m, chain_id_type chain_id): msg{m} { + try { + sender_key = crypto::public_key(msg.sender_signature, msg.digest(chain_id), true); + } catch (fc::exception & /*e*/) { + wlog("bad pbft message signature: ${m}", ("m", msg)); + } + } + + pbft_message_body msg; + public_key_type sender_key; + }; + + template + using pbft_metadata_ptr = std::shared_ptr>; + + struct pbft_prepare { + explicit pbft_prepare() = default; + + pbft_message_common common = pbft_message_common(pbft_message_type::prepare); + pbft_view_type view = 0; + block_info_type block_info; + signature_type sender_signature; + + bool operator<(const pbft_prepare &rhs) const { + if (block_info.block_num() < rhs.block_info.block_num()) { + return true; + } else if (block_info.block_num() == rhs.block_info.block_num()) { + return view < rhs.view; + } else { + return false; + } + } + + bool empty() const { + return !view + && block_info.empty() + && sender_signature == signature_type(); + } + + digest_type digest(chain_id_type chain_id) const { + digest_type::encoder enc; + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, common); + fc::raw::pack(enc, view); + fc::raw::pack(enc, block_info); + return enc.result(); + } + }; + + struct pbft_commit { + explicit pbft_commit() = default; + + pbft_message_common common = pbft_message_common(pbft_message_type::commit); + pbft_view_type view = 0; + block_info_type block_info; + signature_type sender_signature; + + bool operator<(const pbft_commit &rhs) const { + if (block_info.block_num() < rhs.block_info.block_num()) { + return true; + } else if (block_info.block_num() == rhs.block_info.block_num()) { + return view < rhs.view; + } else { + return false; + } + } + + bool empty() const { + return !view + && block_info.empty() + && sender_signature == signature_type(); + } + + digest_type digest(chain_id_type chain_id) const { + digest_type::encoder enc; + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, common); + fc::raw::pack(enc, view); + fc::raw::pack(enc, block_info); + return enc.result(); + } + }; + + using pbft_commit_ptr = std::shared_ptr; + + struct pbft_checkpoint { + explicit pbft_checkpoint() = default; + + pbft_message_common common = pbft_message_common(pbft_message_type::checkpoint); + block_info_type block_info; + signature_type sender_signature; + + bool operator<(const pbft_checkpoint &rhs) const { + return block_info.block_num() < rhs.block_info.block_num(); + } + + digest_type digest(chain_id_type chain_id) const { + digest_type::encoder enc; + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, common); + fc::raw::pack(enc, block_info); + return enc.result(); + } + }; + + struct pbft_stable_checkpoint { + explicit pbft_stable_checkpoint() = default; + + block_info_type block_info; + vector checkpoints; + + bool operator<(const pbft_stable_checkpoint &rhs) const { + return block_info.block_num() < rhs.block_info.block_num(); + } + + bool empty() const { + return block_info == block_info_type() + && checkpoints.empty(); + } + }; + + struct pbft_prepared_certificate { + explicit pbft_prepared_certificate() = default; + + 
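Both pbft_prepare and pbft_commit above rank messages by block number first and by view second in their operator<. The following standalone snippet shows that ordering in isolation; demo_prepare is a hypothetical stand-in, not the fc-reflected structs from this header.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct demo_prepare {                  // hypothetical stand-in for pbft_prepare
    uint32_t block_num = 0;
    uint32_t view      = 0;

    bool operator<(const demo_prepare& rhs) const {
        if (block_num != rhs.block_num) return block_num < rhs.block_num;
        return view < rhs.view;        // same block: lower view sorts first
    }
};

int main() {
    std::vector<demo_prepare> prepares{{102, 1}, {101, 3}, {102, 0}};
    std::sort(prepares.begin(), prepares.end());
    for (const auto& p : prepares)
        std::cout << "block " << p.block_num << " view " << p.view << "\n";
    // prints: block 101 view 3, block 102 view 0, block 102 view 1
}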
block_info_type block_info; + set pre_prepares; + vector prepares; + + bool operator<(const pbft_prepared_certificate &rhs) const { + return block_info.block_num() < rhs.block_info.block_num(); + } + + bool empty() const { + return block_info == block_info_type() + && prepares.empty(); + } + }; + + struct pbft_committed_certificate { + explicit pbft_committed_certificate() = default; + + block_info_type block_info; + vector commits; + + bool operator<(const pbft_committed_certificate &rhs) const { + return block_info.block_num() < rhs.block_info.block_num(); + } + + bool empty() const { + return block_info == block_info_type() + && commits.empty(); + } + }; + + struct pbft_view_change { + explicit pbft_view_change() = default; + + pbft_message_common common = pbft_message_common(pbft_message_type::view_change); + pbft_view_type current_view = 0; + pbft_view_type target_view = 1; + pbft_prepared_certificate prepared_cert; + vector committed_certs; + pbft_stable_checkpoint stable_checkpoint; + signature_type sender_signature; + + bool operator<(const pbft_view_change &rhs) const { + return target_view < rhs.target_view; + } + + digest_type digest(chain_id_type chain_id) const { + digest_type::encoder enc; + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, common); + fc::raw::pack(enc, current_view); + fc::raw::pack(enc, target_view); + fc::raw::pack(enc, prepared_cert); + fc::raw::pack(enc, committed_certs); + fc::raw::pack(enc, stable_checkpoint); + return enc.result(); + } + + bool empty() const { + return !current_view + && target_view == 1 + && prepared_cert.empty() + && committed_certs.empty() + && stable_checkpoint.empty() + && sender_signature == signature_type(); + } + }; + + struct pbft_view_changed_certificate { + explicit pbft_view_changed_certificate() = default; + + pbft_view_type target_view = 0; + vector view_changes; + + bool empty() const { + return !target_view + && view_changes.empty(); + } + }; + + struct pbft_new_view { + explicit pbft_new_view() = default; + + pbft_message_common common = pbft_message_common(pbft_message_type::new_view); + pbft_view_type new_view = 0; + pbft_prepared_certificate prepared_cert; + vector committed_certs; + pbft_stable_checkpoint stable_checkpoint; + pbft_view_changed_certificate view_changed_cert; + signature_type sender_signature; + + bool operator<(const pbft_new_view &rhs) const { + return new_view < rhs.new_view; + } + + digest_type digest(chain_id_type chain_id) const { + digest_type::encoder enc; + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, common); + fc::raw::pack(enc, new_view); + fc::raw::pack(enc, prepared_cert); + fc::raw::pack(enc, committed_certs); + fc::raw::pack(enc, stable_checkpoint); + fc::raw::pack(enc, view_changed_cert); + return enc.result(); + } + + bool empty() const { + return new_view == 0 + && prepared_cert.empty() + && committed_certs.empty() + && stable_checkpoint.empty() + && view_changed_cert.empty() + && sender_signature == signature_type(); + } + }; + + struct pbft_state { + block_id_type block_id; + block_num_type block_num = 0; + flat_map, pbft_prepare> prepares; + bool is_prepared = false; + flat_map, pbft_commit> commits; + bool is_committed = false; + }; + + struct pbft_view_change_state { + pbft_view_type view; + flat_map view_changes; + bool is_view_changed = false; + }; + + struct pbft_checkpoint_state { + block_id_type block_id; + block_num_type block_num = 0; + flat_map checkpoints; + bool is_stable = false; + }; + + using pbft_state_ptr = std::shared_ptr; + using 
pbft_view_change_state_ptr = std::shared_ptr; + using pbft_checkpoint_state_ptr = std::shared_ptr; + + struct by_block_id; + struct by_num; + struct by_prepare_and_num; + struct by_commit_and_num; + typedef multi_index_container< + pbft_state_ptr, + indexed_by< + hashed_unique < + tag, + member, + std::hash + >, + ordered_non_unique< + tag, + member, + less<> + >, + ordered_non_unique< + tag, + composite_key< + pbft_state, + member, + member + >, + composite_key_compare< greater<>, greater<> > + >, + ordered_non_unique< + tag, + composite_key< + pbft_state, + member, + member + >, + composite_key_compare< greater<>, greater<> > + > + > + > pbft_state_multi_index_type; + + struct by_view; + struct by_count_and_view; + typedef multi_index_container< + pbft_view_change_state_ptr, + indexed_by< + ordered_unique< + tag, + member, + greater<> + >, + ordered_non_unique< + tag, + composite_key< + pbft_view_change_state, + member, + member + >, + composite_key_compare, greater<>> + > + > + > pbft_view_state_multi_index_type; + + struct by_block_id; + struct by_num; + typedef multi_index_container< + pbft_checkpoint_state_ptr, + indexed_by< + hashed_unique< + tag, + member, + std::hash + >, + ordered_non_unique< + tag, + member, + less<> + > + > + > pbft_checkpoint_state_multi_index_type; + + class pbft_database { + public: + explicit pbft_database(controller &ctrl); + + ~pbft_database(); + + void close(); + + void add_pbft_prepare(pbft_prepare &p, const public_key_type &pk); + void add_pbft_commit(pbft_commit &c, const public_key_type &pk); + void add_pbft_view_change(pbft_view_change &vc, const public_key_type &pk); + void add_pbft_checkpoint(pbft_checkpoint &cp, const public_key_type &pk); + + pbft_prepare send_and_add_pbft_prepare(const pbft_prepare &cached_prepare = pbft_prepare(), pbft_view_type current_view = 0); + pbft_commit send_and_add_pbft_commit(const pbft_commit &cached_commit = pbft_commit(), pbft_view_type current_view = 0); + pbft_view_change send_and_add_pbft_view_change( + const pbft_view_change &cached_view_change = pbft_view_change(), + const pbft_prepared_certificate &ppc = pbft_prepared_certificate(), + const vector &pcc = vector{}, + pbft_view_type current_view = 0, + pbft_view_type target_view = 1); + pbft_new_view send_pbft_new_view( + const pbft_view_changed_certificate &vcc = pbft_view_changed_certificate(), + pbft_view_type current_view = 1); + vector generate_and_add_pbft_checkpoint(); + void send_pbft_checkpoint(); + + bool should_prepared(); + bool should_committed(); + pbft_view_type should_view_change(); + bool should_new_view(pbft_view_type target_view); + + //new view + bool has_new_primary(const public_key_type &pk); + pbft_view_type get_proposed_new_view_num(); + pbft_view_type get_committed_view(); + public_key_type get_new_view_primary_key(pbft_view_type target_view); + + void mark_as_prepared(const block_id_type &bid); + void mark_as_committed(const block_id_type &bid); + void commit_local(); + void checkpoint_local(); + + //view change + pbft_prepared_certificate generate_prepared_certificate(); + vector generate_committed_certificate(); + pbft_view_changed_certificate generate_view_changed_certificate(pbft_view_type target_view); + bool should_stop_view_change(const pbft_view_change &vc); + + //validations + bool is_valid_prepare(const pbft_prepare &p, const public_key_type &pk); + bool is_valid_commit(const pbft_commit &c, const public_key_type &pk); + bool is_valid_checkpoint(const pbft_checkpoint &cp, const public_key_type &pk); + bool 
is_valid_view_change(const pbft_view_change &vc, const public_key_type &pk); + void validate_new_view(const pbft_new_view &nv, const public_key_type &pk); + bool is_valid_stable_checkpoint(const pbft_stable_checkpoint &scp, bool add_to_pbft_db = false); + bool should_send_pbft_msg(); + bool should_recv_pbft_msg(const public_key_type &pub_key); + + bool pending_pbft_lib(); + chain_id_type get_chain_id() {return chain_id;} + pbft_stable_checkpoint get_stable_checkpoint_by_id(const block_id_type &block_id, bool incl_blk_extn = true); + pbft_stable_checkpoint fetch_stable_checkpoint_from_blk_extn(const signed_block_ptr &b); + block_info_type cal_pending_stable_checkpoint() const; + + void cleanup_on_new_view(); + void update_fork_schedules(); + + //api related + pbft_state_ptr get_pbft_state_by_id(const block_id_type &id) const; + vector get_checkpoints_by_num(const block_num_type &num) const; + pbft_view_change_state_ptr get_view_changes_by_target_view(const pbft_view_type &tv) const; + vector get_pbft_watermarks() const; + flat_map get_pbft_fork_schedules() const; + + + signal pbft_outgoing_prepare; + signal pbft_incoming_prepare; + + signal pbft_outgoing_commit; + signal pbft_incoming_commit; + + signal pbft_outgoing_view_change; + signal pbft_incoming_view_change; + + signal pbft_outgoing_new_view; + signal pbft_incoming_new_view; + + signal pbft_outgoing_checkpoint; + signal pbft_incoming_checkpoint; + + private: + controller &ctrl; + pbft_state_multi_index_type pbft_state_index; + pbft_view_state_multi_index_type view_state_index; + pbft_checkpoint_state_multi_index_type checkpoint_index; + fc::path pbft_db_dir; + fc::path checkpoints_dir; + vector prepare_watermarks; + flat_map fork_schedules; + chain_id_type chain_id = ctrl.get_chain_id(); + + + bool is_less_than_high_watermark(const block_num_type &bnum); + bool is_valid_prepared_certificate(const pbft_prepared_certificate &certificate, bool add_to_pbft_db = false); + bool is_valid_committed_certificate(const pbft_committed_certificate &certificate, bool add_to_pbft_db = false); + bool is_valid_longest_fork(const block_info_type &bi, vector block_infos, unsigned long threshold, unsigned long non_fork_bp_count); + + producer_schedule_type lscb_active_producers() const; + vector& get_updated_watermarks(); + flat_map& get_updated_fork_schedules(); + block_num_type get_current_pbft_watermark(); + + vector> fetch_fork_from(vector &block_infos); + vector fetch_first_fork_from(vector &bi); + + template + void emit(const Signal &s, Arg &&a); + + void set(const pbft_state_ptr& s); + void set(const pbft_checkpoint_state_ptr& s); + void prune(const pbft_state_ptr &h); + void prune(const pbft_checkpoint_state_ptr &h); + }; + } +} /// namespace eosio::chain + +FC_REFLECT(eosio::chain::block_info_type, (block_id)) +FC_REFLECT_ENUM(eosio::chain::pbft_message_type, (prepare)(commit)(checkpoint)(view_change)(new_view)) + +FC_REFLECT(eosio::chain::pbft_message_common, (type)(timestamp)) + +FC_REFLECT_TEMPLATE((typename pbft_message_body), eosio::chain::pbft_message_metadata, (msg)(sender_key)) + +FC_REFLECT(eosio::chain::pbft_prepare, (common)(view)(block_info)(sender_signature)) +FC_REFLECT(eosio::chain::pbft_commit, (common)(view)(block_info)(sender_signature)) +FC_REFLECT(eosio::chain::pbft_checkpoint,(common)(block_info)(sender_signature)) +FC_REFLECT(eosio::chain::pbft_view_change, (common)(current_view)(target_view)(prepared_cert)(committed_certs)(stable_checkpoint)(sender_signature)) +FC_REFLECT(eosio::chain::pbft_new_view, 
(common)(new_view)(prepared_cert)(committed_certs)(stable_checkpoint)(view_changed_cert)(sender_signature)) + + +FC_REFLECT(eosio::chain::pbft_prepared_certificate, (block_info)(pre_prepares)(prepares)) +FC_REFLECT(eosio::chain::pbft_committed_certificate,(block_info)(commits)) +FC_REFLECT(eosio::chain::pbft_view_changed_certificate, (target_view)(view_changes)) +FC_REFLECT(eosio::chain::pbft_stable_checkpoint, (block_info)(checkpoints)) + +FC_REFLECT(eosio::chain::pbft_state, (block_id)(block_num)(prepares)(is_prepared)(commits)(is_committed)) +FC_REFLECT(eosio::chain::pbft_view_change_state, (view)(view_changes)(is_view_changed)) +FC_REFLECT(eosio::chain::pbft_checkpoint_state, (block_id)(block_num)(checkpoints)(is_stable)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 499fbe29960..b1a3a44892f 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -219,10 +220,54 @@ namespace eosio { namespace chain { T& data; }; + template + struct snapshot_pbft_migrate_row_reader : abstract_snapshot_row_reader { + explicit snapshot_pbft_migrate_row_reader( T& data ) + :data(data) {} + + + void provide(std::istream& in) const override { + row_validation_helper::apply(data, [&in,this](){ + if(typeid(T)== typeid(eosio::chain::block_header_state)){ + std::ostringstream sstream; + sstream << in.rdbuf(); + std::string str(sstream.str()); + //append uint32_t 0 + std::vector tmp(str.begin(), str.end()); + tmp.insert(tmp.end(), {0,0,0,0}); + fc::datastream tmp_ds(tmp.data(), tmp.size()); + fc::raw::unpack(tmp_ds, data); + auto original_data_length = tmp_ds.tellp() - 4; + in.seekg(original_data_length); + data.pbft_stable_checkpoint_blocknum = 0; + }else{ + fc::raw::unpack(in, data); + } + }); + } + + void provide(const fc::variant& var) const override { + row_validation_helper::apply(data, [&var,this]() { + fc::from_variant(var, data); + }); + } + + std::string row_type_name() const override { + return boost::core::demangle( typeid( T ).name() ); + } + + T& data; + }; + template snapshot_row_reader make_row_reader( T& data ) { return snapshot_row_reader(data); } + + template + snapshot_pbft_migrate_row_reader make_pbft_migrate_row_reader( T& data ) { + return snapshot_pbft_migrate_row_reader(data); + } } class snapshot_reader { @@ -249,6 +294,12 @@ namespace eosio { namespace chain { return result; } + template + auto read_pbft_migrate_row( T& out, chainbase::database& db ) -> std::enable_if_t, typename detail::snapshot_row_traits::snapshot_type>::value,bool> { + auto reader = detail::make_pbft_migrate_row_reader(out); + return _reader.read_row(reader); + } + bool empty() { return _reader.empty(); } diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index bddeb1dd553..f8360c0bc7d 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -189,6 +189,7 @@ namespace eosio { namespace chain { account_history_object_type, ///< Defined by history_plugin action_history_object_type, ///< Defined by history_plugin reversible_block_object_type, + upgrade_property_object_type, OBJECT_TYPE_COUNT ///< Sentry value which contains the number of different object types }; diff --git a/libraries/chain/pbft.cpp b/libraries/chain/pbft.cpp new file mode 100644 index 
00000000000..8789abce3cf --- /dev/null +++ b/libraries/chain/pbft.cpp @@ -0,0 +1,582 @@ +#include + +#include +#include +#include + +namespace eosio { + namespace chain { + + pbft_controller::pbft_controller(controller &ctrl) : + pbft_db(ctrl), + state_machine(new psm_machine(pbft_db)) { + datadir = ctrl.state_dir(); + + if (!fc::is_directory(datadir)) + fc::create_directories(datadir); + + auto pbft_db_dat = datadir / config::pbftdb_filename; + if (fc::exists(pbft_db_dat)) { + string content; + fc::read_file_contents(pbft_db_dat, content); + + fc::datastream ds(content.data(), content.size()); + uint32_t current_view; + fc::raw::unpack(ds, current_view); + state_machine->set_current_view(current_view); + state_machine->set_target_view(state_machine->get_current_view() + 1); + ilog("current view: ${cv}", ("cv", current_view)); + } + + fc::remove(pbft_db_dat); + } + + pbft_controller::~pbft_controller() { + fc::path pbft_db_dat = datadir / config::pbftdb_filename; + std::ofstream out(pbft_db_dat.generic_string().c_str(), + std::ios::out | std::ios::binary | std::ofstream::trunc); + + uint32_t current_view = state_machine->get_current_view(); + fc::raw::pack(out, current_view); + } + + void pbft_controller::maybe_pbft_prepare() { + if (!pbft_db.should_send_pbft_msg()) return; + state_machine->send_prepare(); + } + + void pbft_controller::maybe_pbft_commit() { + if (!pbft_db.should_send_pbft_msg()) return; + state_machine->send_commit(); + } + + void pbft_controller::maybe_pbft_view_change() { + if (!pbft_db.should_send_pbft_msg()) return; + if (state_machine->get_view_change_timer() <= view_change_timeout) { + if (!state_machine->get_view_changes_cache().empty()) { + pbft_db.send_and_add_pbft_view_change(state_machine->get_view_changes_cache()); + } + state_machine->set_view_change_timer(state_machine->get_view_change_timer() + 1); + } else { + state_machine->set_view_change_timer(0); + state_machine->send_view_change(); + } + } + + void pbft_controller::maybe_pbft_checkpoint() { + if (!pbft_db.should_send_pbft_msg()) return; + pbft_db.send_pbft_checkpoint(); + pbft_db.checkpoint_local(); + } + + void pbft_controller::on_pbft_prepare(pbft_metadata_ptr p) { + state_machine->on_prepare(std::move(p)); + } + + void pbft_controller::on_pbft_commit(pbft_metadata_ptr c) { + state_machine->on_commit(std::move(c)); + } + + void pbft_controller::on_pbft_view_change(pbft_metadata_ptr vc) { + state_machine->on_view_change(std::move(vc)); + } + + void pbft_controller::on_pbft_new_view(const pbft_metadata_ptr &nv) { + state_machine->on_new_view(nv); + } + + void pbft_controller::on_pbft_checkpoint(const pbft_metadata_ptr &cp) { + if (!pbft_db.is_valid_checkpoint(cp->msg, cp->sender_key)) return; + pbft_db.add_pbft_checkpoint(cp->msg, cp->sender_key); + pbft_db.checkpoint_local(); + } + + psm_state::psm_state() = default; + psm_state::~psm_state() = default; + + psm_machine::psm_machine(pbft_database &pbft_db) : pbft_db(pbft_db) { + set_current(std::make_shared()); + + set_prepares_cache(pbft_prepare()); + set_commits_cache(pbft_commit()); + set_view_changes_cache(pbft_view_change()); + + set_prepared_certificate(pbft_prepared_certificate{}); + set_committed_certificate(vector{}); + set_view_changed_certificate(pbft_view_changed_certificate{}); + + view_change_timer = 0; + target_view_retries = 0; + current_view = 0; + target_view = current_view + 1; + } + + psm_machine::~psm_machine() = default; + + void psm_machine::on_prepare(pbft_metadata_ptr e) { + current->on_prepare(shared_from_this(), std::move(e), 
pbft_db); + } + + void psm_machine::send_prepare() { + current->send_prepare(shared_from_this(), pbft_db); + } + + void psm_machine::on_commit(pbft_metadata_ptr e) { + current->on_commit(shared_from_this(), std::move(e), pbft_db); + } + + void psm_machine::send_commit() { + current->send_commit(shared_from_this(), pbft_db); + } + + void psm_machine::on_view_change(pbft_metadata_ptr e) { + current->on_view_change(shared_from_this(), std::move(e), pbft_db); + } + + void psm_machine::send_view_change() { + current->send_view_change(shared_from_this(), pbft_db); + } + + void psm_machine::on_new_view(const pbft_metadata_ptr &e) { + if (e->msg.new_view <= get_current_view()) return; + + try { + pbft_db.validate_new_view(e->msg, e->sender_key); + } catch (const fc::exception& ex) { + elog("bad new view, ${s} ", ("s",ex.to_string())); + return; + } + + try { + transit_to_new_view(e, current); + } catch(...) { + elog("apply new view failed, waiting for next round.. ${nv} ", ("nv", e->msg)); + } + } + + void psm_machine::manually_set_current_view(const uint32_t &cv) { + set_current_view(cv); + set_target_view(cv + 1); + transit_to_view_change_state(current); + } + + /** + * psm_prepared_state + */ + + psm_prepared_state::psm_prepared_state() {pending_commit_local = false;} + psm_prepared_state::~psm_prepared_state() = default; + + void psm_prepared_state::on_prepare(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) { + //ignore + } + + void psm_prepared_state::send_prepare(psm_machine_ptr m, pbft_database &pbft_db) { + //retry + if (m->get_prepares_cache().empty()) return; + + pbft_db.send_and_add_pbft_prepare(m->get_prepares_cache(), m->get_current_view()); + } + + void psm_prepared_state::on_commit(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) { + + if (e->msg.view < m->get_current_view()) return; + if (!pbft_db.is_valid_commit(e->msg, e->sender_key)) return; + + pbft_db.add_pbft_commit(e->msg, e->sender_key); + + //`pending_commit_local` is used to mark committed local status in psm machine; + //`pbft_db.pending_pbft_lib()` is used to mark commit local status in controller; + // following logic is implemented to resolve async problem during lib committing; + + if (pbft_db.should_committed() && !pending_commit_local) { + pbft_db.commit_local(); + pending_commit_local = true; + } + + if (pending_commit_local && !pbft_db.pending_pbft_lib()) { + pbft_db.send_pbft_checkpoint(); + pbft_db.checkpoint_local(); + m->transit_to_committed_state(shared_from_this(), false); + } + } + + void psm_prepared_state::send_commit(psm_machine_ptr m, pbft_database &pbft_db) { + auto commits = pbft_db.send_and_add_pbft_commit(m->get_commits_cache(), m->get_current_view()); + + if (!commits.empty()) { + m->set_commits_cache(commits); + } + + if (pbft_db.should_committed() && !pending_commit_local) { + pbft_db.commit_local(); + pending_commit_local = true; + } + + if (pending_commit_local && !pbft_db.pending_pbft_lib()) { + pbft_db.send_pbft_checkpoint(); + pbft_db.checkpoint_local(); + m->transit_to_committed_state(shared_from_this(), false); + } + } + + void psm_prepared_state::on_view_change(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) { + + if (e->msg.target_view <= m->get_current_view()) return; + if (!pbft_db.is_valid_view_change(e->msg, e->sender_key)) return; + + pbft_db.add_pbft_view_change(e->msg, e->sender_key); + + //if received >= f+1 view_change on some view, transit to view_change and send view change + auto target_view = pbft_db.should_view_change(); + if 
(target_view > 0 && target_view > m->get_current_view()) { + m->set_target_view(target_view); + m->transit_to_view_change_state(shared_from_this()); + } + } + + void psm_prepared_state::send_view_change(psm_machine_ptr m, pbft_database &pbft_db) { + m->transit_to_view_change_state(shared_from_this()); + } + + + psm_committed_state::psm_committed_state() = default; + psm_committed_state::~psm_committed_state() = default; + + /** + * psm_committed_state + */ + void psm_committed_state::on_prepare(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) { + //validate + if (e->msg.view < m->get_current_view()) return; + if (!pbft_db.is_valid_prepare(e->msg, e->sender_key)) return; + + //do action add prepare + pbft_db.add_pbft_prepare(e->msg, e->sender_key); + + //if prepare >= 2f+1, transit to prepared + if (pbft_db.should_prepared()) m->transit_to_prepared_state(shared_from_this()); + } + + void psm_committed_state::send_prepare(psm_machine_ptr m, pbft_database &pbft_db) { + + auto prepares = pbft_db.send_and_add_pbft_prepare(m->get_prepares_cache(), m->get_current_view()); + + if (!prepares.empty()) { + m->set_prepares_cache(prepares); + } + + //if prepare >= 2f+1, transit to prepared + if (pbft_db.should_prepared()) m->transit_to_prepared_state(shared_from_this()); + } + + void psm_committed_state::on_commit(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) { + + if (e->msg.view < m->get_current_view()) return; + if (!pbft_db.is_valid_commit(e->msg, e->sender_key)) return; + + pbft_db.add_pbft_commit(e->msg, e->sender_key); + } + + void psm_committed_state::send_commit(psm_machine_ptr m, pbft_database &pbft_db) { + + if (m->get_commits_cache().empty()) return; + pbft_db.send_and_add_pbft_commit(m->get_commits_cache(), m->get_current_view()); + + } + + void psm_committed_state::on_view_change(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) { + + if (e->msg.target_view <= m->get_current_view()) return; + if (!pbft_db.is_valid_view_change(e->msg, e->sender_key)) return; + + pbft_db.add_pbft_view_change(e->msg, e->sender_key); + + //if received >= f+1 view_change on some view, transit to view_change and send view change + auto new_view = pbft_db.should_view_change(); + if (new_view > 0 && new_view > m->get_current_view()) { + m->set_target_view(new_view); + m->transit_to_view_change_state(shared_from_this()); + } + } + + void psm_committed_state::send_view_change(psm_machine_ptr m, pbft_database &pbft_db) { + m->transit_to_view_change_state(shared_from_this()); + } + + psm_view_change_state::psm_view_change_state() = default; + psm_view_change_state::~psm_view_change_state() = default; + /** + * psm_view_change_state + */ + void psm_view_change_state::on_prepare(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) { + //ignore; + } + + void psm_view_change_state::send_prepare(psm_machine_ptr m, pbft_database &pbft_db) { + //ignore; + } + + void psm_view_change_state::on_commit(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) { + //ignore; + } + + void psm_view_change_state::send_commit(psm_machine_ptr m, pbft_database &pbft_db) { + //ignore; + } + + void psm_view_change_state::on_view_change(psm_machine_ptr m, pbft_metadata_ptr e, pbft_database &pbft_db) { + + //skip from view change state if my lib is higher than my view change state height. 
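The transitions above hinge on two integer thresholds computed from the size of the active producer schedule: a supermajority of two thirds plus one for treating a block as prepared or committed, and one third plus one (at least f+1 view changes) for entering view change. A minimal sketch of that arithmetic, assuming the same integer expressions used elsewhere in this patch:

#include <cstddef>
#include <iostream>

std::size_t supermajority_threshold(std::size_t producers) {
    return producers * 2 / 3 + 1;      // e.g. 21 producers -> 15
}

std::size_t view_change_threshold(std::size_t producers) {
    return producers / 3 + 1;          // e.g. 21 producers -> 8
}

int main() {
    for (std::size_t n : {4, 7, 21}) {
        std::cout << n << " producers: prepare/commit quorum "
                  << supermajority_threshold(n) << ", view-change trigger "
                  << view_change_threshold(n) << "\n";
    }
}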
+ auto vc = m->get_view_changes_cache(); + if (!vc.empty() && pbft_db.should_stop_view_change(vc)) { + m->transit_to_committed_state(shared_from_this(), false); + return; + } + + if (e->msg.target_view <= m->get_current_view()) return; + if (!pbft_db.is_valid_view_change(e->msg, e->sender_key)) return; + + pbft_db.add_pbft_view_change(e->msg, e->sender_key); + + m->maybe_new_view(shared_from_this()); + } + + void psm_view_change_state::send_view_change(psm_machine_ptr m, pbft_database &pbft_db) { + + //skip from view change state if my lib is higher than my view change state height. + auto vc = m->get_view_changes_cache(); + if (!vc.empty() && pbft_db.should_stop_view_change(vc)) { + m->transit_to_committed_state(shared_from_this(), false); + return; + } + + m->do_send_view_change(); + + m->maybe_new_view(shared_from_this()); + } + + void psm_machine::transit_to_committed_state(psm_state_ptr s, bool to_new_view) { + + if (!to_new_view) { + auto nv = pbft_db.get_committed_view(); + if (nv > get_current_view()) set_current_view(nv); + set_target_view(get_current_view() + 1); + } + + auto prepares = pbft_db.send_and_add_pbft_prepare(pbft_prepare(), get_current_view()); + set_prepares_cache(prepares); + + set_view_changes_cache(pbft_view_change()); + set_view_change_timer(0); + + set_current(std::make_shared()); + s.reset(); + } + + void psm_machine::transit_to_prepared_state(psm_state_ptr s) { + + auto commits = pbft_db.send_and_add_pbft_commit(pbft_commit(), get_current_view()); + set_commits_cache(commits); + + set_view_changes_cache(pbft_view_change()); + + set_current(std::make_shared()); + s.reset(); + } + + void psm_machine::transit_to_view_change_state(psm_state_ptr s) { + + set_commits_cache(pbft_commit()); + set_prepares_cache(pbft_prepare()); + + set_view_change_timer(0); + set_target_view_retries(0); + + set_current(std::make_shared()); + if (pbft_db.should_send_pbft_msg()) { + do_send_view_change(); + auto nv = maybe_new_view(s); + if (nv) return; + } + s.reset(); + } + + bool psm_machine::maybe_new_view(const psm_state_ptr &s) { + //if view_change >= 2f+1, calculate next primary, send new view if is primary + auto nv = get_target_view(); + auto pk = pbft_db.get_new_view_primary_key(nv); + if (pbft_db.should_new_view(nv) && pbft_db.has_new_primary(pk)) { + + set_view_changed_certificate(pbft_db.generate_view_changed_certificate(nv)); + + auto new_view = pbft_db.get_proposed_new_view_num(); + if (new_view != nv) return false; + + auto nv_msg = pbft_db.send_pbft_new_view( + get_view_changed_certificate(), + new_view); + + if (nv_msg.empty()) return false; + + try { + pbft_db.validate_new_view(nv_msg, pk); + } catch (const fc::exception& ex) { + elog("bad new view, ${s} ", ("s", ex.to_string())); + return false; + } + + try { + transit_to_new_view(std::make_shared>(nv_msg, pbft_db.get_chain_id()), s); + return true; + } catch(const fc::exception& ex) { + elog("apply new view failed, waiting for next round.. 
${nv} ", ("nv", nv_msg)); + } + } + return false; + } + + void psm_machine::transit_to_new_view(const pbft_metadata_ptr& e, const psm_state_ptr& s) { + + set_current_view(e->msg.new_view); + set_target_view(e->msg.new_view + 1); + + set_prepares_cache(pbft_prepare()); + + set_view_change_timer(0); + set_target_view_retries(0); + + pbft_db.cleanup_on_new_view(); + + if (!e->msg.committed_certs.empty()) { + auto committed_certs = e->msg.committed_certs; + std::sort(committed_certs.begin(), committed_certs.end()); + for (auto const &cc :committed_certs) { + pbft_db.mark_as_committed(cc.block_info.block_id); + } + } + + if (!e->msg.prepared_cert.prepares.empty()) { + pbft_db.mark_as_prepared(e->msg.prepared_cert.block_info.block_id); + if (pbft_db.should_prepared()) { + transit_to_prepared_state(s); + return; + } + } + + transit_to_committed_state(s, true); + } + + void psm_machine::do_send_view_change() { + + auto reset_view_change_state = [&]() { + set_view_changes_cache(pbft_view_change()); + set_prepared_certificate(pbft_db.generate_prepared_certificate()); + set_committed_certificate(pbft_db.generate_committed_certificate()); + }; + + if (get_target_view_retries() < pow(2,get_target_view() - get_current_view() - 1)) { + if (get_target_view_retries() == 0) reset_view_change_state(); + set_target_view_retries(get_target_view_retries() + 1); + } else { + set_target_view_retries(0); + set_target_view(get_target_view() + 1); + reset_view_change_state(); + } + + EOS_ASSERT((get_target_view() > get_current_view()), pbft_exception, + "target view should be always greater than current view"); + + auto view_changes = pbft_db.send_and_add_pbft_view_change( + get_view_changes_cache(), + get_prepared_certificate(), + get_committed_certificate(), + get_current_view(), + get_target_view()); + + if (!view_changes.empty()) { + set_view_changes_cache(view_changes); + } + } + + const pbft_prepare& psm_machine::get_prepares_cache() const { + return cache.prepares_cache; + } + + void psm_machine::set_prepares_cache(const pbft_prepare &pcache) { + cache.prepares_cache = pcache; + } + + const pbft_commit& psm_machine::get_commits_cache() const { + return cache.commits_cache; + } + + void psm_machine::set_commits_cache(const pbft_commit &ccache) { + cache.commits_cache = ccache; + } + + const pbft_view_change& psm_machine::get_view_changes_cache() const { + return cache.view_changes_cache; + } + + void psm_machine::set_view_changes_cache(const pbft_view_change &vc_cache) { + cache.view_changes_cache = vc_cache; + } + + const uint32_t& psm_machine::get_current_view() const { + return current_view; + } + + void psm_machine::set_current_view(const uint32_t &cv) { + current_view = cv; + } + + const pbft_prepared_certificate& psm_machine::get_prepared_certificate() const { + return cache.prepared_certificate; + } + + void psm_machine::set_prepared_certificate(const pbft_prepared_certificate &pcert) { + cache.prepared_certificate = pcert; + } + + const vector& psm_machine::get_committed_certificate() const { + return cache.committed_certificate; + } + + void psm_machine::set_committed_certificate(const vector &ccert) { + cache.committed_certificate = ccert; + } + + const pbft_view_changed_certificate& psm_machine::get_view_changed_certificate() const { + return cache.view_changed_certificate; + } + + void psm_machine::set_view_changed_certificate(const pbft_view_changed_certificate &vc_cert) { + cache.view_changed_certificate = vc_cert; + } + + const uint32_t& psm_machine::get_target_view_retries() const { + return 
target_view_retries; + } + + void psm_machine::set_target_view_retries(const uint32_t &tv_reties) { + target_view_retries = tv_reties; + } + + const uint32_t& psm_machine::get_target_view() const { + return target_view; + } + + void psm_machine::set_target_view(const uint32_t &tv) { + target_view = tv; + } + + const uint32_t& psm_machine::get_view_change_timer() const { + return view_change_timer; + } + + void psm_machine::set_view_change_timer(const uint32_t &vc_timer) { + view_change_timer = vc_timer; + } + } +} \ No newline at end of file diff --git a/libraries/chain/pbft_database.cpp b/libraries/chain/pbft_database.cpp new file mode 100644 index 00000000000..7bcaf19fa45 --- /dev/null +++ b/libraries/chain/pbft_database.cpp @@ -0,0 +1,1623 @@ +#include +#include +#include +#include + +namespace eosio { + namespace chain { + + pbft_database::pbft_database(controller &ctrl) : + ctrl(ctrl) { + checkpoint_index = pbft_checkpoint_state_multi_index_type(); + view_state_index = pbft_view_state_multi_index_type(); + prepare_watermarks = vector{}; + pbft_db_dir = ctrl.state_dir(); + checkpoints_dir = ctrl.blocks_dir(); + chain_id = ctrl.get_chain_id(); + + if (!fc::is_directory(pbft_db_dir)) fc::create_directories(pbft_db_dir); + + auto pbft_db_dat = pbft_db_dir / config::pbftdb_filename; + if (fc::exists(pbft_db_dat)) { + string content; + fc::read_file_contents(pbft_db_dat, content); + + fc::datastream ds(content.data(), content.size()); + + //skip current_view in pbftdb.dat. + ds.seekp(ds.tellp() + 4); + + unsigned_int size; + fc::raw::unpack(ds, size); + for (uint32_t i = 0, n = size.value; i < n; ++i) { + pbft_state s; + fc::raw::unpack(ds, s); + set(std::make_shared(move(s))); + } + } else { + pbft_state_index = pbft_state_multi_index_type(); + } + + if (!fc::is_directory(checkpoints_dir)) fc::create_directories(checkpoints_dir); + + auto checkpoints_db = checkpoints_dir / config::checkpoints_filename; + if (fc::exists(checkpoints_db)) { + string content; + fc::read_file_contents(checkpoints_db, content); + + fc::datastream ds(content.data(), content.size()); + + unsigned_int checkpoint_size; + fc::raw::unpack(ds, checkpoint_size); + for (uint32_t j = 0, m = checkpoint_size.value; j < m; ++j) { + pbft_checkpoint_state cs; + fc::raw::unpack(ds, cs); + set(std::make_shared(move(cs))); + } + ilog("checkpoint index size: ${cs}", ("cs", checkpoint_index.size())); + } else { + checkpoint_index = pbft_checkpoint_state_multi_index_type(); + } + + fc::remove(checkpoints_db); + } + + void pbft_database::close() { + + fc::path checkpoints_db = checkpoints_dir / config::checkpoints_filename; + std::ofstream c_out(checkpoints_db.generic_string().c_str(), + std::ios::out | std::ios::binary | std::ofstream::trunc); + + uint32_t num_records_in_checkpoint_db = checkpoint_index.size(); + fc::raw::pack(c_out, unsigned_int{num_records_in_checkpoint_db}); + + for (auto const &s: checkpoint_index) { + fc::raw::pack(c_out, *s); + } + + fc::path pbft_db_dat = pbft_db_dir / config::pbftdb_filename; + std::ofstream out(pbft_db_dat.generic_string().c_str(), + std::ios::out | std::ios::binary | std::ofstream::app); + uint32_t num_records_in_db = pbft_state_index.size(); + fc::raw::pack(out, unsigned_int{num_records_in_db}); + + for (auto const &s : pbft_state_index) { + fc::raw::pack(out, *s); + } + + pbft_state_index.clear(); + checkpoint_index.clear(); + } + + pbft_database::~pbft_database() { + close(); + } + + void pbft_database::add_pbft_prepare(pbft_prepare &p, const public_key_type &pk) { + + auto 
&by_block_id_index = pbft_state_index.get(); + + auto current = ctrl.fetch_block_state_by_id(p.block_info.block_id); + + while ((current) && (current->block_num > ctrl.last_irreversible_block_num())) { + auto curr_itr = by_block_id_index.find(current->id); + + if (curr_itr == by_block_id_index.end()) { + try { + flat_map, pbft_prepare> prepares; + prepares[std::make_pair(p.view, pk)] = p; + pbft_state curr_ps; + curr_ps.block_id = current->id; + curr_ps.block_num = current->block_num; + curr_ps.prepares = prepares; + auto curr_psp = std::make_shared(move(curr_ps)); + pbft_state_index.insert(curr_psp); + } catch (...) { + elog( "prepare insert failure: ${p}", ("p", p)); + } + } else { + auto prepares = (*curr_itr)->prepares; + if (prepares.find(std::make_pair(p.view, pk)) == prepares.end()) { + by_block_id_index.modify(curr_itr, [&](const pbft_state_ptr &psp) { + psp->prepares[std::make_pair(p.view, pk)] = p; + }); + } + } + curr_itr = by_block_id_index.find(current->id); + if (curr_itr == by_block_id_index.end()) return; + + auto cpsp = *curr_itr; + auto prepares = cpsp->prepares; + auto as = current->active_schedule.producers; + auto threshold = as.size()* 2 / 3 + 1; + if (prepares.size() >= threshold && !cpsp->is_prepared && is_less_than_high_watermark(cpsp->block_num)) { + flat_map prepare_count; + for (auto const &pre: prepares) { + if (prepare_count.find(pre.second.view) == prepare_count.end()) prepare_count[pre.second.view] = 0; + } + + for (auto const &bp: as) { + for (auto const &pp: prepares) { + if (bp.block_signing_key == pp.first.second) prepare_count[pp.first.first] += 1; + } + } + for (auto const &e: prepare_count) { + if (e.second >= threshold) { + mark_as_prepared(cpsp->block_id); + } + } + } + current = ctrl.fetch_block_state_by_id(current->prev()); + } + } + + void pbft_database::mark_as_prepared(const block_id_type &bid) { + auto &by_block_id_index = pbft_state_index.get(); + auto itr = by_block_id_index.find(bid); + auto bnum = block_info_type{bid}.block_num(); + + if (itr == by_block_id_index.end()) { + pbft_state ps; + ps.block_id = bid; + ps.block_num = bnum; + ps.is_prepared = true; + auto psp = std::make_shared(move(ps)); + pbft_state_index.insert(psp); + return; + } + by_block_id_index.modify(itr, [&](const pbft_state_ptr &p) { p->is_prepared = true; }); + } + + pbft_prepare pbft_database::send_and_add_pbft_prepare(const pbft_prepare &cached_prepare, pbft_view_type current_view) { + auto prepare_to_be_cached = pbft_prepare(); + + auto head_block_num = ctrl.head_block_num(); + if (head_block_num <= 1) return prepare_to_be_cached; + auto my_prepare = ctrl.get_pbft_my_prepare(); + + auto reserve_prepare = [&](const block_id_type &in) { + if (in == block_id_type() || !ctrl.fetch_block_state_by_id(in)) return false; + auto lib = ctrl.last_irreversible_block_id(); + if (lib == block_id_type()) return true; + auto forks = ctrl.fork_db().fetch_branch_from(in, lib); + return !forks.first.empty() && forks.second.empty(); + }; + + + if (!cached_prepare.empty()) { + for (auto const &sp : ctrl.my_signature_providers()) { + //sign again, update cache, then emit + auto retry_p = cached_prepare; + retry_p.common.timestamp = time_point::now(); + retry_p.sender_signature = sp.second(retry_p.digest(chain_id)); + emit(pbft_outgoing_prepare, retry_p); + } + return prepare_to_be_cached; + } else if (reserve_prepare(my_prepare)) { + for (auto const &sp : ctrl.my_signature_providers()) { + pbft_prepare reserve_p; + reserve_p.view=current_view; reserve_p.block_info={my_prepare}; + 
reserve_p.sender_signature = sp.second(reserve_p.digest(chain_id)); + emit(pbft_outgoing_prepare, reserve_p); + if (prepare_to_be_cached.empty()) prepare_to_be_cached = reserve_p; + } + return prepare_to_be_cached; + } else { + + auto current_watermark = get_current_pbft_watermark(); + auto lib = ctrl.last_irreversible_block_num(); + + uint32_t high_watermark_block_num = head_block_num; + + if ( current_watermark > 0 ) { + high_watermark_block_num = std::min(head_block_num, current_watermark); + } + + if (high_watermark_block_num <= lib) return prepare_to_be_cached; + + if (auto hwbs = ctrl.fork_db().get_block_in_current_chain_by_num(high_watermark_block_num)) { + auto sent = false; + for (auto const &sp : ctrl.my_signature_providers()) { + pbft_prepare new_p; + new_p.view=current_view; new_p.block_info={hwbs->id}; + new_p.sender_signature = sp.second(new_p.digest(chain_id)); + if (is_valid_prepare(new_p, sp.first)) { + emit(pbft_outgoing_prepare, new_p); + add_pbft_prepare(new_p, sp.first); + sent = true; + if (prepare_to_be_cached.empty()) prepare_to_be_cached = new_p; + } + } + if (sent) ctrl.set_pbft_my_prepare(hwbs->id); + } + return prepare_to_be_cached; + } + } + + bool pbft_database::should_prepared() { + + auto const &by_prepare_and_num_index = pbft_state_index.get(); + auto itr = by_prepare_and_num_index.begin(); + if (itr == by_prepare_and_num_index.end()) return false; + + pbft_state_ptr psp = *itr; + + if (psp->is_prepared && (psp->block_num > ctrl.last_irreversible_block_num())) { + ctrl.set_pbft_prepared((*itr)->block_id); + return true; + } + return false; + } + + bool pbft_database::is_valid_prepare(const pbft_prepare &p, const public_key_type &pk) { + // a prepare msg under lscb (which is no longer in fork_db), can be treated as null, thus true. + if (p.block_info.block_num() <= ctrl.last_stable_checkpoint_block_num()) return true; + return should_recv_pbft_msg(pk); + } + + void pbft_database::add_pbft_commit(pbft_commit &c, const public_key_type &pk) { + + auto &by_block_id_index = pbft_state_index.get(); + + auto current = ctrl.fetch_block_state_by_id(c.block_info.block_id); + + while ((current) && (current->block_num > ctrl.last_irreversible_block_num())) { + + auto curr_itr = by_block_id_index.find(current->id); + + if (curr_itr == by_block_id_index.end()) { + try { + flat_map, pbft_commit> commits; + commits[std::make_pair(c.view, pk)] = c; + pbft_state curr_ps; + curr_ps.block_id = current->id; + curr_ps.block_num = current->block_num; + curr_ps.commits = commits; + auto curr_psp = std::make_shared(move(curr_ps)); + pbft_state_index.insert(curr_psp); + } catch (...) 
{ + elog("commit insertion failure: ${c}", ("c", c)); + } + } else { + auto commits = (*curr_itr)->commits; + if (commits.find(std::make_pair(c.view, pk)) == commits.end()) { + by_block_id_index.modify(curr_itr, [&](const pbft_state_ptr &psp) { + psp->commits[std::make_pair(c.view, pk)] = c; + std::sort(psp->commits.begin(), psp->commits.end(), less<>()); + }); + } + } + + curr_itr = by_block_id_index.find(current->id); + if (curr_itr == by_block_id_index.end()) return; + + auto cpsp = *curr_itr; + + auto as = current->active_schedule.producers; + auto threshold = as.size()* 2 / 3 + 1; + auto commits = cpsp->commits; + if (commits.size() >= threshold && !cpsp->is_committed && is_less_than_high_watermark(cpsp->block_num)) { + flat_map commit_count; + for (auto const &com: commits) { + if (commit_count.find(com.second.view) == commit_count.end()) commit_count[com.second.view] = 0; + } + + for (auto const &bp: as) { + for (auto const &pc: commits) { + if (bp.block_signing_key == pc.first.second) commit_count[pc.first.first] += 1; + } + } + + for (auto const &e: commit_count) { + if (e.second >= threshold) { + mark_as_committed(cpsp->block_id); + } + } + } + current = ctrl.fetch_block_state_by_id(current->prev()); + } + } + + pbft_commit pbft_database::send_and_add_pbft_commit(const pbft_commit &cached_commit, pbft_view_type current_view) { + auto commit_to_be_cached = pbft_commit(); + + if (!cached_commit.empty()) { + for (auto const &sp : ctrl.my_signature_providers()) { + //sign again, update cache, then emit + auto retry_c = cached_commit; + retry_c.common.timestamp = time_point::now(); + retry_c.sender_signature = sp.second(retry_c.digest(chain_id)); + emit(pbft_outgoing_commit, retry_c); + } + return commit_to_be_cached; + } else { + auto const &by_prepare_and_num_index = pbft_state_index.get(); + auto itr = by_prepare_and_num_index.begin(); + if (itr == by_prepare_and_num_index.end()) return commit_to_be_cached; + + pbft_state_ptr psp = *itr; + auto bs = ctrl.fork_db().get_block(psp->block_id); + if (!bs) return commit_to_be_cached; + + if (psp->is_prepared && (psp->block_num > ctrl.last_irreversible_block_num())) { + + for (auto const &sp : ctrl.my_signature_providers()) { + pbft_commit new_c; + new_c.view=current_view; + new_c.block_info={psp->block_id}; + new_c.sender_signature = sp.second(new_c.digest(chain_id)); + + if (is_valid_commit(new_c, sp.first)) { + emit(pbft_outgoing_commit, new_c); + add_pbft_commit(new_c, sp.first); + if (commit_to_be_cached.empty()) commit_to_be_cached = new_c; + } + } + } + return commit_to_be_cached; + } + } + + void pbft_database::mark_as_committed(const block_id_type &bid) { + auto &by_block_id_index = pbft_state_index.get(); + auto itr = by_block_id_index.find(bid); + if (itr == by_block_id_index.end()) return; + by_block_id_index.modify(itr, [&](const pbft_state_ptr &p) { p->is_committed = true; }); + } + + bool pbft_database::should_committed() { + auto const &by_commit_and_num_index = pbft_state_index.get(); + auto itr = by_commit_and_num_index.begin(); + if (itr == by_commit_and_num_index.end()) return false; + pbft_state_ptr psp = *itr; + + return (psp->is_committed && (psp->block_num > ctrl.last_irreversible_block_num())); + } + + pbft_view_type pbft_database::get_committed_view() { + pbft_view_type new_view = 0; + if (!should_committed()) return new_view; + + auto const &by_commit_and_num_index = pbft_state_index.get(); + auto itr = by_commit_and_num_index.begin(); + pbft_state_ptr psp = *itr; + + auto blk_state = 
ctrl.fetch_block_state_by_id((*itr)->block_id); + if (!blk_state) return new_view; + auto as = blk_state->active_schedule.producers; + + auto commits = (*itr)->commits; + + auto threshold = as.size() * 2 / 3 + 1; + + flat_map commit_count; + for (auto const &com: commits) { + if (commit_count.find(com.second.view) == commit_count.end()) commit_count[com.second.view] = 0; + } + + for (auto const &bp: as) { + for (auto const &pc: commits) { + if (bp.block_signing_key == pc.first.second) commit_count[pc.first.first] += 1; + } + } + + for (auto const &e: commit_count) { + if (e.second >= threshold && e.first > new_view) { + new_view = e.first; + } + } + return new_view; + } + + bool pbft_database::is_valid_commit(const pbft_commit &c, const public_key_type &pk) { + if (c.block_info.block_num() <= ctrl.last_stable_checkpoint_block_num()) return true; + return should_recv_pbft_msg(pk); + } + + void pbft_database::commit_local() { + auto const &by_commit_and_num_index = pbft_state_index.get(); + auto itr = by_commit_and_num_index.begin(); + if (itr == by_commit_and_num_index.end()) return; + + pbft_state_ptr psp = *itr; + + ctrl.pbft_commit_local(psp->block_id); + } + + bool pbft_database::pending_pbft_lib() { + return ctrl.pending_pbft_lib(); + } + + void pbft_database::add_pbft_view_change(pbft_view_change &vc, const public_key_type &pk) { + + auto lscb_bps = lscb_active_producers().producers; + + auto &by_view_index = view_state_index.get(); + auto itr = by_view_index.find(vc.target_view); + if (itr == by_view_index.end()) { + flat_map view_changes; + view_changes[pk] = vc; + pbft_view_change_state vcs; + vcs.view = vc.target_view; + vcs.view_changes = view_changes; + auto vcsp = std::make_shared(move(vcs)); + view_state_index.insert(vcsp); + } else { + auto pvs = (*itr); + auto view_changes = pvs->view_changes; + + if (view_changes.find(pk) == view_changes.end()) { + by_view_index.modify(itr, [&](const pbft_view_change_state_ptr &pvsp) { + pvsp->view_changes[pk] = vc; + }); + } + } + + itr = by_view_index.find(vc.target_view); + if (itr == by_view_index.end()) return; + + auto vsp = *itr; + auto threshold = lscb_bps.size() * 2 / 3 + 1; + if (vsp->view_changes.size() >= threshold && !vsp->is_view_changed) { + auto vc_count = 0; + + for (auto const &bp: lscb_bps) { + for (auto const &v: vsp->view_changes) { + if (bp.block_signing_key == v.first) vc_count += 1; + } + } + if (vc_count >= threshold) { + by_view_index.modify(itr, [&](const pbft_view_change_state_ptr &pvsp) { pvsp->is_view_changed = true; }); + } + } + } + + pbft_view_type pbft_database::should_view_change() { + pbft_view_type nv = 0; + auto &by_view_index = view_state_index.get(); + auto itr = by_view_index.begin(); + if (itr == by_view_index.end()) return nv; + + while (itr != by_view_index.end()) { + auto active_bps = lscb_active_producers().producers; + auto vc_count = 0; + auto pvs = (*itr); + + for (auto const &bp: active_bps) { + for (auto const &v: pvs->view_changes) { + if (bp.block_signing_key == v.first) vc_count += 1; + } + } + //if contains self or view_change >= f+1, transit to view_change and send view change + if (vc_count >= active_bps.size() / 3 + 1) { + nv = pvs->view; + break; + } + ++itr; + } + return nv; + } + + pbft_view_change pbft_database::send_and_add_pbft_view_change( + const pbft_view_change &cached_view_change, + const pbft_prepared_certificate &ppc, + const vector &pcc, + pbft_view_type current_view, + pbft_view_type target_view) { + + auto view_change_to_be_cached = pbft_view_change(); + if 
(!cached_view_change.empty()) { + for (auto const &sp : ctrl.my_signature_providers()) { + //sign again, update cache, then emit + auto retry_vc = cached_view_change; + retry_vc.common.timestamp = time_point::now(); + retry_vc.sender_signature = sp.second(retry_vc.digest(chain_id)); + emit(pbft_outgoing_view_change, retry_vc); + } + return view_change_to_be_cached; + } else { + for (auto const &my_sp : ctrl.my_signature_providers()) { + + auto my_lsc = get_stable_checkpoint_by_id(ctrl.last_stable_checkpoint_block_id()); + + pbft_view_change new_vc; + new_vc.current_view=current_view; + new_vc.target_view=target_view; + new_vc.prepared_cert=ppc; + new_vc.committed_certs=pcc; + new_vc.stable_checkpoint=my_lsc; + new_vc.sender_signature = my_sp.second(new_vc.digest(chain_id)); + if (is_valid_view_change(new_vc, my_sp.first)) { + emit(pbft_outgoing_view_change, new_vc); + add_pbft_view_change(new_vc, my_sp.first); + if (view_change_to_be_cached.empty()) view_change_to_be_cached = new_vc; + } + } + return view_change_to_be_cached; + } + } + + bool pbft_database::should_new_view(const pbft_view_type target_view) { + auto &by_view_index = view_state_index.get(); + auto itr = by_view_index.find(target_view); + if (itr == by_view_index.end()) return false; + return (*itr)->is_view_changed; + } + + pbft_view_type pbft_database::get_proposed_new_view_num() { + auto &by_count_and_view_index = view_state_index.get(); + auto itr = by_count_and_view_index.begin(); + if (itr == by_count_and_view_index.end() || !(*itr)->is_view_changed) return 0; + return (*itr)->view; + } + + bool pbft_database::has_new_primary(const public_key_type &pk) { + + if (pk == public_key_type()) return false; + auto sps = ctrl.my_signature_providers(); + auto sp_itr = sps.find(pk); + return sp_itr != sps.end(); + } + + void pbft_database::cleanup_on_new_view() { + view_state_index.clear(); + ctrl.reset_pbft_my_prepare(); + } + + pbft_new_view pbft_database::send_pbft_new_view( + const pbft_view_changed_certificate &vcc, + pbft_view_type current_view) { + + auto primary_key = get_new_view_primary_key(current_view); + if (!has_new_primary(primary_key) || vcc.empty()) return pbft_new_view(); + + //`sp_itr` is not possible to be the end iterator, since it's already been checked in `has_new_primary`. 
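In the remainder of send_pbft_new_view, the new primary folds the collected view changes into the highest prepared certificate, a deduplicated list of committed certificates, and the highest stable checkpoint. A simplified sketch of the "keep the certificate with the largest block number" part of that aggregation, using hypothetical demo_* types rather than the structs defined in pbft_database.hpp:

#include <cassert>
#include <cstdint>
#include <vector>

struct demo_prepared_cert { uint32_t block_num = 0; };               // hypothetical stand-in
struct demo_view_change   { demo_prepared_cert prepared_cert; };     // hypothetical stand-in

demo_prepared_cert highest_prepared(const std::vector<demo_view_change>& view_changes) {
    demo_prepared_cert best;                     // block_num == 0 acts as the "empty" certificate
    for (const auto& vc : view_changes)
        if (vc.prepared_cert.block_num > best.block_num)
            best = vc.prepared_cert;
    return best;
}

int main() {
    std::vector<demo_view_change> vcs(3);
    vcs[0].prepared_cert.block_num = 100;
    vcs[1].prepared_cert.block_num = 104;
    vcs[2].prepared_cert.block_num = 102;
    assert(highest_prepared(vcs).block_num == 104);
}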
+ auto my_sps = ctrl.my_signature_providers(); + auto sp_itr = my_sps.find(primary_key); + + auto highest_ppc = pbft_prepared_certificate(); + auto highest_pcc = vector{}; + auto highest_sc = pbft_stable_checkpoint(); + + for (auto const &vc: vcc.view_changes) { + if (vc.prepared_cert.block_info.block_num() > highest_ppc.block_info.block_num()) { + highest_ppc = vc.prepared_cert; + } + + for (auto const &cc: vc.committed_certs) { + auto p_itr = find_if(highest_pcc.begin(), highest_pcc.end(), + [&](const pbft_committed_certificate &ext) { return ext.block_info.block_id == cc.block_info.block_id; }); + if (p_itr == highest_pcc.end()) highest_pcc.emplace_back(cc); + } + + if (vc.stable_checkpoint.block_info.block_num() > highest_sc.block_info.block_num()) { + highest_sc = vc.stable_checkpoint; + } + } + + pbft_new_view nv; + nv.new_view=current_view; + nv.prepared_cert=highest_ppc; + nv.committed_certs=highest_pcc; + nv.stable_checkpoint=highest_sc; + nv.view_changed_cert=vcc; + nv.sender_signature = sp_itr->second(nv.digest(chain_id)); + emit(pbft_outgoing_new_view, nv); + return nv; + } + + pbft_prepared_certificate pbft_database::generate_prepared_certificate() { + + auto const &by_prepare_and_num_index = pbft_state_index.get(); + auto itr = by_prepare_and_num_index.begin(); + if (itr == by_prepare_and_num_index.end()) return pbft_prepared_certificate(); + pbft_state_ptr psp = *itr; + + auto prepared_block_state = ctrl.fetch_block_state_by_id(psp->block_id); + if (!prepared_block_state) return pbft_prepared_certificate(); + + auto as = prepared_block_state->active_schedule.producers; + if (psp->is_prepared && psp->block_num > ctrl.last_irreversible_block_num()) { + auto prepares = psp->prepares; + auto valid_prepares = vector{}; + + flat_map prepare_count; + flat_map> prepare_msg; + + for (auto const &pre: prepares) { + if (prepare_count.find(pre.first.first) == prepare_count.end()) prepare_count[pre.first.first] = 0; + prepare_msg[pre.first.first].emplace_back(pre.second); + } + + for (auto const &bp: as) { + for (auto const &pp: prepares) { + if (bp.block_signing_key == pp.first.second) prepare_count[pp.first.first] += 1; + } + } + + auto bp_threshold = as.size() * 2 / 3 + 1; + for (auto const &e: prepare_count) { + if (e.second >= bp_threshold) { + valid_prepares = prepare_msg[e.first]; + } + } + + if (valid_prepares.empty()) return pbft_prepared_certificate(); + + pbft_prepared_certificate pc; + pc.block_info={psp->block_id}; pc.prepares=valid_prepares; pc.pre_prepares.emplace(psp->block_id); + for (auto const &p: valid_prepares) { + auto bid = p.block_info.block_id; + while (bid != psp->block_id) { + pc.pre_prepares.emplace(bid); + bid = ctrl.fetch_block_state_by_id(bid)->prev(); + } + } + return pc; + } else return pbft_prepared_certificate(); + } + + vector pbft_database::generate_committed_certificate() { + + auto pcc = vector{}; + + auto const &by_commit_and_num_index = pbft_state_index.get(); + auto itr = by_commit_and_num_index.begin(); + if (itr == by_commit_and_num_index.end()) return pcc; + + pbft_state_ptr psp = *itr; + + if (!psp->is_committed) return pcc; + + auto highest_committed_block_num = psp->block_num; + + vector ccb; + + auto lib_num = ctrl.last_irreversible_block_num(); + + //adding my highest committed cert. 
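The selection loop in send_pbft_new_view above reduces to taking the highest prepared certificate and stable checkpoint carried by any view change, plus the union of their committed certificates de-duplicated by block id. A trimmed-down sketch with stand-in types (illustrative only; the real code works on the pbft_* certificate types):

    #include <algorithm>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Trimmed-down stand-ins for the real certificate types (illustrative only).
    struct cert     { uint32_t block_num = 0; std::string block_id; };
    struct view_chg { cert prepared; std::vector<cert> committed; cert stable_checkpoint; };

    // Mirrors the selection in send_pbft_new_view: highest prepared certificate,
    // highest stable checkpoint, and the union of committed certificates by block id.
    void fold_view_changes(const std::vector<view_chg>& vcs, cert& highest_prepared,
                           std::vector<cert>& all_committed, cert& highest_scp) {
        for (const auto& vc : vcs) {
            if (vc.prepared.block_num > highest_prepared.block_num) highest_prepared = vc.prepared;
            if (vc.stable_checkpoint.block_num > highest_scp.block_num) highest_scp = vc.stable_checkpoint;
            for (const auto& cc : vc.committed) {
                auto dup = std::find_if(all_committed.begin(), all_committed.end(),
                                        [&](const cert& e) { return e.block_id == cc.block_id; });
                if (dup == all_committed.end()) all_committed.push_back(cc);
            }
        }
    }
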
+ auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + if ( highest_committed_block_num <= lib_num && highest_committed_block_num > lscb_num ) { + ccb.emplace_back(highest_committed_block_num); + } + + auto watermarks = get_updated_watermarks(); + for (auto& watermark : watermarks) { + //adding committed cert on every water mark. + if (watermark < lib_num && watermark > lscb_num) { + ccb.emplace_back(watermark); + } + } + + auto const &by_id_index = pbft_state_index.get(); + + std::sort(ccb.begin(), ccb.end()); + pcc.reserve(ccb.size()); + for (auto const &committed_block_num: ccb) { + auto cbs = ctrl.fetch_block_state_by_number(committed_block_num); + if (!cbs) return pcc; + + auto it = by_id_index.find(cbs->id); + if (it == by_id_index.end() || !(*it)->is_committed) { + return pcc; + } + + auto as = cbs->active_schedule.producers; + + auto commits = (*it)->commits; + auto valid_commits = vector{}; + + flat_map commit_count; + flat_map> commit_msg; + + for (auto const &com: commits) { + if (commit_count.find(com.first.first) == commit_count.end()) commit_count[com.first.first] = 0; + commit_msg[com.first.first].emplace_back(com.second); + } + + for (auto const &bp: as) { + for (auto const &cc: commits) { + if (bp.block_signing_key == cc.first.second) commit_count[cc.first.first] += 1; + } + } + + auto bp_threshold = as.size() * 2 / 3 + 1; + for (auto const &e: commit_count) { + if (e.second >= bp_threshold) { + valid_commits = commit_msg[e.first]; + } + } + + if (valid_commits.empty()) return pcc; + + pbft_committed_certificate cc; + cc.block_info={cbs->id}; cc.commits=valid_commits; + pcc.emplace_back(cc); + } + return pcc; + } + + pbft_view_changed_certificate pbft_database::generate_view_changed_certificate(pbft_view_type target_view) { + + auto pvcc = pbft_view_changed_certificate(); + + auto &by_view_index = view_state_index.get(); + auto itr = by_view_index.find(target_view); + if (itr == by_view_index.end()) return pvcc; + + auto pvs = *itr; + + if (pvs->is_view_changed) { + + pvcc.target_view=pvs->view; + pvcc.view_changes.reserve(pvs->view_changes.size()); + for(auto & view_change : pvs->view_changes) { + pvcc.view_changes.emplace_back( view_change.second ); + } + return pvcc; + } else return pvcc; + } + + + + bool pbft_database::is_valid_prepared_certificate(const pbft_prepared_certificate &certificate, bool add_to_pbft_db) { + // an empty certificate is valid since it acts as a null digest in pbft. + if (certificate.empty()) return true; + // a certificate under lscb (no longer in fork_db) is also treated as null. 
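generate_prepared_certificate and generate_committed_certificate share one counting pattern: messages are bucketed by view, only signatures from producers in the block's active schedule are counted, at most once per producer per view, and a bucket becomes usable at the 2/3 + 1 threshold. A condensed sketch of that pattern (the key and message types are stand-ins, not the patch's own types):

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    using view_type = uint32_t;
    using key_type  = std::string;   // stand-in for public_key_type

    // Signatures are keyed by (view, signer); only signers present in the active
    // schedule are counted, and a view is usable once it reaches 2/3 + 1 of them.
    std::set<view_type> views_reaching_quorum(
            const std::map<std::pair<view_type, key_type>, int>& msgs,   // (view, signer) -> message
            const std::vector<key_type>& active_schedule) {
        std::map<view_type, std::size_t> count;
        for (const auto& bp : active_schedule)
            for (const auto& m : msgs)
                if (m.first.second == bp) ++count[m.first.first];

        std::set<view_type> result;
        const std::size_t threshold = active_schedule.size() * 2 / 3 + 1;
        for (const auto& e : count)
            if (e.second >= threshold) result.insert(e.first);
        return result;
    }
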
+ if (certificate.block_info.block_num() <= ctrl.last_stable_checkpoint_block_num()) return true; + + auto prepares = certificate.prepares; + auto prepares_metadata = vector>{}; + prepares_metadata.reserve(prepares.size()); + + for (auto &p : prepares) { + auto pmm = pbft_message_metadata(p, chain_id); + prepares_metadata.emplace_back(pmm); + if (!is_valid_prepare(p, pmm.sender_key)) return false; + if (add_to_pbft_db) add_pbft_prepare(p, pmm.sender_key); + } + + auto cert_id = certificate.block_info.block_id; + auto cert_bs = ctrl.fetch_block_state_by_id(cert_id); + auto producer_schedule = lscb_active_producers(); + if (certificate.block_info.block_num() > 0 && cert_bs) { + producer_schedule = cert_bs->active_schedule; + } + auto bp_threshold = producer_schedule.producers.size() * 2 / 3 + 1; + + flat_map prepare_count; + + for (auto const &pm: prepares_metadata) { + if (prepare_count.find(pm.msg.view) == prepare_count.end()) prepare_count[pm.msg.view] = 0; + } + + for (auto const &bp: producer_schedule.producers) { + for (auto const &pm: prepares_metadata) { + if (bp.block_signing_key == pm.sender_key) prepare_count[pm.msg.view] += 1; + } + } + + auto should_prepared = false; + + for (auto const &e: prepare_count) { + if (e.second >= bp_threshold) { + should_prepared = true; + } + } + + if (!should_prepared) return false; + + //validate prepare + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + auto non_fork_bp_count = 0; + vector prepare_infos; + prepare_infos.reserve(certificate.prepares.size()); + for (auto const &p : certificate.prepares) { + //only search in fork db + if (p.block_info.block_num() <= lscb_num) { + ++non_fork_bp_count; + } else { + prepare_infos.emplace_back(p.block_info); + } + } + return is_valid_longest_fork(certificate.block_info, prepare_infos, bp_threshold, non_fork_bp_count); + } + + bool pbft_database::is_valid_committed_certificate(const pbft_committed_certificate &certificate, bool add_to_pbft_db) { + // an empty certificate is valid since it acts as a null digest in pbft. + if (certificate.empty()) return true; + // a certificate under lscb (no longer in fork_db) is also treated as null. 
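The fork check invoked above, is_valid_longest_fork (defined later in this file), accepts a certificate only when the longest fork assembled from its prepares or commits, together with the messages that already fell at or below the last stable checkpoint, reaches the threshold, and that fork bottoms out at the certified block. Roughly (simplified stand-in types, illustrative only):

    #include <cstddef>
    #include <string>
    #include <vector>

    // One fork, ordered the way fetch_first_fork_from returns it: from the highest
    // block down to the lowest block of that fork (block ids only, illustrative).
    using fork_t = std::vector<std::string>;

    // Simplified shape of the final check: the longest fork plus the messages that
    // fell at or below the last stable checkpoint must reach the threshold, and the
    // longest fork must end at the certified block.
    bool longest_fork_supports(const std::vector<fork_t>& forks, std::size_t non_fork_count,
                               std::size_t threshold, const std::string& certified_block_id) {
        const fork_t* longest = nullptr;
        for (const auto& f : forks)
            if (!longest || f.size() > longest->size()) longest = &f;

        const std::size_t fork_size = longest ? longest->size() : 0;
        if (fork_size + non_fork_count < threshold) return false;
        if (fork_size == 0) return true;   // all supporting messages were at or below the checkpoint
        return longest->back() == certified_block_id;
    }
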
+ if (certificate.block_info.block_num() <= ctrl.last_stable_checkpoint_block_num()) return true; + + auto commits = certificate.commits; + auto commits_metadata = vector>{}; + commits_metadata.reserve(commits.size()); + + for (auto &c : commits) { + auto pmm = pbft_message_metadata(c, chain_id); + commits_metadata.emplace_back(pmm); + if (!is_valid_commit(c, pmm.sender_key)) return false; + if (add_to_pbft_db) add_pbft_commit(c, pmm.sender_key); + } + + if (add_to_pbft_db && should_committed()) commit_local(); + + auto cert_id = certificate.block_info.block_id; + auto cert_bs = ctrl.fetch_block_state_by_id(cert_id); + auto producer_schedule = lscb_active_producers(); + if (certificate.block_info.block_num() > 0 && cert_bs) { + producer_schedule = cert_bs->active_schedule; + } + auto bp_threshold = producer_schedule.producers.size() * 2 / 3 + 1; + + flat_map commit_count; + + for (auto const &cm: commits_metadata) { + if (commit_count.find(cm.msg.view) == commit_count.end()) commit_count[cm.msg.view] = 0; + } + + for (auto const &bp: producer_schedule.producers) { + for (auto const &cm: commits_metadata) { + if (bp.block_signing_key == cm.sender_key) commit_count[cm.msg.view] += 1; + } + } + + auto should_committed = false; + + for (auto const &e: commit_count) { + if (e.second >= bp_threshold) { + should_committed = true; + } + } + + if (!should_committed) return false; + + //validate commit + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + auto non_fork_bp_count = 0; + vector commit_infos; + commit_infos.reserve(certificate.commits.size()); + for (auto const &c : certificate.commits) { + //only search in fork db + if (c.block_info.block_num() <= lscb_num) { + ++non_fork_bp_count; + } else { + commit_infos.emplace_back(c.block_info); + } + } + return is_valid_longest_fork(certificate.block_info, commit_infos, bp_threshold, non_fork_bp_count); + } + + bool pbft_database::is_valid_view_change(const pbft_view_change &vc, const public_key_type &pk) { + + return should_recv_pbft_msg(pk); + // No need to check prepared cert and stable checkpoint, until generate or validate a new view msg + } + + + void pbft_database::validate_new_view(const pbft_new_view &nv, const public_key_type &pk) { + + EOS_ASSERT(pk == get_new_view_primary_key(nv.new_view), pbft_exception, + "new view is not signed with expected key"); + + EOS_ASSERT(is_valid_prepared_certificate(nv.prepared_cert, true), pbft_exception, + "bad prepared certificate: ${pc}", ("pc", nv.prepared_cert)); + + EOS_ASSERT(is_valid_stable_checkpoint(nv.stable_checkpoint, true), pbft_exception, + "bad stable checkpoint: ${scp}", ("scp", nv.stable_checkpoint)); + + auto committed_certs = nv.committed_certs; + std::sort(committed_certs.begin(), committed_certs.end()); + for (auto const &c: committed_certs) { + EOS_ASSERT(is_valid_committed_certificate(c, true), pbft_exception, + "bad committed certificate: ${cc}", ("cc", c)); + } + + EOS_ASSERT(nv.view_changed_cert.target_view == nv.new_view, pbft_exception, "target view not match"); + + vector lscb_producers; + lscb_producers.reserve(lscb_active_producers().producers.size()); + for (auto const &bp: lscb_active_producers().producers) { + lscb_producers.emplace_back(bp.block_signing_key); + } + auto schedule_threshold = lscb_producers.size() * 2 / 3 + 1; + + auto view_changes = nv.view_changed_cert.view_changes; + auto view_changes_metadata = vector>{}; + view_changes_metadata.reserve(view_changes.size()); + + vector view_change_producers; + 
view_change_producers.reserve(view_changes.size()); + for (auto &vc: view_changes) { + auto pmm = pbft_message_metadata(vc, chain_id); + view_changes_metadata.emplace_back(pmm); + if (is_valid_view_change(vc, pmm.sender_key)) { + add_pbft_view_change(vc, pmm.sender_key); + view_change_producers.emplace_back(pmm.sender_key); + } + } + + vector intersection; + + std::sort(lscb_producers.begin(), lscb_producers.end()); + std::sort(view_change_producers.begin(), view_change_producers.end()); + std::set_intersection(lscb_producers.begin(), lscb_producers.end(), + view_change_producers.begin(), view_change_producers.end(), + back_inserter(intersection)); + + EOS_ASSERT(intersection.size() >= schedule_threshold, pbft_exception, "view changes count not enough"); + + EOS_ASSERT(should_new_view(nv.new_view), pbft_exception, "should not enter new view: ${nv}", + ("nv", nv.new_view)); + + auto highest_ppc = pbft_prepared_certificate(); + auto highest_pcc = vector{}; + auto highest_scp = pbft_stable_checkpoint(); + + for (auto const &vc: nv.view_changed_cert.view_changes) { + if (vc.prepared_cert.block_info.block_num() > highest_ppc.block_info.block_num() + && is_valid_prepared_certificate(vc.prepared_cert)) { + highest_ppc = vc.prepared_cert; + } + + for (auto const &cc: vc.committed_certs) { + if (is_valid_committed_certificate(cc)) { + auto p_itr = find_if(highest_pcc.begin(), highest_pcc.end(), + [&](const pbft_committed_certificate &ext) { + return ext.block_info.block_id == cc.block_info.block_id; + }); + if (p_itr == highest_pcc.end()) highest_pcc.emplace_back(cc); + } + } + + if (vc.stable_checkpoint.block_info.block_num() > highest_scp.block_info.block_num() + && is_valid_stable_checkpoint(vc.stable_checkpoint, true)) { + highest_scp = vc.stable_checkpoint; + } + } + + EOS_ASSERT(highest_ppc.block_info == nv.prepared_cert.block_info, pbft_exception, + "prepared certificate does not match, should be ${hppc} but ${pc} given", + ("hppc", highest_ppc)("pc", nv.prepared_cert)); + + std::sort(highest_pcc.begin(), highest_pcc.end()); + EOS_ASSERT(highest_pcc.size() == committed_certs.size(), pbft_exception, + "wrong committed certificates size"); + for (auto i = 0; i < committed_certs.size(); ++i) { + EOS_ASSERT(highest_pcc[i].block_info == committed_certs[i].block_info, pbft_exception, + "committed certificate does not match, should be ${hpcc} but ${cc} given", + ("hpcc", highest_pcc[i])("cc", committed_certs[i])); + } + + EOS_ASSERT(highest_scp.block_info == nv.stable_checkpoint.block_info, pbft_exception, + "stable checkpoint does not match, should be ${hscp} but ${scp} given", + ("hpcc", highest_scp)("pc", nv.stable_checkpoint)); + } + + bool pbft_database::should_stop_view_change(const pbft_view_change &vc) { + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + auto vc_lscb = vc.stable_checkpoint.block_info.block_num(); + return vc_lscb > 0 && lscb_num > vc_lscb; + } + + vector> pbft_database::fetch_fork_from(vector &block_infos) { + + vector> result; + if (block_infos.empty()) { + return result; + } + if (block_infos.size() == 1) { + result.emplace_back(initializer_list{block_infos.front()}); + return result; + } + + sort(block_infos.begin(), block_infos.end(), + [](const block_info_type &a, const block_info_type &b) -> bool { return a.block_num() > b.block_num(); }); + + while (!block_infos.empty()) { + auto fork = fetch_first_fork_from(block_infos); + if (!fork.empty()) { + result.emplace_back(fork); + } + } + return result; + } + + vector pbft_database::fetch_first_fork_from(vector 
&bi) { + vector result; + if (bi.empty()) { + return result; + } + if (bi.size() == 1) { + result.emplace_back(bi.front()); + bi.clear(); + return result; + } + //bi should be sorted desc + auto high = bi.front().block_num(); + auto low = bi.back().block_num(); + + auto id = bi.front().block_id; + auto num = bi.front().block_num(); + while (num <= high && num >= low && !bi.empty()) { + auto bs = ctrl.fetch_block_state_by_id(id); + + for (auto it = bi.begin(); it != bi.end();) { + if (it->block_id == id) { + if (bs) { + //add to result only if b exist + result.emplace_back((*it)); + } + it = bi.erase(it); + } else { + it++; + } + } + if (bs) { + id = bs->prev(); + num--; + } else { + break; + } + } + + return result; + } + + bool pbft_database::is_valid_longest_fork(const block_info_type &bi, vector block_infos, unsigned long threshold, unsigned long non_fork_bp_count) { + + auto forks = fetch_fork_from(block_infos); + vector longest_fork; + for (auto const &f : forks) { + if (f.size() > longest_fork.size()) { + longest_fork = f; + } + } + if (longest_fork.size() + non_fork_bp_count < threshold) return false; + + if (longest_fork.empty()) return true; + + auto calculated_block_info = longest_fork.back(); + + return bi.block_id == calculated_block_info.block_id; + } + + pbft_stable_checkpoint pbft_database::fetch_stable_checkpoint_from_blk_extn(const signed_block_ptr &b) { + try { + if (b) { + auto &extn = b->block_extensions; + + for (auto it = extn.begin(); it != extn.end();) { + if (it->first == static_cast(block_extension_type::pbft_stable_checkpoint)) { + auto scp_ds = it->second; + fc::datastream ds(scp_ds.data(), scp_ds.size()); + + pbft_stable_checkpoint scp; + fc::raw::unpack(ds, scp); + + if (is_valid_stable_checkpoint(scp)) { + return scp; + } else { + it = extn.erase(it); + } + } else { + it++; + } + } + } + } catch(...) 
{ + elog("no stable checkpoints found in the block extension"); + } + return pbft_stable_checkpoint(); + } + + pbft_stable_checkpoint pbft_database::get_stable_checkpoint_by_id(const block_id_type &block_id, bool incl_blk_extn ) { + auto const &by_block = checkpoint_index.get(); + auto itr = by_block.find(block_id); + if (itr == by_block.end()) { + if (incl_blk_extn) { + auto blk = ctrl.fetch_block_by_id(block_id); + return fetch_stable_checkpoint_from_blk_extn(blk); + } + return pbft_stable_checkpoint(); + } + + auto cpp = *itr; + + if (cpp->is_stable) { + pbft_stable_checkpoint psc; + psc.block_info={cpp->block_id}; + psc.checkpoints.reserve(cpp->checkpoints.size()); + for (auto & checkpoint : cpp->checkpoints) { + psc.checkpoints.emplace_back(checkpoint.second) ; + } + return psc; + } else return pbft_stable_checkpoint(); + } + + block_info_type pbft_database::cal_pending_stable_checkpoint() const { + + auto pending_scb_num = ctrl.last_stable_checkpoint_block_num(); + auto pending_scb_info = block_info_type{ctrl.last_stable_checkpoint_block_id()}; + + auto const &by_blk_num = checkpoint_index.get(); + auto itr = by_blk_num.lower_bound(pending_scb_num); + if (itr == by_blk_num.end()) return pending_scb_info; + + while (itr != by_blk_num.end()) { + if (auto bs = ctrl.fetch_block_state_by_id((*itr)->block_id)) { + auto scb = ctrl.fetch_block_state_by_number(pending_scb_num); + + auto head_checkpoint_schedule = bs->active_schedule; + + producer_schedule_type current_schedule; + producer_schedule_type new_schedule; + + if (pending_scb_num == 0) { + auto const &ucb = ctrl.get_upgrade_properties().upgrade_complete_block_num; + if (ucb == 0) { + current_schedule = ctrl.initial_schedule(); + new_schedule = ctrl.initial_schedule(); + } else { + auto ucb_state = ctrl.fetch_block_state_by_number(ucb); + if (!ucb_state) return pending_scb_info; + current_schedule = ucb_state->active_schedule; + new_schedule = ucb_state->pending_schedule; + } + } else if (scb) { + current_schedule = scb->active_schedule; + new_schedule = scb->pending_schedule; + } else { + return pending_scb_info; + } + + if ((*itr)->is_stable) { + if (head_checkpoint_schedule == current_schedule || head_checkpoint_schedule == new_schedule) { + pending_scb_info = block_info_type{(*itr)->block_id}; + pending_scb_num = pending_scb_info.block_num(); + } else { + return pending_scb_info; + } + } + } + ++itr; + } + return pending_scb_info; + } + + vector pbft_database::generate_and_add_pbft_checkpoint() { + auto checkpoint = [&](const block_num_type &in) { + auto const& ucb = ctrl.get_upgrade_properties().upgrade_complete_block_num; + if (!ctrl.is_pbft_enabled()) return false; + if (in <= ucb) return false; + auto watermarks = get_updated_watermarks(); + return in == ucb + 1 // checkpoint on first pbft block; + || in % pbft_checkpoint_granularity == 1 // checkpoint on every 100 block; + || std::find(watermarks.begin(), watermarks.end(), in) != watermarks.end(); // checkpoint on bp schedule change; + }; + + auto new_pc = vector{}; + + auto const &by_commit_and_num_index = pbft_state_index.get(); + auto itr = by_commit_and_num_index.begin(); + if (itr == by_commit_and_num_index.end() || !(*itr)->is_committed) return new_pc; + + pbft_state_ptr psp = (*itr); + + flat_map pending_checkpoint_block_num; // block_height and retry_flag + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + for (auto i = psp->block_num; i > lscb_num && i > 1; --i) { + if (checkpoint(i)) { + auto &by_block = checkpoint_index.get(); + + if (auto bs = 
ctrl.fork_db().get_block_in_current_chain_by_num(i)) { + auto c_itr = by_block.find(bs->id); + if (c_itr == by_block.end()) { + pending_checkpoint_block_num[i] = false; + } else { + auto checkpoints = (*c_itr)->checkpoints; + for (auto const &my_sp : ctrl.my_signature_providers()) { + if (checkpoints.find(my_sp.first) != checkpoints.end() && !(*c_itr)->is_stable) { + pending_checkpoint_block_num[i] = true; //retry sending at this time. + } + } + if (pending_checkpoint_block_num.find(i) == pending_checkpoint_block_num.end()) { + pending_checkpoint_block_num[i] = false; + } + } + } + } + } + auto &by_block = checkpoint_index.get(); + + if (!pending_checkpoint_block_num.empty()) { + std::sort(pending_checkpoint_block_num.begin(), pending_checkpoint_block_num.end()); + for (auto& bnum_and_retry: pending_checkpoint_block_num) { + if (auto bs = ctrl.fork_db().get_block_in_current_chain_by_num(bnum_and_retry.first)) { + for (auto const &my_sp : ctrl.my_signature_providers()) { + pbft_checkpoint cp; + cp.block_info={bs->id}; + cp.sender_signature = my_sp.second(cp.digest(chain_id)); + if (!bnum_and_retry.second && is_valid_checkpoint(cp, my_sp.first)) { //first time sending this checkpoint + add_pbft_checkpoint(cp, my_sp.first); + } + new_pc.emplace_back(cp); + } + } + } + } else if (lscb_num > 0) { //retry sending my lscb + for (auto const &my_sp : ctrl.my_signature_providers()) { + pbft_checkpoint cp; + cp.block_info={ctrl.last_stable_checkpoint_block_id()}; + cp.sender_signature = my_sp.second(cp.digest(chain_id)); + new_pc.emplace_back(cp); + } + } + return new_pc; + } + + void pbft_database::add_pbft_checkpoint(pbft_checkpoint &cp, const public_key_type &pk) { + + auto cp_block_state = ctrl.fetch_block_state_by_id(cp.block_info.block_id); + if (!cp_block_state) return; + + auto &by_block = checkpoint_index.get(); + auto itr = by_block.find(cp.block_info.block_id); + if (itr == by_block.end()) { + flat_map checkpoints; + checkpoints[pk] = cp; + pbft_checkpoint_state cs; + cs.block_id = cp.block_info.block_id; + cs.block_num = cp.block_info.block_num(); + cs.checkpoints = checkpoints; + auto csp = std::make_shared(move(cs)); + checkpoint_index.insert(csp); + itr = by_block.find(cp.block_info.block_id); + } else { + auto csp = (*itr); + auto checkpoints = csp->checkpoints; + if (checkpoints.find(pk) == checkpoints.end()) { + by_block.modify(itr, [&](const pbft_checkpoint_state_ptr &pcp) { + csp->checkpoints[pk] = cp; + }); + } + } + + auto csp = (*itr); + auto active_bps = cp_block_state->active_schedule.producers; + auto threshold = active_bps.size() * 2 / 3 + 1; + if (csp->checkpoints.size() >= threshold && !csp->is_stable) { + auto cp_count = 0; + + for (auto const &bp: active_bps) { + for (auto const &c: csp->checkpoints) { + if (bp.block_signing_key == c.first) cp_count += 1; + } + } + if (cp_count >= threshold) { + by_block.modify(itr, [&](const pbft_checkpoint_state_ptr &pcp) { csp->is_stable = true; }); + auto id = csp->block_id; + auto blk = ctrl.fetch_block_by_id(id); + + if (blk && (blk->block_extensions.empty() || blk->block_extensions.back().first != static_cast(block_extension_type::pbft_stable_checkpoint))) { + auto scp = get_stable_checkpoint_by_id(id); + auto scp_size = fc::raw::pack_size(scp); + + auto buffer = std::make_shared>(scp_size); + fc::datastream ds( buffer->data(), scp_size); + fc::raw::pack( ds, scp ); + + blk->block_extensions.emplace_back(); + auto &extension = blk->block_extensions.back(); + extension.first = 
static_cast(block_extension_type::pbft_stable_checkpoint ); + extension.second.resize(scp_size); + std::copy(buffer->begin(),buffer->end(), extension.second.data()); + } + } + } + } + + void pbft_database::send_pbft_checkpoint() { + auto cps_to_send = generate_and_add_pbft_checkpoint(); + for (auto const &cp: cps_to_send) { + emit(pbft_outgoing_checkpoint, cp); + } + } + + void pbft_database::checkpoint_local() { + auto pending_scb_info = cal_pending_stable_checkpoint(); + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + auto pending_num = pending_scb_info.block_num(); + auto pending_id = pending_scb_info.block_id; + if (pending_num > lscb_num) { + ctrl.set_pbft_latest_checkpoint(pending_id); + if (ctrl.last_irreversible_block_num() < pending_num) ctrl.pbft_commit_local(pending_id); + auto &by_block_id_index = pbft_state_index.get(); + auto pitr = by_block_id_index.find(pending_id); + if (pitr != by_block_id_index.end()) { + prune(*pitr); + } + } + auto &bni = checkpoint_index.get(); + auto oldest = bni.begin(); + if ( oldest != bni.end() + && (*oldest)->is_stable + && (*oldest)->block_num < lscb_num - oldest_stable_checkpoint ) { + prune(*oldest); + } + } + + bool pbft_database::is_valid_checkpoint(const pbft_checkpoint &cp, const public_key_type &pk) { + + if (cp.block_info.block_num() > ctrl.head_block_num() || cp.block_info.block_num() <= ctrl.last_stable_checkpoint_block_num()) return false; + + if (auto bs = ctrl.fetch_block_state_by_id(cp.block_info.block_id)) { + auto active_bps = bs->active_schedule.producers; + for (auto const &bp: active_bps) { + if (bp.block_signing_key == pk) return true; + } + } + return false; + } + + bool pbft_database::is_valid_stable_checkpoint(const pbft_stable_checkpoint &scp, bool add_to_pbft_db) { + if (scp.block_info.block_num() <= ctrl.last_stable_checkpoint_block_num()) + // the stable checkpoint is way behind lib, no way getting the block state, + // it will not be applied nor saved, thus considered safe. 
+ return true; + + auto checkpoints = scp.checkpoints; + auto checkpoints_metadata = vector>{}; + checkpoints_metadata.reserve(checkpoints.size()); + + for (auto &cp : checkpoints) { + auto pmm = pbft_message_metadata(cp, chain_id); + checkpoints_metadata.emplace_back(pmm); + if (cp.block_info != scp.block_info || !is_valid_checkpoint(cp, pmm.sender_key)) return false; + if (add_to_pbft_db) add_pbft_checkpoint(cp, pmm.sender_key); + } + + if (add_to_pbft_db) checkpoint_local(); + + + if (auto bs = ctrl.fetch_block_state_by_number(scp.block_info.block_num())) { + auto as = bs->active_schedule; + auto cp_count = 0; + for (auto const &bp: as.producers) { + for (auto const &cpm: checkpoints_metadata) { + if (bp.block_signing_key == cpm.sender_key) cp_count += 1; + } + } + return cp_count >= as.producers.size() * 2 / 3 + 1; + } + return false; + + } + + bool pbft_database::should_send_pbft_msg() { + + auto schedules = get_updated_fork_schedules(); + for (auto const &bp: schedules) { + for (auto const &sp: ctrl.my_signature_providers()) { + if (bp.first == sp.first) return true; + } + } + return false; + } + + bool pbft_database::should_recv_pbft_msg(const public_key_type &pub_key) { + + auto schedules = get_updated_fork_schedules(); + for (auto const &bp: schedules) { + if (bp.first == pub_key) return true; + } + return false; + } + + public_key_type pbft_database::get_new_view_primary_key(const pbft_view_type target_view) { + + auto active_bps = lscb_active_producers().producers; + if (active_bps.empty()) return public_key_type(); + + return active_bps[target_view % active_bps.size()].block_signing_key; + } + + producer_schedule_type pbft_database::lscb_active_producers() const { + auto num = ctrl.last_stable_checkpoint_block_num(); + + if (num == 0) { + auto const &ucb = ctrl.get_upgrade_properties().upgrade_complete_block_num; + if (ucb == 0) return ctrl.initial_schedule(); + num = ucb; + } + + if (auto bs = ctrl.fetch_block_state_by_number(num)) { + if (bs->pending_schedule.producers.empty()) return bs->active_schedule; + return bs->pending_schedule; + } + return ctrl.initial_schedule(); + } + + block_num_type pbft_database::get_current_pbft_watermark() { + auto lib = ctrl.last_irreversible_block_num(); + + auto watermarks = get_updated_watermarks(); + if (watermarks.empty()) return 0; + + auto cw = std::upper_bound(watermarks.begin(), watermarks.end(), lib); + + if (cw == watermarks.end() || *cw <= lib) return 0; + + return *cw; + } + + void pbft_database::update_fork_schedules() { + + auto vector_minus = [&](vector &v1, vector &v2) + { + vector diff; + std::set_difference(v1.begin(), v1.end(), v2.begin(), v2.end(), + std::inserter(diff, diff.begin())); + return diff; + }; + + auto watermarks = ctrl.get_watermarks(); + + if (watermarks != prepare_watermarks) { + auto prev = prepare_watermarks; + prepare_watermarks = watermarks; + std::sort(prepare_watermarks.begin(), prepare_watermarks.end()); + auto added = vector_minus(prepare_watermarks, prev); + auto removed = vector_minus(prev, prepare_watermarks); + for (auto i: added) { + if (auto bs = ctrl.fetch_block_state_by_number(i)) { + auto as = bs->active_schedule.producers; + for (auto &bp: as) { + auto key = bp.block_signing_key; + if (fork_schedules.find(key) == fork_schedules.end()) { + fork_schedules[key] = i; + } else if ( i > fork_schedules[key]) { + fork_schedules[key] = i; + } + } + } + } + if (!removed.empty()) { + auto removed_num = *max_element(removed.begin(), removed.end()); + for (auto itr = fork_schedules.begin(); itr != 
fork_schedules.end();) { + if ((*itr).second <= removed_num) { + itr = fork_schedules.erase(itr); + } else { + ++itr; + } + } + } + } + + auto lscb_bps = lscb_active_producers().producers; + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + for (auto &bp: lscb_bps) { + if (fork_schedules.find(bp.block_signing_key) == fork_schedules.end() + || fork_schedules[bp.block_signing_key] < lscb_num) { + fork_schedules[bp.block_signing_key] = lscb_num; + } + } + } + + vector& pbft_database::get_updated_watermarks() { + update_fork_schedules(); + return prepare_watermarks; + } + + flat_map& pbft_database::get_updated_fork_schedules() { + update_fork_schedules(); + return fork_schedules; + } + + bool pbft_database::is_less_than_high_watermark(const block_num_type &bnum) { + auto current_watermark = get_current_pbft_watermark(); + return current_watermark == 0 || bnum <= current_watermark; + } + + pbft_state_ptr pbft_database::get_pbft_state_by_id(const block_id_type& id) const { + + auto &by_block_id_index = pbft_state_index.get(); + auto itr = by_block_id_index.find(id); + + if (itr != by_block_id_index.end()) return (*itr); + + return pbft_state_ptr(); + } + + vector pbft_database::get_checkpoints_by_num(const block_num_type& num) const { + auto results = vector{}; + auto &by_num_index = checkpoint_index.get(); + + auto pitr = by_num_index.lower_bound( num ); + while(pitr != by_num_index.end() && (*pitr)->block_num == num ) { + results.emplace_back(*(*pitr)); + ++pitr; + } + + return results; + } + + pbft_view_change_state_ptr pbft_database::get_view_changes_by_target_view(const pbft_view_type& tv) const { + auto &by_view_index = view_state_index.get(); + auto itr = by_view_index.find(tv); + + if (itr != by_view_index.end()) return (*itr); + + return pbft_view_change_state_ptr(); + } + + vector pbft_database::get_pbft_watermarks() const { + return prepare_watermarks; + } + + flat_map pbft_database::get_pbft_fork_schedules() const { + return fork_schedules; + } + + void pbft_database::set(const pbft_state_ptr& s) { + auto result = pbft_state_index.insert(s); + + EOS_ASSERT(result.second, pbft_exception, "unable to insert pbft state, duplicate state detected"); + } + + void pbft_database::set(const pbft_checkpoint_state_ptr& s) { + auto result = checkpoint_index.insert(s); + + EOS_ASSERT(result.second, pbft_exception, "unable to insert pbft checkpoint index, duplicate state detected"); + } + + void pbft_database::prune(const pbft_state_ptr &h) { + auto num = h->block_num; + + auto &by_bn = pbft_state_index.get(); + auto bni = by_bn.begin(); + while (bni != by_bn.end() && (*bni)->block_num < num) { + prune(*bni); + bni = by_bn.begin(); + } + + auto itr = pbft_state_index.find(h->block_id); + if (itr != pbft_state_index.end()) { + pbft_state_index.erase(itr); + } + } + + void pbft_database::prune(const pbft_checkpoint_state_ptr &h) { + auto num = h->block_num; + + auto &by_bn = checkpoint_index.get(); + auto bni = by_bn.begin(); + while (bni != by_bn.end() && (*bni)->block_num < num) { + prune(*bni); + bni = by_bn.begin(); + } + + auto itr = checkpoint_index.find(h->block_id); + if (itr != checkpoint_index.end()) { + checkpoint_index.erase(itr); + } + } + + template + void pbft_database::emit(const Signal &s, Arg &&a) { + try { + s(std::forward(a)); + } catch (boost::interprocess::bad_alloc &e) { + wlog("bad alloc"); + throw e; + } catch (controller_emit_signal_exception &e) { + wlog("${details}", ("details", e.to_detail_string())); + throw e; + } catch (fc::exception &e) { + wlog("${details}", 
("details", e.to_detail_string())); + } catch (...) { + wlog("signal handler threw exception"); + } + } + } +} \ No newline at end of file diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index d5da8ca279b..e2c8cd56c3b 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -124,7 +124,8 @@ namespace bacc = boost::accumulators; else { struct itimerval enable = {{0, 0}, {0, (int)x.count()-deadline_timer_verification.timer_overhead}}; expired = 0; - expired |= !!setitimer(ITIMER_REAL, &enable, NULL); + if(setitimer(ITIMER_REAL, &enable, NULL)) + expired = 1; } } diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 7d3553e379b..a7993d40a27 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -193,6 +193,23 @@ class privileged_api : public context_aware_api { }); } + void set_upgrade_parameters_packed( array_ptr packed_upgrade_parameters, size_t datalen) { + datastream ds( packed_upgrade_parameters, datalen ); + uint32_t target_num; + fc::raw::unpack(ds, target_num); + + EOS_ASSERT( context.control.head_block_num() < target_num - 100, wasm_execution_error, "target block invalid"); + + EOS_ASSERT( !context.control.is_pbft_enabled(), wasm_execution_error, "pbft is already enabled"); + + EOS_ASSERT( !context.control.under_maintenance(), wasm_execution_error, "the system is under maintenance"); + + context.db.modify( context.control.get_upgrade_properties(), + [&]( auto& uprops ) { + uprops.upgrade_target_block_num = target_num; + }); + } + // *bos begin* void set_name_list_packed(int64_t list, int64_t action, array_ptr packed_name_list, size_t datalen) { @@ -1832,6 +1849,7 @@ REGISTER_INTRINSICS(privileged_api, (set_guaranteed_minimum_resources, void(int64_t,int64_t,int64_t) ) (is_privileged, int(int64_t) ) (set_privileged, void(int64_t, int) ) + (set_upgrade_parameters_packed, void(int, int) ) ); REGISTER_INJECTED_INTRINSICS(transaction_context, diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 5f0a5206fdc..5a9db207780 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -291,6 +292,7 @@ namespace eosio { namespace testing { fc::temp_directory tempdir; public: unique_ptr control; + unique_ptr pbft_ctrl; std::map block_signing_private_keys; protected: controller::config cfg; diff --git a/pipeline.jsonc b/pipeline.jsonc new file mode 100644 index 00000000000..0f78488b9fb --- /dev/null +++ b/pipeline.jsonc @@ -0,0 +1,14 @@ +{ + "eosio-lrt": + { + "pipeline-branch": "legacy-os" + }, + "eosio-nightly-builds": + { + "pipeline-branch": "legacy-os" + }, + "eosio-base-images": + { + "pipeline-branch": "release/1.6.x" + } +} \ No newline at end of file diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt index 0b42bdf41b0..8eb52950702 100644 --- a/plugins/CMakeLists.txt +++ b/plugins/CMakeLists.txt @@ -9,6 +9,8 @@ add_subdirectory(producer_plugin) add_subdirectory(producer_api_plugin) add_subdirectory(history_plugin) add_subdirectory(history_api_plugin) +add_subdirectory(pbft_plugin) +add_subdirectory(pbft_api_plugin) add_subdirectory(state_history_plugin) add_subdirectory(wallet_plugin) diff --git a/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp b/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp 
index b62915b5220..a168ea54928 100644 --- a/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp +++ b/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp @@ -11,6 +11,7 @@ #include #include #include +#include namespace eosio { namespace chain { namespace plugin_interface { using namespace eosio::chain; @@ -61,4 +62,24 @@ namespace eosio { namespace chain { namespace plugin_interface { } } + namespace pbft { + namespace incoming { + using prepare_channel = channel_decl>; + using commit_channel = channel_decl>; + using view_change_channel = channel_decl>; + using new_view_channel = channel_decl>; + using checkpoint_channel = channel_decl>; + + } + + namespace outgoing { + using prepare_channel = channel_decl; + using commit_channel = channel_decl; + using view_change_channel = channel_decl; + using new_view_channel = channel_decl; + using checkpoint_channel = channel_decl; + + } + } + } } } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index f3fddf4764a..ce0eab28de5 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -150,6 +150,17 @@ class chain_plugin_impl { ,incoming_block_channel(app().get_channel()) ,incoming_block_sync_method(app().get_method()) ,incoming_transaction_async_method(app().get_method()) + //pbft channels + ,pbft_outgoing_prepare_channel(app().get_channel()) + ,pbft_incoming_prepare_channel(app().get_channel()) + ,pbft_outgoing_commit_channel(app().get_channel()) + ,pbft_incoming_commit_channel(app().get_channel()) + ,pbft_outgoing_view_change_channel(app().get_channel()) + ,pbft_incoming_view_change_channel(app().get_channel()) + ,pbft_outgoing_new_view_channel(app().get_channel()) + ,pbft_incoming_new_view_channel(app().get_channel()) + ,pbft_outgoing_checkpoint_channel(app().get_channel()) + ,pbft_incoming_checkpoint_channel(app().get_channel()) {} bfs::path blocks_dir; @@ -161,11 +172,17 @@ class chain_plugin_impl { fc::optional chain_config; fc::optional chain; fc::optional chain_id; + fc::optional pbft_ctrl; //txn_msg_rate_limits rate_limits; fc::optional wasm_runtime; fc::microseconds abi_serializer_max_time_ms; fc::optional snapshot_path; + void on_pbft_incoming_prepare(pbft_metadata_ptr p); + void on_pbft_incoming_commit(pbft_metadata_ptr c); + void on_pbft_incoming_view_change(pbft_metadata_ptr vc); + void on_pbft_incoming_new_view(pbft_metadata_ptr nv); + void on_pbft_incoming_checkpoint(pbft_metadata_ptr cp); // retained references to channels for easy publication channels::pre_accepted_block::channel_type& pre_accepted_block_channel; @@ -196,7 +213,31 @@ class chain_plugin_impl { fc::optional applied_transaction_connection; fc::optional accepted_confirmation_connection; - + //pbft + fc::optional pbft_outgoing_prepare_connection; + pbft::incoming::prepare_channel::channel_type::handle pbft_incoming_prepare_subscription; + pbft::outgoing::prepare_channel::channel_type& pbft_outgoing_prepare_channel; + pbft::incoming::prepare_channel::channel_type& pbft_incoming_prepare_channel; + + fc::optional pbft_outgoing_commit_connection; + pbft::incoming::commit_channel::channel_type::handle pbft_incoming_commit_subscription; + pbft::outgoing::commit_channel::channel_type& pbft_outgoing_commit_channel; + pbft::incoming::commit_channel::channel_type& pbft_incoming_commit_channel; + + fc::optional pbft_outgoing_view_change_connection; + pbft::incoming::view_change_channel::channel_type::handle pbft_incoming_view_change_subscription; + 
pbft::outgoing::view_change_channel::channel_type& pbft_outgoing_view_change_channel; + pbft::incoming::view_change_channel::channel_type& pbft_incoming_view_change_channel; + + fc::optional pbft_outgoing_new_view_connection; + pbft::incoming::new_view_channel::channel_type::handle pbft_incoming_new_view_subscription; + pbft::outgoing::new_view_channel::channel_type& pbft_outgoing_new_view_channel; + pbft::incoming::new_view_channel::channel_type& pbft_incoming_new_view_channel; + + fc::optional pbft_outgoing_checkpoint_connection; + pbft::incoming::checkpoint_channel::channel_type::handle pbft_incoming_checkpoint_subscription; + pbft::outgoing::checkpoint_channel::channel_type& pbft_outgoing_checkpoint_channel; + pbft::incoming::checkpoint_channel::channel_type& pbft_incoming_checkpoint_channel; }; chain_plugin::chain_plugin() @@ -294,12 +335,25 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip } +template +T dejsonify(const string& s) { + return fc::json::from_string(s).as(); +} + #define LOAD_VALUE_SET(options, name, container) \ if( options.count(name) ) { \ const std::vector& ops = options[name].as>(); \ std::copy(ops.begin(), ops.end(), std::inserter(container, container.end())); \ } +static signature_provider_type +make_key_signature_provider(const private_key_type& key) { + return [key]( const chain::digest_type& digest ) { + return key.sign(digest); + }; +} + + fc::time_point calculate_genesis_timestamp( string tstr ) { fc::time_point genesis_timestamp; if( strcasecmp (tstr.c_str(), "now") == 0 ) { @@ -350,6 +404,48 @@ void chain_plugin::plugin_initialize(const variables_map& options) { LOAD_VALUE_SET( options, "actor-blacklist", my->chain_config->actor_blacklist ); LOAD_VALUE_SET( options, "contract-whitelist", my->chain_config->contract_whitelist ); LOAD_VALUE_SET( options, "contract-blacklist", my->chain_config->contract_blacklist ); + LOAD_VALUE_SET( options, "producer-name", my->chain_config->my_producers); + if( options.count("private-key") ) + { + const std::vector key_id_to_wif_pair_strings = options["private-key"].as>(); + for (const std::string& key_id_to_wif_pair_string : key_id_to_wif_pair_strings) + { + try { + auto key_id_to_wif_pair = dejsonify>(key_id_to_wif_pair_string); + my->chain_config->my_signature_providers[key_id_to_wif_pair.first] = make_key_signature_provider(key_id_to_wif_pair.second); + auto blanked_privkey = std::string(std::string(key_id_to_wif_pair.second).size(), '*' ); + wlog("\"private-key\" is DEPRECATED, use \"signature-provider=${pub}=KEY:${priv}\"", ("pub",key_id_to_wif_pair.first)("priv", blanked_privkey)); + } catch ( fc::exception& e ) { + elog("Malformed private key pair"); + } + } + } + + if( options.count("signature-provider") ) { + const std::vector key_spec_pairs = options["signature-provider"].as>(); + for (const auto& key_spec_pair : key_spec_pairs) { + try { + auto delim = key_spec_pair.find("="); + EOS_ASSERT(delim != std::string::npos, plugin_config_exception, "Missing \"=\" in the key spec pair"); + auto pub_key_str = key_spec_pair.substr(0, delim); + auto spec_str = key_spec_pair.substr(delim + 1); + + auto spec_delim = spec_str.find(":"); + EOS_ASSERT(spec_delim != std::string::npos, plugin_config_exception, "Missing \":\" in the key spec pair"); + auto spec_type_str = spec_str.substr(0, spec_delim); + auto spec_data = spec_str.substr(spec_delim + 1); + + auto pubkey = public_key_type(pub_key_str); + + if (spec_type_str == "KEY") { + my->chain_config->my_signature_providers[pubkey] = 
make_key_signature_provider(private_key_type(spec_data)); + } + + } catch (...) { + elog("Malformed signature provider: \"${val}\", ignoring!", ("val", key_spec_pair)); + } + } + } LOAD_VALUE_SET( options, "trusted-producer", my->chain_config->trusted_producers ); @@ -641,6 +737,9 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain.emplace( *my->chain_config ); my->chain_id.emplace( my->chain->get_chain_id()); + ilog("include pbft controller..."); + my->pbft_ctrl.emplace(*my->chain); + // set up method providers my->get_block_by_number_provider = app().get_method().register_provider( [this]( uint32_t block_num ) -> signed_block_ptr { @@ -703,11 +802,81 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->accepted_confirmation_channel.publish( conf ); } ); + + + //pbft + my->pbft_incoming_prepare_subscription = my->pbft_incoming_prepare_channel.subscribe( [this]( pbft_metadata_ptr p ){ + my->on_pbft_incoming_prepare(p); + }); + + my->pbft_incoming_commit_subscription = my->pbft_incoming_commit_channel.subscribe( [this]( pbft_metadata_ptr c ){ + my->on_pbft_incoming_commit(c); + }); + + my->pbft_incoming_view_change_subscription = my->pbft_incoming_view_change_channel.subscribe( [this]( pbft_metadata_ptr vc ){ + my->on_pbft_incoming_view_change(vc); + }); + + my->pbft_incoming_new_view_subscription = my->pbft_incoming_new_view_channel.subscribe( [this]( pbft_metadata_ptr nv ){ + my->on_pbft_incoming_new_view(nv); + }); + + my->pbft_incoming_checkpoint_subscription = my->pbft_incoming_checkpoint_channel.subscribe( [this]( pbft_metadata_ptr cp ){ + my->on_pbft_incoming_checkpoint(cp); + }); + + my->pbft_outgoing_prepare_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_prepare.connect( + [this]( const pbft_prepare& prepare ) { + my->pbft_outgoing_prepare_channel.publish( prepare ); + }); + + my->pbft_outgoing_commit_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_commit.connect( + [this]( const pbft_commit& commit ) { + my->pbft_outgoing_commit_channel.publish( commit ); + }); + + my->pbft_outgoing_view_change_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_view_change.connect( + [this]( const pbft_view_change& view_change ) { + my->pbft_outgoing_view_change_channel.publish( view_change ); + }); + + my->pbft_outgoing_new_view_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_new_view.connect( + [this]( const pbft_new_view& new_view ) { + my->pbft_outgoing_new_view_channel.publish( new_view ); + }); + + my->pbft_outgoing_checkpoint_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_checkpoint.connect( + [this]( const pbft_checkpoint& checkpoint ) { + my->pbft_outgoing_checkpoint_channel.publish( checkpoint ); + }); + my->chain->add_indices(); } FC_LOG_AND_RETHROW() + +} + +void chain_plugin_impl::on_pbft_incoming_prepare(pbft_metadata_ptr p){ + pbft_ctrl->on_pbft_prepare(p); +} + +void chain_plugin_impl::on_pbft_incoming_commit(pbft_metadata_ptr c){ + pbft_ctrl->on_pbft_commit(c); +} + +void chain_plugin_impl::on_pbft_incoming_view_change(pbft_metadata_ptr vc){ + pbft_ctrl->on_pbft_view_change(vc); +} + +void chain_plugin_impl::on_pbft_incoming_new_view(pbft_metadata_ptr nv){ + pbft_ctrl->on_pbft_new_view(nv); } +void chain_plugin_impl::on_pbft_incoming_checkpoint(pbft_metadata_ptr cp){ + pbft_ctrl->on_pbft_checkpoint(cp); +} + + void chain_plugin::plugin_startup() { try { try { @@ -979,6 +1148,8 @@ bool chain_plugin::export_reversible_blocks( const fc::path& reversible_dir, controller& chain_plugin::chain() { return *my->chain; } const 
controller& chain_plugin::chain() const { return *my->chain; } +pbft_controller& chain_plugin::pbft_ctrl() { return *my->pbft_ctrl; } +const pbft_controller& chain_plugin::pbft_ctrl() const { return *my->pbft_ctrl; } chain::chain_id_type chain_plugin::get_chain_id()const { EOS_ASSERT( my->chain_id.valid(), chain_id_type_exception, "chain ID has not been initialized yet" ); @@ -1038,6 +1209,9 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params db.fork_db_head_block_id(), db.fork_db_head_block_time(), db.fork_db_head_block_producer(), + pbft_ctrl.state_machine->get_current_view(), + pbft_ctrl.state_machine->get_target_view(), + db.last_stable_checkpoint_block_num(), rm.get_virtual_block_cpu_limit(), rm.get_virtual_block_net_limit(), rm.get_block_cpu_limit(), diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index aa493e045fd..036d70358fb 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -20,6 +20,7 @@ #include #include +#include namespace fc { class variant; } @@ -73,12 +74,13 @@ class read_only { const controller& db; const fc::microseconds abi_serializer_max_time; bool shorten_abi_errors = true; + const chain::pbft_controller& pbft_ctrl; public: static const string KEYi64; - read_only(const controller& db, const fc::microseconds& abi_serializer_max_time) - : db(db), abi_serializer_max_time(abi_serializer_max_time) {} + read_only(const controller& db, const fc::microseconds& abi_serializer_max_time, const chain::pbft_controller& pbft_ctrl) + : db(db), abi_serializer_max_time(abi_serializer_max_time), pbft_ctrl(pbft_ctrl) {} void validate() const {} @@ -95,7 +97,9 @@ class read_only { chain::block_id_type head_block_id; fc::time_point head_block_time; account_name head_block_producer; - + uint32_t current_view = 0; + uint32_t target_view = 0; + uint32_t last_stable_checkpoint_block_num = 0; uint64_t virtual_block_cpu_limit = 0; uint64_t virtual_block_net_limit = 0; @@ -660,7 +664,7 @@ class chain_plugin : public plugin { void plugin_startup(); void plugin_shutdown(); - chain_apis::read_only get_read_only_api() const { return chain_apis::read_only(chain(), get_abi_serializer_max_time()); } + chain_apis::read_only get_read_only_api() const { return chain_apis::read_only(chain(), get_abi_serializer_max_time(), pbft_ctrl()); } chain_apis::read_write get_read_write_api() { return chain_apis::read_write(chain(), get_abi_serializer_max_time()); } void accept_block( const chain::signed_block_ptr& block ); @@ -689,6 +693,11 @@ class chain_plugin : public plugin { // Only call this after plugin_initialize()! const controller& chain() const; + // Only call this after plugin_initialize()! + chain::pbft_controller& pbft_ctrl(); + // Only call this after plugin_initialize()! 
+ const chain::pbft_controller& pbft_ctrl() const; + chain::chain_id_type get_chain_id() const; fc::microseconds get_abi_serializer_max_time() const; @@ -706,7 +715,7 @@ class chain_plugin : public plugin { FC_REFLECT( eosio::chain_apis::permission, (perm_name)(parent)(required_auth) ) FC_REFLECT(eosio::chain_apis::empty, ) FC_REFLECT(eosio::chain_apis::read_only::get_info_results, -(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string) ) +(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(current_view)(target_view)(last_stable_checkpoint_block_num)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string) ) FC_REFLECT(eosio::chain_apis::read_only::get_block_params, (block_num_or_id)) FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_num_or_id)) diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index d732b18cf0c..3de8b9a8d34 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -30,12 +30,16 @@ namespace eosio { void plugin_startup(); void plugin_shutdown(); + void broadcast_block(const chain::signed_block &sb); string connect( const string& endpoint ); string disconnect( const string& endpoint ); optional status( const string& endpoint )const; vector connections()const; + bool is_syncing()const; + + void maybe_sync_stable_checkpoints(); size_t num_peers() const; private: diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp index 76f11da2411..73a2e9caa04 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp @@ -5,6 +5,7 @@ #pragma once #include #include +#include #include namespace eosio { @@ -132,6 +133,11 @@ namespace eosio { uint32_t end_block; }; + struct checkpoint_request_message { + uint32_t start_block; + uint32_t end_block; + }; + struct request_p2p_message{ bool discoverable; }; @@ -140,6 +146,11 @@ struct request_p2p_message{ bool discoverable; string p2p_peer_list; }; + + struct compressed_pbft_message { + std::vector content; + }; + using net_message = static_variant; + request_p2p_message, + pbft_prepare, + pbft_commit, + pbft_view_change, + pbft_new_view, + pbft_checkpoint, + pbft_stable_checkpoint, + checkpoint_request_message, + compressed_pbft_message>; + } // namespace eosio FC_REFLECT( eosio::select_ids, (mode)(pending)(ids) ) @@ -170,6 +190,10 @@ FC_REFLECT( eosio::request_message, (req_trx)(req_blocks) ) FC_REFLECT( eosio::sync_request_message, (start_block)(end_block) ) FC_REFLECT( eosio::request_p2p_message, (discoverable) ) FC_REFLECT( eosio::response_p2p_message, (discoverable)(p2p_peer_list) ) +FC_REFLECT( eosio::checkpoint_request_message, (start_block)(end_block) ) +FC_REFLECT( eosio::compressed_pbft_message, (content)) + + /** * Goals of Network Code diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8161a7119cf..3b1a38f133b 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -26,6 +26,12 
@@ #include #include +#include + +#include +#include +#include + using namespace eosio::chain::plugin_interface::compat; namespace fc { @@ -106,7 +112,13 @@ namespace eosio { bool connected; }; class net_plugin_impl { + private: + std::vector compress_pbft(const std::shared_ptr>& m)const; + std::vector decompress_pbft(const std::vector& m)const; + std::shared_ptr> encode_pbft_message(const net_message &msg, bool compress = false)const; public: + net_plugin_impl(); + unique_ptr acceptor; tcp::endpoint listen_endpoint; string p2p_address; @@ -140,10 +152,14 @@ namespace eosio { unique_ptr connector_check; unique_ptr transaction_check; unique_ptr keepalive_timer; + unique_ptr pbft_message_cache_timer; + unique_ptr connection_monitor_timer; boost::asio::steady_timer::duration connector_period; boost::asio::steady_timer::duration txn_exp_period; boost::asio::steady_timer::duration resp_expected_period; boost::asio::steady_timer::duration keepalive_interval{std::chrono::seconds{32}}; + boost::asio::steady_timer::duration pbft_message_cache_tick_interval{std::chrono::seconds{10}}; + boost::asio::steady_timer::duration connection_monitor_tick_interval{std::chrono::seconds{2}}; int max_cleanup_time_ms = 0; const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; ///< Peer clock may be no more than 1 second skewed from our clock, including network latency. @@ -163,7 +179,22 @@ namespace eosio { bool use_socket_read_watermark = false; + std::unordered_map pbft_message_cache{}; + const int pbft_message_cache_TTL = 600; + const int pbft_message_TTL = 60; + channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + eosio::chain::plugin_interface::pbft::outgoing::prepare_channel::channel_type::handle pbft_outgoing_prepare_subscription; + eosio::chain::plugin_interface::pbft::outgoing::commit_channel::channel_type::handle pbft_outgoing_commit_subscription; + eosio::chain::plugin_interface::pbft::outgoing::view_change_channel::channel_type::handle pbft_outgoing_view_change_subscription; + eosio::chain::plugin_interface::pbft::outgoing::new_view_channel::channel_type::handle pbft_outgoing_new_view_subscription; + eosio::chain::plugin_interface::pbft::outgoing::checkpoint_channel::channel_type::handle pbft_outgoing_checkpoint_subscription; + + eosio::chain::plugin_interface::pbft::incoming::prepare_channel::channel_type& pbft_incoming_prepare_channel; + eosio::chain::plugin_interface::pbft::incoming::commit_channel::channel_type& pbft_incoming_commit_channel; + eosio::chain::plugin_interface::pbft::incoming::view_change_channel::channel_type& pbft_incoming_view_change_channel; + eosio::chain::plugin_interface::pbft::incoming::new_view_channel::channel_type& pbft_incoming_new_view_channel; + eosio::chain::plugin_interface::pbft::incoming::checkpoint_channel::channel_type& pbft_incoming_checkpoint_channel; void connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); @@ -214,6 +245,33 @@ namespace eosio { void handle_message( connection_ptr c, const request_p2p_message &msg); void handle_message( connection_ptr c, const response_p2p_message &msg); + //pbft messages + bool maybe_add_to_pbft_cache(const string &key); + void clean_expired_pbft_messages(); + template + bool is_pbft_msg_outdated(M const & msg); + template + bool is_pbft_msg_valid(M const & msg); + + void bcast_pbft_msg(const net_message &msg, int ttl); + + void forward_pbft_msg(connection_ptr c, const net_message &msg, int 
ttl); + + void pbft_outgoing_prepare(const pbft_prepare &prepare); + void pbft_outgoing_commit(const pbft_commit &commit); + void pbft_outgoing_view_change(const pbft_view_change &view_change); + void pbft_outgoing_new_view(const pbft_new_view &new_view); + void pbft_outgoing_checkpoint(const pbft_checkpoint &checkpoint); + + void handle_message( connection_ptr c, const pbft_prepare &msg); + void handle_message( connection_ptr c, const pbft_commit &msg); + void handle_message( connection_ptr c, const pbft_view_change &msg); + void handle_message( connection_ptr c, const pbft_new_view &msg); + void handle_message( connection_ptr c, const pbft_checkpoint &msg); + void handle_message( connection_ptr c, const pbft_stable_checkpoint &msg); + void handle_message( connection_ptr c, const checkpoint_request_message &msg); + void handle_message( connection_ptr c, const compressed_pbft_message &msg); + void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); void start_txn_timer(); void start_monitors(); @@ -221,6 +279,9 @@ namespace eosio { void expire_txns(); void expire_local_txns(); void connection_monitor(std::weak_ptr from_connection); + + void pbft_message_cache_ticker(); + void connection_monitor_ticker(); /** \name Peer Timestamps * Time message handling * @{ @@ -436,6 +497,8 @@ namespace eosio { uint32_t write_queue_size() const { return _write_queue_size; } + uint32_t out_queue_size() const { return _out_queue.size(); } + bool is_out_queue_empty() const { return _out_queue.empty(); } bool ready_to_send() const { @@ -497,6 +560,12 @@ namespace eosio { deque _sync_write_queue; // sync_write_queue will be sent first deque _out_queue; + public: + //used for pbft msgs sending only + void push_to_out_queue( const queued_write& m) { + _out_queue.emplace_back( m ); + } + }; // queued_buffer @@ -517,6 +586,15 @@ namespace eosio { fc::optional outstanding_read_bytes; + struct queued_pbft_message { + std::shared_ptr> message; + fc::time_point_sec deadline; + }; + const int OUT_QUEUE_SIZE_LIMIT_FROM_WRITE_QUEUE = 100; + const int OUT_QUEUE_SIZE_LIMIT = 200; + + deque pbft_queue; + queued_buffer buffer_queue; uint32_t reads_in_flight = 0; @@ -527,6 +605,8 @@ namespace eosio { int16_t sent_handshake_count = 0; bool connecting = false; bool syncing = false; + int connecting_timeout_in_seconds = 10; + fc::time_point_sec connecting_deadline; uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; @@ -565,6 +645,7 @@ namespace eosio { bool connected(); bool current(); + bool pbft_ready(); void reset(); void close(); void send_handshake(); @@ -602,6 +683,10 @@ namespace eosio { void enqueue_buffer( const std::shared_ptr>& send_buffer, bool trigger_send, go_away_reason close_after_send, bool to_sync_queue = false); + void enqueue_pbft( const std::shared_ptr>& m, const time_point_sec deadline); + + bool pbft_read_to_send(); + void cancel_sync(go_away_reason); void flush_queues(); bool enqueue_sync_block(); @@ -618,6 +703,7 @@ namespace eosio { std::function callback, bool to_sync_queue = false); void do_queue_write(); + void fill_out_buffer_with_pbft_queue(std::vector &bufs); void send_p2p_request(bool discoverable); void send_p2p_response(bool discoverable,string p2p_peer_list); @@ -704,6 +790,7 @@ namespace eosio { uint32_t sync_last_requested_num; uint32_t sync_next_expected_num; uint32_t sync_req_span; + uint32_t last_req_scp_num; connection_ptr source; stages state; @@ -726,6 +813,9 @@ namespace eosio { void recv_block(const connection_ptr& c, 
const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); + bool is_syncing(); + void set_in_sync(); + void sync_stable_checkpoints(const connection_ptr& c, uint32_t target); }; class dispatch_manager { @@ -784,6 +874,7 @@ namespace eosio { sent_handshake_count(0), connecting(true), syncing(false), + connecting_deadline(fc::time_point::now()+fc::seconds(connecting_timeout_in_seconds)), protocol_version(0), peer_addr(), response_expected(), @@ -815,6 +906,10 @@ namespace eosio { return (connected() && !syncing); } + bool connection::pbft_ready(){ + return current(); + } + void connection::reset() { peer_requested.reset(); blk_state.clear(); @@ -823,6 +918,7 @@ namespace eosio { void connection::flush_queues() { buffer_queue.clear_write_queue(); + pbft_queue.clear(); } void connection::close() { @@ -834,6 +930,7 @@ namespace eosio { } flush_queues(); connecting = false; + connecting_deadline = fc::time_point::min(); syncing = false; if( last_req ) { my_impl->dispatcher->retry_fetch(shared_from_this()); @@ -995,9 +1092,14 @@ namespace eosio { } } + bool connection::pbft_read_to_send() { + return !pbft_queue.empty() && buffer_queue.is_out_queue_empty(); + } + void connection::do_queue_write() { - if( !buffer_queue.ready_to_send() ) - return; + if( !(buffer_queue.ready_to_send() || pbft_read_to_send()) ) + return; + connection_wptr c(shared_from_this()); if(!socket->is_open()) { fc_elog(logger,"socket not open to ${p}",("p",peer_name())); @@ -1005,7 +1107,10 @@ namespace eosio { return; } std::vector bufs; + buffer_queue.fill_out_buffer( bufs ); + fill_out_buffer_with_pbft_queue( bufs ); + boost::asio::async_write(*socket, bufs, [c](boost::system::error_code ec, std::size_t w) { try { auto conn = c.lock(); @@ -1047,6 +1152,50 @@ namespace eosio { }); } + void connection::fill_out_buffer_with_pbft_queue(std::vector &bufs){ + //delete timeout pbft message + auto now = time_point::now(); + int drop_pbft_count = 0; + while (pbft_queue.size()>0) { + if (pbft_queue.front().deadline <= now) { + pbft_queue.pop_front(); + ++drop_pbft_count; + } else { + break; + } + } + + //drop timeout messages in mem, init send buffer only when actual send happens + //copied from a previous version of connection::enqueue + connection_wptr weak_this = shared_from_this(); + go_away_reason close_after_send = no_reason; + std::function callback = [weak_this, close_after_send](boost::system::error_code ec, std::size_t ) { + connection_ptr conn = weak_this.lock(); + if (conn) { + if (close_after_send != no_reason) { + elog ("sent a go away message: ${r}, closing connection to ${p}",("r", reason_str(close_after_send))("p", conn->peer_name())); + my_impl->close(conn); + return; + } + } else { + fc_wlog(logger, "connection expired before enqueued net_message called callback!"); + } + }; + + //push to out queue + while (buffer_queue.out_queue_size() < OUT_QUEUE_SIZE_LIMIT) { + if (pbft_queue.empty()) break; + + queued_pbft_message pbft = pbft_queue.front(); + pbft_queue.pop_front(); + auto m = pbft.message; + if (m) { + bufs.push_back(boost::asio::buffer(*m)); + buffer_queue.push_to_out_queue( {m, callback} ); + } + } + } + void connection::cancel_sync(go_away_reason reason) { fc_dlog(logger,"cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", ("m",reason_str(reason)) ("o", buffer_queue.write_queue_size())("p", peer_name())); @@ -1147,6 +1296,14 @@ namespace eosio { 
to_sync_queue); } + void connection::enqueue_pbft(const std::shared_ptr>& m, const time_point_sec deadline) + { + pbft_queue.push_back(queued_pbft_message{m, deadline }); + if (buffer_queue.is_out_queue_empty()) { + do_queue_write(); + } + } + void connection::cancel_wait() { if (response_expected) response_expected->cancel(); @@ -1242,7 +1399,7 @@ namespace eosio { return false; } return true; - } + } bool connection::add_peer_block(const peer_block_state& entry) { auto bptr = blk_state.get().find(entry.id); @@ -1325,6 +1482,14 @@ namespace eosio { chain_plug->chain().fork_db_head_block_num() < sync_last_requested_num ); } + bool sync_manager::is_syncing() { + return state != in_sync; + } + + void sync_manager::set_in_sync() { + set_state(in_sync); + } + void sync_manager::request_next_chunk( const connection_ptr& conn ) { uint32_t head_block = chain_plug->chain().fork_db_head_block_num(); @@ -1441,6 +1606,21 @@ namespace eosio { request_next_chunk(c); } + void sync_manager::sync_stable_checkpoints(const connection_ptr& c, uint32_t target) { + controller& cc = chain_plug->chain(); + uint32_t lscb_num = cc.last_stable_checkpoint_block_num(); + if (last_req_scp_num < lscb_num || last_req_scp_num == 0 || last_req_scp_num >= target) last_req_scp_num = lscb_num; + auto end = target; + auto max_target_scp_num = last_req_scp_num + pbft_checkpoint_granularity * 10; + if (target > max_target_scp_num) end = max_target_scp_num; + + checkpoint_request_message crm = {last_req_scp_num+1,end}; + c->enqueue( net_message(crm)); + fc_dlog(logger, "request sync stable checkpoints from ${s} to ${e}", + ("s", last_req_scp_num+1)("e", max_target_scp_num)); + last_req_scp_num = max_target_scp_num; + } + void sync_manager::reassign_fetch(const connection_ptr& c, go_away_reason reason) { fc_ilog(logger, "reassign_fetch, our last req is ${cc}, next expected is ${ne} peer ${p}", ( "cc",sync_last_requested_num)("ne",sync_next_expected_num)("p",c->peer_name())); @@ -1865,6 +2045,81 @@ namespace eosio { //------------------------------------------------------------------------ + namespace bio = boost::iostreams; + template + struct read_limiter { + using char_type = char; + using category = bio::multichar_output_filter_tag; + + template + size_t write(Sink &sink, const char* s, size_t count) + { + EOS_ASSERT(_total + count <= Limit, tx_decompression_error, "Exceeded maximum decompressed transaction size"); + _total += count; + return bio::write(sink, s, count); + } + size_t _total = 0; + }; + + std::vector net_plugin_impl::compress_pbft(const std::shared_ptr>& m) const { + std::vector out; + bio::filtering_ostream comp; + comp.push(bio::zlib_compressor(bio::zlib::best_compression)); + comp.push(bio::back_inserter(out)); + bio::write(comp, m->data(), m->size()); + bio::close(comp); + return out; + } + + std::vector net_plugin_impl::decompress_pbft(const std::vector& m) const { + try { + std::vector out; + bio::filtering_ostream decomp; + decomp.push(bio::zlib_decompressor()); + decomp.push(read_limiter<1*1024*1024>()); // limit to 10 megs decompressed for zip bomb protections + decomp.push(bio::back_inserter(out)); + bio::write(decomp, m.data(), m.size()); + bio::close(decomp); + return out; + } catch( fc::exception& er ) { + throw; + } catch( ... 
) { + fc::unhandled_exception er( FC_LOG_MESSAGE( warn, "internal decompression error"), std::current_exception() ); + throw er; + } + } + + std::shared_ptr> net_plugin_impl::encode_pbft_message(const net_message &msg, bool compress) const { + + uint32_t payload_size = fc::raw::pack_size( msg ); + + char* header = reinterpret_cast(&payload_size); + size_t header_size = sizeof(payload_size); + size_t buffer_size = header_size + payload_size; + + auto send_buffer = std::make_shared>(buffer_size); + fc::datastream ds( send_buffer->data(), buffer_size); + ds.write( header, header_size ); + fc::raw::pack( ds, msg ); + auto out_buffer = send_buffer; + + if (compress) { + auto cpnv = compressed_pbft_message{ compress_pbft(send_buffer) }; + payload_size = fc::raw::pack_size( cpnv ); + + header = reinterpret_cast(&payload_size); + header_size = sizeof(payload_size); + buffer_size = header_size + payload_size; + + auto compressed_buffer = std::make_shared>(buffer_size); + fc::datastream ds( compressed_buffer->data(), buffer_size); + ds.write( header, header_size ); + fc::raw::pack( ds, &cpnv ); + out_buffer = compressed_buffer; + } + return out_buffer; + } + void net_plugin_impl::connect(const connection_ptr& c) { if( c->no_retry != go_away_reason::no_reason) { fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( c->no_retry ))); @@ -1916,13 +2171,14 @@ namespace eosio { ++endpoint_itr; c->connecting = true; c->pending_message_buffer.reset(); + c->connecting_deadline = fc::time_point::now()+fc::seconds(c->connecting_timeout_in_seconds); connection_wptr weak_conn = c; c->socket->async_connect( current_endpoint, [weak_conn, endpoint_itr, this] ( const boost::system::error_code& err ) { auto c = weak_conn.lock(); if (!c) return; if( !err && c->socket->is_open() ) { if (start_session( c )) { - c->send_handshake (); + c->send_handshake(); send_p2p_request(c); } } else { @@ -2062,7 +2318,7 @@ namespace eosio { void net_plugin_impl::start_read_message(const connection_ptr& conn) { try { - if(!conn->socket) { + if(!conn->socket || !conn->socket->is_open()) { return; } connection_wptr weak_conn = conn; @@ -2514,11 +2770,11 @@ namespace eosio { } void net_plugin_impl::handle_message(const connection_ptr& c, const request_message& msg) { - if( msg.req_blocks.ids.size() > 1 ) { - elog( "Invalid request_message, req_blocks.ids.size ${s}", ("s", msg.req_blocks.ids.size()) ); - close(c); - return; - } +// if( msg.req_blocks.ids.size() > 1 ) { +// elog( "Invalid request_message, req_blocks.ids.size ${s}", ("s", msg.req_blocks.ids.size()) ); +// close(c); +// return; +// } // we should enable requesting multiple blocks switch (msg.req_blocks.mode) { case catch_up : @@ -2528,7 +2784,10 @@ namespace eosio { case normal : peer_ilog(c, "received request_message:normal"); if( !msg.req_blocks.ids.empty() ) { - c->blk_send(msg.req_blocks.ids.back()); + fc_dlog( logger, "received request_message, sending ${num} blocks from my node", ("num", msg.req_blocks.ids.size())); + for (auto const &bid: msg.req_blocks.ids) { + c->blk_send(bid); + } } break; default:; @@ -2571,6 +2830,33 @@ namespace eosio { trx->get_signatures().size() * sizeof(signature_type); } + void net_plugin_impl::handle_message( connection_ptr c, const checkpoint_request_message &msg) { + + if ( msg.end_block == 0 || msg.end_block < msg.start_block) return; + + fc_dlog(logger, "received checkpoint request message ${m}", ("m", msg)); + vector scp_stack; + controller &cc = my_impl->chain_plug->chain(); + pbft_controller &pcc = 
my_impl->chain_plug->pbft_ctrl(); + + auto end_block = std::min(msg.end_block, cc.last_stable_checkpoint_block_num()); + + for (auto i = end_block; i >= msg.start_block && i>0; --i) { + auto bid = cc.get_block_id_for_num(i); + auto scp = pcc.pbft_db.get_stable_checkpoint_by_id(bid); + if (!scp.empty()) { + scp_stack.push_back(scp); + } + } + + if (!scp_stack.empty()) fc_dlog(logger, "sending ${n} stable checkpoints on my node",("n",scp_stack.size())); + + while (scp_stack.size()) { + c->enqueue(scp_stack.back()); + scp_stack.pop_back(); + } + } + void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); @@ -2620,9 +2906,23 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); + auto accept_pbft_stable_checkpoint = [&]() { + auto &pcc = chain_plug->pbft_ctrl(); + auto scp = pcc.pbft_db.fetch_stable_checkpoint_from_blk_extn(msg); + + if (!scp.empty() && scp.block_info.block_num() > cc.last_stable_checkpoint_block_num()) { + if (pcc.pbft_db.get_stable_checkpoint_by_id(msg->id(), false).empty()) { + handle_message(c, scp); + } else { + pcc.pbft_db.checkpoint_local(); + } + } + }; + try { if( cc.fetch_block_by_id(blk_id)) { sync_master->recv_block(c, blk_id, blk_num); + accept_pbft_stable_checkpoint(); return; } } catch( ...) { @@ -2638,6 +2938,7 @@ namespace eosio { go_away_reason reason = fatal_other; try { chain_plug->accept_block(msg); //, sync_master->is_active(c)); + accept_pbft_stable_checkpoint(); reason = no_reason; } catch( const unlinkable_block_exception &ex) { peer_elog(c, "bad signed_block : ${m}", ("m",ex.what())); @@ -2679,6 +2980,246 @@ namespace eosio { } } + + template + bool net_plugin_impl::is_pbft_msg_outdated(M const & msg) { + if (time_point_sec(time_point::now()) > time_point_sec(msg.common.timestamp) + pbft_message_TTL) { + fc_dlog( logger, "received an outdated pbft message ${m}", ("m", msg)); + return true; + } + return false; + } + + template + bool net_plugin_impl::is_pbft_msg_valid(M const & msg) { + // Do some basic validations of an incoming pbft msg, bad msgs should be quickly discarded without affecting state. 
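(For illustration only: a minimal, self-contained sketch of the freshness check that is_pbft_msg_outdated performs above. It substitutes std::chrono for the fc time types and hard-codes the 60-second pbft_message_TTL used in this patch; the function name and signature are assumptions, not part of the net_plugin.)

```
// Hedged sketch: a freshness check equivalent to is_pbft_msg_outdated(),
// using std::chrono instead of fc::time_point_sec. ttl_seconds mirrors the
// pbft_message_TTL constant (60s) declared in net_plugin_impl.
#include <chrono>

bool is_outdated(std::chrono::system_clock::time_point msg_timestamp,
                 int ttl_seconds = 60) {
    const auto now = std::chrono::system_clock::now();
    // a message older than the TTL is discarded without touching pbft state
    return now > msg_timestamp + std::chrono::seconds(ttl_seconds);
}
```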
+ return !is_pbft_msg_outdated(msg) + && !sync_master->is_syncing(); + } + + void net_plugin_impl::bcast_pbft_msg(const net_message &msg, int ttl) { + if (sync_master->is_syncing()) return; + + auto deadline = time_point_sec(time_point::now()) + ttl; + + for (auto &conn: connections) { + if (conn->pbft_ready()) { + conn->enqueue_pbft(encode_pbft_message(msg), deadline); + } + } + } + + void net_plugin_impl::forward_pbft_msg(connection_ptr c, const net_message &msg, int ttl) { + auto deadline = time_point_sec(time_point::now()) + ttl; + + for (auto &conn: connections) { + if (conn != c && conn->pbft_ready()) { + conn->enqueue_pbft(encode_pbft_message(msg), deadline); + } + } + } + + void net_plugin_impl::pbft_outgoing_prepare(const pbft_prepare &msg) { + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + + bcast_pbft_msg(msg, pbft_message_TTL); + fc_dlog( logger, "sent prepare at height: ${n}, view: ${v} ", ("n", msg.block_info.block_num())("v", msg.view)); + } + + void net_plugin_impl::pbft_outgoing_commit(const pbft_commit &msg) { + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + + bcast_pbft_msg(msg, pbft_message_TTL); + fc_dlog( logger, "sent commit at height: ${n}, view: ${v} ", ("n", msg.block_info.block_num())("v", msg.view)); + } + + void net_plugin_impl::pbft_outgoing_view_change(const pbft_view_change &msg) { + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + + bcast_pbft_msg(msg, pbft_message_TTL); + fc_dlog( logger, "sent view change {cv: ${cv}, tv: ${tv}}", ("cv", msg.current_view)("tv", msg.target_view)); + } + + void net_plugin_impl::pbft_outgoing_new_view(const pbft_new_view &msg) { + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + + bcast_pbft_msg(msg, INT_MAX); + fc_dlog( logger, "sent new view at view: ${v} ", ("v", msg.new_view)); + } + + void net_plugin_impl::pbft_outgoing_checkpoint(const pbft_checkpoint &msg) { + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + + bcast_pbft_msg(msg, pbft_message_TTL); + fc_dlog( logger, "sent checkpoint at height: ${n} ", ("n", msg.block_info.block_num())); + } + + bool net_plugin_impl::maybe_add_to_pbft_cache(const string &key){ + auto itr = pbft_message_cache.find(key); + if (itr == pbft_message_cache.end()) { + //add to cache + pbft_message_cache[key] = time_point_sec(time_point::now()) + pbft_message_cache_TTL; + return true; + } + return false; + } + + void net_plugin_impl::clean_expired_pbft_messages(){ + auto itr = pbft_message_cache.begin(); + auto now = time_point::now(); + + while (itr != pbft_message_cache.end()) { + if (itr->second <= now) { + itr = pbft_message_cache.erase(itr); + } else + itr++; + } + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_prepare &msg) { + + if (!is_pbft_msg_valid(msg)) return; + + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + auto pmm = pbft_message_metadata(msg, chain_id); + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_prepare(msg, 
pmm.sender_key)) return; + + forward_pbft_msg(c, msg, pbft_message_TTL); + fc_dlog( logger, "received prepare at height: ${n}, view: ${v}, from ${k}, ", ("n", msg.block_info.block_num())("v", msg.view)("k", pmm.sender_key)); + + pbft_incoming_prepare_channel.publish(std::make_shared>(pmm)); + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_commit &msg) { + + if (!is_pbft_msg_valid(msg)) return; + + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + auto pmm = pbft_message_metadata(msg, chain_id); + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_commit(msg, pmm.sender_key)) return; + + forward_pbft_msg(c, msg, pbft_message_TTL); + fc_dlog( logger, "received commit at height: ${n}, view: ${v}, from ${k}, ", ("n", msg.block_info.block_num())("v", msg.view)("k", pmm.sender_key)); + + pbft_incoming_commit_channel.publish(std::make_shared>(pmm)); + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_view_change &msg) { + + if (!is_pbft_msg_valid(msg)) return; + + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + auto pmm = pbft_message_metadata(msg, chain_id); + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + controller &ctrl = my_impl->chain_plug->chain(); + if (!pcc.pbft_db.is_valid_view_change(msg, pmm.sender_key)) return; + + auto missing_blocks = set{}; + for (auto const &b: msg.prepared_cert.pre_prepares) { + if (!ctrl.fetch_block_by_id(b)) missing_blocks.emplace(b); + } + + if (!missing_blocks.empty()) { + fc_dlog( logger, "requesting ${num} missing blocks from view change", ("num", missing_blocks.size())); + request_message req; + for (auto const &b: missing_blocks) { + req.req_blocks.ids.push_back(b); + } + req.req_trx.mode = normal; + req.req_blocks.mode = normal; + c->enqueue(req); + } + + forward_pbft_msg(c, msg, pbft_message_TTL); + fc_dlog( logger, "received view change {cv: ${cv}, tv: ${tv}} from ${v}", ("cv", msg.current_view)("tv", msg.target_view)("v", pmm.sender_key)); + + pbft_incoming_view_change_channel.publish(std::make_shared>(pmm)); + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_new_view &msg) { + + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + auto pmm = pbft_message_metadata(msg, chain_id); + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (pmm.sender_key != pcc.pbft_db.get_new_view_primary_key(msg.new_view)) return; + + forward_pbft_msg(c, msg, INT_MAX); + fc_dlog( logger, "received new view: ${n}, from ${v}", ("n", msg)("v", pmm.sender_key)); + + pbft_incoming_new_view_channel.publish(std::make_shared>(pmm)); + } + + void net_plugin_impl::handle_message( connection_ptr c, const compressed_pbft_message &msg) { + + auto decompressed_msg = decompress_pbft(msg.content); + + net_message message; + fc::datastream ds(decompressed_msg.data(), decompressed_msg.size()); + fc::raw::unpack(ds, message); + + try { + msg_handler m(*this, c); + message.visit( m ); + } catch( const fc::exception& e ) { + edump((e.to_detail_string() )); + } + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_checkpoint &msg) { + + if (!is_pbft_msg_valid(msg)) return; + + auto added = maybe_add_to_pbft_cache(std::string(msg.sender_signature)); + if (!added) return; + + auto pmm = pbft_message_metadata(msg, chain_id); + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if 
(!pcc.pbft_db.is_valid_checkpoint(msg, pmm.sender_key)) return; + + forward_pbft_msg(c, msg, pbft_message_TTL); + fc_dlog( logger, "received checkpoint at ${n}, from ${v}", ("n", msg.block_info.block_num())("v", pmm.sender_key)); + + pbft_incoming_checkpoint_channel.publish(std::make_shared>(pmm)); + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_stable_checkpoint &msg) { + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + + if (!pcc.pbft_db.is_valid_stable_checkpoint(msg, true)) return; + fc_ilog(logger, "received stable checkpoint at ${n}, from ${v}", ("n", msg.block_info.block_num())("v", c->peer_name())); + } + void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { connector_check->expires_from_now( du); connector_check->async_wait( [this, from_connection](boost::system::error_code ec) { @@ -2705,6 +3246,86 @@ namespace eosio { }); } + void net_plugin_impl::pbft_message_cache_ticker() { + pbft_message_cache_timer->expires_from_now (pbft_message_cache_tick_interval); + pbft_message_cache_timer->async_wait ([this](boost::system::error_code ec) { + pbft_message_cache_ticker (); + if (ec) { + wlog ("pbft message cache ticker error: ${m}", ("m", ec.message())); + } + clean_expired_pbft_messages(); + }); + } + + void net_plugin_impl::connection_monitor_ticker() { + connection_monitor_timer->expires_from_now (connection_monitor_tick_interval); + connection_monitor_timer->async_wait ([this](boost::system::error_code ec) { + connection_monitor_ticker (); + if (ec) { + wlog ("connection monitor ticker error: ${m}", ("m", ec.message())); + } + int total=0; + int current=0; + for(auto &conn: connections){ + if(conn->current()){ + ++current; + } + ++total; + auto is_open = conn->socket && conn->socket->is_open(); +// auto paddr = conn->peer_addr; +// paddr.insert(0, 20 - paddr.length(), ' '); + std::ostringstream ss; + + auto so = is_open?"1":"0"; + auto con = conn->connecting ?"1":"0"; + auto syn = conn->syncing ?"1":"0"; + auto cur = conn->current() ?"1":"0"; + ss << so << con << syn << cur ; + auto status = ss.str(); + + ss.str(""); + ss.clear(); + + ss << std::setfill(' ') << std::setw(22) << conn->peer_addr; + auto paddr = ss.str(); + + ss.str(""); + ss.clear(); + + ss << std::setfill(' ') << std::setw(6) << conn->buffer_queue.write_queue_size(); + auto write_queue = ss.str(); + + ss.str(""); + ss.clear(); + + ss << std::setfill(' ') << std::setw(6) << conn->buffer_queue.out_queue_size(); + auto out_queue = ss.str(); + + ss.str(""); + ss.clear(); + + ss << std::setfill(' ') << std::setw(6) << conn->pbft_queue.size(); + auto pbft_queue = ss.str(); + + auto conn_str = conn->peer_addr; + if(conn_str.empty()) { + try { + conn_str = boost::lexical_cast(conn->socket->remote_endpoint()); + } catch (...) 
{ + + } + } + + dlog("connection: ${conn} \tstatus(socket|connecting|syncing|current): ${status}\t|\twrite_queue: ${write}\t|\tout_queue: ${out}\t|\tpbft_queue: ${pbft}", ("status",status)("conn",conn_str)("write",write_queue)("out",out_queue)("pbft",pbft_queue)); + } + dlog("connections stats: current : ${current}\t total : ${total} ",("current",current)("total",total)); + dlog("================================================================================================"); + auto local_trx_pool_size = local_txns.size(); + fc_dlog(logger, "local trx pool size: ${local_trx_pool_size}",("local_trx_pool_size",local_trx_pool_size)); + fc_dlog(logger, "================================================================================================"); + }); + } + void net_plugin_impl::ticker() { keepalive_timer->expires_from_now(keepalive_interval); keepalive_timer->async_wait([this](boost::system::error_code ec) { @@ -2781,6 +3402,14 @@ namespace eosio { it = connections.erase(it); continue; } + }else if((*it)->connecting && (*it)->connecting_deadline < fc::time_point::now()){ + if( (*it)->peer_addr.length() > 0) { + close(*it); + } + else { + it = connections.erase(it); + continue; + } } ++it; } @@ -3092,6 +3721,10 @@ namespace eosio { my->keepalive_timer.reset( new boost::asio::steady_timer( app().get_io_service())); my->ticker(); + my->pbft_message_cache_timer.reset( new boost::asio::steady_timer( app().get_io_service())); + my->connection_monitor_timer.reset( new boost::asio::steady_timer( app().get_io_service())); + my->pbft_message_cache_ticker(); +// my->connection_monitor_ticker(); } FC_LOG_AND_RETHROW() } @@ -3117,6 +3750,16 @@ namespace eosio { } my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->pbft_outgoing_prepare_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_prepare, my.get(), _1)); + my->pbft_outgoing_commit_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_commit, my.get(), _1)); + my->pbft_outgoing_view_change_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_view_change, my.get(), _1)); + my->pbft_outgoing_new_view_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_new_view, my.get(), _1)); + my->pbft_outgoing_checkpoint_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_checkpoint, my.get(), _1)); if( cc.get_read_mode() == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; @@ -3208,6 +3851,34 @@ namespace eosio { } return result; } + + bool net_plugin::is_syncing()const { + return my->sync_master->is_syncing(); + } + + void net_plugin::maybe_sync_stable_checkpoints() { + controller& cc = my->chain_plug->chain(); + if (!cc.is_pbft_enabled()) return; + //there might be a better way to sync checkpoints, yet we do not want to modify the existing handshake msg. 
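(For illustration only: the loop below asks every current peer for stable checkpoints via sync_manager::sync_stable_checkpoints, shown earlier in this patch. The standalone sketch that follows restates the window arithmetic that function uses to bound each checkpoint_request_message; the helper name, the free-function form, and the granularity parameter standing in for pbft_checkpoint_granularity are assumptions.)

```
// Hedged sketch of the request window used by sync_stable_checkpoints():
// resume from the last stable checkpoint when the previous request is stale,
// and never request more than ten checkpoint intervals at once.
#include <algorithm>
#include <cstdint>

struct scp_request { uint32_t start_block; uint32_t end_block; };

scp_request next_scp_request(uint32_t last_requested, uint32_t lscb,
                             uint32_t target, uint32_t granularity) {
    if (last_requested < lscb || last_requested == 0 || last_requested >= target)
        last_requested = lscb;                       // restart from the LSCB
    const uint32_t end = std::min<uint32_t>(target, last_requested + granularity * 10);
    return { last_requested + 1, end };              // inclusive request range
}
```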
+ uint32_t head = cc.fork_db_head_block_num(); + + for (auto const &c: my->connections) { + if (c->current()) { + my->sync_master->sync_stable_checkpoints(c, head); + } + } + + } + + + net_plugin_impl::net_plugin_impl(): + pbft_incoming_prepare_channel(app().get_channel()), + pbft_incoming_commit_channel(app().get_channel()), + pbft_incoming_view_change_channel(app().get_channel()), + pbft_incoming_new_view_channel(app().get_channel()), + pbft_incoming_checkpoint_channel(app().get_channel()) + {} + connection_ptr net_plugin_impl::find_connection(const string& host )const { for( const auto& c : connections ) if( c->peer_addr == host ) return c; diff --git a/plugins/pbft_api_plugin/CMakeLists.txt b/plugins/pbft_api_plugin/CMakeLists.txt new file mode 100644 index 00000000000..cda4ea2a0e8 --- /dev/null +++ b/plugins/pbft_api_plugin/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB HEADERS "include/eosio/pbft_api_plugin/*.hpp") +add_library( pbft_api_plugin + pbft_api_plugin.cpp + ${HEADERS} ) + +target_link_libraries( pbft_api_plugin pbft_plugin http_plugin appbase ) +target_include_directories( pbft_api_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/plugins/pbft_api_plugin/README.md b/plugins/pbft_api_plugin/README.md new file mode 100644 index 00000000000..54d3552089a --- /dev/null +++ b/plugins/pbft_api_plugin/README.md @@ -0,0 +1,87 @@ +# pbft_api_plugin + + +## Description +**pbft_api_plugin** exposes functionality from the pbft_plugin through the RPC API interface managed by the http_plugin; it provides a way to recover from a disaster as quickly as possible. + +**!!! SHOULD ONLY BE USED ON SECURE NETWORKS** + + +## Dependencies +**chain_plugin** +**pbft_plugin** +**http_plugin** + + +## Usage & API reference + + # config.ini + plugin = eosio::pbft_api_plugin + + # nodeos startup params + --plugin eosio::pbft_api_plugin + +* **get_pbft_record** + + -- To obtain the pbft database state for a specified block id + + ``` + curl --request POST --data string --url http://localhost:8888/v1/pbft/get_pbft_record + ``` + +* **get_pbft_checkpoints_record** + + -- To obtain the pbft checkpoints for a specified block number + + ``` + curl --request POST --data uint32_t --url http://localhost:8888/v1/pbft/get_pbft_checkpoints_record + ``` +* **get_view_change_record** + + -- To obtain the pbft view change messages for a specified target view + + ``` + curl --request POST --data uint32_t --url http://localhost:8888/v1/pbft/get_view_change_record + ``` +* **get_watermarks** + + -- To obtain the pbft high watermarks + + ``` + curl --request POST --url http://localhost:8888/v1/pbft/get_watermarks + ``` +* **get_fork_schedules** + + -- To obtain all possible producer schedules inside the fork database + + ``` + curl --request POST --url http://localhost:8888/v1/pbft/get_fork_schedules + ``` +* **get_pbft_status** + + -- To obtain the current status of the pbft state machine + + ``` + curl --request POST --url http://localhost:8888/v1/pbft/get_pbft_status + ``` +* **get_pbft_prepared_id** + + -- To obtain the currently prepared block id + + ``` + curl --request POST --url http://localhost:8888/v1/pbft/get_pbft_prepared_id + ``` +* **get_pbft_my_prepare_id** + + -- To obtain the block id this node most recently prepared + + ``` + curl --request POST --url http://localhost:8888/v1/pbft/get_pbft_my_prepare_id + ``` +* **set_pbft_current_view** + + -- To set the current pbft view on this node; the node will transition to the view change state afterwards + + ``` + curl --request POST --data uint32_t --url
http://localhost:8888/v1/pbft/set_pbft_current_view + ``` \ No newline at end of file diff --git a/plugins/pbft_api_plugin/include/eosio/pbft_api_plugin/pbft_api_plugin.hpp b/plugins/pbft_api_plugin/include/eosio/pbft_api_plugin/pbft_api_plugin.hpp new file mode 100644 index 00000000000..f90e1cd2bb3 --- /dev/null +++ b/plugins/pbft_api_plugin/include/eosio/pbft_api_plugin/pbft_api_plugin.hpp @@ -0,0 +1,35 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include +#include + +#include + +namespace eosio { + +using namespace appbase; + +class pbft_api_plugin : public plugin { + public: + APPBASE_PLUGIN_REQUIRES( (pbft_plugin)(http_plugin)) + + pbft_api_plugin() = default; + pbft_api_plugin(const pbft_api_plugin&) = delete; + pbft_api_plugin(pbft_api_plugin&&) = delete; + pbft_api_plugin& operator=(const pbft_api_plugin&) = delete; + pbft_api_plugin& operator=(pbft_api_plugin&&) = delete; + virtual ~pbft_api_plugin() override = default; + + virtual void set_program_options(options_description& cli, options_description& cfg) override {} + void plugin_initialize(const variables_map& vm); + void plugin_startup(); + void plugin_shutdown() {} + + private: +}; + +} diff --git a/plugins/pbft_api_plugin/pbft_api_plugin.cpp b/plugins/pbft_api_plugin/pbft_api_plugin.cpp new file mode 100644 index 00000000000..33fcd7b8654 --- /dev/null +++ b/plugins/pbft_api_plugin/pbft_api_plugin.cpp @@ -0,0 +1,90 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include +#include + +#include +#include + +#include + +namespace eosio { namespace detail { + struct pbft_api_plugin_response { + std::string result; + }; +}} + +FC_REFLECT(eosio::detail::pbft_api_plugin_response, (result)); + +namespace eosio { + +static appbase::abstract_plugin& _pbft_api_plugin = app().register_plugin(); + +using namespace eosio; + +#define CALL(api_name, api_handle, call_name, INVOKE, http_response_code) \ +{std::string("/v1/" #api_name "/" #call_name), \ + [&api_handle](string, string body, url_response_callback cb) mutable { \ + try { \ + if (body.empty()) body = "{}"; \ + INVOKE \ + cb(http_response_code, fc::json::to_string(result)); \ + } catch (...) 
{ \ + http_plugin::handle_exception(#api_name, #call_name, body, cb); \ + } \ + }} +#define INVOKE_R(api_handle, call_name) \ + auto result = api_handle.call_name(); + +#define INVOKE_R_P(api_handle, call_name, in_param) \ + auto result = api_handle.call_name(fc::json::from_string(body).as()); + +#define INVOKE_W_P(api_handle, call_name, in_param) \ + api_handle.call_name(fc::json::from_string(body).as()); \ + eosio::detail::pbft_api_plugin_response result{"ok"}; + +void pbft_api_plugin::plugin_startup() { + ilog("starting pbft_api_plugin"); + // lifetime of plugin is lifetime of application + auto& pbft = app().get_plugin(); + + app().get_plugin().add_api({ + CALL(pbft, pbft, get_watermarks, INVOKE_R(pbft, get_watermarks), 200), + CALL(pbft, pbft, get_fork_schedules, INVOKE_R(pbft, get_fork_schedules), 200), + CALL(pbft, pbft, get_pbft_record, INVOKE_R_P(pbft, get_pbft_record, block_id_type), 200), + CALL(pbft, pbft, get_pbft_checkpoints_record, INVOKE_R_P(pbft, get_pbft_checkpoints_record, block_num_type), 200), + CALL(pbft, pbft, get_view_change_record, INVOKE_R_P(pbft, get_view_change_record, pbft_view_type), 200), + CALL(pbft, pbft, get_pbft_status, INVOKE_R(pbft, get_pbft_status), 200), + CALL(pbft, pbft, get_pbft_prepared_id, INVOKE_R(pbft, get_pbft_prepared_id), 200), + CALL(pbft, pbft, get_pbft_my_prepare_id, INVOKE_R(pbft, get_pbft_my_prepare_id), 200), + CALL(pbft, pbft, set_pbft_current_view, INVOKE_W_P(pbft, set_pbft_current_view, pbft_view_type), 201), + }); +} + +void pbft_api_plugin::plugin_initialize(const variables_map& options) { + try { + const auto& _http_plugin = app().get_plugin(); + if( !_http_plugin.is_on_loopback()) { + wlog( "\n" + "**********SECURITY WARNING**********\n" + "* *\n" + "* -- PBFT API -- *\n" + "* - EXPOSED to the LOCAL NETWORK - *\n" + "* - USE ONLY ON SECURE NETWORKS! 
- *\n" + "* *\n" + "************************************\n" ); + + } + } FC_LOG_AND_RETHROW() +} + +#undef INVOKE_R +#undef INVOKE_R_P +#undef INVOKE_W_P + + +#undef CALL + +} diff --git a/plugins/pbft_plugin/CMakeLists.txt b/plugins/pbft_plugin/CMakeLists.txt new file mode 100644 index 00000000000..9ca17f811f9 --- /dev/null +++ b/plugins/pbft_plugin/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB HEADERS "include/eosio/pbft_plugin/*.hpp") +add_library( pbft_plugin + pbft_plugin.cpp + ${HEADERS} ) + +target_link_libraries( pbft_plugin appbase fc eosio_chain chain_plugin net_plugin) +target_include_directories( pbft_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include") diff --git a/plugins/pbft_plugin/include/eosio/pbft_plugin/pbft_plugin.hpp b/plugins/pbft_plugin/include/eosio/pbft_plugin/pbft_plugin.hpp new file mode 100644 index 00000000000..69f67ea6cf8 --- /dev/null +++ b/plugins/pbft_plugin/include/eosio/pbft_plugin/pbft_plugin.hpp @@ -0,0 +1,44 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once +#include +#include +#include +#include + +namespace eosio { + +using namespace appbase; + +class pbft_plugin : public appbase::plugin { +public: + pbft_plugin(); + virtual ~pbft_plugin(); + + APPBASE_PLUGIN_REQUIRES() + virtual void set_program_options(options_description&, options_description& cfg) override; + + void plugin_initialize(const variables_map& options); + void plugin_startup(); + void plugin_shutdown(); + + + pbft_state get_pbft_record( const block_id_type& bid )const; + vector get_pbft_checkpoints_record(const block_num_type &bnum)const; + pbft_view_change_state get_view_change_record(const pbft_view_type& view)const; + vector get_watermarks()const; + flat_map get_fork_schedules()const; + const char* get_pbft_status()const; + block_id_type get_pbft_prepared_id()const; + block_id_type get_pbft_my_prepare_id()const; + + void set_pbft_current_view(const pbft_view_type &view); + + +private: + std::unique_ptr my; +}; + +} diff --git a/plugins/pbft_plugin/pbft_plugin.cpp b/plugins/pbft_plugin/pbft_plugin.cpp new file mode 100644 index 00000000000..0dcc60a5d11 --- /dev/null +++ b/plugins/pbft_plugin/pbft_plugin.cpp @@ -0,0 +1,207 @@ +#include + +#include +#include +#include + +namespace eosio { + static appbase::abstract_plugin &_pbft_plugin = app().register_plugin(); + using namespace std; + using namespace eosio::chain; + + class pbft_plugin_impl { + public: + unique_ptr prepare_timer; + unique_ptr commit_timer; + unique_ptr view_change_timer; + unique_ptr checkpoint_timer; + + boost::asio::steady_timer::duration prepare_timeout{std::chrono::milliseconds{1000}}; + boost::asio::steady_timer::duration commit_timeout{std::chrono::milliseconds{1000}}; + boost::asio::steady_timer::duration view_change_check_interval{std::chrono::seconds{5}}; + boost::asio::steady_timer::duration checkpoint_timeout{std::chrono::seconds{50}}; + + void prepare_timer_tick(); + + void commit_timer_tick(); + + void view_change_timer_tick(); + + void checkpoint_timer_tick(); + + private: + bool upgraded = false; + bool is_replaying(); + bool is_syncing(); + bool pbft_ready(); + }; + + pbft_plugin::pbft_plugin() : my(new pbft_plugin_impl()) {} + + pbft_plugin::~pbft_plugin() = default; + + void pbft_plugin::set_program_options(options_description &, options_description &cfg) { + } + + void pbft_plugin::plugin_initialize(const variables_map &options) { + ilog("Initialize pbft plugin"); + my->prepare_timer = 
std::make_unique(app().get_io_service()); + my->commit_timer = std::make_unique(app().get_io_service()); + my->view_change_timer = std::make_unique(app().get_io_service()); + my->checkpoint_timer = std::make_unique(app().get_io_service()); + } + + void pbft_plugin::plugin_startup() { + my->prepare_timer_tick(); + my->commit_timer_tick(); + my->view_change_timer_tick(); + my->checkpoint_timer_tick(); + } + + void pbft_plugin::plugin_shutdown() {} + + pbft_state pbft_plugin::get_pbft_record( const block_id_type& bid ) const { + pbft_controller& pbft_ctrl = app().get_plugin().pbft_ctrl(); + auto record = pbft_ctrl.pbft_db.get_pbft_state_by_id(bid); + if (record) return *record; + return pbft_state(); + } + + vector pbft_plugin::get_pbft_checkpoints_record(const block_num_type &bnum) const { + pbft_controller& pbft_ctrl = app().get_plugin().pbft_ctrl(); + auto records = pbft_ctrl.pbft_db.get_checkpoints_by_num(bnum); + if (!records.empty()) return records; + return vector(); + } + pbft_view_change_state pbft_plugin::get_view_change_record(const pbft_view_type& view) const { + pbft_controller& pbft_ctrl = app().get_plugin().pbft_ctrl(); + auto record = pbft_ctrl.pbft_db.get_view_changes_by_target_view(view); + if (record) return *record; + return pbft_view_change_state(); + } + + vector pbft_plugin::get_watermarks() const { + pbft_controller& pbft_ctrl = app().get_plugin().pbft_ctrl(); + return pbft_ctrl.pbft_db.get_pbft_watermarks(); + } + + flat_map pbft_plugin::get_fork_schedules() const { + pbft_controller& pbft_ctrl = app().get_plugin().pbft_ctrl(); + return pbft_ctrl.pbft_db.get_pbft_fork_schedules(); + } + + const char* pbft_plugin::get_pbft_status() const { + pbft_controller& pbft_ctrl = app().get_plugin().pbft_ctrl(); + return pbft_ctrl.state_machine->get_current()->get_name(); + } + + block_id_type pbft_plugin::get_pbft_prepared_id() const { + auto& ctrl = app().get_plugin().chain(); + return ctrl.get_pbft_prepared(); + } + + block_id_type pbft_plugin::get_pbft_my_prepare_id() const { + auto& ctrl = app().get_plugin().chain(); + return ctrl.get_pbft_my_prepare(); + } + + void pbft_plugin::set_pbft_current_view(const pbft_view_type& view) { + //this is used to boost the recovery from a disaster, do not set this unless you have to do so. 
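(For illustration only: the timer tick functions defined just below all follow the same self-rearming boost::asio pattern, re-scheduling the next tick before doing any pbft work and skipping the work when the wait reports an error. The sketch assumes the timer and the work callback outlive the wait, as the plugin's members do; the function name and signature are illustrative, not the plugin's API.)

```
// Hedged sketch of the self-rearming steady_timer pattern used by the
// pbft_plugin tick functions: re-arm first, then do the work on success.
#include <boost/asio.hpp>
#include <chrono>
#include <functional>
#include <iostream>

void tick(boost::asio::steady_timer& timer,
          std::chrono::milliseconds interval,
          std::function<void()> work) {
    timer.expires_from_now(interval);
    timer.async_wait([&timer, interval, work](const boost::system::error_code& ec) {
        tick(timer, interval, work);   // schedule the next tick first
        if (ec) {
            std::cerr << "tick error: " << ec.message() << "\n";
        } else {
            work();                    // e.g. pbft_ctrl.maybe_pbft_prepare()
        }
    });
}
```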
+ pbft_controller& pbft_ctrl = app().get_plugin().pbft_ctrl(); + pbft_ctrl.state_machine->manually_set_current_view(view); + } + + void pbft_plugin_impl::prepare_timer_tick() { + chain::pbft_controller &pbft_ctrl = app().get_plugin().pbft_ctrl(); + prepare_timer->expires_from_now(prepare_timeout); + prepare_timer->async_wait([&](boost::system::error_code ec) { + prepare_timer_tick(); + if (ec) { + wlog ("pbft plugin prepare timer tick error: ${m}", ("m", ec.message())); + } else if (pbft_ready()) { + pbft_ctrl.maybe_pbft_prepare(); + } + }); + } + + void pbft_plugin_impl::commit_timer_tick() { + chain::pbft_controller &pbft_ctrl = app().get_plugin().pbft_ctrl(); + commit_timer->expires_from_now(commit_timeout); + commit_timer->async_wait([&](boost::system::error_code ec) { + commit_timer_tick(); + if (ec) { + wlog ("pbft plugin commit timer tick error: ${m}", ("m", ec.message())); + } else if (pbft_ready()) { + pbft_ctrl.maybe_pbft_commit(); + } + }); + } + + void pbft_plugin_impl::view_change_timer_tick() { + chain::pbft_controller &pbft_ctrl = app().get_plugin().pbft_ctrl(); + try { + view_change_timer->cancel(); + } catch (boost::system::system_error &e) { + elog("view change timer cancel error: ${e}", ("e", e.what())); + } + view_change_timer->expires_from_now(view_change_check_interval); + view_change_timer->async_wait([&](boost::system::error_code ec) { + view_change_timer_tick(); + if (ec) { + wlog ("pbft plugin view change timer tick error: ${m}", ("m", ec.message())); + } else if (pbft_ready()) { + pbft_ctrl.maybe_pbft_view_change(); + } + }); + } + + void pbft_plugin_impl::checkpoint_timer_tick() { + chain::pbft_controller &pbft_ctrl = app().get_plugin().pbft_ctrl(); + checkpoint_timer->expires_from_now(checkpoint_timeout); + checkpoint_timer->async_wait([&](boost::system::error_code ec) { + checkpoint_timer_tick(); + if (ec) { + wlog ("pbft plugin checkpoint timer tick error: ${m}", ("m", ec.message())); + } else if (pbft_ready()) { + pbft_ctrl.maybe_pbft_checkpoint(); + + chain::controller &ctrl = app().get_plugin().chain(); + if ( ctrl.head_block_num() - ctrl.last_stable_checkpoint_block_num() / pbft_checkpoint_granularity > 1) { + //perhaps we need to sync stable checkpoints from other peers + app().get_plugin().maybe_sync_stable_checkpoints(); + } + } + }); + } + + bool pbft_plugin_impl::is_replaying() { + return app().get_plugin().chain().is_replaying(); + } + + bool pbft_plugin_impl::is_syncing() { + return app().get_plugin().is_syncing(); + } + + bool pbft_plugin_impl::pbft_ready() { + // only trigger pbft related logic if I am in sync and replayed. + + auto& chain = app().get_plugin().chain(); + auto enabled = chain.is_pbft_enabled(); + + if (enabled && !upgraded) { + wlog( "\n" + "******** BATCH-PBFT ENABLED ********\n" + "* *\n" + "* -- The blockchain -- *\n" + "* - has successfully switched - *\n" + "* - into the new version - *\n" + "* - Please enjoy a - *\n" + "* - better performance! 
- *\n" + "* *\n" + "************************************\n" ); + upgraded = true; + } + + return enabled && !is_syncing() && !is_replaying(); + } +} diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 7bcfb7042b7..b7232b9a5ca 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -224,10 +224,13 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader; new_block_header.timestamp = new_block_header.timestamp.next(); new_block_header.previous = bsp->id; - auto new_bs = bsp->generate_next(new_block_header.timestamp); + + auto pbft_enabled = chain.is_pbft_enabled(); + + auto new_bs = bsp->generate_next(new_block_header.timestamp, pbft_enabled); // for newly installed producers we can set their watermarks to the block they became active - if (new_bs.maybe_promote_pending() && bsp->active_schedule.version != new_bs.active_schedule.version) { + if (new_bs.maybe_promote_pending(pbft_enabled) && bsp->active_schedule.version != new_bs.active_schedule.version) { flat_set new_producers; new_producers.reserve(new_bs.active_schedule.producers.size()); for( const auto& p: new_bs.active_schedule.producers) { @@ -298,7 +301,8 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); - /* de-dupe here... no point in aborting block if we already know the block */ + + /* de-dupe here... no point in aborting block if we already know the block */ auto existing = chain.fetch_block_by_id( id ); if( existing ) { return; } @@ -338,11 +342,22 @@ class producer_plugin_impl : public std::enable_shared_from_thistimestamp < fc::minutes(5) || (block->block_num() % 1000 == 0) ) { - ilog("Received block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, conf: ${confs}, latency: ${latency} ms]", - ("p",block->producer)("id",fc::variant(block->id()).as_string().substr(8,16)) - ("n",block_header::num_from_id(block->id()))("t",block->timestamp) - ("count",block->transactions.size())("lib",chain.last_irreversible_block_num())("confs", block->confirmed)("latency", (fc::time_point::now() - block->timestamp).count()/1000 ) ); + if (chain.is_pbft_enabled()) { + ilog("Received block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, lscb: ${lscb}, latency: ${latency} ms]", + ("p", block->producer)("id", fc::variant(block->id()).as_string().substr(8, 16)) + ("n", block_header::num_from_id(block->id()))("t", block->timestamp) + ("count", block->transactions.size())("lib", chain.last_irreversible_block_num()) + ("lscb", chain.last_stable_checkpoint_block_num()) + ("latency", (fc::time_point::now() - block->timestamp).count() / 1000)); + } else { + ilog("Received block ${id}... 
#${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, conf: ${confs}, latency: ${latency} ms]", + ("p",block->producer)("id",fc::variant(block->id()).as_string().substr(8,16)) + ("n",block_header::num_from_id(block->id()))("t",block->timestamp) + ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) + ("confs", block->confirmed)("latency", (fc::time_point::now() - block->timestamp).count()/1000 ) ); + } } } @@ -1028,7 +1043,7 @@ fc::time_point producer_plugin_impl::calculate_pending_block_time() const { fc::time_point block_time = base + fc::microseconds(min_time_to_next_block); - if((block_time - now) < fc::microseconds(config::block_interval_us/10) ) { // we must sleep for at least 50ms + if((block_time - now) < fc::microseconds(config::block_interval_us/5) ) { // we must sleep for at least 50ms block_time += fc::microseconds(config::block_interval_us); } return block_time; @@ -1083,7 +1098,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { _pending_block_mode = pending_block_mode::speculating; } - if (_pending_block_mode == pending_block_mode::producing) { + auto pbft_enabled = chain.is_pbft_enabled(); + + if (_pending_block_mode == pending_block_mode::producing && !pbft_enabled) { // determine if our watermark excludes us from producing at this point if (currrent_watermark_itr != _producer_watermarks.end()) { if (currrent_watermark_itr->second >= hbs->block_num + 1) { @@ -1105,7 +1122,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { try { uint16_t blocks_to_confirm = 0; - if (_pending_block_mode == pending_block_mode::producing) { + if (_pending_block_mode == pending_block_mode::producing && !pbft_enabled) { // determine how many blocks this producer can confirm // 1) if it is not a producer from this node, assume no confirmations (we will discard this block anyway) // 2) if it is a producer on this node that has never produced, the conservative approach is to assume no @@ -1213,7 +1230,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if( preprocess_deadline <= fc::time_point::now() ) exhausted = true; if( exhausted ) break; - const auto& trx = itr->second; + const transaction_metadata_ptr trx = itr->second; auto category = calculate_transaction_category(trx); if (category == tx_category::EXPIRED || (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) @@ -1584,11 +1601,18 @@ void producer_plugin_impl::produce_block() { block_state_ptr new_bs = chain.head_block_state(); _producer_watermarks[new_bs->header.producer] = chain.head_block_num(); - ilog("Produced block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, confirmed: ${confs}]", - ("p",new_bs->header.producer)("id",fc::variant(new_bs->id).as_string().substr(0,16)) - ("n",new_bs->block_num)("t",new_bs->header.timestamp) - ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num())("confs", new_bs->header.confirmed)); - + if (chain.is_pbft_enabled()) { + ilog("Produced block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, lscb: ${lscb}]", + ("p", new_bs->header.producer)("id", fc::variant(new_bs->id).as_string().substr(0, 16)) + ("n", new_bs->block_num)("t", new_bs->header.timestamp) + ("count", new_bs->block->transactions.size()) + ("lib", chain.last_irreversible_block_num())("lscb", chain.last_stable_checkpoint_block_num())); + } else { + ilog("Produced block ${id}... 
#${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, confirmed: ${confs}]", + ("p",new_bs->header.producer)("id",fc::variant(new_bs->id).as_string().substr(0,16)) + ("n",new_bs->block_num)("t",new_bs->header.timestamp) + ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num())("confs", new_bs->header.confirmed)); + } } } // namespace eosio diff --git a/plugins/txn_test_gen_plugin/CMakeLists.txt b/plugins/txn_test_gen_plugin/CMakeLists.txt index e765f3478e6..286066d6149 100644 --- a/plugins/txn_test_gen_plugin/CMakeLists.txt +++ b/plugins/txn_test_gen_plugin/CMakeLists.txt @@ -5,6 +5,6 @@ add_library( txn_test_gen_plugin add_dependencies(txn_test_gen_plugin eosio.token) -target_link_libraries( txn_test_gen_plugin appbase fc http_plugin chain_plugin ) +target_link_libraries( txn_test_gen_plugin appbase fc http_plugin chain_plugin net_plugin) target_include_directories( txn_test_gen_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) target_include_directories( txn_test_gen_plugin PUBLIC ${CMAKE_BINARY_DIR}/contracts ) diff --git a/plugins/txn_test_gen_plugin/README.md b/plugins/txn_test_gen_plugin/README.md index 8d74e6a0412..313fdf83e5d 100644 --- a/plugins/txn_test_gen_plugin/README.md +++ b/plugins/txn_test_gen_plugin/README.md @@ -68,8 +68,28 @@ $ ./cleos set contract eosio ~/eos/build.release/contracts/eosio.bios/ ### Initialize the accounts txn_test_gen_plugin uses ```bash -$ curl --data-binary '["eosio", "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"]' http://127.0.0.1:8888/v1/txn_test_gen/create_test_accounts +$ curl --data-binary '[, , ]' http://127.0.0.1:8888/v1/txn_test_gen/create_test_accounts + +example: +$ curl --data-binary '["eosio", "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", "EOS"]' http://127.0.0.1:8888/v1/txn_test_gen/create_test_accounts ``` +> **make sure there are more than 4000,0000 ** in creator + + +This api does: +1. creates following accounts: +aaaaaaaaaaaa +bbbbbbbbbbbb +cccccccccccc + +1. delegate 1000,0000 cpu, 100 net, and 100 ram to above accounts using +1. deploy a token contract to account cccccccccccc and issue a large number of tokens to cccccccccccc, then transfer some tokens to aaaaaaaaaaaa and bbbbbbbbbbbb +1. subsequent trx will be generated using deployed token contract + 1. aaaaaaaaaaaa transfer to bbbbbbbbbbbb + 1. 
bbbbbbbbbbbb transfer to aaaaaaaaaaaa + + + ### Start transaction generation, this will submit 20 transactions evey 20ms (total of 1000TPS) ```bash diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 707f75bd9de..06b6f437cdb 100644 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -4,6 +4,7 @@ */ #include #include +#include #include #include @@ -24,9 +25,11 @@ #include #include +#include +#include namespace eosio { namespace detail { - struct txn_test_gen_empty {}; +struct txn_test_gen_empty {}; }} FC_REFLECT(eosio::detail::txn_test_gen_empty, ); @@ -82,341 +85,414 @@ using namespace eosio::chain; }\ } -#define INVOKE_ASYNC_R_R(api_handle, call_name, in_param0, in_param1) \ +#define INVOKE_ASYNC_R_R(api_handle, call_name, in_param0, in_param1, in_param2) \ const auto& vs = fc::json::json::from_string(body).as(); \ - api_handle->call_name(vs.at(0).as(), vs.at(1).as(), result_handler); + api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as(), result_handler); struct txn_test_gen_plugin_impl { - uint64_t _total_us = 0; - uint64_t _txcount = 0; + uint64_t _total_us = 0; + uint64_t _txcount = 0; - int _remain = 0; + int _remain = 0; - void push_next_transaction(const std::shared_ptr>& trxs, size_t index, const std::function& next ) { + std::string cached_salt; + uint64_t cached_period; + uint64_t cached_batch_size; + + void push_next_transaction(const std::shared_ptr>& trxs, size_t index, const std::function& next ) { chain_plugin& cp = app().get_plugin(); const int overlap = 20; int end = std::min(index + overlap, trxs->size()); _remain = end - index; for (int i = index; i < end; ++i) { - cp.accept_transaction( packed_transaction(trxs->at(i)), [=](const fc::static_variant& result){ + cp.accept_transaction( packed_transaction(trxs->at(i)), [=](const fc::static_variant& result){ if (result.contains()) { - next(result.get()); + next(result.get()); } else { - if (result.contains() && result.get()->receipt) { - _total_us += result.get()->receipt->cpu_usage_us; - ++_txcount; - } - --_remain; - if (_remain == 0 ) { - if (end < trxs->size()) { - push_next_transaction(trxs, index + overlap, next); - } else { - next(nullptr); - } - } + if (result.contains() && result.get()->receipt) { + _total_us += result.get()->receipt->cpu_usage_us; + ++_txcount; + } + --_remain; + if (_remain == 0 ) { + if (end < trxs->size()) { + push_next_transaction(trxs, index + overlap, next); + } else { + next(nullptr); + } + } } - }); + }); } - } + } - void push_transactions( std::vector&& trxs, const std::function& next ) { + void push_transactions( std::vector&& trxs, const std::function& next ) { auto trxs_copy = std::make_shared>(std::move(trxs)); push_next_transaction(trxs_copy, 0, next); - } + } - void create_test_accounts(const std::string& init_name, const std::string& init_priv_key, const std::function& next) { + void create_test_accounts(const std::string& init_name, const std::string& init_priv_key, + const std::string& core_symbol, + const std::function& next) { std::vector trxs; trxs.reserve(2); try { - name newaccountA("txn.test.a"); - name newaccountB("txn.test.b"); - name newaccountC("txn.test.t"); - name creator(init_name); - - abi_def currency_abi_def = fc::json::from_string(eosio_token_abi).as(); - - controller& cc = app().get_plugin().chain(); - auto chainid = app().get_plugin().get_chain_id(); - auto abi_serializer_max_time = 
app().get_plugin().get_abi_serializer_max_time(); - - abi_serializer eosio_token_serializer{fc::json::from_string(eosio_token_abi).as(), abi_serializer_max_time}; - - fc::crypto::private_key txn_test_receiver_A_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'a'))); - fc::crypto::private_key txn_test_receiver_B_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'b'))); - fc::crypto::private_key txn_test_receiver_C_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'c'))); - fc::crypto::public_key txn_text_receiver_A_pub_key = txn_test_receiver_A_priv_key.get_public_key(); - fc::crypto::public_key txn_text_receiver_B_pub_key = txn_test_receiver_B_priv_key.get_public_key(); - fc::crypto::public_key txn_text_receiver_C_pub_key = txn_test_receiver_C_priv_key.get_public_key(); - fc::crypto::private_key creator_priv_key = fc::crypto::private_key(init_priv_key); - - //create some test accounts - { - signed_transaction trx; - - //create "A" account - { - auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_A_pub_key, 1}}, {}}; - auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_A_pub_key, 1}}, {}}; - - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountA, owner_auth, active_auth}); - } - //create "B" account - { - auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_B_pub_key, 1}}, {}}; - auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_B_pub_key, 1}}, {}}; - - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountB, owner_auth, active_auth}); - } - //create "txn.test.t" account - { - auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; - auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; - - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountC, owner_auth, active_auth}); - } - - trx.expiration = cc.head_block_time() + fc::seconds(30); - trx.set_reference_block(cc.head_block_id()); - trx.sign(creator_priv_key, chainid); - trxs.emplace_back(std::move(trx)); - } - - //set txn.test.t contract to eosio.token & initialize it - { - signed_transaction trx; - - vector wasm = wast_to_wasm(std::string(eosio_token_wast)); - - setcode handler; - handler.account = newaccountC; - handler.code.assign(wasm.begin(), wasm.end()); - - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); - - { - setabi handler; - handler.account = newaccountC; - handler.abi = fc::raw::pack(json::from_string(eosio_token_abi).as()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); - } - - { - action act; - act.account = N(txn.test.t); - act.name = N(create); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("create", fc::json::from_string("{\"issuer\":\"txn.test.t\",\"maximum_supply\":\"1000000000.0000 CUR\"}}"), abi_serializer_max_time); - trx.actions.push_back(act); - } - { - action act; - act.account = N(txn.test.t); - act.name = N(issue); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("issue", fc::json::from_string("{\"to\":\"txn.test.t\",\"quantity\":\"600.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); - trx.actions.push_back(act); - } - { - action act; - act.account = N(txn.test.t); - act.name = N(transfer); - act.authorization = 
vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.a\",\"quantity\":\"200.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); - trx.actions.push_back(act); - } - { - action act; - act.account = N(txn.test.t); - act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.b\",\"quantity\":\"200.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); - trx.actions.push_back(act); - } - - trx.expiration = cc.head_block_time() + fc::seconds(30); - trx.set_reference_block(cc.head_block_id()); - trx.max_net_usage_words = 5000; - trx.sign(txn_test_receiver_C_priv_key, chainid); - trxs.emplace_back(std::move(trx)); - } + name newaccountA("aaaaaaaaaaaa"); + name newaccountB("bbbbbbbbbbbb"); + name newaccountC("cccccccccccc"); + name creator(init_name); + + abi_def currency_abi_def = fc::json::from_string(eosio_token_abi).as(); + + controller& cc = app().get_plugin().chain(); + auto chainid = app().get_plugin().get_chain_id(); + auto abi_serializer_max_time = app().get_plugin().get_abi_serializer_max_time(); + + abi_serializer eosio_token_serializer{fc::json::from_string(eosio_token_abi).as(), abi_serializer_max_time}; + + fc::crypto::private_key txn_test_receiver_A_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'a'))); + fc::crypto::private_key txn_test_receiver_B_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'b'))); + fc::crypto::private_key txn_test_receiver_C_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'c'))); + fc::crypto::public_key txn_text_receiver_A_pub_key = txn_test_receiver_A_priv_key.get_public_key(); + fc::crypto::public_key txn_text_receiver_B_pub_key = txn_test_receiver_B_priv_key.get_public_key(); + fc::crypto::public_key txn_text_receiver_C_pub_key = txn_test_receiver_C_priv_key.get_public_key(); + fc::crypto::private_key creator_priv_key = fc::crypto::private_key(init_priv_key); + + eosio::chain::asset net{1000000, symbol(4,core_symbol.c_str())}; + eosio::chain::asset cpu{10000000000, symbol(4,core_symbol.c_str())}; + eosio::chain::asset ram{1000000, symbol(4,core_symbol.c_str())}; + + //create some test accounts + { + signed_transaction trx; + + //create "A" account + { + auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_A_pub_key, 1}}, {}}; + auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_A_pub_key, 1}}, {}}; + + trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountA, owner_auth, active_auth}); + + //delegate cpu net and buyram + auto act_delegatebw = create_action_delegatebw(creator, newaccountA,net,cpu,abi_serializer_max_time); + auto act_buyram = create_action_buyram(creator, newaccountA, ram, abi_serializer_max_time); + + trx.actions.emplace_back(act_delegatebw); + trx.actions.emplace_back(act_buyram); + + } + //create "B" account + { + auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_B_pub_key, 1}}, {}}; + auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_B_pub_key, 1}}, {}}; + + trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountB, owner_auth, active_auth}); + + //delegate cpu net and buyram + auto act_delegatebw = create_action_delegatebw(creator, 
newaccountB,net,cpu,abi_serializer_max_time); + auto act_buyram = create_action_buyram(creator, newaccountB, ram, abi_serializer_max_time); + + trx.actions.emplace_back(act_delegatebw); + trx.actions.emplace_back(act_buyram); + } + //create "cccccccccccc" account + { + auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; + auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; + + trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountC, owner_auth, active_auth}); + + //delegate cpu net and buyram + auto act_delegatebw = create_action_delegatebw(creator, newaccountC,net,cpu,abi_serializer_max_time); + auto act_buyram = create_action_buyram(creator, newaccountC, ram, abi_serializer_max_time); + + trx.actions.emplace_back(act_delegatebw); + trx.actions.emplace_back(act_buyram); + } + + trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.set_reference_block(cc.head_block_id()); + trx.sign(creator_priv_key, chainid); + trxs.emplace_back(std::move(trx)); + } + + //set cccccccccccc contract to eosio.token & initialize it + { + signed_transaction trx; + + vector wasm = wast_to_wasm(std::string(eosio_token_wast)); + + setcode handler; + handler.account = newaccountC; + handler.code.assign(wasm.begin(), wasm.end()); + + trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + + { + setabi handler; + handler.account = newaccountC; + handler.abi = fc::raw::pack(json::from_string(eosio_token_abi).as()); + trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + } + + { + action act; + act.account = N(cccccccccccc); + act.name = N(create); + act.authorization = vector{{newaccountC,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("create", fc::json::from_string("{\"issuer\":\"cccccccccccc\",\"maximum_supply\":\"1000000000.0000 CUR\"}}"), abi_serializer_max_time); + trx.actions.push_back(act); + } + { + action act; + act.account = N(cccccccccccc); + act.name = N(issue); + act.authorization = vector{{newaccountC,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("issue", fc::json::from_string("{\"to\":\"cccccccccccc\",\"quantity\":\"1000000000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + trx.actions.push_back(act); + } + { + action act; + act.account = N(cccccccccccc); + act.name = N(transfer); + act.authorization = vector{{newaccountC,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"cccccccccccc\",\"to\":\"aaaaaaaaaaaa\",\"quantity\":\"500000000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + trx.actions.push_back(act); + } + { + action act; + act.account = N(cccccccccccc); + act.name = N(transfer); + act.authorization = vector{{newaccountC,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"cccccccccccc\",\"to\":\"bbbbbbbbbbbb\",\"quantity\":\"500000000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + trx.actions.push_back(act); + } + + trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.set_reference_block(cc.head_block_id()); + trx.max_net_usage_words = 5000; + trx.sign(txn_test_receiver_C_priv_key, chainid); + trxs.emplace_back(std::move(trx)); + } } catch (const fc::exception& e) { - next(e.dynamic_copy_exception()); - return; + next(e.dynamic_copy_exception()); + return; } push_transactions(std::move(trxs), next); - } - - void 
start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + } + + eosio::chain::action create_action_delegatebw(const name &from, const name &to, const asset &net, const asset &cpu, const fc::microseconds &abi_serializer_max_time){ + fc::variant variant_delegate = fc::mutable_variant_object() + ("from", from.to_string()) + ("receiver", to.to_string()) + ("stake_net_quantity", net.to_string()) + ("stake_cpu_quantity", cpu.to_string()) + ("transfer", true); + abi_serializer eosio_system_serializer{fc::json::from_string(eosio_system_abi).as(), abi_serializer_max_time}; + + auto payload_delegate = eosio_system_serializer.variant_to_binary( "delegatebw", variant_delegate, abi_serializer_max_time); + eosio::chain::action act_delegate{vector{{from,"active"}}, + config::system_account_name, N(delegatebw), payload_delegate}; + + return act_delegate; + } + + eosio::chain::action create_action_buyram(const name &from, const name &to, const asset &quant, const fc::microseconds &abi_serializer_max_time){ + fc::variant variant_buyram = fc::mutable_variant_object() + ("payer", from.to_string()) + ("receiver", to.to_string()) + ("quant", quant.to_string()); + abi_serializer eosio_system_serializer{fc::json::from_string(eosio_system_abi).as(), abi_serializer_max_time}; + + auto payload_buyram = eosio_system_serializer.variant_to_binary( "buyram", variant_buyram, abi_serializer_max_time); + eosio::chain::action act_buyram{vector{{from,"active"}}, + config::system_account_name, N(buyram), payload_buyram}; + + return act_buyram; + } + + void start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { if(running) - throw fc::exception(fc::invalid_operation_exception_code); + throw fc::exception(fc::invalid_operation_exception_code); if(period < 1 || period > 2500) - throw fc::exception(fc::invalid_operation_exception_code); + throw fc::exception(fc::invalid_operation_exception_code); if(batch_size < 1 || batch_size > 250) - throw fc::exception(fc::invalid_operation_exception_code); + throw fc::exception(fc::invalid_operation_exception_code); if(batch_size & 1) - throw fc::exception(fc::invalid_operation_exception_code); + throw fc::exception(fc::invalid_operation_exception_code); running = true; + cached_salt = salt; + cached_period = period; + cached_batch_size = batch_size; controller& cc = app().get_plugin().chain(); auto abi_serializer_max_time = app().get_plugin().get_abi_serializer_max_time(); abi_serializer eosio_token_serializer{fc::json::from_string(eosio_token_abi).as(), abi_serializer_max_time}; //create the actions here - act_a_to_b.account = N(txn.test.t); + act_a_to_b.account = N(cccccccccccc); act_a_to_b.name = N(transfer); - act_a_to_b.authorization = vector{{name("txn.test.a"),config::active_name}}; - act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.a\",\"to\":\"txn.test.b\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), - abi_serializer_max_time); + act_a_to_b.authorization = vector{{name("aaaaaaaaaaaa"),config::active_name}}; + act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"aaaaaaaaaaaa\",\"to\":\"bbbbbbbbbbbb\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("l", salt))), + abi_serializer_max_time); - act_b_to_a.account = N(txn.test.t); + act_b_to_a.account = N(cccccccccccc); 
act_b_to_a.name = N(transfer); - act_b_to_a.authorization = vector{{name("txn.test.b"),config::active_name}}; - act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.b\",\"to\":\"txn.test.a\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), - abi_serializer_max_time); + act_b_to_a.authorization = vector{{name("bbbbbbbbbbbb"),config::active_name}}; + act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"bbbbbbbbbbbb\",\"to\":\"aaaaaaaaaaaa\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("l", salt))), + abi_serializer_max_time); timer_timeout = period; batch = batch_size/2; ilog("Started transaction test plugin; performing ${p} transactions every ${m}ms", ("p", batch_size)("m", period)); + ilog("wait 3 seconds to spin up"); + arm_timer(boost::asio::high_resolution_timer::clock_type::now() + std::chrono::milliseconds(3000) ); + } - arm_timer(boost::asio::high_resolution_timer::clock_type::now()); - } - - void arm_timer(boost::asio::high_resolution_timer::time_point s) { + void arm_timer(boost::asio::high_resolution_timer::time_point s) { timer.expires_at(s + std::chrono::milliseconds(timer_timeout)); timer.async_wait([this](const boost::system::error_code& ec) { - if(!running || ec) + if(!running || ec) return; - send_transaction([this](const fc::exception_ptr& e){ - if (e) { - elog("pushing transaction failed: ${e}", ("e", e->to_detail_string())); - stop_generation(); - } else { - arm_timer(timer.expires_at()); - } - }); + send_transaction([this](const fc::exception_ptr& e){ + if (e) { + elog("pushing transaction failed: ${e}", ("e", e->to_detail_string())); + stop_generation(); + auto peers_conn = app().get_plugin().connections(); + for(const auto c : peers_conn){ + app().get_plugin().disconnect(c.peer); + } + for(const auto c : peers_conn){ + app().get_plugin().connect(c.peer); + } + start_generation(cached_salt,cached_period,cached_batch_size); + } else { + arm_timer(timer.expires_at()); + } + }); }); - } + } - void send_transaction(std::function next) { + void send_transaction(std::function next) { std::vector trxs; trxs.reserve(2*batch); try { - controller& cc = app().get_plugin().chain(); - auto chainid = app().get_plugin().get_chain_id(); - - static fc::crypto::private_key a_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'a'))); - static fc::crypto::private_key b_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'b'))); - - static uint64_t nonce = static_cast(fc::time_point::now().sec_since_epoch()) << 32; - - uint32_t reference_block_num = cc.last_irreversible_block_num(); - if (txn_reference_block_lag >= 0) { - reference_block_num = cc.head_block_num(); - if (reference_block_num <= (uint32_t)txn_reference_block_lag) { - reference_block_num = 0; - } else { - reference_block_num -= (uint32_t)txn_reference_block_lag; - } - } - - block_id_type reference_block_id = cc.get_block_id_for_num(reference_block_num); - - for(unsigned int i = 0; i < batch; ++i) { - { - signed_transaction trx; - trx.actions.push_back(act_a_to_b); - trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack(nonce++))); - trx.set_reference_block(reference_block_id); - trx.expiration = cc.head_block_time() + fc::seconds(30); - trx.max_net_usage_words = 100; - trx.sign(a_priv_key, chainid); - 
trxs.emplace_back(std::move(trx)); - } - - { - signed_transaction trx; - trx.actions.push_back(act_b_to_a); - trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack(nonce++))); - trx.set_reference_block(reference_block_id); - trx.expiration = cc.head_block_time() + fc::seconds(30); - trx.max_net_usage_words = 100; - trx.sign(b_priv_key, chainid); - trxs.emplace_back(std::move(trx)); - } - } + controller& cc = app().get_plugin().chain(); + auto chainid = app().get_plugin().get_chain_id(); + + static fc::crypto::private_key a_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'a'))); + static fc::crypto::private_key b_priv_key = fc::crypto::private_key::regenerate(fc::sha256(std::string(64, 'b'))); + + static uint64_t nonce = static_cast(fc::time_point::now().sec_since_epoch()) << 32; + + uint32_t reference_block_num = cc.last_irreversible_block_num(); + if (txn_reference_block_lag >= 0) { + reference_block_num = cc.head_block_num(); + if (reference_block_num <= (uint32_t)txn_reference_block_lag) { + reference_block_num = 0; + } else { + reference_block_num -= (uint32_t)txn_reference_block_lag; + } + } + + block_id_type reference_block_id = cc.get_block_id_for_num(reference_block_num); + + for(unsigned int i = 0; i < batch; ++i) { + { + signed_transaction trx; + trx.actions.push_back(act_a_to_b); + trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack(nonce++))); + trx.set_reference_block(reference_block_id); + trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.max_net_usage_words = 100; + trx.sign(a_priv_key, chainid); + trxs.emplace_back(std::move(trx)); + } + + { + signed_transaction trx; + trx.actions.push_back(act_b_to_a); + trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack(nonce++))); + trx.set_reference_block(reference_block_id); + trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.max_net_usage_words = 100; + trx.sign(b_priv_key, chainid); + trxs.emplace_back(std::move(trx)); + } + } } catch ( const fc::exception& e ) { - next(e.dynamic_copy_exception()); + next(e.dynamic_copy_exception()); } push_transactions(std::move(trxs), next); - } + } - void stop_generation() { + void stop_generation() { if(!running) - throw fc::exception(fc::invalid_operation_exception_code); + throw fc::exception(fc::invalid_operation_exception_code); timer.cancel(); running = false; ilog("Stopping transaction generation test"); if (_txcount) { - ilog("${d} transactions executed, ${t}us / transaction", ("d", _txcount)("t", _total_us / (double)_txcount)); - _txcount = _total_us = 0; + ilog("${d} transactions executed, ${t}us / transaction", ("d", _txcount)("t", _total_us / (double)_txcount)); + _txcount = _total_us = 0; } - } + } - boost::asio::high_resolution_timer timer{app().get_io_service()}; - bool running{false}; + boost::asio::high_resolution_timer timer{app().get_io_service()}; + bool running{false}; - unsigned timer_timeout; - unsigned batch; + unsigned timer_timeout; + unsigned batch; - action act_a_to_b; - action act_b_to_a; + action act_a_to_b; + action act_b_to_a; - int32_t txn_reference_block_lag; + int32_t txn_reference_block_lag; }; txn_test_gen_plugin::txn_test_gen_plugin() {} txn_test_gen_plugin::~txn_test_gen_plugin() {} void txn_test_gen_plugin::set_program_options(options_description&, options_description& cfg) { - cfg.add_options() - ("txn-reference-block-lag", bpo::value()->default_value(0), "Lag in 
number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block)") - ; + cfg.add_options() + ("txn-reference-block-lag", bpo::value()->default_value(0), "Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block)") + ; } void txn_test_gen_plugin::plugin_initialize(const variables_map& options) { - try { - my.reset( new txn_test_gen_plugin_impl ); - my->txn_reference_block_lag = options.at( "txn-reference-block-lag" ).as(); - } FC_LOG_AND_RETHROW() + try { + my.reset( new txn_test_gen_plugin_impl ); + my->txn_reference_block_lag = options.at( "txn-reference-block-lag" ).as(); + } FC_LOG_AND_RETHROW() } void txn_test_gen_plugin::plugin_startup() { - app().get_plugin().add_api({ - CALL_ASYNC(txn_test_gen, my, create_test_accounts, INVOKE_ASYNC_R_R(my, create_test_accounts, std::string, std::string), 200), - CALL(txn_test_gen, my, stop_generation, INVOKE_V_V(my, stop_generation), 200), - CALL(txn_test_gen, my, start_generation, INVOKE_V_R_R_R(my, start_generation, std::string, uint64_t, uint64_t), 200) - }); + app().get_plugin().add_api({ + CALL_ASYNC(txn_test_gen, my, create_test_accounts, INVOKE_ASYNC_R_R(my, create_test_accounts, std::string, std::string, std::string), 200), + CALL(txn_test_gen, my, stop_generation, INVOKE_V_V(my, stop_generation), 200), + CALL(txn_test_gen, my, start_generation, INVOKE_V_R_R_R(my, start_generation, std::string, uint64_t, uint64_t), 200) + }); } void txn_test_gen_plugin::plugin_shutdown() { - try { - my->stop_generation(); - } - catch(fc::exception e) { - } + try { + my->stop_generation(); + } + catch(fc::exception e) { + } } } diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 7d03ce0001f..4714803f680 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -57,14 +57,16 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} chain_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} net_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} net_api_plugin -Wl,${no_whole_archive_flag} +# PRIVATE -Wl,${whole_archive_flag} pbft_plugin -Wl,${no_whole_archive_flag} # PRIVATE -Wl,${whole_archive_flag} faucet_testnet_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} txn_test_gen_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} db_size_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} producer_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} test_control_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} test_control_api_plugin -Wl,${no_whole_archive_flag} + PRIVATE -Wl,${whole_archive_flag} pbft_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${build_id_flag} - PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin + PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin pbft_plugin PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) if(BUILD_MONGO_DB_PLUGIN) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 52eb9a0e9ab..a21babac350 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -101,7 +102,7 @@ int main(int argc, char** argv) .default_unix_socket_path = "", .default_http_port = 8888 }); - if(!app().initialize(argc, argv)) + 
if(!app().initialize(argc, argv)) return INITIALIZE_FAIL; initialize_logging(); ilog("nodeos version ${ver}", ("ver", app().version_string())); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index c88caa1f995..fbe6cc94f42 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -43,6 +43,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version-label.sh ${CMAKE_CURRENT_BINARY_DIR}/version-label.sh COPYONLY) #To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. plugin_test -- --verbose add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output) @@ -78,6 +79,7 @@ add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v --cle set_property(TEST validate_dirty_db_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME launcher_test COMMAND tests/launcher_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME version-label-test COMMAND tests/version-label.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/chain_plugin_tests.cpp b/tests/chain_plugin_tests.cpp index 5a489c255b4..9d94e384708 100644 --- a/tests/chain_plugin_tests.cpp +++ b/tests/chain_plugin_tests.cpp @@ -90,7 +90,7 @@ BOOST_FIXTURE_TEST_CASE( get_block_with_invalid_abi, TESTER ) try { char headnumstr[20]; sprintf(headnumstr, "%d", headnum); chain_apis::read_only::get_block_params param{headnumstr}; - chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX), *(this->pbft_ctrl)); // block should be decoded successfully std::string block_str = json::to_pretty_string(plugin.get_block(param)); diff --git a/tests/get_table_tests.cpp b/tests/get_table_tests.cpp index bb332b9a000..8084ddd2f93 100644 --- a/tests/get_table_tests.cpp +++ b/tests/get_table_tests.cpp @@ -72,7 +72,7 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { produce_blocks(1); // iterate over scope - eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX), *(this->pbft_ctrl)); eosio::chain_apis::read_only::get_table_by_scope_params param{N(eosio.token), N(accounts), "inita", "", 10}; eosio::chain_apis::read_only::get_table_by_scope_result result = plugin.read_only::get_table_by_scope(param); @@ -193,7 +193,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { produce_blocks(1); // get table: normal case - eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX), *(this->pbft_ctrl)); eosio::chain_apis::read_only::get_table_rows_params p; p.code = N(eosio.token); p.scope = "inita"; 
@@ -361,7 +361,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { produce_blocks(1); // get table: normal case - eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX), *(this->pbft_ctrl)); eosio::chain_apis::read_only::get_table_rows_params p; p.code = N(eosio); p.scope = "eosio"; diff --git a/tests/testUtils.py b/tests/testUtils.py index a8dbe0fd4d2..85a7f89f301 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -1,3 +1,4 @@ +import re import errno import subprocess import time @@ -217,12 +218,17 @@ def arePortsAvailable(ports): @staticmethod def pgrepCmd(serverName): - pgrepOpts="-fl" # pylint: disable=deprecated-method - if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora","CentOS Linux","arch"]: + # pgrep differs on different platform (amazonlinux1 and 2 for example). We need to check if pgrep -h has -a available and add that if so: + try: + pgrepHelp = re.search('-a', subprocess.Popen("pgrep --help 2>/dev/null", shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')) + pgrepHelp.group(0) # group() errors if -a is not found, so we don't need to do anything else special here. pgrepOpts="-a" + except AttributeError as error: + # If no -a, AttributeError: 'NoneType' object has no attribute 'group' + pgrepOpts="-fl" - return "pgrep %s %s" % (pgrepOpts, serverName) + return "pgrep %s %s" % (pgrepOpts, serverName)\ @staticmethod def getBlockLog(blockLogLocation, silentErrors=False, exitOnError=False): diff --git a/tests/version-label.sh b/tests/version-label.sh new file mode 100755 index 00000000000..008c469d2b8 --- /dev/null +++ b/tests/version-label.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# The purpose of this test is to ensure that the output of the "nodeos --version" command matches the version string defined by our CMake files +# If the environment variable BUILDKITE_TAG is empty or unset, this test will echo success +echo '##### Nodeos Version Label Test #####' +if [[ "$BUILDKITE_TAG" == '' || "$BUILDKITE" != 'true' ]]; then + echo 'This test is only run in Buildkite against tagged builds.' + [[ "$BUILDKITE" != 'true' ]] && echo 'This is not Buildkite.' + [[ "$BUILDKITE_TAG" == '' ]] && echo 'This is not a tagged build.' + echo 'Exiting...' + exit 0 +fi +echo 'Tagged build detected, running test.' +# orient ourselves +[[ "$EOSIO_ROOT" == '' ]] && EOSIO_ROOT=$(echo $(pwd)/ | grep -ioe '.*/eos/') +[[ "$EOSIO_ROOT" == '' ]] && EOSIO_ROOT=$(echo $(pwd)/ | grep -ioe '.*/EOSIO/eosio/') +[[ "$EOSIO_ROOT" == '' ]] && EOSIO_ROOT=$(echo $(pwd)/ | grep -ioe '.*/build/' | sed 's,/build/,,') +echo "Using EOSIO_ROOT=\"$EOSIO_ROOT\"." +# determine expected value +CMAKE_CACHE="$EOSIO_ROOT/build/CMakeCache.txt" +CMAKE_LISTS="$EOSIO_ROOT/CMakeLists.txt" +if [[ -f "$CMAKE_CACHE" && $(cat "$CMAKE_CACHE" | grep -c 'DOXY_EOS_VERSION') > 0 ]]; then + echo "Parsing \"$CMAKE_CACHE\"..." + EXPECTED="v$(cat "$CMAKE_CACHE" | grep 'DOXY_EOS_VERSION' | cut -d '=' -f 2)" +elif [[ -f "$CMAKE_LISTS" ]]; then + echo "Parsing \"$CMAKE_LISTS\"..." 
+ export $(cat $CMAKE_LISTS | grep -ie 'set *( *VERSION_MAJOR' | cut -d '(' -f 2 | cut -d ')' -f 1 | awk '{print $1"="$2}') + export $(cat $CMAKE_LISTS | grep -ie 'set *( *VERSION_MINOR' | cut -d '(' -f 2 | cut -d ')' -f 1 | awk '{print $1"="$2}') + export $(cat $CMAKE_LISTS | grep -ie 'set *( *VERSION_PATCH' | cut -d '(' -f 2 | cut -d ')' -f 1 | awk '{print $1"="$2}') + if [[ $(cat $CMAKE_LISTS | grep -ice 'set *( *VERSION_SUFFIX') > 0 ]]; then + echo 'Using version suffix...' + export $(cat $CMAKE_LISTS | grep -ie 'set *( *VERSION_SUFFIX' | cut -d '(' -f 2 | cut -d ')' -f 1 | awk '{print $1"="$2}') + export $(echo "$(cat $CMAKE_LISTS | grep -ie 'set *( *VERSION_FULL.*VERSION_SUFFIX' | cut -d '(' -f 2 | cut -d ')' -f 1 | awk '{print $1"="$2}')" | sed "s/VERSION_MAJOR/$VERSION_MAJOR/" | sed "s/VERSION_MINOR/$VERSION_MINOR/" | sed "s/VERSION_PATCH/$VERSION_PATCH/" | sed "s/VERSION_SUFFIX/$VERSION_SUFFIX/" | tr -d '"{}$') + else + echo 'No version suffix found.' + export $(echo "$(cat $CMAKE_LISTS | grep -ie 'set *( *VERSION_FULL' | grep -ive 'VERSION_SUFFIX' | cut -d '(' -f 2 | cut -d ')' -f 1 | awk '{print $1"="$2}')" | sed "s/VERSION_MAJOR/$VERSION_MAJOR/" | sed "s/VERSION_MINOR/$VERSION_MINOR/" | sed "s/VERSION_PATCH/$VERSION_PATCH/" | tr -d '"{}$') + fi + EXPECTED="v$VERSION_FULL" +fi +# fail if no expected value was found +if [[ "$EXPECTED" == '' ]]; then + echo 'ERROR: Could not determine expected value for version label!' + set +e + echo "EOSIO_ROOT=\"$EOSIO_ROOT\"" + echo "CMAKE_CACHE=\"$CMAKE_CACHE\"" + echo "CMAKE_LISTS=\"$CMAKE_LISTS\"" + echo '' + echo "VERSION_MAJOR=\"$VERSION_MAJOR\"" + echo "VERSION_MINOR=\"$VERSION_MINOR\"" + echo "VERSION_PATCH=\"$VERSION_PATCH\"" + echo "VERSION_SUFFIX=\"$VERSION_SUFFIX\"" + echo "VERSION_FULL=\"$VERSION_FULL\"" + echo '' + echo '$ cat "$CMAKE_CACHE" | grep "DOXY_EOS_VERSION"' + cat "$CMAKE_CACHE" | grep "DOXY_EOS_VERSION" + echo '$ pwd' + pwd + echo '$ ls -la "$EOSIO_ROOT"' + ls -la "$EOSIO_ROOT" + echo '$ ls -la "$EOSIO_ROOT/build"' + ls -la "$EOSIO_ROOT/build" + exit 1 +fi +echo "Expecting \"$EXPECTED\"..." +# get nodeos version +ACTUAL=$($EOSIO_ROOT/build/bin/nodeos --version) || : # nodeos currently returns -1 for --version +# test +if [[ "$EXPECTED" == "$ACTUAL" ]]; then + echo 'Passed with \"$ACTUAL\".' + exit 0 +fi +echo 'Failed!' +echo "\"$EXPECTED\" != \"$ACTUAL\"" +exit 1 diff --git a/unittests/actiondemo/actiondemo.abi b/unittests/actiondemo/actiondemo.abi index eb3c15c7c13..b02aa55371d 100644 --- a/unittests/actiondemo/actiondemo.abi +++ b/unittests/actiondemo/actiondemo.abi @@ -1,119 +1,159 @@ { - "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2019-01-07T10:42:22", - "version": "eosio::abi/1.0", - "types": [], - "structs": [{ - "name": "seedobj", - "base": "", - "fields": [{ - "name": "id", - "type": "uint64" - },{ - "name": "create", - "type": "time_point" - },{ - "name": "seedstr", - "type": "string" - },{ - "name": "txid", - "type": "string" - },{ - "name": "action", - "type": "uint64" + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT Sat Apr 27 08:00:01 2019", + "version": "eosio::abi/1.1", + "structs": [ + { + "name": "apply", + "base": "", + "fields": [ + { + "name": "contract", + "type": "name" + }, + { + "name": "act", + "type": "name" + } + ] + }, + { + "name": "args", + "base": "", + "fields": [ + { + "name": "loop", + "type": "uint64" + }, + { + "name": "num", + "type": "uint64" + } + ] + }, + { + "name": "args_name", + "base": "", + "fields": [ + { + "name": "name", + "type": "name" + } + ] + }, + { + "name": "argsinline", + "base": "", + "fields": [ + { + "name": "payer", + "type": "name" + }, + { + "name": "in", + "type": "name" + } + ] + }, + { + "name": "clear", + "base": "", + "fields": [] + }, + { + "name": "generate", + "base": "", + "fields": [ + { + "name": "t", + "type": "args" + } + ] + }, + { + "name": "hascontract", + "base": "", + "fields": [ + { + "name": "t", + "type": "args_name" + } + ] + }, + { + "name": "inlineact", + "base": "", + "fields": [ + { + "name": "t", + "type": "argsinline" + } + ] + }, + { + "name": "seedobj", + "base": "", + "fields": [ + { + "name": "id", + "type": "uint64" + }, + { + "name": "create", + "type": "time_point" + }, + { + "name": "seedstr", + "type": "string" + }, + { + "name": "txid", + "type": "string" + }, + { + "name": "action", + "type": "uint64" + } + ] } - ] - },{ - "name": "args", - "base": "", - "fields": [{ - "name": "loop", - "type": "uint64" - },{ - "name": "num", - "type": "uint64" + ], + "types": [], + "actions": [ + { + "name": "apply", + "type": "apply", + "ricardian_contract": "" + }, + { + "name": "clear", + "type": "clear", + "ricardian_contract": "" + }, + { + "name": "generate", + "type": "generate", + "ricardian_contract": "" + }, + { + "name": "hascontract", + "type": "hascontract", + "ricardian_contract": "" + }, + { + "name": "inlineact", + "type": "inlineact", + "ricardian_contract": "" } - ] - },{ - "name": "generate", - "base": "", - "fields": [{ - "name": "t", - "type": "args" + ], + "tables": [ + { + "name": "seedobjs", + "type": "seedobj", + "index_type": "i64", + "key_names": [], + "key_types": [] } - ] - },{ - "name": "clear", - "base": "", - "fields": [] - },{ - "name": "args_name", - "base": "", - "fields": [{ - "name": "name", - "type": "name" - } - ] - },{ - "name": "hascontract", - "base": "", - "fields": [{ - "name": "t", - "type": "args_name" - } - ] - },{ - "name": "args_inline", - "base": "", - "fields": [{ - "name": "payer", - "type": "name" - },{ - "name": "in", - "type": "name" - } - ] - },{ - "name": "inlineact", - "base": "", - "fields": [{ - "name": "t", - "type": "args_inline" - } - ] - } - ], - "actions": [{ - "name": "generate", - "type": "generate", - "ricardian_contract": "" - },{ - "name": "clear", - "type": "clear", - "ricardian_contract": "" - },{ - "name": "hascontract", - "type": "hascontract", - "ricardian_contract": "" - },{ - "name": "inlineact", - "type": "inlineact", - "ricardian_contract": "" - } - ], - "tables": [{ - "name": "seedobjs", - "index_type": "i64", - "key_names": [ - "id" - ], - "key_types": [ - "uint64" - ], - "type": "seedobj" - } - ], - "ricardian_clauses": [], - "error_messages": [], - "abi_extensions": [], - "variants": [] + ], + "ricardian_clauses": [], + "variants": [], + "abi_extensions": [] } \ No newline at end of file diff --git a/unittests/actiondemo/actiondemo.cpp b/unittests/actiondemo/actiondemo.cpp index 88fb113800e..0d668686bd0 100644 --- a/unittests/actiondemo/actiondemo.cpp +++ b/unittests/actiondemo/actiondemo.cpp @@ -1,25 +1,24 @@ #include "actiondemo.hpp" 
-#include "../../contracts/eosiolib/print.hpp" -#include "../../contracts/eosiolib/types.hpp" -#include "../../contracts/eosiolib/transaction.h" +#include +#include +#include namespace spaceaction { - void actiondemo::apply( account_name code, account_name act ) { - + void actiondemo::apply( name code, name act ) { if( code != _self ) return; - + switch( act ) { - case N(generate): + case "generate"_n: generate(unpack_action_data()); return; - case N(inlineact): - inlineact(unpack_action_data()); - case N(clear): + case "inlineact"_n: + inlineact(unpack_action_data()); + case "clear"_n: clear(); return; - case N(hascontract): + case "hascontract"_n: hascontract(unpack_action_data()); return; } @@ -27,7 +26,7 @@ namespace spaceaction { void actiondemo::clear(){ //require_auth(_self); - seedobjs table(_self, _self); + seedobjs table{_self, _self.value}; auto iter = table.begin(); while (iter != table.end()) { @@ -54,17 +53,18 @@ namespace spaceaction { checksum256 code; get_contract_code(t.name, &code); - std::string s = to_hex((char*)&code.hash, 32); + std::string s = to_hex((char*)&code, 32); print_f("% contract_code:%", name{t.name}.to_string(),s); // } } void actiondemo::generate(const args& t){ - for (int i = 0; i < t.loop; ++i) { - transaction_id_type txid; + // for (int i = 0; i < 1; ++i) + // { + checksum256 txid; get_transaction_id(&txid); - std::string tx = to_hex((char*)&txid.hash, 32); + std::string tx = to_hex((char*)&txid, 32); uint64_t seq = 0; get_action_sequence(&seq); @@ -78,7 +78,7 @@ namespace spaceaction { std::string seedstr = to_hex(buf,size); - seedobjs table(_self, _self); + seedobjs table(_self, _self.value); uint64_t count = 0; for (auto itr = table.begin(); itr != table.end(); ++itr) { ++count; @@ -92,10 +92,10 @@ namespace spaceaction { a.action = seq; }); print_f("self:%, loop:%, count:%, seedstr:%", name{_self}.to_string(), t.loop, count, r->seedstr); - } + // } } - void actiondemo::inlineact(const args_inline& t){ + void actiondemo::inlineact(const argsinline& t){ auto& payer = t.payer; args gen; gen.loop = 1; @@ -103,21 +103,38 @@ namespace spaceaction { generate(gen); - if(t.in != 0) + if(t.in != ""_n) { - INLINE_ACTION_SENDER(spaceaction::actiondemo, generate)( t.in, {payer,N(active)}, + INLINE_ACTION_SENDER(spaceaction::actiondemo, generate)( t.in, {payer,"active"_n}, { gen}); - INLINE_ACTION_SENDER(spaceaction::actiondemo, generate)( t.in, {payer,N(active)}, + INLINE_ACTION_SENDER(spaceaction::actiondemo, generate)( t.in, {payer,"active"_n}, { gen}); } } } -extern "C" { -[[noreturn]] void apply(uint64_t receiver, uint64_t code, uint64_t action) { - spaceaction::actiondemo obj(receiver); - obj.apply(code, action); - eosio_exit(0); -} -} \ No newline at end of file +// extern "C" { +// [[noreturn]] void apply(uint64_t receiver, uint64_t code, uint64_t action) { +// spaceaction::actiondemo obj(receiver); +// obj.apply(code, action); +// eosio_exit(0); +// } +// } + +#define EOSIO_DISPATCH_CUSTOM(TYPE, MEMBERS) \ + extern "C" \ + { \ + void apply(uint64_t receiver, uint64_t code, uint64_t action) \ + { \ + \ + switch (action) \ + { \ + EOSIO_DISPATCH_HELPER(TYPE, MEMBERS) \ + } \ + /* does not allow destructor of thiscontract to run: eosio_exit(0); */ \ + \ + } \ + } + +EOSIO_DISPATCH_CUSTOM(spaceaction::actiondemo, (apply)(generate)(clear)(hascontract)(inlineact)) diff --git a/unittests/actiondemo/actiondemo.hpp b/unittests/actiondemo/actiondemo.hpp index 2008eb17e6c..7b5e61572df 100644 --- a/unittests/actiondemo/actiondemo.hpp +++ 
b/unittests/actiondemo/actiondemo.hpp @@ -5,40 +5,50 @@ namespace spaceaction { using namespace eosio; - class actiondemo : public contract { + + class [[eosio::contract]] actiondemo : public contract + { typedef std::chrono::milliseconds duration; public: - actiondemo( account_name self ):contract(self){} + using contract::contract; + + // actiondemo( name self ):contract(self){} - void apply( account_name contract, account_name act ); + ACTION apply( name contract, name act ); struct args{ uint64_t loop; uint64_t num; + + }; //@abi action - void generate(const args& t); + ACTION generate(const args& t); //@abi action - void clear(); + ACTION clear(); - struct args_name{ - account_name name; + struct args_name + { + name name; }; //@abi action - void hascontract(const args_name& t); + ACTION hascontract(const args_name& t); + struct argsinline + { + name payer; + name in; - struct args_inline{ - account_name payer; - account_name in; + }; //@abi action - void inlineact(const args_inline& t); + ACTION inlineact(const argsinline& t); public: // @abi table seedobjs i64 - struct seedobj { + TABLE seedobj + { uint64_t id; time_point create; std::string seedstr; @@ -48,7 +58,7 @@ namespace spaceaction { uint64_t primary_key()const { return id; } EOSLIB_SERIALIZE(seedobj,(id)(create)(seedstr)(txid)(action)) }; - typedef eosio::multi_index< N(seedobjs), seedobj> seedobjs; + typedef eosio::multi_index< "seedobjs"_n, seedobj> seedobjs; }; diff --git a/unittests/main.cpp b/unittests/main.cpp index 0644ce80545..63b40b68b51 100644 --- a/unittests/main.cpp +++ b/unittests/main.cpp @@ -26,7 +26,11 @@ boost::unit_test::test_suite* init_unit_test_suite(int argc, char* argv[]) { break; } } - if(!is_verbose) fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::off); + if(is_verbose) { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + } else { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::off); + } // Register fc::exception translator boost::unit_test::unit_test_monitor.register_exception_translator(&translate_fc_exception); diff --git a/unittests/pbft_tests.cpp b/unittests/pbft_tests.cpp new file mode 100644 index 00000000000..266a043bf5a --- /dev/null +++ b/unittests/pbft_tests.cpp @@ -0,0 +1,352 @@ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include + +using namespace eosio::chain; +using namespace eosio::testing; + + +BOOST_AUTO_TEST_SUITE(pbft_tests) +std::map make_signature_provider(){ + std::map msp; + + auto priv_eosio = tester::get_private_key( N(eosio), "active" ); + auto pub_eosio = tester::get_public_key( N(eosio), "active"); + auto sp_eosio = [priv_eosio]( const eosio::chain::digest_type& digest ) { + return priv_eosio.sign(digest); + }; + msp[pub_eosio]=sp_eosio; + + auto priv_alice = tester::get_private_key( N(alice), "active" ); + auto pub_alice = tester::get_public_key( N(alice), "active"); + auto sp_alice = [priv_alice]( const eosio::chain::digest_type& digest ) { + return priv_alice.sign(digest); + }; + msp[pub_alice]=sp_alice; + + auto priv_bob = tester::get_private_key( N(bob), "active" ); + auto pub_bob = tester::get_public_key( N(bob), "active"); + auto sp_bob = [priv_bob]( const eosio::chain::digest_type& digest ) { + return priv_bob.sign(digest); + }; + msp[pub_bob]=sp_bob; + + auto priv_carol = tester::get_private_key( N(carol), "active" ); + auto pub_carol = tester::get_public_key( N(carol), "active"); + auto sp_carol = [priv_carol]( const eosio::chain::digest_type& digest ) { + 
return priv_carol.sign(digest); + }; + msp[pub_carol]=sp_carol; + + auto priv_deny = tester::get_private_key( N(deny), "active" ); + auto pub_deny = tester::get_public_key( N(deny), "active"); + auto sp_deny = [priv_deny]( const eosio::chain::digest_type& digest ) { + return priv_deny.sign(digest); + }; + msp[pub_deny]=sp_deny; + + return msp; +} + +BOOST_AUTO_TEST_CASE(can_init) { + tester tester; + controller &ctrl = *tester.control.get(); + pbft_controller pbft_ctrl{ctrl}; + + tester.produce_block(); + auto p = pbft_ctrl.pbft_db.should_prepared(); + BOOST_CHECK(!p); +} + +BOOST_AUTO_TEST_CASE(can_advance_lib_in_old_version) { + tester tester; + controller &ctrl = *tester.control.get(); + pbft_controller pbft_ctrl{ctrl}; + + auto msp = make_signature_provider(); + ctrl.set_my_signature_providers(msp); + + tester.produce_block();//produce block num 2 + BOOST_REQUIRE_EQUAL(ctrl.last_irreversible_block_num(), 0); + BOOST_REQUIRE_EQUAL(ctrl.head_block_num(), 2); + tester.produce_block(); + BOOST_REQUIRE_EQUAL(ctrl.last_irreversible_block_num(), 2); + BOOST_REQUIRE_EQUAL(ctrl.head_block_num(), 3); + } + +BOOST_AUTO_TEST_CASE(can_advance_lib_after_upgrade) { + tester tester; + controller &ctrl = *tester.control.get(); + pbft_controller pbft_ctrl{ctrl}; + ctrl.set_upo(150); + + const auto& upo = ctrl.db().get(); + const auto upo_upgrade_target_block_num = upo.upgrade_target_block_num; + BOOST_CHECK_EQUAL(upo_upgrade_target_block_num, 150); + + auto msp = make_signature_provider(); + ctrl.set_my_signature_providers(msp); + + auto is_upgraded = ctrl.is_pbft_enabled(); + + BOOST_CHECK_EQUAL(is_upgraded, false); + + tester.produce_block();//produce block num 2 + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 0); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 2); + tester.produce_blocks(150); + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 151); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 152); + + is_upgraded = ctrl.is_pbft_enabled(); + BOOST_CHECK_EQUAL(is_upgraded, true); + + tester.produce_blocks(10); + BOOST_CHECK_EQUAL(ctrl.pending_pbft_lib(), false); + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 151); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 162); + + pbft_ctrl.maybe_pbft_prepare(); + pbft_ctrl.maybe_pbft_commit(); + + BOOST_CHECK_EQUAL(ctrl.pending_pbft_lib(), true); + tester.produce_block(); //set lib using pending pbft lib + + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 162); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 163); +} + + + +BOOST_AUTO_TEST_CASE(can_advance_lib_after_upgrade_with_four_producers) { + tester tester; + controller &ctrl = *tester.control.get(); + pbft_controller pbft_ctrl{ctrl}; + + ctrl.set_upo(109); + + const auto& upo = ctrl.db().get(); + const auto upo_upgrade_target_block_num = upo.upgrade_target_block_num; + BOOST_CHECK_EQUAL(upo_upgrade_target_block_num, 109); + + auto msp = make_signature_provider(); + ctrl.set_my_signature_providers(msp); + + auto is_upgraded = ctrl.is_pbft_enabled(); + + BOOST_CHECK_EQUAL(is_upgraded, false); + + tester.produce_block();//produce block num 2 + tester.create_accounts( {N(alice),N(bob),N(carol),N(deny)} ); + tester.set_producers({N(alice),N(bob),N(carol),N(deny)}); + tester.produce_blocks(3);//produce block num 3,4,5 + BOOST_CHECK_EQUAL(ctrl.active_producers().producers.front().producer_name, N(alice)); + BOOST_CHECK_EQUAL(ctrl.head_block_producer(),N(eosio)); + tester.produce_blocks(7);//produce to block 12 + BOOST_CHECK_EQUAL(ctrl.head_block_producer(),N(alice)); + + 
BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 4); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 12); + tester.produce_blocks(156 - 12); + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 108); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 156); + + is_upgraded = ctrl.is_pbft_enabled(); + BOOST_CHECK_EQUAL(is_upgraded, false); + tester.produce_blocks(12); + is_upgraded = ctrl.is_pbft_enabled(); + BOOST_CHECK_EQUAL(is_upgraded, true); + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 120); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 168); + BOOST_CHECK_EQUAL(ctrl.pending_pbft_lib(), false); + + pbft_ctrl.maybe_pbft_prepare(); + pbft_ctrl.maybe_pbft_commit(); + + BOOST_CHECK_EQUAL(ctrl.pending_pbft_lib(), true); + tester.produce_block(); //set lib using pending pbft lib + + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 168); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 169); +} + +void push_blocks( tester& from, tester& to ) { + while( to.control->fork_db_head_block_num() < from.control->fork_db_head_block_num() ) { + auto fb = from.control->fetch_block_by_number( to.control->fork_db_head_block_num()+1 ); + to.push_block( fb ); + } +} + +BOOST_AUTO_TEST_CASE(view_change_validation) { + tester tester; + controller &ctrl = *tester.control; + pbft_controller pbft_ctrl{ctrl}; + + auto msp = make_signature_provider(); + ctrl.set_my_signature_providers(msp); + + ctrl.set_upo(48); + + tester.create_accounts( {N(alice),N(bob),N(carol),N(deny)} ); + tester.set_producers({N(alice),N(bob),N(carol),N(deny)}); + tester.produce_blocks(100); + + pbft_ctrl.maybe_pbft_prepare(); + pbft_ctrl.maybe_pbft_commit(); + tester.produce_blocks(1); + + BOOST_CHECK_EQUAL(ctrl.is_pbft_enabled(), true); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 102); + + + for(int i = 0; i< pbft_ctrl.view_change_timeout; i++){ + pbft_ctrl.maybe_pbft_view_change(); + } + pbft_ctrl.state_machine->do_send_view_change(); + auto new_view = pbft_ctrl.pbft_db.get_proposed_new_view_num(); + auto vcc = pbft_ctrl.pbft_db.generate_view_changed_certificate(new_view); + auto nv_msg = pbft_ctrl.pbft_db.send_pbft_new_view(vcc, new_view); + + bool nv_flag; + try { + pbft_ctrl.pbft_db.validate_new_view(nv_msg, tester::get_public_key(N(carol), "active")); + nv_flag = true; + } catch (fc::exception &e) { + nv_flag = false; + } + BOOST_CHECK_EQUAL(nv_flag, false); +} + +BOOST_AUTO_TEST_CASE(switch_fork_when_accept_new_view_with_prepare_certificate_on_short_fork) { + tester short_prepared_fork, long_non_prepared_fork, new_view_generator; + controller &ctrl_short_prepared_fork = *short_prepared_fork.control.get(); + pbft_controller pbft_short_prepared_fork{ctrl_short_prepared_fork}; + controller &ctrl_long_non_prepared_fork = *long_non_prepared_fork.control.get(); + pbft_controller pbft_long_non_prepared_fork{ctrl_long_non_prepared_fork}; + controller &ctrl_new_view_generator = *new_view_generator.control.get(); + pbft_controller pbft_new_view_generator{ctrl_new_view_generator}; + + auto msp = make_signature_provider(); + ctrl_short_prepared_fork.set_my_signature_providers(msp); + ctrl_long_non_prepared_fork.set_my_signature_providers(msp); + ctrl_new_view_generator.set_my_signature_providers(msp); + + ctrl_short_prepared_fork.set_upo(48); + ctrl_long_non_prepared_fork.set_upo(48); + ctrl_new_view_generator.set_upo(48); + + long_non_prepared_fork.create_accounts( {N(alice),N(bob),N(carol),N(deny)} ); + long_non_prepared_fork.set_producers({N(alice),N(bob),N(carol),N(deny)}); + long_non_prepared_fork.produce_blocks(100); + + 
short_prepared_fork.create_accounts( {N(alice),N(bob),N(carol),N(deny)} ); + short_prepared_fork.set_producers({N(alice),N(bob),N(carol),N(deny)}); + short_prepared_fork.produce_blocks(100); + + new_view_generator.create_accounts( {N(alice),N(bob),N(carol),N(deny)} ); + new_view_generator.set_producers({N(alice),N(bob),N(carol),N(deny)}); + new_view_generator.produce_blocks(100); + + pbft_long_non_prepared_fork.maybe_pbft_prepare(); + pbft_long_non_prepared_fork.maybe_pbft_commit(); + long_non_prepared_fork.produce_blocks(1); + pbft_long_non_prepared_fork.maybe_pbft_commit(); + long_non_prepared_fork.produce_blocks(25); + + pbft_short_prepared_fork.maybe_pbft_prepare(); + pbft_short_prepared_fork.maybe_pbft_commit(); + short_prepared_fork.produce_blocks(1); + pbft_short_prepared_fork.maybe_pbft_commit(); + short_prepared_fork.produce_blocks(25); + + + pbft_new_view_generator.maybe_pbft_prepare(); + pbft_new_view_generator.maybe_pbft_commit(); + new_view_generator.produce_blocks(1); + + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.is_pbft_enabled(), true); + BOOST_CHECK_EQUAL(ctrl_long_non_prepared_fork.is_pbft_enabled(), true); + BOOST_CHECK_EQUAL(ctrl_new_view_generator.is_pbft_enabled(), true); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.head_block_num(), 127); + BOOST_CHECK_EQUAL(ctrl_long_non_prepared_fork.head_block_num(), 127); + BOOST_CHECK_EQUAL(ctrl_long_non_prepared_fork.fetch_block_by_number(100)->id(), ctrl_short_prepared_fork.fetch_block_by_number(100)->id()); + + + + short_prepared_fork.create_accounts({N(shortname)}); + long_non_prepared_fork.create_accounts({N(longname)}); + short_prepared_fork.produce_blocks(6); + push_blocks(short_prepared_fork, new_view_generator); + long_non_prepared_fork.produce_blocks(10); + + + pbft_new_view_generator.maybe_pbft_commit(); + new_view_generator.produce_blocks(3); + push_blocks(new_view_generator, short_prepared_fork); + + BOOST_CHECK_EQUAL(ctrl_new_view_generator.head_block_num(), 136); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.head_block_num(), 136); + BOOST_CHECK_EQUAL(ctrl_long_non_prepared_fork.head_block_num(), 137); + BOOST_CHECK_EQUAL(ctrl_new_view_generator.last_irreversible_block_num(), 101); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.last_irreversible_block_num(), 101); + BOOST_CHECK_EQUAL(ctrl_long_non_prepared_fork.last_irreversible_block_num(), 101); + + //generate new view with short fork prepare certificate + pbft_new_view_generator.state_machine->set_prepares_cache(pbft_prepare()); + BOOST_CHECK_EQUAL(pbft_new_view_generator.pbft_db.should_send_pbft_msg(), true); + pbft_new_view_generator.maybe_pbft_prepare(); + BOOST_CHECK_EQUAL(pbft_new_view_generator.pbft_db.should_prepared(), true); + BOOST_CHECK_EQUAL(ctrl_new_view_generator.head_block_num(), 136); + for(int i = 0; ido_send_view_change(); + auto new_view = pbft_new_view_generator.pbft_db.get_proposed_new_view_num(); + auto vcc = pbft_new_view_generator.pbft_db.generate_view_changed_certificate(new_view); + auto nv_msg = pbft_new_view_generator.pbft_db.send_pbft_new_view( + vcc, + new_view); + + //merge short fork and long fork, make sure current head is long fork + for(int i=1;i<=10;i++){ + auto tmp = ctrl_long_non_prepared_fork.fetch_block_by_number(127+i); + short_prepared_fork.push_block(tmp); + } + BOOST_CHECK_EQUAL(ctrl_long_non_prepared_fork.head_block_num(), 137); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.head_block_num(), 137); + + + ctrl_short_prepared_fork.reset_pbft_prepared(); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.head_block_num(), 137); + 
+ //can switch fork after apply prepare certificate in new view + pbft_short_prepared_fork.state_machine->on_new_view(std::make_shared>(nv_msg, ctrl_new_view_generator.get_chain_id())); + + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.head_block_num(), 136); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.last_irreversible_block_num(), 101); + + + //can switch fork after set lib + ctrl_short_prepared_fork.set_pbft_prepared(ctrl_short_prepared_fork.last_irreversible_block_id()); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.head_block_num(), 137); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.last_irreversible_block_num(), 101); + + pbft_short_prepared_fork.maybe_pbft_commit(); + short_prepared_fork.produce_blocks(2); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.head_block_num(), 138); + BOOST_CHECK_EQUAL(ctrl_short_prepared_fork.last_irreversible_block_num(), 136); +} + + + +BOOST_AUTO_TEST_SUITE_END()
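For reviewers who want to exercise the updated txn_test_gen_plugin end to end, the sketch below strings the HTTP calls together. It is illustrative only: it assumes a local nodeos with the plugin enabled on the default HTTP port 8888 and the usual /v1/txn_test_gen/start_generation and /v1/txn_test_gen/stop_generation paths; the creator key is the well-known development key from the README hunk above, and the salt string is arbitrary. Per the checks in start_generation, the period must be between 1 and 2500 ms and the batch size an even number no greater than 250.

```bash
# 1) Create and fund the test accounts (aaaaaaaaaaaa, bbbbbbbbbbbb, cccccccccccc).
#    The third argument is the core-symbol parameter added in this change.
curl --data-binary '["eosio", "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", "EOS"]' \
     http://127.0.0.1:8888/v1/txn_test_gen/create_test_accounts

# 2) Start generation: ["<salt>", <period in ms>, <batch size>].
#    20 transactions every 20 ms is roughly 1000 TPS; the plugin now waits ~3 s before the first batch.
curl --data-binary '["somesalt", 20, 20]' \
     http://127.0.0.1:8888/v1/txn_test_gen/start_generation

# 3) Stop generation; the plugin logs the transaction count and average CPU time per transaction.
curl http://127.0.0.1:8888/v1/txn_test_gen/stop_generation
```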