diff --git a/.github/matic-cli-config.yml b/.github/matic-cli-config.yml
new file mode 100644
index 0000000000..c7dab39b0e
--- /dev/null
+++ b/.github/matic-cli-config.yml
@@ -0,0 +1,11 @@
+defaultStake: 10000
+defaultFee: 2000
+borChainId: "15001"
+heimdallChainId: heimdall-15001
+contractsBranch: jc/v0.3.1-backport
+numOfValidators: 3
+numOfNonValidators: 0
+ethURL: http://ganache:9545
+devnetType: docker
+borDockerBuildContext: "../../bor"
+heimdallDockerBuildContext: "https://github.com/maticnetwork/heimdall.git#develop"
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d61e14cb42..639a68703f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,20 +1,183 @@
name: CI
-on: [push, pull_request]
+on:
+ push:
+ branches:
+ - "master"
+ - "qa"
+ - "develop"
+ pull_request:
+ branches:
+ - "**"
+ types: [opened, synchronize, edited]
+
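+# Cancel any in-flight run for the same pull request (or branch ref) when new commits are pushed.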
+concurrency:
+ group: build-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
- build:
- runs-on: ubuntu-latest
+ tests:
+ if: (github.event.action != 'closed' || github.event.pull_request.merged == true)
+ strategy:
+ matrix:
+ os: [ ubuntu-20.04, macos-11 ] # list of os: https://github.com/actions/virtual-environments
+ runs-on: ${{ matrix.os }}
steps:
- - uses: actions/checkout@v2
- - name: Install Go
- uses: actions/setup-go@v2
- with:
- go-version: 1.17
- - name: "Build binaries"
- run: make all
- - name: "Run tests"
- run: make test
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v1
- with:
- file: ./cover.out
+ - uses: actions/checkout@v3
+ - run: |
+ git submodule update --init --recursive --force
+ git fetch --no-tags --prune --depth=1 origin +refs/heads/master:refs/remotes/origin/master
+
+ - uses: actions/setup-go@v3
+ with:
+ go-version: 1.18.x
+
+ - name: Install dependencies on Linux
+ if: runner.os == 'Linux'
+ run: sudo apt update && sudo apt install build-essential
+
+ - name: Golang-ci install
+ if: runner.os == 'Linux'
+ run: make lintci-deps
+
+ - uses: actions/cache@v3
+ with:
+ path: |
+ ~/.cache/go-build
+ ~/Library/Caches/go-build
+ ~/go/pkg/mod
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+ restore-keys: ${{ runner.os }}-go-
+
+ - name: Build
+ run: make all
+
+ - name: Lint
+ if: runner.os == 'Linux'
+ run: make lint
+
+ - name: Test
+ run: make test
+
+ #- name: Data race tests
+ # run: make test-race
+
+ - name: test-integration
+ run: make test-integration
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v1
+ with:
+ file: ./cover.out
+
+ # # TODO: make it work
+ # - name: Reproducible build test
+ # run: |
+ # make geth
+ # shasum -a256 ./build/bin/geth > bor1.sha256
+ # make geth
+ # shasum -a256 ./build/bin/geth > bor2.sha256
+ # if ! cmp -s bor1.sha256 bor2.sha256; then
+ # echo >&2 "Reproducible build broken"; cat bor1.sha256; cat bor2.sha256; exit 1
+ # fi
+
+ integration-tests:
+ if: (github.event.action != 'closed' || github.event.pull_request.merged == true)
+ strategy:
+ matrix:
+ os: [ ubuntu-20.04 ] # list of os: https://github.com/actions/virtual-environments
+ runs-on: ${{ matrix.os }}
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ path: bor
+ - name: Checkout submodules
+ run: |
+ cd bor
+ git submodule update --init --recursive --force
+ git fetch --no-tags --prune --depth=1 origin +refs/heads/master:refs/remotes/origin/master
+
+ - uses: actions/setup-go@v3
+ with:
+ go-version: 1.18.x
+
+ - name: Checkout matic-cli
+ uses: actions/checkout@v3
+ with:
+ repository: maticnetwork/matic-cli
+ ref: v0.3.0-dev
+ path: matic-cli
+
+ - name: Install dependencies on Linux
+ if: runner.os == 'Linux'
+ run: |
+ sudo apt update
+ sudo apt install build-essential
+ curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
+ sudo snap install solc
+ sudo apt install python2 jq curl
+ sudo ln -sf /usr/bin/python2 /usr/bin/python
+
+ - uses: actions/setup-node@v3
+ with:
+ node-version: '10.17.0'
+ cache: 'npm'
+ cache-dependency-path: |
+ matic-cli/package-lock.json
+ matic-cli/devnet/code/contracts/package-lock.json
+ matic-cli/devnet/code/genesis-contracts/package-lock.json
+ matic-cli/devnet/code/genesis-contracts/matic-contracts/package-lock.json
+
+ - name: Bootstrap devnet
+ run: |
+ cd matic-cli
+ npm install --prefer-offline --no-audit --progress=false
+ mkdir devnet
+ cd devnet
+ ../bin/matic-cli setup devnet -c ../../bor/.github/matic-cli-config.yml
+
+ - name: Launch devnet
+ run: |
+ cd matic-cli/devnet
+ bash docker-ganache-start.sh
+ bash docker-heimdall-start-all.sh
+ bash docker-bor-setup.sh
+ bash docker-bor-start-all.sh
+ sleep 120 && bash ganache-deployment-bor.sh
+ sleep 120 && bash ganache-deployment-sync.sh
+ sleep 120
+ docker exec bor0 bash -c "bor attach /root/.bor/data/bor.ipc -exec 'admin.peers'"
+ docker exec bor0 bash -c "bor attach /root/.bor/data/bor.ipc -exec 'eth.blockNumber'"
+
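+      # Note (assumption): the smoke test below is expected to verify that the deposit reaches
+      # the Bor chain; the authoritative checks live in bor/integration-tests/smoke_test.sh.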
+ - name: Run smoke tests
+ run: |
+ echo "Deposit 100 matic for each account to bor network"
+ cd matic-cli/devnet/code/contracts
+ npm run truffle exec scripts/deposit.js -- --network development $(jq -r .root.tokens.MaticToken contractAddresses.json) 100000000000000000000
+ cd -
+ bash bor/integration-tests/smoke_test.sh
+
+ - name: Upload logs
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: logs_${{ github.run_id }}
+ path: |
+ matic-cli/devnet/logs
+
+ - name: Package code and chain data
+ if: always()
+ run: |
+ cd matic-cli/devnet
+ docker compose down --remove-orphans
+ cd -
+ mkdir -p ${{ github.run_id }}/matic-cli
+ sudo mv bor ${{ github.run_id }}
+ sudo mv matic-cli/devnet ${{ github.run_id }}/matic-cli
+ sudo tar czf code.tar.gz ${{ github.run_id }}
+ - name: Upload code and chain data
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: code_${{ github.run_id }}
+ path: code.tar.gz
diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml
new file mode 100644
index 0000000000..5c59b1751a
--- /dev/null
+++ b/.github/workflows/packager.yml
@@ -0,0 +1,733 @@
+name: packager
+
+on:
+ push:
+ branches:
+ - 'main'
+ paths:
+ - '**'
+ tags:
+ - 'v*.*.*'
+ - 'v*.*.*-*'
+
+jobs:
+ build:
+ runs-on: ubuntu-18.04
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+ - name: Set up Go
+ uses: actions/setup-go@master
+ with:
+ go-version: 1.19
+ - name: Adding TAG to ENV
+ run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV
+
+ - name: Cleaning repo
+ run: make clean
+ - name: Building for amd64
+ run: make bor
+
+ - name: Making directory structure
+ run: mkdir -p packaging/deb/bor/usr/bin
+ - name: Making directory structure for toml
+ run: mkdir -p packaging/deb/bor/var/lib/bor
+ - name: Copying necessary files
+ run: cp -rp build/bin/bor packaging/deb/bor/usr/bin/
+ - name: copying control file
+ run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor/DEBIAN/control
+ - name: removing systemd file for binary
+ run: rm -rf lib/systemd/system/bor.service
+
+ - name: Creating package for binary for bor ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+
+ - name: Running package build
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+
+ - name: Removing the bor binary
+ run: rm -rf packaging/deb/bor/usr/bin/bor
+
+ - name: making directory structure for systemd
+ run: mkdir -p packaging/deb/bor/lib/systemd/system
+ - name: Copying systemd file
+ run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor/lib/systemd/system/bor.service
+
+ - name: Prepping ${{ env.NETWORK }} ${{ env.NODE }} node for ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Running package build for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mumbai
+
+ - name: Setting up ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Building ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: sentry
+ NETWORK: mainnet
+
+ - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying Prerm script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying Postrm script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mumbai
+
+ - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: validator
+ NETWORK: mainnet
+
+ - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying profile preinst file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying the profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying profile prerm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying profile postrm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mumbai
+
+ - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying profile preinst file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying the profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying profile prerm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying profile postrm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: archive
+ NETWORK: mainnet
+
+ - name: Cleaning build directory for arm64 build
+ run: make clean
+
+ - name: Removing systemd file
+ run: rm -rf packaging/deb/bor/lib/systemd/system/bor.service
+
+ - name: Adding requirements for cross compile
+ run: sudo apt-get install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
+
+ - name: removing systemd file for binary
+ run: rm -rf lib/systemd/system/bor.service
+
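+      # The CGO_ENABLED=1 cross build below relies on the aarch64 gcc/g++ toolchain installed above.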
+ - name: Building bor for arm64
+ run: GOARCH=arm64 GOOS=linux CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ CGO_ENABLED=1 go build -o build/bin/bor ./cmd/cli/main.go
+
+ - name: Copying bor arm64 for use with packaging
+ run: cp -rp build/bin/bor packaging/deb/bor/usr/bin/
+
+ - name: Creating package for binary only bor
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ - name: Copying control file
+ run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ - name: Running package build
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+
+ - name: Removing the bor binary
+ run: rm -rf packaging/deb/bor/usr/bin/bor
+
+ - name: Copying systemd file
+ run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor/lib/systemd/system/bor.service
+
+ - name: Updating the control file to use with the arm64 profile
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control
+
+ - name: Setting up bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mumbai
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mumbai
+
+ - name: Setting up bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mainnet
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: sentry
+ NETWORK: mainnet
+
+ - name: Prepping Bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mumbai
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mumbai
+
+ - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mainnet
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: validator
+ NETWORK: mainnet
+
+ - name: Updating the control file to use with the arm64 profile
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control
+
+ - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying over profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mumbai
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mumbai
+
+ - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying over profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mainnet
+ - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: archive
+ NETWORK: mainnet
+
+ - name: Confirming package built
+ run: ls -ltr packaging/deb/ | grep bor
+
+ - name: Release bor Packages
+ uses: softprops/action-gh-release@v1
+ with:
+ tag_name: ${{ env.GIT_TAG }}
+ prerelease: true
+ files: |
+ packaging/deb/bor**.deb
+ binary/bo**
\ No newline at end of file
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000000..5d5d221eb3
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,31 @@
+# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
+#
+# You can adjust the behavior by modifying this file.
+# For more information, see:
+# https://github.com/actions/stale
+name: Mark stale issues and pull requests
+
+on:
+ schedule:
+ - cron: '0 0 * * *'
+
+jobs:
+ stale:
+
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+
+ steps:
+ - uses: actions/stale@v5
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'This issue is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 14 days.'
+ stale-pr-message: 'This PR is stale because it has been open 21 days with no activity. Remove stale label or comment or this will be closed in 14 days.'
+ close-issue-message: 'This issue was closed because it has been stalled for 28 days with no activity.'
+ close-pr-message: 'This PR was closed because it has been stalled for 35 days with no activity.'
+ days-before-issue-stale: 14
+ days-before-pr-stale: 21
+ days-before-issue-close: 14
+ days-before-pr-close: 14
diff --git a/.gitignore b/.gitignore
index ad4242c519..50328c8121 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,8 @@
*/**/*dapps*
build/_vendor/pkg
+cover.out
+
#*
.#*
*#
diff --git a/.golangci.yml b/.golangci.yml
index 4950b98c21..89a9e328b8 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,6 +1,7 @@
# This file configures github.com/golangci/golangci-lint.
run:
+ go: '1.18'
timeout: 20m
tests: true
# default is true. Enables skipping of directories:
@@ -8,28 +9,139 @@ run:
skip-dirs-use-default: true
skip-files:
- core/genesis_alloc.go
+ - gen_.*.go
+ - .*_gen.go
linters:
- disable-all: true
enable:
- - deadcode
- goconst
- goimports
- - gosimple
- - govet
- - ineffassign
- misspell
- # - staticcheck
- unconvert
- # - unused
- - varcheck
+ - bodyclose
+ - containedctx
+ - contextcheck
+ - decorder
+ - durationcheck
+ - errchkjson
+ - errname
+ - exhaustive
+ - exportloopref
+ - gocognit
+ - gofmt
+ # - gomnd
+ - gomoddirectives
+ - gosec
+ - makezero
+ - nestif
+ - nilerr
+ - nilnil
+ - noctx
+ #- nosprintfhostport # TODO: do we use IPv6?
+ - paralleltest
+ - prealloc
+ - predeclared
+ #- promlinter
+ #- revive
+ # - tagliatelle
+ - tenv
+ - thelper
+ - tparallel
+ - unconvert
+ - unparam
+ - wsl
+ #- errorlint causes stack overflow. TODO: recheck after each golangci update
linters-settings:
gofmt:
simplify: true
+ auto-fix: false
+
goconst:
min-len: 3 # minimum length of string constant
- min-occurrences: 6 # minimum number of occurrences
+ min-occurrences: 2 # minimum number of occurrences
+ numbers: true
+
+ goimports:
+ local-prefixes: github.com/ethereum/go-ethereum
+
+ nestif:
+ min-complexity: 5
+
+ prealloc:
+ for-loops: true
+
+ gocritic:
+ # Which checks should be enabled; can't be combined with 'disabled-checks';
+ # See https://go-critic.github.io/overview#checks-overview
+ # To check which checks are enabled run `GL_DEBUG=gocritic ./build/bin/golangci-lint run`
+ # By default list of stable checks is used.
+ enabled-checks:
+ - badLock
+ - filepathJoin
+ - sortSlice
+ - sprintfQuotedString
+ - syncMapLoadAndDelete
+ - weakCond
+ - boolExprSimplify
+ - httpNoBody
+ - ioutilDeprecated
+ - nestingReduce
+ - preferFilepathJoin
+ - redundantSprint
+ - stringConcatSimplify
+ - timeExprSimplify
+ - typeAssertChain
+ - yodaStyleExpr
+ - truncateCmp
+ - equalFold
+ - preferDecodeRune
+ - preferFprint
+ - preferStringWriter
+ - preferWriteByte
+ - sliceClear
+ #- ruleguard
+
+ # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
+ disabled-checks:
+ - regexpMust
+ - exitAfterDefer
+ - dupBranchBody
+ - singleCaseSwitch
+ - unlambda
+ - captLocal
+ - commentFormatting
+ - ifElseChain
+ - importShadow
+ - builtinShadow
+
+ # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks.
+ # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
+ enabled-tags:
+ - performance
+ - diagnostic
+ - opinionated
+ - style
+ disabled-tags:
+ - experimental
+ govet:
+ disable:
+ - deepequalerrors
+ - fieldalignment
+ - shadow
+ - unsafeptr
+ check-shadowing: true
+ enable-all: true
+ settings:
+ printf:
+ # Run `go tool vet help printf` to see available settings for `printf` analyzer.
+ funcs:
+ - (github.com/ethereum/go-ethereum/log.Logger).Trace
+ - (github.com/ethereum/go-ethereum/log.Logger).Debug
+ - (github.com/ethereum/go-ethereum/log.Logger).Info
+ - (github.com/ethereum/go-ethereum/log.Logger).Warn
+ - (github.com/ethereum/go-ethereum/log.Logger).Error
+ - (github.com/ethereum/go-ethereum/log.Logger).Crit
issues:
exclude-rules:
@@ -48,3 +160,28 @@ issues:
- path: cmd/faucet/
linters:
- deadcode
+ # Exclude some linters from running on tests files.
+ - path: test\.go
+ linters:
+ - gosec
+ - unused
+ - deadcode
+ - gocritic
+ - path: cmd/devp2p
+ linters:
+ - gosec
+ - unused
+ - deadcode
+ - gocritic
+ - path: metrics/sample\.go
+ linters:
+ - gosec
+ - gocritic
+ - path: p2p/simulations
+ linters:
+ - gosec
+ - gocritic
+ max-issues-per-linter: 0
+ max-same-issues: 0
+ #new: true
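+  # Only report issues introduced relative to origin/master, so legacy upstream code is not flagged.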
+ new-from-rev: origin/master
\ No newline at end of file
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 331d8de6b5..6f770ba739 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -1,13 +1,13 @@
project_name: bor
release:
- disable: false
+ disable: true
draft: true
prerelease: auto
builds:
- id: darwin-amd64
- main: ./cmd/geth
+ main: ./cmd/cli
binary: bor
goos:
- darwin
@@ -22,7 +22,7 @@ builds:
-s -w
- id: darwin-arm64
- main: ./cmd/geth
+ main: ./cmd/cli
binary: bor
goos:
- darwin
@@ -37,7 +37,7 @@ builds:
-s -w
- id: linux-amd64
- main: ./cmd/geth
+ main: ./cmd/cli
binary: bor
goos:
- linux
@@ -53,7 +53,7 @@ builds:
-s -w -extldflags "-static"
- id: linux-arm64
- main: ./cmd/geth
+ main: ./cmd/cli
binary: bor
goos:
- linux
@@ -75,12 +75,18 @@ nfpms:
description: Polygon Blockchain
license: GPLv3 LGPLv3
+ bindir: /usr/local/bin
+
formats:
- apk
- deb
- rpm
contents:
+ - dst: /var/lib/bor
+ type: dir
+ file_info:
+ mode: 0777
- src: builder/files/bor.service
dst: /lib/systemd/system/bor.service
type: config
@@ -90,6 +96,12 @@ nfpms:
- src: builder/files/genesis-testnet-v4.json
dst: /etc/bor/genesis-testnet-v4.json
type: config
+ - src: builder/files/config.toml
+ dst: /var/lib/bor/config.toml
+ type: config
+
+ scripts:
+ postinstall: builder/files/bor-post-install.sh
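+    # The post-install script (builder/files/bor-post-install.sh, added in this change) creates the
+    # dedicated "bor" system user that builder/files/bor.service now runs under.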
overrides:
rpm:
diff --git a/Dockerfile b/Dockerfile
index 8af791ab3f..6c65faf12d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,15 +1,17 @@
FROM golang:latest
-ARG BOR_DIR=/bor
+ARG BOR_DIR=/var/lib/bor
ENV BOR_DIR=$BOR_DIR
RUN apt-get update -y && apt-get upgrade -y \
&& apt install build-essential git -y \
- && mkdir -p /bor
+ && mkdir -p ${BOR_DIR}
WORKDIR ${BOR_DIR}
COPY . .
-RUN make bor-all
+RUN make bor
+
+RUN cp build/bin/bor /usr/bin/
ENV SHELL /bin/bash
EXPOSE 8545 8546 8547 30303 30303/udp
diff --git a/Dockerfile.alltools b/Dockerfile.alltools
index a3f36d4a04..1c4437e251 100644
--- a/Dockerfile.alltools
+++ b/Dockerfile.alltools
@@ -13,6 +13,6 @@ RUN set -x \
&& apk add --update --no-cache \
ca-certificates \
&& rm -rf /var/cache/apk/*
-COPY --from=builder /bor/build/bin/* /usr/local/bin/
+COPY --from=builder /bor/build/bin/* /usr/bin/
EXPOSE 8545 8546 30303 30303/udp
diff --git a/Dockerfile.classic b/Dockerfile.classic
deleted file mode 100644
index 2fa38f08f9..0000000000
--- a/Dockerfile.classic
+++ /dev/null
@@ -1,18 +0,0 @@
-# Build Geth in a stock Go builder container
-FROM golang:1.18.1-alpine as builder
-
-RUN apk add --no-cache make gcc musl-dev linux-headers git bash
-
-ADD . /bor
-RUN cd /bor && make bor-all
-
-CMD ["/bin/bash"]
-
-# Pull Bor into a second stage deploy alpine container
-FROM alpine:latest
-
-RUN apk add --no-cache ca-certificates
-COPY --from=builder /bor/build/bin/bor /usr/local/bin/
-COPY --from=builder /bor/build/bin/bootnode /usr/local/bin/
-
-EXPOSE 8545 8546 8547 30303 30303/udp
diff --git a/Dockerfile.release b/Dockerfile.release
index 66dd589e82..2a026566d7 100644
--- a/Dockerfile.release
+++ b/Dockerfile.release
@@ -1,10 +1,15 @@
FROM alpine:3.14
+ARG BOR_DIR=/var/lib/bor
+ENV BOR_DIR=$BOR_DIR
+
RUN apk add --no-cache ca-certificates && \
- mkdir -p /etc/bor
-COPY bor /usr/local/bin/
-COPY builder/files/genesis-mainnet-v1.json /etc/bor/
-COPY builder/files/genesis-testnet-v4.json /etc/bor/
+ mkdir -p ${BOR_DIR}
+
+WORKDIR ${BOR_DIR}
+COPY bor /usr/bin/
+COPY builder/files/genesis-mainnet-v1.json ${BOR_DIR}
+COPY builder/files/genesis-testnet-v4.json ${BOR_DIR}
EXPOSE 8545 8546 8547 30303 30303/udp
ENTRYPOINT ["bor"]
diff --git a/Makefile b/Makefile
index 6469b8510f..a8e14cf06a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,27 +2,37 @@
# with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make.
-.PHONY: geth android ios evm all test clean
+.PHONY: geth android ios geth-cross evm all test clean docs
+.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
+.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
+.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
+.PHONY: geth-windows geth-windows-386 geth-windows-amd64
-GOBIN = ./build/bin
GO ?= latest
+GOBIN = $(CURDIR)/build/bin
GORUN = env GO111MODULE=on go run
GOPATH = $(shell go env GOPATH)
-bor:
- $(GORUN) build/ci.go install ./cmd/geth
- mkdir -p $(GOPATH)/bin/
- cp $(GOBIN)/geth $(GOBIN)/bor
- cp $(GOBIN)/* $(GOPATH)/bin/
+GIT_COMMIT ?= $(shell git rev-list -1 HEAD)
+GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
+GIT_TAG ?= $(shell git describe --tags `git rev-list --tags="v*" --max-count=1`)
-bor-all:
- $(GORUN) build/ci.go install
+PACKAGE = github.com/ethereum/go-ethereum
+GO_FLAGS += -buildvcs=false
+GO_FLAGS += -ldflags "-X ${PACKAGE}/params.GitCommit=${GIT_COMMIT} -X ${PACKAGE}/params.GitBranch=${GIT_BRANCH} -X ${PACKAGE}/params.GitTag=${GIT_TAG}"
+
+TESTALL = $$(go list ./... | grep -v go-ethereum/cmd/)
+TESTE2E = ./tests/...
+GOTEST = GODEBUG=cgocheck=0 go test $(GO_FLAGS) -p 1
+
+bor:
mkdir -p $(GOPATH)/bin/
- cp $(GOBIN)/geth $(GOBIN)/bor
- cp $(GOBIN)/* $(GOPATH)/bin/
+ go build -o $(GOBIN)/bor ./cmd/cli/main.go
+ cp $(GOBIN)/bor $(GOPATH)/bin/
+ @echo "Done building."
protoc:
- protoc --go_out=. --go-grpc_out=. ./command/server/proto/*.proto
+ protoc --go_out=. --go-grpc_out=. ./internal/cli/server/proto/*.proto
geth:
$(GORUN) build/ci.go install ./cmd/geth
@@ -45,11 +55,29 @@ ios:
@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."
test:
- # Skip mobile and cmd tests since they are being deprecated
- go test -v $$(go list ./... | grep -v go-ethereum/cmd/) -cover -coverprofile=cover.out
+ $(GOTEST) --timeout 5m -shuffle=on -cover -coverprofile=cover.out $(TESTALL)
-lint: ## Run linters.
- $(GORUN) build/ci.go lint
+test-race:
+ $(GOTEST) --timeout 15m -race -shuffle=on $(TESTALL)
+
+test-integration:
+ $(GOTEST) --timeout 30m -tags integration $(TESTE2E)
+
+escape:
+ cd $(path) && go test -gcflags "-m -m" -run none -bench=BenchmarkJumpdest* -benchmem -memprofile mem.out
+
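+# Lint targets mirror the CI "Golang-ci install" and "Lint" steps: `make lintci-deps` installs
+# golangci-lint v1.46.0 into ./build/bin, then `make lint` runs it with the repo's .golangci.yml.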
+lint:
+ @./build/bin/golangci-lint run --config ./.golangci.yml
+
+lintci-deps:
+ rm -f ./build/bin/golangci-lint
+ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.46.0
+
+goimports:
+ goimports -local "$(PACKAGE)" -w .
+
+docs:
+ $(GORUN) cmd/clidoc/main.go -d ./docs/cli
clean:
env GO111MODULE=on go clean -cache
@@ -59,16 +87,20 @@ clean:
# You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.
devtools:
- env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
- env GOBIN= go install github.com/kevinburke/go-bindata/go-bindata@latest
- env GOBIN= go install github.com/fjl/gencodec@latest
- env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest
- env GOBIN= go install ./cmd/abigen
+	# Notice! If you add a new binary, also add it to the tests/deps/fake.go file
+	$(GOBUILD) -o $(GOBIN)/stringer golang.org/x/tools/cmd/stringer
+ $(GOBUILD) -o $(GOBIN)/go-bindata github.com/kevinburke/go-bindata/go-bindata
+ $(GOBUILD) -o $(GOBIN)/codecgen github.com/ugorji/go/codec/codecgen
+ $(GOBUILD) -o $(GOBIN)/abigen ./cmd/abigen
+ $(GOBUILD) -o $(GOBIN)/mockgen github.com/golang/mock/mockgen
+ $(GOBUILD) -o $(GOBIN)/protoc-gen-go github.com/golang/protobuf/protoc-gen-go
+ PATH=$(GOBIN):$(PATH) go generate ./common
+ PATH=$(GOBIN):$(PATH) go generate ./core/types
+ PATH=$(GOBIN):$(PATH) go generate ./consensus/bor
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'
# Cross Compilation Targets (xgo)
-
geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
@echo "Full cross compilation done:"
@ls -ld $(GOBIN)/geth-*
diff --git a/README.md b/README.md
index 1dacae0219..4e33a551d0 100644
--- a/README.md
+++ b/README.md
@@ -63,12 +63,6 @@ them using your favourite package manager. Once the dependencies are installed,
$ make bor
```
-- or, to build the full suite of utilities:
-
- ```shell
- $ make bor-all
- ```
-
### Make awesome changes!
1. Create new branch for your changes
@@ -113,12 +107,6 @@ them using your favourite package manager. Once the dependencies are installed,
-Build the beta client:
-
-```shell
-go build -o bor-beta command/*.go
-```
-
## License
The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go
index 8a0cbe3357..7df1a823ec 100644
--- a/accounts/abi/bind/backends/simulated_test.go
+++ b/accounts/abi/bind/backends/simulated_test.go
@@ -27,10 +27,13 @@ import (
"testing"
"time"
+ "go.uber.org/goleak"
+
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/leak"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -38,6 +41,8 @@ import (
)
func TestSimulatedBackend(t *testing.T) {
+ defer goleak.VerifyNone(t, leak.IgnoreList()...)
+
var gasLimit uint64 = 8000029
key, _ := crypto.GenerateKey() // nolint: gosec
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
diff --git a/builder/files/bor-post-install.sh b/builder/files/bor-post-install.sh
new file mode 100644
index 0000000000..1419479983
--- /dev/null
+++ b/builder/files/bor-post-install.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -e
+
+PKG="bor"
+
+if ! getent passwd $PKG >/dev/null ; then
+ adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent $PKG
+ echo "Created system user $PKG"
+fi
diff --git a/builder/files/bor.service b/builder/files/bor.service
index fa84320a6c..758553299e 100644
--- a/builder/files/bor.service
+++ b/builder/files/bor.service
@@ -6,21 +6,9 @@
[Service]
Restart=on-failure
RestartSec=5s
- ExecStart=/usr/local/bin/bor \
- --bor-mumbai \
- # --bor-mainnet \
- --datadir /var/lib/bor/data \
- --bootnodes "enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303,enode://88116f4295f5a31538ae409e4d44ad40d22e44ee9342869e7d68bdec55b0f83c1530355ce8b41fbec0928a7d75a5745d528450d30aec92066ab6ba1ee351d710@159.203.9.164:30303"
- # Validator params
- # Uncomment and configure the following lines in case you run a validator
- # --keystore /var/lib/bor/keystore \
- # --unlock [VALIDATOR ADDRESS] \
- # --password /var/lib/bor/password.txt \
- # --allow-insecure-unlock \
- # --nodiscover --maxpeers 1 \
- # --mine
+ ExecStart=/usr/local/bin/bor server -config "/var/lib/bor/config.toml"
Type=simple
- User=root
+ User=bor
KillSignal=SIGINT
TimeoutStopSec=120
diff --git a/builder/files/config.toml b/builder/files/config.toml
new file mode 100644
index 0000000000..fc95cd1a64
--- /dev/null
+++ b/builder/files/config.toml
@@ -0,0 +1,143 @@
+# NOTE: If you run a validator, uncomment and configure the following 8 fields:
+# `mine`, `etherbase`, `nodiscover`, `maxpeers`, `keystore`, `allow-insecure-unlock`, `password`, `unlock`
+
+chain = "mainnet"
+# chain = "mumbai"
+# identity = "Pratiks-MacBook-Pro.local"
+# log-level = "INFO"
+datadir = "/var/lib/bor/data"
+# ancient = ""
+# keystore = "/var/lib/bor/keystore"
+syncmode = "full"
+# gcmode = "full"
+# snapshot = true
+# "bor.logs" = false
+# ethstats = ""
+
+# ["eth.requiredblocks"]
+
+[p2p]
+ # maxpeers = 1
+ # nodiscover = true
+ # maxpendpeers = 50
+ # bind = "0.0.0.0"
+ # port = 30303
+ # nat = "any"
+ [p2p.discovery]
+ # v5disc = false
+ bootnodes = ["enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303", "enode://88116f4295f5a31538ae409e4d44ad40d22e44ee9342869e7d68bdec55b0f83c1530355ce8b41fbec0928a7d75a5745d528450d30aec92066ab6ba1ee351d710@159.203.9.164:30303"]
+ # Uncomment the `bootnodes` field below to use the Mumbai bootnode
+ # bootnodes = ["enode://095c4465fe509bd7107bbf421aea0d3ad4d4bfc3ff8f9fdc86f4f950892ae3bbc3e5c715343c4cf60c1c06e088e621d6f1b43ab9130ae56c2cacfd356a284ee4@18.213.200.99:30303"]
+ # bootnodesv4 = []
+ # bootnodesv5 = []
+ # static-nodes = []
+ # trusted-nodes = []
+ # dns = []
+
+# [heimdall]
+ # url = "http://localhost:1317"
+ # "bor.without" = false
+ # grpc-address = ""
+
+[txpool]
+ nolocals = true
+ pricelimit = 30000000000
+ accountslots = 16
+ globalslots = 32768
+ accountqueue = 16
+ globalqueue = 32768
+ lifetime = "1h30m0s"
+ # locals = []
+ # journal = ""
+ # rejournal = "1h0m0s"
+ # pricebump = 10
+
+[miner]
+ gaslimit = 30000000
+ gasprice = "30000000000"
+ # mine = true
+ # etherbase = "VALIDATOR ADDRESS"
+ # extradata = ""
+
+
+# [jsonrpc]
+# ipcdisable = false
+# ipcpath = ""
+# gascap = 50000000
+# txfeecap = 5.0
+# [jsonrpc.http]
+# enabled = false
+# port = 8545
+# prefix = ""
+# host = "localhost"
+# api = ["eth", "net", "web3", "txpool", "bor"]
+# vhosts = ["*"]
+# corsdomain = ["*"]
+# [jsonrpc.ws]
+# enabled = false
+# port = 8546
+# prefix = ""
+# host = "localhost"
+# api = ["web3", "net"]
+# origins = ["*"]
+# [jsonrpc.graphql]
+# enabled = false
+# port = 0
+# prefix = ""
+# host = ""
+# vhosts = ["*"]
+# corsdomain = ["*"]
+# [jsonrpc.timeouts]
+# read = "30s"
+# write = "30s"
+# idle = "2m0s"
+
+[gpo]
+ # blocks = 20
+ # percentile = 60
+ # maxprice = "5000000000000"
+ ignoreprice = "30000000000"
+
+[telemetry]
+ metrics = true
+ # expensive = false
+ # prometheus-addr = "127.0.0.1:7071"
+ # opencollector-endpoint = "127.0.0.1:4317"
+ # [telemetry.influx]
+ # influxdb = false
+ # endpoint = ""
+ # database = ""
+ # username = ""
+ # password = ""
+ # influxdbv2 = false
+ # token = ""
+ # bucket = ""
+ # organization = ""
+ # [telemetry.influx.tags]
+
+# [cache]
+ # cache = 1024
+ # gc = 25
+ # snapshot = 10
+ # database = 50
+ # trie = 15
+ # journal = "triecache"
+ # rejournal = "1h0m0s"
+ # noprefetch = false
+ # preimages = false
+ # txlookuplimit = 2350000
+ # timeout = "1h0m0s"
+
+[accounts]
+ # allow-insecure-unlock = true
+ # password = "/var/lib/bor/password.txt"
+ # unlock = ["VALIDATOR ADDRESS"]
+ # lightkdf = false
+ # disable-bor-wallet = false
+
+# [grpc]
+ # addr = ":3131"
+
+# [developer]
+ # dev = false
+ # period = 0
diff --git a/cmd/clidoc/main.go b/cmd/clidoc/main.go
new file mode 100644
index 0000000000..07efc04556
--- /dev/null
+++ b/cmd/clidoc/main.go
@@ -0,0 +1,77 @@
+package main
+
+import (
+ "flag"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/internal/cli"
+)
+
+const (
+ DefaultDir string = "./docs/cli"
+ DefaultMainPage string = "README.md"
+)
+
+func main() {
+ commands := cli.Commands()
+
+ dest := flag.String("d", DefaultDir, "Destination directory where the docs will be generated")
+ flag.Parse()
+
+ dirPath := filepath.Join(".", *dest)
+ if err := os.MkdirAll(dirPath, os.ModePerm); err != nil {
+ log.Fatalln("Failed to create directory.", err)
+ }
+
+ mainPage := []string{
+ "# Bor command line interface",
+ "## Commands",
+ }
+
+ keys := make([]string, len(commands))
+ i := 0
+
+ for k := range commands {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+
+ for _, name := range keys {
+ cmd, err := commands[name]()
+ if err != nil {
+ log.Fatalf("Error occurred when inspecting bor command %s: %s", name, err)
+ }
+
+ fileName := strings.ReplaceAll(name, " ", "_") + ".md"
+
+ overwriteFile(filepath.Join(dirPath, fileName), cmd.MarkDown())
+ mainPage = append(mainPage, "- [```"+name+"```](./"+fileName+")")
+ }
+
+ overwriteFile(filepath.Join(dirPath, DefaultMainPage), strings.Join(mainPage, "\n\n"))
+
+ os.Exit(0)
+}
+
+func overwriteFile(filePath string, text string) {
+ log.Printf("Writing to page: %s\n", filePath)
+
+ f, err := os.Create(filePath)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if _, err = f.WriteString(text); err != nil {
+ log.Fatalln(err)
+ }
+
+ if err = f.Close(); err != nil {
+ log.Fatalln(err)
+ }
+}
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index d8ba5366fe..101f4b2de6 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -17,17 +17,16 @@
package main
import (
- "bufio"
- "errors"
"fmt"
+ "io/ioutil"
"math/big"
"os"
- "reflect"
"time"
- "unicode"
"gopkg.in/urfave/cli.v1"
+ "github.com/BurntSushi/toml"
+
"github.com/ethereum/go-ethereum/accounts/external"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/accounts/scwallet"
@@ -41,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
- "github.com/naoina/toml"
)
var (
@@ -61,28 +59,6 @@ var (
}
)
-// These settings ensure that TOML keys use the same names as Go struct fields.
-var tomlSettings = toml.Config{
- NormFieldName: func(rt reflect.Type, key string) string {
- return key
- },
- FieldToKey: func(rt reflect.Type, field string) string {
- return field
- },
- MissingField: func(rt reflect.Type, field string) error {
- id := fmt.Sprintf("%s.%s", rt.String(), field)
- if deprecated(id) {
- log.Warn("Config field is deprecated and won't have an effect", "name", id)
- return nil
- }
- var link string
- if unicode.IsUpper(rune(rt.Name()[0])) && rt.PkgPath() != "main" {
- link = fmt.Sprintf(", see https://godoc.org/%s#%s for available fields", rt.PkgPath(), rt.Name())
- }
- return fmt.Errorf("field '%s' is not defined in %s%s", field, rt.String(), link)
- },
-}
-
type ethstatsConfig struct {
URL string `toml:",omitempty"`
}
@@ -95,18 +71,17 @@ type gethConfig struct {
}
func loadConfig(file string, cfg *gethConfig) error {
- f, err := os.Open(file)
+ data, err := ioutil.ReadFile(file)
if err != nil {
return err
}
- defer f.Close()
- err = tomlSettings.NewDecoder(bufio.NewReader(f)).Decode(cfg)
- // Add file name to errors that have a line number.
- if _, ok := err.(*toml.LineError); ok {
- err = errors.New(file + ", " + err.Error())
+ tomlData := string(data)
+ if _, err = toml.Decode(tomlData, &cfg); err != nil {
+ return err
}
- return err
+
+ return nil
}
func defaultNodeConfig() node.Config {
@@ -214,22 +189,10 @@ func dumpConfig(ctx *cli.Context) error {
comment += "# Note: this config doesn't contain the genesis block.\n\n"
}
- out, err := tomlSettings.Marshal(&cfg)
- if err != nil {
+ if err := toml.NewEncoder(os.Stdout).Encode(&cfg); err != nil {
return err
}
- dump := os.Stdout
- if ctx.NArg() > 0 {
- dump, err = os.OpenFile(ctx.Args().Get(0), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
- if err != nil {
- return err
- }
- defer dump.Close()
- }
- dump.WriteString(comment)
- dump.Write(out)
-
return nil
}
@@ -366,7 +329,7 @@ func setDefaultMumbaiGethConfig(ctx *cli.Context, config *gethConfig) {
config.Eth.TxPool.AccountQueue = 64
config.Eth.TxPool.GlobalQueue = 131072
config.Eth.TxPool.Lifetime = 90 * time.Minute
- config.Node.P2P.MaxPeers = 200
+ config.Node.P2P.MaxPeers = 50
config.Metrics.Enabled = true
// --pprof is enabled in 'internal/debug/flags.go'
}
@@ -389,7 +352,7 @@ func setDefaultBorMainnetGethConfig(ctx *cli.Context, config *gethConfig) {
config.Eth.TxPool.AccountQueue = 64
config.Eth.TxPool.GlobalQueue = 131072
config.Eth.TxPool.Lifetime = 90 * time.Minute
- config.Node.P2P.MaxPeers = 200
+ config.Node.P2P.MaxPeers = 50
config.Metrics.Enabled = true
// --pprof is enabled in 'internal/debug/flags.go'
}
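
Since `loadConfig` now goes through BurntSushi's `toml.Decode`, ordinary struct tags (or case-insensitive field names) drive the mapping. A minimal, self-contained sketch of that decoding path, using a made-up two-field struct rather than the real `gethConfig`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// tinyConfig holds just two illustrative fields; the real gethConfig in
// cmd/geth/config.go has many more sections.
type tinyConfig struct {
	Chain   string `toml:"chain"`
	DataDir string `toml:"datadir"`
}

func main() {
	const data = `
chain = "mainnet"
datadir = "/var/lib/bor/data"
`
	var cfg tinyConfig
	if _, err := toml.Decode(data, &cfg); err != nil {
		log.Fatalln(err)
	}

	fmt.Println(cfg.Chain, cfg.DataDir) // mainnet /var/lib/bor/data
}
```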
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index e2bbdb17f8..79c73903fb 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -1620,6 +1620,13 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.GlobalIsSet(SyncModeFlag.Name) {
cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
+
+ // To be extra preventive, we won't allow the node to start
+ // in snap sync mode until we have it working
+ // TODO(snap): Comment when we have snap sync working
+ if cfg.SyncMode == downloader.SnapSync {
+ cfg.SyncMode = downloader.FullSync
+ }
}
if ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
diff --git a/common/debug/debug.go b/common/debug/debug.go
new file mode 100644
index 0000000000..6a677e495d
--- /dev/null
+++ b/common/debug/debug.go
@@ -0,0 +1,28 @@
+package debug
+
+import (
+ "runtime"
+)
+
+// Callers returns given number of callers with packages
+func Callers(show int) []string {
+ fpcs := make([]uintptr, show)
+
+ n := runtime.Callers(2, fpcs)
+ if n == 0 {
+ return nil
+ }
+
+ callers := make([]string, 0, len(fpcs))
+
+ for _, p := range fpcs {
+ caller := runtime.FuncForPC(p - 1)
+ if caller == nil {
+ continue
+ }
+
+ callers = append(callers, caller.Name())
+ }
+
+ return callers
+}
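
A hypothetical caller (not part of this diff) showing how the new `debug.Callers` helper can be used to log who reached a code path:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/debug"
)

func noisyOperation() {
	// Show up to two frames of whoever called into this function.
	for _, name := range debug.Callers(2) {
		fmt.Println("called from:", name)
	}
}

func main() {
	noisyOperation()
}
```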
diff --git a/common/leak/ignore_list.go b/common/leak/ignore_list.go
new file mode 100644
index 0000000000..56134dcabc
--- /dev/null
+++ b/common/leak/ignore_list.go
@@ -0,0 +1,23 @@
+package leak
+
+import "go.uber.org/goleak"
+
+func IgnoreList() []goleak.Option {
+ return []goleak.Option{
+ // a list of goroutine leaks that are hard to fix due to external dependencies or that would require too large a refactoring
+ goleak.IgnoreTopFunction("github.com/ethereum/go-ethereum/core.(*txSenderCacher).cache"),
+ goleak.IgnoreTopFunction("github.com/rjeczalik/notify.(*recursiveTree).dispatch"),
+ goleak.IgnoreTopFunction("github.com/rjeczalik/notify.(*recursiveTree).internal"),
+ goleak.IgnoreTopFunction("github.com/rjeczalik/notify.(*nonrecursiveTree).dispatch"),
+ goleak.IgnoreTopFunction("github.com/rjeczalik/notify.(*nonrecursiveTree).internal"),
+ goleak.IgnoreTopFunction("github.com/rjeczalik/notify._Cfunc_CFRunLoopRun"),
+
+ // todo: these leaks should be fixed
+ goleak.IgnoreTopFunction("github.com/ethereum/go-ethereum/metrics.(*meterArbiter).tick"),
+ goleak.IgnoreTopFunction("github.com/ethereum/go-ethereum/consensus/ethash.(*remoteSealer).loop"),
+ goleak.IgnoreTopFunction("github.com/ethereum/go-ethereum/core.(*BlockChain).updateFutureBlocks"),
+ goleak.IgnoreTopFunction("github.com/ethereum/go-ethereum/core/state/snapshot.(*diskLayer).generate"),
+ goleak.IgnoreTopFunction("github.com/ethereum/go-ethereum/accounts/abi/bind/backends.nullSubscription.func1"),
+ goleak.IgnoreTopFunction("github.com/ethereum/go-ethereum/eth/filters.(*EventSystem).eventLoop"),
+ }
+}
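
The intended pattern mirrors the simulated_test.go change above: hand the shared ignore list to goleak. A sketch of wiring it through a package-level `TestMain` instead (the package name is illustrative):

```go
package mypkg_test

import (
	"testing"

	"go.uber.org/goleak"

	"github.com/ethereum/go-ethereum/common/leak"
)

// TestMain fails the package's tests if any unexpected goroutine is still
// running after they finish, while tolerating the known leaks listed above.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m, leak.IgnoreList()...)
}
```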
diff --git a/common/set/slice.go b/common/set/slice.go
new file mode 100644
index 0000000000..36f11e67fe
--- /dev/null
+++ b/common/set/slice.go
@@ -0,0 +1,11 @@
+package set
+
+func New[T comparable](slice []T) map[T]struct{} {
+ m := make(map[T]struct{}, len(slice))
+
+ for _, el := range slice {
+ m[el] = struct{}{}
+ }
+
+ return m
+}
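
A quick illustration of the new generic set helper (it relies on the Go 1.18 toolchain the CI now targets); the values are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/set"
)

func main() {
	peers := set.New([]string{"a", "b", "a"})

	// Duplicates collapse and membership checks become O(1) map lookups.
	_, ok := peers["a"]
	fmt.Println(len(peers), ok) // 2 true
}
```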
diff --git a/consensus/bor/abi/interface.go b/consensus/bor/abi/interface.go
new file mode 100644
index 0000000000..bb05bf0b23
--- /dev/null
+++ b/consensus/bor/abi/interface.go
@@ -0,0 +1,6 @@
+package abi
+
+type ABI interface {
+ Pack(name string, args ...interface{}) ([]byte, error)
+ UnpackIntoInterface(v interface{}, name string, data []byte) error
+}
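
This narrow interface decouples the consensus code from the concrete accounts/abi type. As a sketch (and assuming `abi.ABI`'s `Pack` and `UnpackIntoInterface` keep their value receivers), a compile-time assertion would confirm the standard type satisfies it:

```go
package main

import (
	ethabi "github.com/ethereum/go-ethereum/accounts/abi"
	borabi "github.com/ethereum/go-ethereum/consensus/bor/abi"
)

// Compile-time check: the concrete ABI type can be passed wherever the
// consensus code only asks for Pack/UnpackIntoInterface.
var _ borabi.ABI = ethabi.ABI{}

func main() {}
```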
diff --git a/consensus/bor/api.go b/consensus/bor/api.go
index 12841290af..26d1efdaf1 100644
--- a/consensus/bor/api.go
+++ b/consensus/bor/api.go
@@ -4,14 +4,17 @@ import (
"encoding/hex"
"math"
"math/big"
+ "sort"
"strconv"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rpc"
+
lru "github.com/hashicorp/golang-lru"
"github.com/xsleonard/go-merkle"
"golang.org/x/crypto/sha3"
@@ -43,9 +46,88 @@ func (api *API) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) {
if header == nil {
return nil, errUnknownBlock
}
+
return api.bor.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
}
+type BlockSigners struct {
+ Signers []difficultiesKV
+ Diff int
+ Author common.Address
+}
+
+type difficultiesKV struct {
+ Signer common.Address
+ Difficulty uint64
+}
+
+func rankMapDifficulties(values map[common.Address]uint64) []difficultiesKV {
+ ss := make([]difficultiesKV, 0, len(values))
+ for k, v := range values {
+ ss = append(ss, difficultiesKV{k, v})
+ }
+
+ sort.Slice(ss, func(i, j int) bool {
+ return ss[i].Difficulty > ss[j].Difficulty
+ })
+
+ return ss
+}
+
+// GetSnapshotProposerSequence retrieves the in-turn signers of all sprints in a span
+func (api *API) GetSnapshotProposerSequence(number *rpc.BlockNumber) (BlockSigners, error) {
+ snapNumber := *number - 1
+
+ var difficulties = make(map[common.Address]uint64)
+
+ snap, err := api.GetSnapshot(&snapNumber)
+
+ if err != nil {
+ return BlockSigners{}, err
+ }
+
+ proposer := snap.ValidatorSet.GetProposer().Address
+ proposerIndex, _ := snap.ValidatorSet.GetByAddress(proposer)
+
+ signers := snap.signers()
+ for i := 0; i < len(signers); i++ {
+ tempIndex := i
+ if tempIndex < proposerIndex {
+ tempIndex = tempIndex + len(signers)
+ }
+
+ difficulties[signers[i]] = uint64(len(signers) - (tempIndex - proposerIndex))
+ }
+
+ rankedDifficulties := rankMapDifficulties(difficulties)
+
+ author, err := api.GetAuthor(number)
+ if err != nil {
+ return BlockSigners{}, err
+ }
+
+ diff := int(difficulties[*author])
+ blockSigners := &BlockSigners{
+ Signers: rankedDifficulties,
+ Diff: diff,
+ Author: *author,
+ }
+
+ return *blockSigners, nil
+}
+
+// GetSnapshotProposer retrieves the in-turn signer at a given block.
+func (api *API) GetSnapshotProposer(number *rpc.BlockNumber) (common.Address, error) {
+ *number -= 1
+ snap, err := api.GetSnapshot(number)
+
+ if err != nil {
+ return common.Address{}, err
+ }
+
+ return snap.ValidatorSet.GetProposer().Address, nil
+}
+
// GetAuthor retrieves the author of a block.
func (api *API) GetAuthor(number *rpc.BlockNumber) (*common.Address, error) {
// Retrieve the requested block number (or current if none requested)
@@ -59,7 +141,9 @@ func (api *API) GetAuthor(number *rpc.BlockNumber) (*common.Address, error) {
if header == nil {
return nil, errUnknownBlock
}
+
author, err := api.bor.Author(header)
+
return &author, err
}
@@ -69,6 +153,7 @@ func (api *API) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) {
if header == nil {
return nil, errUnknownBlock
}
+
return api.bor.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
}
@@ -85,10 +170,13 @@ func (api *API) GetSigners(number *rpc.BlockNumber) ([]common.Address, error) {
if header == nil {
return nil, errUnknownBlock
}
+
snap, err := api.bor.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
+
if err != nil {
return nil, err
}
+
return snap.signers(), nil
}
@@ -98,10 +186,13 @@ func (api *API) GetSignersAtHash(hash common.Hash) ([]common.Address, error) {
if header == nil {
return nil, errUnknownBlock
}
+
snap, err := api.bor.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
+
if err != nil {
return nil, err
}
+
return snap.signers(), nil
}
@@ -111,15 +202,17 @@ func (api *API) GetCurrentProposer() (common.Address, error) {
if err != nil {
return common.Address{}, err
}
+
return snap.ValidatorSet.GetProposer().Address, nil
}
// GetCurrentValidators gets the current validators
-func (api *API) GetCurrentValidators() ([]*Validator, error) {
+func (api *API) GetCurrentValidators() ([]*valset.Validator, error) {
snap, err := api.GetSnapshot(nil)
if err != nil {
- return make([]*Validator, 0), err
+ return make([]*valset.Validator, 0), err
}
+
return snap.ValidatorSet.Validators, nil
}
@@ -128,26 +221,36 @@ func (api *API) GetRootHash(start uint64, end uint64) (string, error) {
if err := api.initializeRootHashCache(); err != nil {
return "", err
}
+
key := getRootHashKey(start, end)
+
if root, known := api.rootHashCache.Get(key); known {
return root.(string), nil
}
- length := uint64(end - start + 1)
+
+ length := end - start + 1
+
if length > MaxCheckpointLength {
return "", &MaxCheckpointLengthExceededError{start, end}
}
+
currentHeaderNumber := api.chain.CurrentHeader().Number.Uint64()
+
if start > end || end > currentHeaderNumber {
- return "", &InvalidStartEndBlockError{start, end, currentHeaderNumber}
+ return "", &valset.InvalidStartEndBlockError{Start: start, End: end, CurrentHeader: currentHeaderNumber}
}
+
blockHeaders := make([]*types.Header, end-start+1)
wg := new(sync.WaitGroup)
concurrent := make(chan bool, 20)
+
for i := start; i <= end; i++ {
wg.Add(1)
concurrent <- true
+
go func(number uint64) {
- blockHeaders[number-start] = api.chain.GetHeaderByNumber(uint64(number))
+ blockHeaders[number-start] = api.chain.GetHeaderByNumber(number)
+
<-concurrent
wg.Done()
}(i)
@@ -156,6 +259,7 @@ func (api *API) GetRootHash(start uint64, end uint64) (string, error) {
close(concurrent)
headers := make([][32]byte, nextPowerOfTwo(length))
+
for i := 0; i < len(blockHeaders); i++ {
blockHeader := blockHeaders[i]
header := crypto.Keccak256(appendBytes32(
@@ -166,6 +270,7 @@ func (api *API) GetRootHash(start uint64, end uint64) (string, error) {
))
var arr [32]byte
+
copy(arr[:], header)
headers[i] = arr
}
@@ -174,8 +279,10 @@ func (api *API) GetRootHash(start uint64, end uint64) (string, error) {
if err := tree.Generate(convert(headers), sha3.NewLegacyKeccak256()); err != nil {
return "", err
}
+
root := hex.EncodeToString(tree.Root().Hash)
api.rootHashCache.Add(key, root)
+
return root, nil
}
@@ -184,6 +291,7 @@ func (api *API) initializeRootHashCache() error {
if api.rootHashCache == nil {
api.rootHashCache, err = lru.NewARC(10)
}
+
return err
}
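
The difficulty map built in `GetSnapshotProposerSequence` can be read as: the proposer gets difficulty `len(signers)`, the next in-turn signer `len(signers)-1`, and so on, wrapping around the signer list. A standalone sketch of that calculation with made-up indices:

```go
package main

import "fmt"

// difficulty reproduces the wrap-around ranking used by
// GetSnapshotProposerSequence: the proposer gets the highest value.
func difficulty(signerIdx, proposerIdx, n int) uint64 {
	tempIdx := signerIdx
	if tempIdx < proposerIdx {
		tempIdx += n
	}

	return uint64(n - (tempIdx - proposerIdx))
}

func main() {
	// Four signers with the proposer at index 1: the proposer gets 4, the
	// following signers 3 and 2, and the wrapped-around signer at index 0 gets 1.
	for i := 0; i < 4; i++ {
		fmt.Printf("signer %d -> difficulty %d\n", i, difficulty(i, 1, 4))
	}
}
```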
diff --git a/consensus/bor/api/caller.go b/consensus/bor/api/caller.go
new file mode 100644
index 0000000000..d5fe259c97
--- /dev/null
+++ b/consensus/bor/api/caller.go
@@ -0,0 +1,14 @@
+package api
+
+import (
+ "context"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+//go:generate mockgen -destination=./caller_mock.go -package=api . Caller
+type Caller interface {
+ Call(ctx context.Context, args ethapi.TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *ethapi.StateOverride) (hexutil.Bytes, error)
+}
diff --git a/consensus/bor/api/caller_mock.go b/consensus/bor/api/caller_mock.go
new file mode 100644
index 0000000000..940c99d178
--- /dev/null
+++ b/consensus/bor/api/caller_mock.go
@@ -0,0 +1,53 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/ethereum/go-ethereum/consensus/bor/api (interfaces: Caller)
+
+// Package api is a generated GoMock package.
+package api
+
+import (
+ context "context"
+ reflect "reflect"
+
+ hexutil "github.com/ethereum/go-ethereum/common/hexutil"
+ ethapi "github.com/ethereum/go-ethereum/internal/ethapi"
+ rpc "github.com/ethereum/go-ethereum/rpc"
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockCaller is a mock of Caller interface.
+type MockCaller struct {
+ ctrl *gomock.Controller
+ recorder *MockCallerMockRecorder
+}
+
+// MockCallerMockRecorder is the mock recorder for MockCaller.
+type MockCallerMockRecorder struct {
+ mock *MockCaller
+}
+
+// NewMockCaller creates a new mock instance.
+func NewMockCaller(ctrl *gomock.Controller) *MockCaller {
+ mock := &MockCaller{ctrl: ctrl}
+ mock.recorder = &MockCallerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockCaller) EXPECT() *MockCallerMockRecorder {
+ return m.recorder
+}
+
+// Call mocks base method.
+func (m *MockCaller) Call(arg0 context.Context, arg1 ethapi.TransactionArgs, arg2 rpc.BlockNumberOrHash, arg3 *ethapi.StateOverride) (hexutil.Bytes, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Call", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(hexutil.Bytes)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Call indicates an expected call of Call.
+func (mr *MockCallerMockRecorder) Call(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Call", reflect.TypeOf((*MockCaller)(nil).Call), arg0, arg1, arg2, arg3)
+}
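
The generated mock is meant for gomock-based unit tests of code that only needs `api.Caller`; a hedged sketch with placeholder expectations:

```go
package api_test

import (
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/consensus/bor/api"
)

func TestCallerMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	caller := api.NewMockCaller(ctrl)

	// In this sketch every Call returns empty bytes and no error.
	caller.EXPECT().
		Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
		Return(hexutil.Bytes{}, nil).
		AnyTimes()

	// The mock satisfies api.Caller, so it can be handed to the consensus
	// engine (or any other consumer of the interface) in tests.
	var _ api.Caller = caller
}
```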
diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go
index 0e5ed3e5a3..f140146976 100644
--- a/consensus/bor/bor.go
+++ b/consensus/bor/bor.go
@@ -2,38 +2,34 @@ package bor
import (
"bytes"
- "context"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
- "math"
"math/big"
"sort"
"strconv"
- "strings"
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
"golang.org/x/crypto/sha3"
- ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
- "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/bor/api"
+ "github.com/ethereum/go-ethereum/consensus/bor/clerk"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span"
+ "github.com/ethereum/go-ethereum/consensus/bor/statefull"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -56,11 +52,7 @@ var (
uncleHash = types.CalcUncleHash(nil) // Always Keccak256(RLP([])) as uncles are meaningless outside of PoW.
- diffInTurn = big.NewInt(2) // Block difficulty for in-turn signatures
- diffNoTurn = big.NewInt(1) // Block difficulty for out-of-turn signatures
-
validatorHeaderBytesLength = common.AddressLength + 20 // address + power
- systemAddress = common.HexToAddress("0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE")
)
// Various error messages to mark blocks invalid. These should be private to
@@ -72,18 +64,6 @@ var (
// that is not part of the local blockchain.
errUnknownBlock = errors.New("unknown block")
- // errInvalidCheckpointBeneficiary is returned if a checkpoint/epoch transition
- // block has a beneficiary set to non-zeroes.
- errInvalidCheckpointBeneficiary = errors.New("beneficiary in checkpoint block non-zero")
-
- // errInvalidVote is returned if a nonce value is something else that the two
- // allowed constants of 0x00..0 or 0xff..f.
- errInvalidVote = errors.New("vote nonce not 0x00..0 or 0xff..f")
-
- // errInvalidCheckpointVote is returned if a checkpoint/epoch transition block
- // has a vote nonce set to non-zeroes.
- errInvalidCheckpointVote = errors.New("vote nonce in checkpoint block non-zero")
-
// errMissingVanity is returned if a block's extra-data section is shorter than
// 32 bytes, which is required to store the signer vanity.
errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing")
@@ -116,9 +96,6 @@ var (
// errOutOfRangeChain is returned if an authorization list is attempted to
// be modified via out-of-range or non-contiguous headers.
errOutOfRangeChain = errors.New("out of range or non-contiguous chain")
-
- // errShutdownDetected is returned if a shutdown was detected
- errShutdownDetected = errors.New("shutdown detected")
)
// SignerFn is a signer callback function to request a header to be signed by a
@@ -136,6 +113,7 @@ func ecrecover(header *types.Header, sigcache *lru.ARCCache, c *params.BorConfig
if len(header.Extra) < extraSeal {
return common.Address{}, errMissingSignature
}
+
signature := header.Extra[len(header.Extra)-extraSeal:]
// Recover the public key and the Ethereum address
@@ -143,10 +121,13 @@ func ecrecover(header *types.Header, sigcache *lru.ARCCache, c *params.BorConfig
if err != nil {
return common.Address{}, err
}
+
var signer common.Address
+
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
sigcache.Add(hash, signer)
+
return signer, nil
}
@@ -155,6 +136,7 @@ func SealHash(header *types.Header, c *params.BorConfig) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
encodeSigHeader(hasher, header, c)
hasher.Sum(hash[:0])
+
return hash
}
@@ -176,11 +158,13 @@ func encodeSigHeader(w io.Writer, header *types.Header, c *params.BorConfig) {
header.MixDigest,
header.Nonce,
}
+
if c.IsJaipur(header.Number.Uint64()) {
if header.BaseFee != nil {
enc = append(enc, header.BaseFee)
}
}
+
if err := rlp.Encode(w, enc); err != nil {
panic("can't encode: " + err.Error())
}
@@ -194,9 +178,11 @@ func CalcProducerDelay(number uint64, succession int, c *params.BorConfig) uint6
if number%c.Sprint == 0 {
delay = c.ProducerDelay
}
+
if succession > 0 {
delay += uint64(succession) * c.CalculateBackupMultiplier(number)
}
+
return delay
}
@@ -210,6 +196,7 @@ func CalcProducerDelay(number uint64, succession int, c *params.BorConfig) uint6
func BorRLP(header *types.Header, c *params.BorConfig) []byte {
b := new(bytes.Buffer)
encodeSigHeader(b, header, c)
+
return b.Bytes()
}
@@ -226,25 +213,25 @@ type Bor struct {
signFn SignerFn // Signer function to authorize hashes with
lock sync.RWMutex // Protects the signer fields
- ethAPI *ethapi.PublicBlockChainAPI
- GenesisContractsClient *GenesisContractsClient
- validatorSetABI abi.ABI
- stateReceiverABI abi.ABI
+ ethAPI api.Caller
+ spanner Spanner
+ GenesisContractsClient GenesisContract
HeimdallClient IHeimdallClient
- WithoutHeimdall bool
- scope event.SubscriptionScope
// The fields below are for testing only
fakeDiff bool // Skip difficulty verifications
+
+ closeOnce sync.Once
}
// New creates a Matic Bor consensus engine.
func New(
chainConfig *params.ChainConfig,
db ethdb.Database,
- ethAPI *ethapi.PublicBlockChainAPI,
- heimdallURL string,
- withoutHeimdall bool,
+ ethAPI api.Caller,
+ spanner Spanner,
+ heimdallClient IHeimdallClient,
+ genesisContracts GenesisContract,
) *Bor {
// get bor config
borConfig := chainConfig.Bor
@@ -253,14 +240,10 @@ func New(
if borConfig != nil && borConfig.Sprint == 0 {
borConfig.Sprint = defaultSprintLength
}
-
// Allocate the snapshot caches and create the engine
recents, _ := lru.NewARC(inmemorySnapshots)
signatures, _ := lru.NewARC(inmemorySignatures)
- vABI, _ := abi.JSON(strings.NewReader(validatorsetABI))
- sABI, _ := abi.JSON(strings.NewReader(stateReceiverABI))
- heimdallClient, _ := NewHeimdallClient(heimdallURL)
- genesisContractsClient := NewGenesisContractsClient(chainConfig, borConfig.ValidatorContract, borConfig.StateReceiverContract, ethAPI)
+
c := &Bor{
chainConfig: chainConfig,
config: borConfig,
@@ -268,11 +251,9 @@ func New(
ethAPI: ethAPI,
recents: recents,
signatures: signatures,
- validatorSetABI: vABI,
- stateReceiverABI: sABI,
- GenesisContractsClient: genesisContractsClient,
+ spanner: spanner,
+ GenesisContractsClient: genesisContracts,
HeimdallClient: heimdallClient,
- WithoutHeimdall: withoutHeimdall,
}
// make sure we can decode all the GenesisAlloc in the BorConfig.
@@ -314,6 +295,7 @@ func (c *Bor) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.
}
}
}()
+
return abort, results
}
@@ -325,6 +307,7 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head
if header.Number == nil {
return errUnknownBlock
}
+
number := header.Number.Uint64()
// Don't waste time checking blocks from the future
@@ -337,39 +320,47 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head
}
// check extra data
- isSprintEnd := (number+1)%c.config.Sprint == 0
+ isSprintEnd := IsSprintStart(number+1, c.config.Sprint)
// Ensure that the extra-data contains a signer list on checkpoint, but none otherwise
signersBytes := len(header.Extra) - extraVanity - extraSeal
if !isSprintEnd && signersBytes != 0 {
return errExtraValidators
}
+
if isSprintEnd && signersBytes%validatorHeaderBytesLength != 0 {
return errInvalidSpanValidators
}
+
// Ensure that the mix digest is zero as we don't have fork protection currently
if header.MixDigest != (common.Hash{}) {
return errInvalidMixDigest
}
+
// Ensure that the block doesn't contain any uncles which are meaningless in PoA
if header.UncleHash != uncleHash {
return errInvalidUncleHash
}
+
// Ensure that the block's difficulty is meaningful (may not be correct at this point)
if number > 0 {
if header.Difficulty == nil {
return errInvalidDifficulty
}
}
+
// Verify that the gas limit is <= 2^63-1
- cap := uint64(0x7fffffffffffffff)
- if header.GasLimit > cap {
- return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
+ gasCap := uint64(0x7fffffffffffffff)
+
+ if header.GasLimit > gasCap {
+ return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, gasCap)
}
+
// If all checks passed, validate any special fields for hard forks
if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
return err
}
+
// All basic checks passed, verify cascading fields
return c.verifyCascadingFields(chain, header, parents)
}
@@ -380,9 +371,11 @@ func validateHeaderExtraField(extraBytes []byte) error {
if len(extraBytes) < extraVanity {
return errMissingVanity
}
+
if len(extraBytes) < extraVanity+extraSeal {
return errMissingSignature
}
+
return nil
}
@@ -393,12 +386,14 @@ func validateHeaderExtraField(extraBytes []byte) error {
func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
// The genesis block is the always valid dead-end
number := header.Number.Uint64()
+
if number == 0 {
return nil
}
// Ensure that the block's timestamp isn't too close to it's parent
var parent *types.Header
+
if len(parents) > 0 {
parent = parents[len(parents)-1]
} else {
@@ -413,11 +408,13 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t
if header.GasUsed > header.GasLimit {
return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
}
+
if !chain.Config().IsLondon(header.Number) {
// Verify BaseFee not present before EIP-1559 fork.
if header.BaseFee != nil {
return fmt.Errorf("invalid baseFee before fork: have %d, want ", header.BaseFee)
}
+
if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil {
return err
}
@@ -437,13 +434,14 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t
}
// verify the validator list in the last sprint block
- if isSprintStart(number, c.config.Sprint) {
+ if IsSprintStart(number, c.config.Sprint) {
parentValidatorBytes := parent.Extra[extraVanity : len(parent.Extra)-extraSeal]
validatorsBytes := make([]byte, len(snap.ValidatorSet.Validators)*validatorHeaderBytesLength)
currentValidators := snap.ValidatorSet.Copy().Validators
// sort validator by address
- sort.Sort(ValidatorsByAddress(currentValidators))
+ sort.Sort(valset.ValidatorsByAddress(currentValidators))
+
for i, validator := range currentValidators {
copy(validatorsBytes[i*validatorHeaderBytesLength:], validator.HeaderBytes())
}
@@ -458,25 +456,29 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t
}
// snapshot retrieves the authorization snapshot at a given point in time.
+// nolint: gocognit
func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
// Search for a snapshot in memory or on disk for checkpoints
- var (
- headers []*types.Header
- snap *Snapshot
- )
+ var snap *Snapshot
+ headers := make([]*types.Header, 0, 16)
+
+ //nolint:govet
for snap == nil {
// If an in-memory snapshot was found, use that
if s, ok := c.recents.Get(hash); ok {
snap = s.(*Snapshot)
+
break
}
// If an on-disk checkpoint snapshot can be found, use that
if number%checkpointInterval == 0 {
- if s, err := loadSnapshot(c.config, c.signatures, c.db, hash, c.ethAPI); err == nil {
+ if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
log.Trace("Loaded snapshot from disk", "number", number, "hash", hash)
+
snap = s
+
break
}
}
@@ -486,6 +488,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
// up more headers than allowed to be reorged (chain reinit from a freezer),
// consider the checkpoint trusted and snapshot it.
// TODO fix this
+ // nolint:nestif
if number == 0 {
checkpoint := chain.GetHeaderByNumber(number)
if checkpoint != nil {
@@ -493,17 +496,19 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
hash := checkpoint.Hash()
// get validators and current span
- validators, err := c.GetCurrentValidators(hash, number+1)
+ validators, err := c.spanner.GetCurrentValidators(hash, number+1)
if err != nil {
return nil, err
}
// new snap shot
- snap = newSnapshot(c.config, c.signatures, number, hash, validators, c.ethAPI)
+ snap = newSnapshot(c.config, c.signatures, number, hash, validators)
if err := snap.store(c.db); err != nil {
return nil, err
}
+
log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
+
break
}
}
@@ -516,6 +521,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
if header.Hash() != hash || header.Number.Uint64() != number {
return nil, consensus.ErrUnknownAncestor
}
+
parents = parents[:len(parents)-1]
} else {
// No explicit parents (or no more left), reach out to the database
@@ -524,6 +530,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
return nil, consensus.ErrUnknownAncestor
}
}
+
headers = append(headers, header)
number, hash = number-1, header.ParentHash
}
@@ -542,6 +549,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
if err != nil {
return nil, err
}
+
c.recents.Add(snap.Hash, snap)
// If we've generated a new checkpoint snapshot, save to disk
@@ -549,8 +557,10 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
if err = snap.store(c.db); err != nil {
return nil, err
}
+
log.Trace("Stored snapshot to disk", "number", snap.Number, "hash", snap.Hash)
}
+
return snap, err
}
@@ -560,6 +570,7 @@ func (c *Bor) VerifyUncles(chain consensus.ChainReader, block *types.Block) erro
if len(block.Uncles()) > 0 {
return errors.New("uncles not allowed")
}
+
return nil
}
@@ -590,6 +601,7 @@ func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header
if err != nil {
return err
}
+
if !snap.ValidatorSet.HasAddress(signer.Bytes()) {
// Check the UnauthorizedSignerError.Error() msg to see why we pass number-1
return &UnauthorizedSignerError{number - 1, signer.Bytes()}
@@ -643,17 +655,19 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e
if len(header.Extra) < extraVanity {
header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...)
}
+
header.Extra = header.Extra[:extraVanity]
// get validator set if number
- if (number+1)%c.config.Sprint == 0 {
- newValidators, err := c.GetCurrentValidators(header.ParentHash, number+1)
+ if IsSprintStart(number+1, c.config.Sprint) {
+ newValidators, err := c.spanner.GetCurrentValidators(header.ParentHash, number+1)
if err != nil {
return errors.New("unknown validators")
}
// sort validator by address
- sort.Sort(ValidatorsByAddress(newValidators))
+ sort.Sort(valset.ValidatorsByAddress(newValidators))
+
for _, validator := range newValidators {
header.Extra = append(header.Extra, validator.HeaderBytes()...)
}
@@ -673,7 +687,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e
var succession int
// if signer is not empty
- if bytes.Compare(c.signer.Bytes(), common.Address{}.Bytes()) != 0 {
+ if c.signer != (common.Address{}) {
succession, err = snap.GetSignerSuccessionNumber(c.signer)
if err != nil {
return err
@@ -684,6 +698,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e
if header.Time < uint64(time.Now().Unix()) {
header.Time = uint64(time.Now().Unix())
}
+
return nil
}
@@ -693,16 +708,18 @@ func (c *Bor) Finalize(chain consensus.ChainHeaderReader, header *types.Header,
stateSyncData := []*types.StateSyncData{}
var err error
+
headerNumber := header.Number.Uint64()
+
if headerNumber%c.config.Sprint == 0 {
- cx := chainContext{Chain: chain, Bor: c}
+ cx := statefull.ChainContext{Chain: chain, Bor: c}
// check and commit span
if err := c.checkAndCommitSpan(state, header, cx); err != nil {
log.Error("Error while committing span", "error", err)
return
}
- if !c.WithoutHeimdall {
+ if c.HeimdallClient != nil {
// commit states
stateSyncData, err = c.CommitStates(state, header, cx)
if err != nil {
@@ -728,13 +745,16 @@ func (c *Bor) Finalize(chain consensus.ChainHeaderReader, header *types.Header,
func decodeGenesisAlloc(i interface{}) (core.GenesisAlloc, error) {
var alloc core.GenesisAlloc
+
b, err := json.Marshal(i)
if err != nil {
return nil, err
}
+
if err := json.Unmarshal(b, &alloc); err != nil {
return nil, err
}
+
return alloc, nil
}
@@ -745,12 +765,14 @@ func (c *Bor) changeContractCodeIfNeeded(headerNumber uint64, state *state.State
if err != nil {
return fmt.Errorf("failed to decode genesis alloc: %v", err)
}
+
for addr, account := range allocs {
log.Info("change contract code", "address", addr)
state.SetCode(addr, account.Code)
}
}
}
+
return nil
}
@@ -762,7 +784,7 @@ func (c *Bor) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *typ
headerNumber := header.Number.Uint64()
if headerNumber%c.config.Sprint == 0 {
- cx := chainContext{Chain: chain, Bor: c}
+ cx := statefull.ChainContext{Chain: chain, Bor: c}
// check and commit span
err := c.checkAndCommitSpan(state, header, cx)
@@ -771,7 +793,7 @@ func (c *Bor) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *typ
return nil, err
}
- if !c.WithoutHeimdall {
+ if c.HeimdallClient != nil {
// commit states
stateSyncData, err = c.CommitStates(state, header, cx)
if err != nil {
@@ -794,7 +816,7 @@ func (c *Bor) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *typ
block := types.NewBlock(header, txs, nil, receipts, new(trie.Trie))
// set state sync
- bc := chain.(*core.BlockChain)
+ bc := chain.(core.BorStateSyncer)
bc.SetStateSync(stateSyncData)
// return the final block for sealing
@@ -852,14 +874,14 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
wiggle := time.Duration(successionNumber) * time.Duration(c.config.CalculateBackupMultiplier(number)) * time.Second
// Sign all the things!
- sighash, err := signFn(accounts.Account{Address: signer}, accounts.MimetypeBor, BorRLP(header, c.config))
+ err = Sign(signFn, signer, header, c.config)
if err != nil {
return err
}
- copy(header.Extra[len(header.Extra)-extraSeal:], sighash)
// Wait until sealing is terminated or delay timeout.
- log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay))
+ log.Info("Waiting for slot to sign and propagate", "number", number, "hash", header.Hash, "delay-in-sec", uint(delay), "delay", common.PrettyDuration(delay))
+
go func() {
select {
case <-stop:
@@ -870,10 +892,13 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
log.Info(
"Sealing out-of-turn",
"number", number,
+ "hash", header.Hash,
+ "wiggle-in-sec", uint(wiggle),
"wiggle", common.PrettyDuration(wiggle),
"in-turn-signer", snap.ValidatorSet.GetProposer().Address.Hex(),
)
}
+
log.Info(
"Sealing successful",
"number", number,
@@ -887,6 +912,18 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
log.Warn("Sealing result was not read by miner", "number", number, "sealhash", SealHash(header, c.config))
}
}()
+
+ return nil
+}
+
+func Sign(signFn SignerFn, signer common.Address, header *types.Header, c *params.BorConfig) error {
+ sighash, err := signFn(accounts.Account{Address: signer}, accounts.MimetypeBor, BorRLP(header, c))
+ if err != nil {
+ return err
+ }
+
+ copy(header.Extra[len(header.Extra)-extraSeal:], sighash)
+
return nil
}
@@ -898,6 +935,7 @@ func (c *Bor) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, par
if err != nil {
return nil
}
+
return new(big.Int).SetUint64(snap.Difficulty(c.signer))
}
@@ -919,112 +957,13 @@ func (c *Bor) APIs(chain consensus.ChainHeaderReader) []rpc.API {
// Close implements consensus.Engine. It's a noop for bor as there are no background threads.
func (c *Bor) Close() error {
- c.HeimdallClient.Close()
- return nil
-}
-
-// GetCurrentSpan get current span from contract
-func (c *Bor) GetCurrentSpan(headerHash common.Hash) (*Span, error) {
- // block
- blockNr := rpc.BlockNumberOrHashWithHash(headerHash, false)
-
- // method
- method := "getCurrentSpan"
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- data, err := c.validatorSetABI.Pack(method)
- if err != nil {
- log.Error("Unable to pack tx for getCurrentSpan", "error", err)
- return nil, err
- }
-
- msgData := (hexutil.Bytes)(data)
- toAddress := common.HexToAddress(c.config.ValidatorContract)
- gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
- result, err := c.ethAPI.Call(ctx, ethapi.TransactionArgs{
- Gas: &gas,
- To: &toAddress,
- Data: &msgData,
- }, blockNr, nil)
- if err != nil {
- return nil, err
- }
-
- // span result
- ret := new(struct {
- Number *big.Int
- StartBlock *big.Int
- EndBlock *big.Int
- })
- if err := c.validatorSetABI.UnpackIntoInterface(ret, method, result); err != nil {
- return nil, err
- }
-
- // create new span
- span := Span{
- ID: ret.Number.Uint64(),
- StartBlock: ret.StartBlock.Uint64(),
- EndBlock: ret.EndBlock.Uint64(),
- }
-
- return &span, nil
-}
-
-// GetCurrentValidators get current validators
-func (c *Bor) GetCurrentValidators(headerHash common.Hash, blockNumber uint64) ([]*Validator, error) {
- // block
- blockNr := rpc.BlockNumberOrHashWithHash(headerHash, false)
-
- // method
- method := "getBorValidators"
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- data, err := c.validatorSetABI.Pack(method, big.NewInt(0).SetUint64(blockNumber))
- if err != nil {
- log.Error("Unable to pack tx for getValidator", "error", err)
- return nil, err
- }
-
- // call
- msgData := (hexutil.Bytes)(data)
- toAddress := common.HexToAddress(c.config.ValidatorContract)
- gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
- result, err := c.ethAPI.Call(ctx, ethapi.TransactionArgs{
- Gas: &gas,
- To: &toAddress,
- Data: &msgData,
- }, blockNr, nil)
- if err != nil {
- panic(err)
- // return nil, err
- }
-
- var (
- ret0 = new([]common.Address)
- ret1 = new([]*big.Int)
- )
- out := &[]interface{}{
- ret0,
- ret1,
- }
-
- if err := c.validatorSetABI.UnpackIntoInterface(out, method, result); err != nil {
- return nil, err
- }
-
- valz := make([]*Validator, len(*ret0))
- for i, a := range *ret0 {
- valz[i] = &Validator{
- Address: a,
- VotingPower: (*ret1)[i].Int64(),
+ c.closeOnce.Do(func() {
+ if c.HeimdallClient != nil {
+ c.HeimdallClient.Close()
}
- }
+ })
- return valz, nil
+ return nil
}
func (c *Bor) checkAndCommitSpan(
@@ -1033,18 +972,20 @@ func (c *Bor) checkAndCommitSpan(
chain core.ChainContext,
) error {
headerNumber := header.Number.Uint64()
- span, err := c.GetCurrentSpan(header.ParentHash)
+
+ span, err := c.spanner.GetCurrentSpan(header.ParentHash)
if err != nil {
return err
}
+
if c.needToCommitSpan(span, headerNumber) {
- err := c.fetchAndCommitSpan(span.ID+1, state, header, chain)
- return err
+ return c.FetchAndCommitSpan(span.ID+1, state, header, chain)
}
+
return nil
}
-func (c *Bor) needToCommitSpan(span *Span, headerNumber uint64) bool {
+func (c *Bor) needToCommitSpan(span *span.Span, headerNumber uint64) bool {
// if span is nil
if span == nil {
return false
@@ -1063,124 +1004,52 @@ func (c *Bor) needToCommitSpan(span *Span, headerNumber uint64) bool {
return false
}
-func (c *Bor) fetchAndCommitSpan(
+func (c *Bor) FetchAndCommitSpan(
newSpanID uint64,
state *state.StateDB,
header *types.Header,
chain core.ChainContext,
) error {
- var heimdallSpan HeimdallSpan
+ var heimdallSpan span.HeimdallSpan
- if c.WithoutHeimdall {
- s, err := c.getNextHeimdallSpanForTest(newSpanID, state, header, chain)
+ if c.HeimdallClient == nil {
+ // fixme: move to a new mock or fake and remove c.HeimdallClient completely
+ s, err := c.getNextHeimdallSpanForTest(newSpanID, header, chain)
if err != nil {
return err
}
+
heimdallSpan = *s
} else {
-
- var spanArray []*ResponseWithHeight
-
- if err := json.Unmarshal([]byte(SPANS), &spanArray); err != nil {
+ response, err := c.HeimdallClient.Span(newSpanID)
+ if err != nil {
return err
}
- spanInJSON := false
-
- for _, val := range spanArray {
-
- var tempHeimdallSpan HeimdallSpan
-
- if err := json.Unmarshal(val.Result, &tempHeimdallSpan); err != nil {
- return err
- }
-
- if tempHeimdallSpan.ID == newSpanID {
- heimdallSpan = tempHeimdallSpan
- log.Info("Got span from local", "span", heimdallSpan.Span.ID)
- spanInJSON = true
- break
- }
- }
-
- if !spanInJSON {
- response, err := c.HeimdallClient.FetchWithRetry(fmt.Sprintf("bor/span/%d", newSpanID), "")
- if err != nil {
- return err
- }
- if err := json.Unmarshal(response.Result, &heimdallSpan); err != nil {
- return err
- }
- }
+ heimdallSpan = *response
}
- // check if chain id matches with heimdall span
+ // check if chain id matches with Heimdall span
if heimdallSpan.ChainID != c.chainConfig.ChainID.String() {
return fmt.Errorf(
- "Chain id proposed span, %s, and bor chain id, %s, doesn't match",
+ "chain id proposed span, %s, and bor chain id, %s, doesn't match",
heimdallSpan.ChainID,
c.chainConfig.ChainID,
)
}
- // get validators bytes
- var validators []MinimalVal
- for _, val := range heimdallSpan.ValidatorSet.Validators {
- validators = append(validators, val.MinimalVal())
- }
- validatorBytes, err := rlp.EncodeToBytes(validators)
- if err != nil {
- return err
- }
-
- // get producers bytes
- var producers []MinimalVal
- for _, val := range heimdallSpan.SelectedProducers {
- producers = append(producers, val.MinimalVal())
- }
- producerBytes, err := rlp.EncodeToBytes(producers)
- if err != nil {
- return err
- }
-
- // method
- method := "commitSpan"
- log.Info("✅ Committing new span",
- "id", heimdallSpan.ID,
- "startBlock", heimdallSpan.StartBlock,
- "endBlock", heimdallSpan.EndBlock,
- "validatorBytes", hex.EncodeToString(validatorBytes),
- "producerBytes", hex.EncodeToString(producerBytes),
- )
-
- // get packed data
- data, err := c.validatorSetABI.Pack(method,
- big.NewInt(0).SetUint64(heimdallSpan.ID),
- big.NewInt(0).SetUint64(heimdallSpan.StartBlock),
- big.NewInt(0).SetUint64(heimdallSpan.EndBlock),
- validatorBytes,
- producerBytes,
- )
- if err != nil {
- log.Error("Unable to pack tx for commitSpan", "error", err)
- return err
- }
-
- // get system message
- msg := getSystemMessage(common.HexToAddress(c.config.ValidatorContract), data)
-
- // apply message
- return applyMessage(msg, state, header, c.chainConfig, chain)
+ return c.spanner.CommitSpan(heimdallSpan, state, header, chain)
}
// CommitStates commit states
func (c *Bor) CommitStates(
state *state.StateDB,
header *types.Header,
- chain chainContext,
+ chain statefull.ChainContext,
) ([]*types.StateSyncData, error) {
stateSyncs := make([]*types.StateSyncData, 0)
number := header.Number.Uint64()
+
_lastStateID, err := c.GenesisContractsClient.LastStateId(number - 1)
if err != nil {
return nil, err
@@ -1188,24 +1057,34 @@ func (c *Bor) CommitStates(
to := time.Unix(int64(chain.Chain.GetHeaderByNumber(number-c.config.Sprint).Time), 0)
lastStateID := _lastStateID.Uint64()
+
log.Info(
"Fetching state updates from Heimdall",
"fromID", lastStateID+1,
"to", to.Format(time.RFC3339))
- eventRecords, err := c.HeimdallClient.FetchStateSyncEvents(lastStateID+1, to.Unix())
+
+ eventRecords, err := c.HeimdallClient.StateSyncEvents(lastStateID+1, to.Unix())
+ if err != nil {
+ log.Error("Error occurred when fetching state sync events", "stateID", lastStateID+1, "error", err)
+ }
+
if c.config.OverrideStateSyncRecords != nil {
if val, ok := c.config.OverrideStateSyncRecords[strconv.FormatUint(number, 10)]; ok {
eventRecords = eventRecords[0:val]
}
}
+ totalGas := 0 // total gas consumed by state-sync commits in this block
+
chainID := c.chainConfig.ChainID.String()
+
for _, eventRecord := range eventRecords {
if eventRecord.ID <= lastStateID {
continue
}
+
if err := validateEventRecord(eventRecord, number, to, lastStateID, chainID); err != nil {
- log.Error(err.Error())
+ log.Error("while validating event record", "block", number, "to", to, "stateID", lastStateID, "error", err.Error())
break
}
@@ -1215,21 +1094,30 @@ func (c *Bor) CommitStates(
Data: hex.EncodeToString(eventRecord.Data),
TxHash: eventRecord.TxHash,
}
+
stateSyncs = append(stateSyncs, &stateData)
- if err := c.GenesisContractsClient.CommitState(eventRecord, state, header, chain); err != nil {
+ gasUsed, err := c.GenesisContractsClient.CommitState(eventRecord, state, header, chain)
+ if err != nil {
return nil, err
}
+
+ totalGas += int(gasUsed)
+
lastStateID++
}
+
+ log.Info("StateSyncData", "Gas", totalGas, "Block-number", number, "LastStateID", lastStateID, "TotalRecords", len(eventRecords))
+
return stateSyncs, nil
}
-func validateEventRecord(eventRecord *EventRecordWithTime, number uint64, to time.Time, lastStateID uint64, chainID string) error {
+func validateEventRecord(eventRecord *clerk.EventRecordWithTime, number uint64, to time.Time, lastStateID uint64, chainID string) error {
// event id should be sequential and event.Time should lie in the range [from, to)
if lastStateID+1 != eventRecord.ID || eventRecord.ChainID != chainID || !eventRecord.Time.Before(to) {
return &InvalidStateReceivedError{number, lastStateID, &to, eventRecord}
}
+
return nil
}
@@ -1237,24 +1125,28 @@ func (c *Bor) SetHeimdallClient(h IHeimdallClient) {
c.HeimdallClient = h
}
+func (c *Bor) GetCurrentValidators(headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) {
+ return c.spanner.GetCurrentValidators(headerHash, blockNumber)
+}
+
//
// Private methods
//
func (c *Bor) getNextHeimdallSpanForTest(
newSpanID uint64,
- state *state.StateDB,
header *types.Header,
chain core.ChainContext,
-) (*HeimdallSpan, error) {
+) (*span.HeimdallSpan, error) {
headerNumber := header.Number.Uint64()
- span, err := c.GetCurrentSpan(header.ParentHash)
+
+ spanBor, err := c.spanner.GetCurrentSpan(header.ParentHash)
if err != nil {
return nil, err
}
// get local chain context object
- localContext := chain.(chainContext)
+ localContext := chain.(statefull.ChainContext)
// Retrieve the snapshot needed to verify this header and cache it
snap, err := c.snapshot(localContext.Chain, headerNumber-1, header.ParentHash, nil)
if err != nil {
@@ -1262,20 +1154,22 @@ func (c *Bor) getNextHeimdallSpanForTest(
}
// new span
- span.ID = newSpanID
- if span.EndBlock == 0 {
- span.StartBlock = 256
+ spanBor.ID = newSpanID
+ if spanBor.EndBlock == 0 {
+ spanBor.StartBlock = 256
} else {
- span.StartBlock = span.EndBlock + 1
+ spanBor.StartBlock = spanBor.EndBlock + 1
}
- span.EndBlock = span.StartBlock + (100 * c.config.Sprint) - 1
- selectedProducers := make([]Validator, len(snap.ValidatorSet.Validators))
+ spanBor.EndBlock = spanBor.StartBlock + (100 * c.config.Sprint) - 1
+
+ selectedProducers := make([]valset.Validator, len(snap.ValidatorSet.Validators))
for i, v := range snap.ValidatorSet.Validators {
selectedProducers[i] = *v
}
- heimdallSpan := &HeimdallSpan{
- Span: *span,
+
+ heimdallSpan := &span.HeimdallSpan{
+ Span: *spanBor,
ValidatorSet: *snap.ValidatorSet,
SelectedProducers: selectedProducers,
ChainID: c.chainConfig.ChainID.String(),
@@ -1284,95 +1178,22 @@ func (c *Bor) getNextHeimdallSpanForTest(
return heimdallSpan, nil
}
-//
-// Chain context
-//
-
-// chain context
-type chainContext struct {
- Chain consensus.ChainHeaderReader
- Bor consensus.Engine
-}
-
-func (c chainContext) Engine() consensus.Engine {
- return c.Bor
-}
-
-func (c chainContext) GetHeader(hash common.Hash, number uint64) *types.Header {
- return c.Chain.GetHeader(hash, number)
-}
-
-// callmsg implements core.Message to allow passing it as a transaction simulator.
-type callmsg struct {
- ethereum.CallMsg
-}
-
-func (m callmsg) From() common.Address { return m.CallMsg.From }
-func (m callmsg) Nonce() uint64 { return 0 }
-func (m callmsg) CheckNonce() bool { return false }
-func (m callmsg) To() *common.Address { return m.CallMsg.To }
-func (m callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
-func (m callmsg) Gas() uint64 { return m.CallMsg.Gas }
-func (m callmsg) Value() *big.Int { return m.CallMsg.Value }
-func (m callmsg) Data() []byte { return m.CallMsg.Data }
-
-// get system message
-func getSystemMessage(toAddress common.Address, data []byte) callmsg {
- return callmsg{
- ethereum.CallMsg{
- From: systemAddress,
- Gas: math.MaxUint64 / 2,
- GasPrice: big.NewInt(0),
- Value: big.NewInt(0),
- To: &toAddress,
- Data: data,
- },
- }
-}
-
-// apply message
-func applyMessage(
- msg callmsg,
- state *state.StateDB,
- header *types.Header,
- chainConfig *params.ChainConfig,
- chainContext core.ChainContext,
-) error {
- // Create a new context to be used in the EVM environment
- blockContext := core.NewEVMBlockContext(header, chainContext, &header.Coinbase)
- // Create a new environment which holds all relevant information
- // about the transaction and calling mechanisms.
- vmenv := vm.NewEVM(blockContext, vm.TxContext{}, state, chainConfig, vm.Config{})
- // Apply the transaction to the current state (included in the env)
- _, _, err := vmenv.Call(
- vm.AccountRef(msg.From()),
- *msg.To(),
- msg.Data(),
- msg.Gas(),
- msg.Value(),
- )
- // Update the state with pending changes
- if err != nil {
- state.Finalise(true)
- }
-
- return nil
-}
-
-func validatorContains(a []*Validator, x *Validator) (*Validator, bool) {
+func validatorContains(a []*valset.Validator, x *valset.Validator) (*valset.Validator, bool) {
for _, n := range a {
- if bytes.Compare(n.Address.Bytes(), x.Address.Bytes()) == 0 {
+ if n.Address == x.Address {
return n, true
}
}
+
return nil, false
}
-func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*Validator) *ValidatorSet {
+func getUpdatedValidatorSet(oldValidatorSet *valset.ValidatorSet, newVals []*valset.Validator) *valset.ValidatorSet {
v := oldValidatorSet
oldVals := v.Validators
- var changes []*Validator
+ changes := make([]*valset.Validator, 0, len(oldVals))
+
for _, ov := range oldVals {
if f, ok := validatorContains(newVals, ov); ok {
ov.VotingPower = f.VotingPower
@@ -1389,10 +1210,13 @@ func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*Validator)
}
}
- v.UpdateWithChangeSet(changes)
+ if err := v.UpdateWithChangeSet(changes); err != nil {
+ log.Error("Error while updating change set", "error", err)
+ }
+
return v
}
-func isSprintStart(number, sprint uint64) bool {
+func IsSprintStart(number, sprint uint64) bool {
return number%sprint == 0
}
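
For intuition on the sealing timing touched above: `CalcProducerDelay` starts from the configured block period (not visible in the hunk), switches to the producer delay on sprint boundaries, and adds a backup multiplier per out-of-turn succession. A toy recomputation with assumed parameters (2s period, sprint 64, 6s producer delay, 2s backup multiplier), not necessarily the network values:

```go
package main

import "fmt"

// producerDelay mirrors the shape of CalcProducerDelay with fixed, assumed
// parameters: 2s block period, 64-block sprint, 6s producer delay, 2s backup
// multiplier.
func producerDelay(number uint64, succession int) uint64 {
	const (
		period           uint64 = 2
		sprint           uint64 = 64
		producerDelaySec uint64 = 6
		backupMultiplier uint64 = 2
	)

	delay := period
	if number%sprint == 0 {
		// The first block of a sprint waits for the new primary producer.
		delay = producerDelaySec
	}

	if succession > 0 {
		// Each backup producer waits additional slots before sealing.
		delay += uint64(succession) * backupMultiplier
	}

	return delay
}

func main() {
	fmt.Println(producerDelay(100, 0)) // mid-sprint, in-turn signer: 2
	fmt.Println(producerDelay(128, 0)) // sprint start, in-turn signer: 6
	fmt.Println(producerDelay(128, 2)) // sprint start, second backup: 6 + 2*2 = 10
}
```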
diff --git a/consensus/bor/bor_test.go b/consensus/bor/bor_test.go
index 409cfb0e78..e2f4c0a405 100644
--- a/consensus/bor/bor_test.go
+++ b/consensus/bor/bor_test.go
@@ -5,6 +5,8 @@ import (
"math/big"
"testing"
+ "github.com/stretchr/testify/assert"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
@@ -13,10 +15,11 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
- "github.com/stretchr/testify/assert"
)
func TestGenesisContractChange(t *testing.T) {
+ t.Parallel()
+
addr0 := common.Address{0x1}
b := &Bor{
@@ -102,6 +105,8 @@ func TestGenesisContractChange(t *testing.T) {
}
func TestEncodeSigHeaderJaipur(t *testing.T) {
+ t.Parallel()
+
// As part of the EIP-1559 fork in mumbai, an incorrect seal hash
- // was used for Bor that did not include the BaseFee. The Jaipur
// block is a hard fork to fix that.
diff --git a/consensus/bor/clerk.go b/consensus/bor/clerk/clerk.go
similarity index 85%
rename from consensus/bor/clerk.go
rename to consensus/bor/clerk/clerk.go
index d7e6982873..fedca3df16 100644
--- a/consensus/bor/clerk.go
+++ b/consensus/bor/clerk/clerk.go
@@ -1,4 +1,4 @@
-package bor
+package clerk
import (
"fmt"
@@ -23,10 +23,10 @@ type EventRecordWithTime struct {
Time time.Time `json:"record_time" yaml:"record_time"`
}
-// String returns the string representatin of span
-func (e *EventRecordWithTime) String() string {
+// String returns the string representation of EventRecord
+func (e *EventRecordWithTime) String(gasUsed uint64) string {
return fmt.Sprintf(
- "id %v, contract %v, data: %v, txHash: %v, logIndex: %v, chainId: %v, time %s",
+ "id %v, contract %v, data: %v, txHash: %v, logIndex: %v, chainId: %v, time %s, gasUsed %d",
e.ID,
e.Contract.String(),
e.Data.String(),
@@ -34,6 +34,7 @@ func (e *EventRecordWithTime) String() string {
e.LogIndex,
e.ChainID,
e.Time.Format(time.RFC3339),
+ gasUsed,
)
}
diff --git a/consensus/bor/contract/client.go b/consensus/bor/contract/client.go
new file mode 100644
index 0000000000..d65fb5bb15
--- /dev/null
+++ b/consensus/bor/contract/client.go
@@ -0,0 +1,133 @@
+package contract
+
+import (
+ "context"
+ "math"
+ "math/big"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/consensus/bor/api"
+ "github.com/ethereum/go-ethereum/consensus/bor/clerk"
+ "github.com/ethereum/go-ethereum/consensus/bor/statefull"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
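+// The genesis contract ABIs are parsed once at package initialization; the parse
+// errors are discarded because the ABI strings below are compile-time constants.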
+var (
+ vABI, _ = abi.JSON(strings.NewReader(validatorsetABI))
+ sABI, _ = abi.JSON(strings.NewReader(stateReceiverABI))
+)
+
+func ValidatorSet() abi.ABI {
+ return vABI
+}
+
+func StateReceiver() abi.ABI {
+ return sABI
+}
+
+type GenesisContractsClient struct {
+ validatorSetABI abi.ABI
+ stateReceiverABI abi.ABI
+ ValidatorContract string
+ StateReceiverContract string
+ chainConfig *params.ChainConfig
+ ethAPI api.Caller
+}
+
+const (
+ validatorsetABI = `[{"constant":true,"inputs":[],"name":"SPRINT","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"SYSTEM_ADDRESS","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"CHAIN","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"FIRST_END_BLOCK","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"producers","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"ROUND_TYPE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"BOR_ID","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"spanNumbers","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"VOTE_TYPE","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"validators","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"spans","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"startBlock","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"endBlock","type":"uint256"}],"name":"NewSpan","type":"event"},{"constant":true,"inputs":[],"name":"currentSprint","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,
"inputs":[],"name":"getCurrentSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getNextSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"number","type":"uint256"}],"name":"getSpanByBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"currentSpanNumber","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getValidatorsTotalStakeBySpan","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getProducersTotalStakeBySpan","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"getValidatorBySigner","outputs":[{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"internalType":"struct 
BorValidatorSet.Validator","name":"result","type":"tuple"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"isValidator","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"isProducer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"signer","type":"address"}],"name":"isCurrentValidator","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"signer","type":"address"}],"name":"isCurrentProducer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"number","type":"uint256"}],"name":"getBorValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getInitialValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"newSpan","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"},{"internalType":"bytes","name":"validatorBytes","type":"bytes"},{"internalType":"bytes","name":"producerBytes","type":"bytes"}],"name":"commitSpan","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"bytes32","name":"dataHash","type":"bytes32"},{"internalType":"bytes","name":"sigs","type":"bytes"}],"name":"getStakePowerBySigs","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"rootHash","type":"bytes32"},{"internalType":"bytes32","name":"leaf","type":"bytes32"},{"internalType":"bytes","name":"proof","type":"bytes"}],"name":"checkMembership","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"d","type":"bytes32"}],"name":"leafNode","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"left","type":"bytes32"},{"internalType":"bytes32","name":"right","type":"bytes32"}],"name":"innerNode","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}
],"payable":false,"stateMutability":"pure","type":"function"}]`
+ stateReceiverABI = `[{"constant":true,"inputs":[],"name":"SYSTEM_ADDRESS","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"lastStateId","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"syncTime","type":"uint256"},{"internalType":"bytes","name":"recordBytes","type":"bytes"}],"name":"commitState","outputs":[{"internalType":"bool","name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
+)
+
+func NewGenesisContractsClient(
+ chainConfig *params.ChainConfig,
+ validatorContract,
+ stateReceiverContract string,
+ ethAPI api.Caller,
+) *GenesisContractsClient {
+ return &GenesisContractsClient{
+ validatorSetABI: ValidatorSet(),
+ stateReceiverABI: StateReceiver(),
+ ValidatorContract: validatorContract,
+ StateReceiverContract: stateReceiverContract,
+ chainConfig: chainConfig,
+ ethAPI: ethAPI,
+ }
+}
+
+func (gc *GenesisContractsClient) CommitState(
+ event *clerk.EventRecordWithTime,
+ state *state.StateDB,
+ header *types.Header,
+ chCtx statefull.ChainContext,
+) (uint64, error) {
+ eventRecord := event.BuildEventRecord()
+
+ recordBytes, err := rlp.EncodeToBytes(eventRecord)
+ if err != nil {
+ return 0, err
+ }
+
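+ // Pack a commitState(syncTime, recordBytes) call for the StateReceiver contract
+ // and apply it to the state as a system message.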
+ const method = "commitState"
+
+ t := event.Time.Unix()
+
+ data, err := gc.stateReceiverABI.Pack(method, big.NewInt(0).SetInt64(t), recordBytes)
+ if err != nil {
+ log.Error("Unable to pack tx for commitState", "error", err)
+ return 0, err
+ }
+
+ msg := statefull.GetSystemMessage(common.HexToAddress(gc.StateReceiverContract), data)
+ gasUsed, err := statefull.ApplyMessage(msg, state, header, gc.chainConfig, chCtx)
+
+ // Log the committed event together with the gas used by the commitState call
+ log.Info("→ committing new state", "eventRecord", event.String(gasUsed))
+
+ if err != nil {
+ return 0, err
+ }
+
+ return gasUsed, nil
+}
+
+func (gc *GenesisContractsClient) LastStateId(snapshotNumber uint64) (*big.Int, error) {
+ blockNr := rpc.BlockNumber(snapshotNumber)
+ method := "lastStateId"
+
+ data, err := gc.stateReceiverABI.Pack(method)
+ if err != nil {
+ log.Error("Unable to pack tx for LastStateId", "error", err)
+
+ return nil, err
+ }
+
+ msgData := (hexutil.Bytes)(data)
+ toAddress := common.HexToAddress(gc.StateReceiverContract)
+ gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
+
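+ // Read lastStateId via a read-only call against the StateReceiver contract,
+ // pinned to the snapshot block number.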
+ result, err := gc.ethAPI.Call(context.Background(), ethapi.TransactionArgs{
+ Gas: &gas,
+ To: &toAddress,
+ Data: &msgData,
+ }, rpc.BlockNumberOrHash{BlockNumber: &blockNr}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := new(*big.Int)
+ if err := gc.stateReceiverABI.UnpackIntoInterface(ret, method, result); err != nil {
+ return nil, err
+ }
+
+ return *ret, nil
+}
diff --git a/consensus/bor/errors.go b/consensus/bor/errors.go
index a1e60d1e21..67f7ef53f3 100644
--- a/consensus/bor/errors.go
+++ b/consensus/bor/errors.go
@@ -3,36 +3,9 @@ package bor
import (
"fmt"
"time"
-)
-
-// TotalVotingPowerExceededError is returned when the maximum allowed total voting power is exceeded
-type TotalVotingPowerExceededError struct {
- Sum int64
- Validators []*Validator
-}
-
-func (e *TotalVotingPowerExceededError) Error() string {
- return fmt.Sprintf(
- "Total voting power should be guarded to not exceed %v; got: %v; for validator set: %v",
- MaxTotalVotingPower,
- e.Sum,
- e.Validators,
- )
-}
-type InvalidStartEndBlockError struct {
- Start uint64
- End uint64
- CurrentHeader uint64
-}
-
-func (e *InvalidStartEndBlockError) Error() string {
- return fmt.Sprintf(
- "Invalid parameters start: %d and end block: %d params",
- e.Start,
- e.End,
- )
-}
+ "github.com/ethereum/go-ethereum/consensus/bor/clerk"
+)
type MaxCheckpointLengthExceededError struct {
Start uint64
@@ -129,12 +102,12 @@ type InvalidStateReceivedError struct {
Number uint64
LastStateID uint64
To *time.Time
- Event *EventRecordWithTime
+ Event *clerk.EventRecordWithTime
}
func (e *InvalidStateReceivedError) Error() string {
return fmt.Sprintf(
- "Received invalid event %s at block %d. Requested events until %s. Last state id was %d",
+ "Received invalid event %v at block %d. Requested events until %s. Last state id was %d",
e.Event,
e.Number,
e.To.Format(time.RFC3339),
diff --git a/consensus/bor/genesis.go b/consensus/bor/genesis.go
new file mode 100644
index 0000000000..33de53f9ba
--- /dev/null
+++ b/consensus/bor/genesis.go
@@ -0,0 +1,16 @@
+package bor
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/consensus/bor/clerk"
+ "github.com/ethereum/go-ethereum/consensus/bor/statefull"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+//go:generate mockgen -destination=./genesis_contract_mock.go -package=bor . GenesisContract
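+// GenesisContract abstracts the genesis contract interactions needed by bor:
+// committing state-sync events and reading the last state id.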
+type GenesisContract interface {
+ CommitState(event *clerk.EventRecordWithTime, state *state.StateDB, header *types.Header, chCtx statefull.ChainContext) (uint64, error)
+ LastStateId(snapshotNumber uint64) (*big.Int, error)
+}
diff --git a/consensus/bor/genesis_contract_mock.go b/consensus/bor/genesis_contract_mock.go
new file mode 100644
index 0000000000..dfe9390509
--- /dev/null
+++ b/consensus/bor/genesis_contract_mock.go
@@ -0,0 +1,69 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/ethereum/go-ethereum/consensus/bor (interfaces: GenesisContract)
+
+// Package bor is a generated GoMock package.
+package bor
+
+import (
+ big "math/big"
+ reflect "reflect"
+
+ clerk "github.com/ethereum/go-ethereum/consensus/bor/clerk"
+ statefull "github.com/ethereum/go-ethereum/consensus/bor/statefull"
+ state "github.com/ethereum/go-ethereum/core/state"
+ types "github.com/ethereum/go-ethereum/core/types"
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockGenesisContract is a mock of GenesisContract interface.
+type MockGenesisContract struct {
+ ctrl *gomock.Controller
+ recorder *MockGenesisContractMockRecorder
+}
+
+// MockGenesisContractMockRecorder is the mock recorder for MockGenesisContract.
+type MockGenesisContractMockRecorder struct {
+ mock *MockGenesisContract
+}
+
+// NewMockGenesisContract creates a new mock instance.
+func NewMockGenesisContract(ctrl *gomock.Controller) *MockGenesisContract {
+ mock := &MockGenesisContract{ctrl: ctrl}
+ mock.recorder = &MockGenesisContractMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockGenesisContract) EXPECT() *MockGenesisContractMockRecorder {
+ return m.recorder
+}
+
+// CommitState mocks base method.
+func (m *MockGenesisContract) CommitState(arg0 *clerk.EventRecordWithTime, arg1 *state.StateDB, arg2 *types.Header, arg3 statefull.ChainContext) (uint64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CommitState", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(uint64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CommitState indicates an expected call of CommitState.
+func (mr *MockGenesisContractMockRecorder) CommitState(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitState", reflect.TypeOf((*MockGenesisContract)(nil).CommitState), arg0, arg1, arg2, arg3)
+}
+
+// LastStateId mocks base method.
+func (m *MockGenesisContract) LastStateId(arg0 uint64) (*big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LastStateId", arg0)
+ ret0, _ := ret[0].(*big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// LastStateId indicates an expected call of LastStateId.
+func (mr *MockGenesisContractMockRecorder) LastStateId(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastStateId", reflect.TypeOf((*MockGenesisContract)(nil).LastStateId), arg0)
+}
diff --git a/consensus/bor/genesis_contracts_client.go b/consensus/bor/genesis_contracts_client.go
deleted file mode 100644
index 582358e0cb..0000000000
--- a/consensus/bor/genesis_contracts_client.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package bor
-
-import (
- "context"
- "math"
- "math/big"
- "strings"
-
- "github.com/ethereum/go-ethereum/accounts/abi"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/internal/ethapi"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/rpc"
-)
-
-type GenesisContractsClient struct {
- validatorSetABI abi.ABI
- stateReceiverABI abi.ABI
- ValidatorContract string
- StateReceiverContract string
- chainConfig *params.ChainConfig
- ethAPI *ethapi.PublicBlockChainAPI
-}
-
-const validatorsetABI = `[{"constant":true,"inputs":[],"name":"SPRINT","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"SYSTEM_ADDRESS","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"CHAIN","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"FIRST_END_BLOCK","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"producers","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"ROUND_TYPE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"BOR_ID","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"spanNumbers","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"VOTE_TYPE","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"validators","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"spans","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"startBlock","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"endBlock","type":"uint256"}],"name":"NewSpan","type":"event"},{"constant":true,"inputs":[],"name":"currentSprint","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":
true,"inputs":[],"name":"getCurrentSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getNextSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"number","type":"uint256"}],"name":"getSpanByBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"currentSpanNumber","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getValidatorsTotalStakeBySpan","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getProducersTotalStakeBySpan","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"getValidatorBySigner","outputs":[{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"internalType":"struct 
BorValidatorSet.Validator","name":"result","type":"tuple"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"isValidator","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"isProducer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"signer","type":"address"}],"name":"isCurrentValidator","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"signer","type":"address"}],"name":"isCurrentProducer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"number","type":"uint256"}],"name":"getBorValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getInitialValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"newSpan","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"},{"internalType":"bytes","name":"validatorBytes","type":"bytes"},{"internalType":"bytes","name":"producerBytes","type":"bytes"}],"name":"commitSpan","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"bytes32","name":"dataHash","type":"bytes32"},{"internalType":"bytes","name":"sigs","type":"bytes"}],"name":"getStakePowerBySigs","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"rootHash","type":"bytes32"},{"internalType":"bytes32","name":"leaf","type":"bytes32"},{"internalType":"bytes","name":"proof","type":"bytes"}],"name":"checkMembership","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"d","type":"bytes32"}],"name":"leafNode","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"left","type":"bytes32"},{"internalType":"bytes32","name":"right","type":"bytes32"}],"name":"innerNode","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}
],"payable":false,"stateMutability":"pure","type":"function"}]`
-const stateReceiverABI = `[{"constant":true,"inputs":[],"name":"SYSTEM_ADDRESS","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"lastStateId","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"syncTime","type":"uint256"},{"internalType":"bytes","name":"recordBytes","type":"bytes"}],"name":"commitState","outputs":[{"internalType":"bool","name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
-
-func NewGenesisContractsClient(
- chainConfig *params.ChainConfig,
- validatorContract,
- stateReceiverContract string,
- ethAPI *ethapi.PublicBlockChainAPI,
-) *GenesisContractsClient {
- vABI, _ := abi.JSON(strings.NewReader(validatorsetABI))
- sABI, _ := abi.JSON(strings.NewReader(stateReceiverABI))
- return &GenesisContractsClient{
- validatorSetABI: vABI,
- stateReceiverABI: sABI,
- ValidatorContract: validatorContract,
- StateReceiverContract: stateReceiverContract,
- chainConfig: chainConfig,
- ethAPI: ethAPI,
- }
-}
-
-func (gc *GenesisContractsClient) CommitState(
- event *EventRecordWithTime,
- state *state.StateDB,
- header *types.Header,
- chCtx chainContext,
-) error {
- eventRecord := event.BuildEventRecord()
- recordBytes, err := rlp.EncodeToBytes(eventRecord)
- if err != nil {
- return err
- }
- method := "commitState"
- t := event.Time.Unix()
- data, err := gc.stateReceiverABI.Pack(method, big.NewInt(0).SetInt64(t), recordBytes)
- if err != nil {
- log.Error("Unable to pack tx for commitState", "error", err)
- return err
- }
- log.Info("→ committing new state", "eventRecord", event.String())
- msg := getSystemMessage(common.HexToAddress(gc.StateReceiverContract), data)
- if err := applyMessage(msg, state, header, gc.chainConfig, chCtx); err != nil {
- return err
- }
- return nil
-}
-
-func (gc *GenesisContractsClient) LastStateId(snapshotNumber uint64) (*big.Int, error) {
- blockNr := rpc.BlockNumber(snapshotNumber)
- method := "lastStateId"
- data, err := gc.stateReceiverABI.Pack(method)
- if err != nil {
- log.Error("Unable to pack tx for LastStateId", "error", err)
- return nil, err
- }
-
- msgData := (hexutil.Bytes)(data)
- toAddress := common.HexToAddress(gc.StateReceiverContract)
- gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
- result, err := gc.ethAPI.Call(context.Background(), ethapi.TransactionArgs{
- Gas: &gas,
- To: &toAddress,
- Data: &msgData,
- }, rpc.BlockNumberOrHash{BlockNumber: &blockNr}, nil)
- if err != nil {
- return nil, err
- }
-
- var ret = new(*big.Int)
- if err := gc.stateReceiverABI.UnpackIntoInterface(ret, method, result); err != nil {
- return nil, err
- }
- return *ret, nil
-}
diff --git a/consensus/bor/heimdall.go b/consensus/bor/heimdall.go
new file mode 100644
index 0000000000..217de13fe9
--- /dev/null
+++ b/consensus/bor/heimdall.go
@@ -0,0 +1,15 @@
+package bor
+
+import (
+ "github.com/ethereum/go-ethereum/consensus/bor/clerk"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/checkpoint"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span"
+)
+
+//go:generate mockgen -destination=../../tests/bor/mocks/IHeimdallClient.go -package=mocks . IHeimdallClient
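+// IHeimdallClient is the set of Heimdall queries the consensus engine depends on:
+// state-sync events, spans and the latest checkpoint.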
+type IHeimdallClient interface {
+ StateSyncEvents(fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error)
+ Span(spanID uint64) (*span.HeimdallSpan, error)
+ FetchLatestCheckpoint() (*checkpoint.Checkpoint, error)
+ Close()
+}
diff --git a/consensus/bor/heimdall/checkpoint/checkpoint.go b/consensus/bor/heimdall/checkpoint/checkpoint.go
new file mode 100644
index 0000000000..77569293ad
--- /dev/null
+++ b/consensus/bor/heimdall/checkpoint/checkpoint.go
@@ -0,0 +1,22 @@
+package checkpoint
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// Checkpoint defines a response object type for a bor checkpoint
+type Checkpoint struct {
+ Proposer common.Address `json:"proposer"`
+ StartBlock *big.Int `json:"start_block"`
+ EndBlock *big.Int `json:"end_block"`
+ RootHash common.Hash `json:"root_hash"`
+ BorChainID string `json:"bor_chain_id"`
+ Timestamp uint64 `json:"timestamp"`
+}
+
+type CheckpointResponse struct {
+ Height string `json:"height"`
+ Result Checkpoint `json:"result"`
+}
diff --git a/consensus/bor/heimdall/client.go b/consensus/bor/heimdall/client.go
new file mode 100644
index 0000000000..2d42cfc31b
--- /dev/null
+++ b/consensus/bor/heimdall/client.go
@@ -0,0 +1,243 @@
+package heimdall
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "time"
+
+ "github.com/ethereum/go-ethereum/consensus/bor/clerk"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/checkpoint"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// errShutdownDetected is returned if a shutdown was detected
+var errShutdownDetected = errors.New("shutdown detected")
+
+const (
+ stateFetchLimit = 50
+ apiHeimdallTimeout = 5 * time.Second
+)
+
+type StateSyncEventsResponse struct {
+ Height string `json:"height"`
+ Result []*clerk.EventRecordWithTime `json:"result"`
+}
+
+type SpanResponse struct {
+ Height string `json:"height"`
+ Result span.HeimdallSpan `json:"result"`
+}
+
+type HeimdallClient struct {
+ urlString string
+ client http.Client
+ closeCh chan struct{}
+}
+
+func NewHeimdallClient(urlString string) *HeimdallClient {
+ return &HeimdallClient{
+ urlString: urlString,
+ client: http.Client{
+ Timeout: apiHeimdallTimeout,
+ },
+ closeCh: make(chan struct{}),
+ }
+}
+
+const (
+ fetchStateSyncEventsFormat = "from-id=%d&to-time=%d&limit=%d"
+ fetchStateSyncEventsPath = "clerk/event-record/list"
+ fetchLatestCheckpoint = "/checkpoints/latest"
+
+ fetchSpanFormat = "bor/span/%d"
+)
+
+func (h *HeimdallClient) StateSyncEvents(fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) {
+ eventRecords := make([]*clerk.EventRecordWithTime, 0)
+
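+ // Events are fetched in pages of stateFetchLimit; a short page or an empty
+ // (HTTP 204) response terminates the loop.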
+ for {
+ url, err := stateSyncURL(h.urlString, fromID, to)
+ if err != nil {
+ return nil, err
+ }
+
+ log.Info("Fetching state sync events", "queryParams", url.RawQuery)
+
+ response, err := FetchWithRetry[StateSyncEventsResponse](h.client, url, h.closeCh)
+ if err != nil {
+ return nil, err
+ }
+
+ if response == nil || response.Result == nil {
+ // status 204
+ break
+ }
+
+ eventRecords = append(eventRecords, response.Result...)
+
+ if len(response.Result) < stateFetchLimit {
+ break
+ }
+
+ fromID += uint64(stateFetchLimit)
+ }
+
+ sort.SliceStable(eventRecords, func(i, j int) bool {
+ return eventRecords[i].ID < eventRecords[j].ID
+ })
+
+ return eventRecords, nil
+}
+
+func (h *HeimdallClient) Span(spanID uint64) (*span.HeimdallSpan, error) {
+ url, err := spanURL(h.urlString, spanID)
+ if err != nil {
+ return nil, err
+ }
+
+ response, err := FetchWithRetry[SpanResponse](h.client, url, h.closeCh)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response.Result, nil
+}
+
+// FetchLatestCheckpoint fetches the latest bor-submitted checkpoint from heimdall
+func (h *HeimdallClient) FetchLatestCheckpoint() (*checkpoint.Checkpoint, error) {
+ url, err := latestCheckpointURL(h.urlString)
+ if err != nil {
+ return nil, err
+ }
+
+ response, err := FetchWithRetry[checkpoint.CheckpointResponse](h.client, url, h.closeCh)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response.Result, nil
+}
+
+// FetchWithRetry returns data from heimdall with retry
+func FetchWithRetry[T any](client http.Client, url *url.URL, closeCh chan struct{}) (*T, error) {
+ // attempt counter
+ attempt := 1
+ result := new(T)
+
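+ // Try the request once up front; on failure, retry every 5 seconds until it
+ // succeeds or a shutdown is signalled via closeCh.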
+ ctx, cancel := context.WithTimeout(context.Background(), apiHeimdallTimeout)
+
+ // request data once
+ body, err := internalFetch(ctx, client, url)
+
+ cancel()
+
+ if err == nil && body != nil {
+ err = json.Unmarshal(body, result)
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+ }
+
+ // create a new ticker for retrying the request
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+
+ for {
+ log.Info("Retrying again in 5 seconds to fetch data from Heimdall", "path", url.Path, "attempt", attempt)
+ attempt++
+ select {
+ case <-closeCh:
+ log.Debug("Shutdown detected, terminating request")
+
+ return nil, errShutdownDetected
+ case <-ticker.C:
+ ctx, cancel = context.WithTimeout(context.Background(), apiHeimdallTimeout)
+
+ body, err = internalFetch(ctx, client, url)
+
+ cancel()
+
+ if err == nil && body != nil {
+ err = json.Unmarshal(body, result)
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+ }
+ }
+ }
+}
+
+func spanURL(urlString string, spanID uint64) (*url.URL, error) {
+ return makeURL(urlString, fmt.Sprintf(fetchSpanFormat, spanID), "")
+}
+
+func stateSyncURL(urlString string, fromID uint64, to int64) (*url.URL, error) {
+ queryParams := fmt.Sprintf(fetchStateSyncEventsFormat, fromID, to, stateFetchLimit)
+
+ return makeURL(urlString, fetchStateSyncEventsPath, queryParams)
+}
+
+func latestCheckpointURL(urlString string) (*url.URL, error) {
+ return makeURL(urlString, fetchLatestCheckpoint, "")
+}
+
+func makeURL(urlString, rawPath, rawQuery string) (*url.URL, error) {
+ u, err := url.Parse(urlString)
+ if err != nil {
+ return nil, err
+ }
+
+ u.Path = rawPath
+ u.RawQuery = rawQuery
+
+ return u, err
+}
+
+// internal fetch method
+func internalFetch(ctx context.Context, client http.Client, u *url.URL) ([]byte, error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+
+ // check status code
+ if res.StatusCode != 200 && res.StatusCode != 204 {
+ return nil, fmt.Errorf("Error while fetching data from Heimdall")
+ }
+
+ // unmarshall data from buffer
+ if res.StatusCode == 204 {
+ return nil, nil
+ }
+
+ // get response
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ return body, nil
+}
+
+// Close sends a signal to stop the running process
+func (h *HeimdallClient) Close() {
+ close(h.closeCh)
+ h.client.CloseIdleConnections()
+}
diff --git a/consensus/bor/heimdall/client_test.go b/consensus/bor/heimdall/client_test.go
new file mode 100644
index 0000000000..c423a6f659
--- /dev/null
+++ b/consensus/bor/heimdall/client_test.go
@@ -0,0 +1,35 @@
+package heimdall
+
+import (
+ "testing"
+)
+
+func TestSpanURL(t *testing.T) {
+ t.Parallel()
+
+ url, err := spanURL("http://bor0", 1)
+ if err != nil {
+ t.Fatal("got an error", err)
+ }
+
+ const expected = "http://bor0/bor/span/1"
+
+ if url.String() != expected {
+ t.Fatalf("expected URL %q, got %q", expected, url.String())
+ }
+}
+
+func TestStateSyncURL(t *testing.T) {
+ t.Parallel()
+
+ url, err := stateSyncURL("http://bor0", 10, 100)
+ if err != nil {
+ t.Fatal("got an error", err)
+ }
+
+ const expected = "http://bor0/clerk/event-record/list?from-id=10&to-time=100&limit=50"
+
+ if url.String() != expected {
+ t.Fatalf("expected URL %q, got %q", expected, url.String())
+ }
+}
diff --git a/consensus/bor/heimdall/span/span.go b/consensus/bor/heimdall/span/span.go
new file mode 100644
index 0000000000..5bf85fb341
--- /dev/null
+++ b/consensus/bor/heimdall/span/span.go
@@ -0,0 +1,20 @@
+package span
+
+import (
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
+)
+
+// Span represents a current bor span
+type Span struct {
+ ID uint64 `json:"span_id" yaml:"span_id"`
+ StartBlock uint64 `json:"start_block" yaml:"start_block"`
+ EndBlock uint64 `json:"end_block" yaml:"end_block"`
+}
+
+// HeimdallSpan represents span from heimdall APIs
+type HeimdallSpan struct {
+ Span
+ ValidatorSet valset.ValidatorSet `json:"validator_set" yaml:"validator_set"`
+ SelectedProducers []valset.Validator `json:"selected_producers" yaml:"selected_producers"`
+ ChainID string `json:"bor_chain_id" yaml:"bor_chain_id"`
+}
diff --git a/consensus/bor/heimdall/span/spanner.go b/consensus/bor/heimdall/span/spanner.go
new file mode 100644
index 0000000000..7bf6e350ee
--- /dev/null
+++ b/consensus/bor/heimdall/span/spanner.go
@@ -0,0 +1,200 @@
+package span
+
+import (
+ "context"
+ "encoding/hex"
+ "math"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/consensus/bor/abi"
+ "github.com/ethereum/go-ethereum/consensus/bor/api"
+ "github.com/ethereum/go-ethereum/consensus/bor/statefull"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+type ChainSpanner struct {
+ ethAPI api.Caller
+ validatorSet abi.ABI
+ chainConfig *params.ChainConfig
+ validatorContractAddress common.Address
+}
+
+func NewChainSpanner(ethAPI api.Caller, validatorSet abi.ABI, chainConfig *params.ChainConfig, validatorContractAddress common.Address) *ChainSpanner {
+ return &ChainSpanner{
+ ethAPI: ethAPI,
+ validatorSet: validatorSet,
+ chainConfig: chainConfig,
+ validatorContractAddress: validatorContractAddress,
+ }
+}
+
+// GetCurrentSpan gets the current span from the validator set contract
+func (c *ChainSpanner) GetCurrentSpan(headerHash common.Hash) (*Span, error) {
+ // block
+ blockNr := rpc.BlockNumberOrHashWithHash(headerHash, false)
+
+ // method
+ method := "getCurrentSpan"
+
+ data, err := c.validatorSet.Pack(method)
+ if err != nil {
+ log.Error("Unable to pack tx for getCurrentSpan", "error", err)
+
+ return nil, err
+ }
+
+ msgData := (hexutil.Bytes)(data)
+ toAddress := c.validatorContractAddress
+ gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
+
+ // todo: would we like to have a timeout here?
+ result, err := c.ethAPI.Call(context.Background(), ethapi.TransactionArgs{
+ Gas: &gas,
+ To: &toAddress,
+ Data: &msgData,
+ }, blockNr, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // span result
+ ret := new(struct {
+ Number *big.Int
+ StartBlock *big.Int
+ EndBlock *big.Int
+ })
+
+ if err := c.validatorSet.UnpackIntoInterface(ret, method, result); err != nil {
+ return nil, err
+ }
+
+ // create new span
+ span := Span{
+ ID: ret.Number.Uint64(),
+ StartBlock: ret.StartBlock.Uint64(),
+ EndBlock: ret.EndBlock.Uint64(),
+ }
+
+ return &span, nil
+}
+
+// GetCurrentValidators gets the current validators
+func (c *ChainSpanner) GetCurrentValidators(headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // method
+ const method = "getBorValidators"
+
+ data, err := c.validatorSet.Pack(method, big.NewInt(0).SetUint64(blockNumber))
+ if err != nil {
+ log.Error("Unable to pack tx for getValidator", "error", err)
+ return nil, err
+ }
+
+ // call
+ msgData := (hexutil.Bytes)(data)
+ toAddress := c.validatorContractAddress
+ gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
+
+ // block
+ blockNr := rpc.BlockNumberOrHashWithHash(headerHash, false)
+
+ result, err := c.ethAPI.Call(ctx, ethapi.TransactionArgs{
+ Gas: &gas,
+ To: &toAddress,
+ Data: &msgData,
+ }, blockNr, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ var (
+ ret0 = new([]common.Address)
+ ret1 = new([]*big.Int)
+ )
+
+ out := &[]interface{}{
+ ret0,
+ ret1,
+ }
+
+ if err := c.validatorSet.UnpackIntoInterface(out, method, result); err != nil {
+ return nil, err
+ }
+
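+ // The contract returns parallel slices of signer addresses and voting powers;
+ // combine them into Validator values.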
+ valz := make([]*valset.Validator, len(*ret0))
+ for i, a := range *ret0 {
+ valz[i] = &valset.Validator{
+ Address: a,
+ VotingPower: (*ret1)[i].Int64(),
+ }
+ }
+
+ return valz, nil
+}
+
+const method = "commitSpan"
+
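+// CommitSpan RLP-encodes the span's validators and selected producers and submits
+// them to the validator set contract through a commitSpan system call.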
+func (c *ChainSpanner) CommitSpan(heimdallSpan HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error {
+ // get validators bytes
+ validators := make([]valset.MinimalVal, 0, len(heimdallSpan.ValidatorSet.Validators))
+ for _, val := range heimdallSpan.ValidatorSet.Validators {
+ validators = append(validators, val.MinimalVal())
+ }
+
+ validatorBytes, err := rlp.EncodeToBytes(validators)
+ if err != nil {
+ return err
+ }
+
+ // get producers bytes
+ producers := make([]valset.MinimalVal, 0, len(heimdallSpan.SelectedProducers))
+ for _, val := range heimdallSpan.SelectedProducers {
+ producers = append(producers, val.MinimalVal())
+ }
+
+ producerBytes, err := rlp.EncodeToBytes(producers)
+ if err != nil {
+ return err
+ }
+
+ log.Info("✅ Committing new span",
+ "id", heimdallSpan.ID,
+ "startBlock", heimdallSpan.StartBlock,
+ "endBlock", heimdallSpan.EndBlock,
+ "validatorBytes", hex.EncodeToString(validatorBytes),
+ "producerBytes", hex.EncodeToString(producerBytes),
+ )
+
+ data, err := c.validatorSet.Pack(method,
+ big.NewInt(0).SetUint64(heimdallSpan.ID),
+ big.NewInt(0).SetUint64(heimdallSpan.StartBlock),
+ big.NewInt(0).SetUint64(heimdallSpan.EndBlock),
+ validatorBytes,
+ producerBytes,
+ )
+ if err != nil {
+ log.Error("Unable to pack tx for commitSpan", "error", err)
+
+ return err
+ }
+
+ // get system message
+ msg := statefull.GetSystemMessage(c.validatorContractAddress, data)
+
+ // apply message
+ _, err = statefull.ApplyMessage(msg, state, header, c.chainConfig, chainContext)
+
+ return err
+}
diff --git a/consensus/bor/merkle.go b/consensus/bor/merkle.go
index bdfbaba983..ef1b4eb87e 100644
--- a/consensus/bor/merkle.go
+++ b/consensus/bor/merkle.go
@@ -2,12 +2,12 @@ package bor
func appendBytes32(data ...[]byte) []byte {
var result []byte
+
for _, v := range data {
- paddedV, err := convertTo32(v)
- if err == nil {
- result = append(result, paddedV[:]...)
- }
+ paddedV := convertTo32(v)
+ result = append(result, paddedV[:]...)
}
+
return result
}
@@ -24,25 +24,29 @@ func nextPowerOfTwo(n uint64) uint64 {
n |= n >> 16
n |= n >> 32
n++
+
return n
}
-func convertTo32(input []byte) (output [32]byte, err error) {
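+// convertTo32 left-pads the input into a 32-byte array; empty or oversized inputs
+// yield the zero value.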
+func convertTo32(input []byte) (output [32]byte) {
l := len(input)
if l > 32 || l == 0 {
return
}
+
copy(output[32-l:], input[:])
+
return
}
func convert(input []([32]byte)) [][]byte {
- var output [][]byte
+ output := make([][]byte, 0, len(input))
+
for _, in := range input {
newInput := make([]byte, len(in[:]))
copy(newInput, in[:])
output = append(output, newInput)
-
}
+
return output
}
diff --git a/consensus/bor/rest.go b/consensus/bor/rest.go
deleted file mode 100644
index 3ef531de18..0000000000
--- a/consensus/bor/rest.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package bor
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "sort"
- "time"
-
- "github.com/ethereum/go-ethereum/log"
-)
-
-var (
- stateFetchLimit = 50
-)
-
-// ResponseWithHeight defines a response object type that wraps an original
-// response with a height.
-type ResponseWithHeight struct {
- Height string `json:"height"`
- Result json.RawMessage `json:"result"`
-}
-
-type IHeimdallClient interface {
- Fetch(path string, query string) (*ResponseWithHeight, error)
- FetchWithRetry(path string, query string) (*ResponseWithHeight, error)
- FetchStateSyncEvents(fromID uint64, to int64) ([]*EventRecordWithTime, error)
- Close()
-}
-
-type HeimdallClient struct {
- urlString string
- client http.Client
- closeCh chan struct{}
-}
-
-func NewHeimdallClient(urlString string) (*HeimdallClient, error) {
- h := &HeimdallClient{
- urlString: urlString,
- client: http.Client{
- Timeout: time.Duration(5 * time.Second),
- },
- closeCh: make(chan struct{}),
- }
- return h, nil
-}
-
-func (h *HeimdallClient) FetchStateSyncEvents(fromID uint64, to int64) ([]*EventRecordWithTime, error) {
- eventRecords := make([]*EventRecordWithTime, 0)
- for {
- queryParams := fmt.Sprintf("from-id=%d&to-time=%d&limit=%d", fromID, to, stateFetchLimit)
- log.Info("Fetching state sync events", "queryParams", queryParams)
- response, err := h.FetchWithRetry("clerk/event-record/list", queryParams)
- if err != nil {
- return nil, err
- }
- var _eventRecords []*EventRecordWithTime
- if response.Result == nil { // status 204
- break
- }
- if err := json.Unmarshal(response.Result, &_eventRecords); err != nil {
- return nil, err
- }
- eventRecords = append(eventRecords, _eventRecords...)
- if len(_eventRecords) < stateFetchLimit {
- break
- }
- fromID += uint64(stateFetchLimit)
- }
-
- sort.SliceStable(eventRecords, func(i, j int) bool {
- return eventRecords[i].ID < eventRecords[j].ID
- })
- return eventRecords, nil
-}
-
-// Fetch fetches response from heimdall
-func (h *HeimdallClient) Fetch(rawPath string, rawQuery string) (*ResponseWithHeight, error) {
- u, err := url.Parse(h.urlString)
- if err != nil {
- return nil, err
- }
-
- u.Path = rawPath
- u.RawQuery = rawQuery
-
- return h.internalFetch(u)
-}
-
-// FetchWithRetry returns data from heimdall with retry
-func (h *HeimdallClient) FetchWithRetry(rawPath string, rawQuery string) (*ResponseWithHeight, error) {
- u, err := url.Parse(h.urlString)
- if err != nil {
- return nil, err
- }
-
- u.Path = rawPath
- u.RawQuery = rawQuery
-
- // attempt counter
- attempt := 1
-
- // request data once
- res, err := h.internalFetch(u)
- if err == nil && res != nil {
- return res, nil
- }
-
- // create a new ticker for retrying the request
- ticker := time.NewTicker(5 * time.Second)
- defer ticker.Stop()
-
- for {
- log.Info("Retrying again in 5 seconds to fetch data from Heimdall", "path", u.Path, "attempt", attempt)
- attempt++
- select {
- case <-h.closeCh:
- log.Debug("Shutdown detected, terminating request")
- return nil, errShutdownDetected
- case <-ticker.C:
- res, err := h.internalFetch(u)
- if err == nil && res != nil {
- return res, nil
- }
- }
- }
-}
-
-// internal fetch method
-func (h *HeimdallClient) internalFetch(u *url.URL) (*ResponseWithHeight, error) {
- res, err := h.client.Get(u.String())
- if err != nil {
- return nil, err
- }
- defer res.Body.Close()
-
- // check status code
- if res.StatusCode != 200 && res.StatusCode != 204 {
- return nil, fmt.Errorf("Error while fetching data from Heimdall")
- }
-
- // unmarshall data from buffer
- var response ResponseWithHeight
- if res.StatusCode == 204 {
- return &response, nil
- }
-
- // get response
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return nil, err
- }
-
- if err := json.Unmarshal(body, &response); err != nil {
- return nil, err
- }
-
- return &response, nil
-}
-
-// Close sends a signal to stop the running process
-func (h *HeimdallClient) Close() {
- close(h.closeCh)
- h.client.CloseIdleConnections()
-}
diff --git a/consensus/bor/snapshot.go b/consensus/bor/snapshot.go
index 8d212f33ef..f71ceae0ad 100644
--- a/consensus/bor/snapshot.go
+++ b/consensus/bor/snapshot.go
@@ -1,37 +1,29 @@
package bor
import (
- "bytes"
"encoding/json"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
+
lru "github.com/hashicorp/golang-lru"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/params"
)
// Snapshot is the state of the authorization voting at a given point in time.
type Snapshot struct {
config *params.BorConfig // Consensus engine parameters to fine tune behavior
- ethAPI *ethapi.PublicBlockChainAPI
- sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover
+ sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover
Number uint64 `json:"number"` // Block number where the snapshot was created
Hash common.Hash `json:"hash"` // Block hash where the snapshot was created
- ValidatorSet *ValidatorSet `json:"validatorSet"` // Validator set at this moment
+ ValidatorSet *valset.ValidatorSet `json:"validatorSet"` // Validator set at this moment
Recents map[uint64]common.Address `json:"recents"` // Set of recent signers for spam protections
}
-// signersAscending implements the sort interface to allow sorting a list of addresses
-type signersAscending []common.Address
-
-func (s signersAscending) Len() int { return len(s) }
-func (s signersAscending) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 }
-func (s signersAscending) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
// newSnapshot creates a new snapshot with the specified startup parameters. This
// method does not initialize the set of recent signers, so only ever use it for
// the genesis block.
@@ -40,37 +32,38 @@ func newSnapshot(
sigcache *lru.ARCCache,
number uint64,
hash common.Hash,
- validators []*Validator,
- ethAPI *ethapi.PublicBlockChainAPI,
+ validators []*valset.Validator,
) *Snapshot {
snap := &Snapshot{
config: config,
- ethAPI: ethAPI,
sigcache: sigcache,
Number: number,
Hash: hash,
- ValidatorSet: NewValidatorSet(validators),
+ ValidatorSet: valset.NewValidatorSet(validators),
Recents: make(map[uint64]common.Address),
}
+
return snap
}
// loadSnapshot loads an existing snapshot from the database.
-func loadSnapshot(config *params.BorConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash, ethAPI *ethapi.PublicBlockChainAPI) (*Snapshot, error) {
+func loadSnapshot(config *params.BorConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
blob, err := db.Get(append([]byte("bor-"), hash[:]...))
if err != nil {
return nil, err
}
+
snap := new(Snapshot)
+
if err := json.Unmarshal(blob, snap); err != nil {
return nil, err
}
+
snap.config = config
snap.sigcache = sigcache
- snap.ethAPI = ethAPI
// update total voting power
- if err := snap.ValidatorSet.updateTotalVotingPower(); err != nil {
+ if err := snap.ValidatorSet.UpdateTotalVotingPower(); err != nil {
return nil, err
}
@@ -83,6 +76,7 @@ func (s *Snapshot) store(db ethdb.Database) error {
if err != nil {
return err
}
+
return db.Put(append([]byte("bor-"), s.Hash[:]...), blob)
}
@@ -90,7 +84,6 @@ func (s *Snapshot) store(db ethdb.Database) error {
func (s *Snapshot) copy() *Snapshot {
cpy := &Snapshot{
config: s.config,
- ethAPI: s.ethAPI,
sigcache: s.sigcache,
Number: s.Number,
Hash: s.Hash,
@@ -115,6 +108,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
return nil, errOutOfRangeChain
}
}
+
if headers[0].Number.Uint64() != s.Number+1 {
return nil, errOutOfRangeChain
}
@@ -126,7 +120,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
number := header.Number.Uint64()
// Delete the oldest signer from the recent list to allow it signing again
- if number >= s.config.Sprint && number-s.config.Sprint >= 0 {
+ if number >= s.config.Sprint {
delete(snap.Recents, number-s.config.Sprint)
}
@@ -153,15 +147,17 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
if err := validateHeaderExtraField(header.Extra); err != nil {
return nil, err
}
+
validatorBytes := header.Extra[extraVanity : len(header.Extra)-extraSeal]
// get validators from headers and use that for new validator set
- newVals, _ := ParseValidators(validatorBytes)
+ newVals, _ := valset.ParseValidators(validatorBytes)
v := getUpdatedValidatorSet(snap.ValidatorSet.Copy(), newVals)
v.IncrementProposerPriority(1)
snap.ValidatorSet = v
}
}
+
snap.Number += uint64(len(headers))
snap.Hash = headers[len(headers)-1].Hash()
@@ -173,10 +169,13 @@ func (s *Snapshot) GetSignerSuccessionNumber(signer common.Address) (int, error)
validators := s.ValidatorSet.Validators
proposer := s.ValidatorSet.GetProposer().Address
proposerIndex, _ := s.ValidatorSet.GetByAddress(proposer)
+
if proposerIndex == -1 {
return -1, &UnauthorizedProposerError{s.Number, proposer.Bytes()}
}
+
signerIndex, _ := s.ValidatorSet.GetByAddress(signer)
+
if signerIndex == -1 {
return -1, &UnauthorizedSignerError{s.Number, signer.Bytes()}
}
@@ -187,6 +186,7 @@ func (s *Snapshot) GetSignerSuccessionNumber(signer common.Address) (int, error)
tempIndex = tempIndex + len(validators)
}
}
+
return tempIndex - proposerIndex, nil
}
@@ -196,13 +196,14 @@ func (s *Snapshot) signers() []common.Address {
for _, sig := range s.ValidatorSet.Validators {
sigs = append(sigs, sig.Address)
}
+
return sigs
}
// Difficulty returns the difficulty for a particular signer at the current snapshot number
func (s *Snapshot) Difficulty(signer common.Address) uint64 {
// if signer is empty
- if bytes.Compare(signer.Bytes(), common.Address{}.Bytes()) == 0 {
+ if signer == (common.Address{}) {
return 1
}
diff --git a/consensus/bor/snapshot_test.go b/consensus/bor/snapshot_test.go
index 6bb8547843..1a3e967613 100644
--- a/consensus/bor/snapshot_test.go
+++ b/consensus/bor/snapshot_test.go
@@ -7,8 +7,11 @@ import (
"time"
"github.com/stretchr/testify/assert"
+ "pgregory.net/rapid"
"github.com/ethereum/go-ethereum/common"
+ unique "github.com/ethereum/go-ethereum/common/set"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
)
const (
@@ -16,8 +19,10 @@ const (
)
func TestGetSignerSuccessionNumber_ProposerIsSigner(t *testing.T) {
+ t.Parallel()
+
validators := buildRandomValidatorSet(numVals)
- validatorSet := NewValidatorSet(validators)
+ validatorSet := valset.NewValidatorSet(validators)
snap := Snapshot{
ValidatorSet: validatorSet,
}
@@ -28,20 +33,24 @@ func TestGetSignerSuccessionNumber_ProposerIsSigner(t *testing.T) {
if err != nil {
t.Fatalf("%s", err)
}
+
assert.Equal(t, 0, successionNumber)
}
func TestGetSignerSuccessionNumber_SignerIndexIsLarger(t *testing.T) {
+ t.Parallel()
+
validators := buildRandomValidatorSet(numVals)
// sort validators by address, which is what NewValidatorSet also does
- sort.Sort(ValidatorsByAddress(validators))
+ sort.Sort(valset.ValidatorsByAddress(validators))
+
proposerIndex := 32
signerIndex := 56
// give highest ProposerPriority to a particular val, so that they become the proposer
validators[proposerIndex].VotingPower = 200
snap := Snapshot{
- ValidatorSet: NewValidatorSet(validators),
+ ValidatorSet: valset.NewValidatorSet(validators),
}
// choose a signer at an index greater than proposer index
@@ -50,17 +59,20 @@ func TestGetSignerSuccessionNumber_SignerIndexIsLarger(t *testing.T) {
if err != nil {
t.Fatalf("%s", err)
}
+
assert.Equal(t, signerIndex-proposerIndex, successionNumber)
}
func TestGetSignerSuccessionNumber_SignerIndexIsSmaller(t *testing.T) {
+ t.Parallel()
+
validators := buildRandomValidatorSet(numVals)
proposerIndex := 98
signerIndex := 11
// give highest ProposerPriority to a particular val, so that they become the proposer
validators[proposerIndex].VotingPower = 200
snap := Snapshot{
- ValidatorSet: NewValidatorSet(validators),
+ ValidatorSet: valset.NewValidatorSet(validators),
}
// choose a signer at an index greater than proposer index
@@ -69,29 +81,38 @@ func TestGetSignerSuccessionNumber_SignerIndexIsSmaller(t *testing.T) {
if err != nil {
t.Fatalf("%s", err)
}
+
assert.Equal(t, signerIndex+numVals-proposerIndex, successionNumber)
}
func TestGetSignerSuccessionNumber_ProposerNotFound(t *testing.T) {
+ t.Parallel()
+
validators := buildRandomValidatorSet(numVals)
snap := Snapshot{
- ValidatorSet: NewValidatorSet(validators),
+ ValidatorSet: valset.NewValidatorSet(validators),
}
+
dummyProposerAddress := randomAddress()
- snap.ValidatorSet.Proposer = &Validator{Address: dummyProposerAddress}
+ snap.ValidatorSet.Proposer = &valset.Validator{Address: dummyProposerAddress}
+
// choose any signer
signer := snap.ValidatorSet.Validators[3].Address
+
_, err := snap.GetSignerSuccessionNumber(signer)
assert.NotNil(t, err)
+
e, ok := err.(*UnauthorizedProposerError)
assert.True(t, ok)
assert.Equal(t, dummyProposerAddress.Bytes(), e.Proposer)
}
func TestGetSignerSuccessionNumber_SignerNotFound(t *testing.T) {
+ t.Parallel()
+
validators := buildRandomValidatorSet(numVals)
snap := Snapshot{
- ValidatorSet: NewValidatorSet(validators),
+ ValidatorSet: valset.NewValidatorSet(validators),
}
dummySignerAddress := randomAddress()
_, err := snap.GetSignerSuccessionNumber(dummySignerAddress)
@@ -101,24 +122,78 @@ func TestGetSignerSuccessionNumber_SignerNotFound(t *testing.T) {
assert.Equal(t, dummySignerAddress.Bytes(), e.Signer)
}
-func buildRandomValidatorSet(numVals int) []*Validator {
+// nolint: unparam
+func buildRandomValidatorSet(numVals int) []*valset.Validator {
rand.Seed(time.Now().Unix())
- validators := make([]*Validator, numVals)
+
+ validators := make([]*valset.Validator, numVals)
+ valAddrs := randomAddresses(numVals)
+
for i := 0; i < numVals; i++ {
- validators[i] = &Validator{
- Address: randomAddress(),
+ validators[i] = &valset.Validator{
+ Address: valAddrs[i],
// cannot process validators with voting power 0, hence +1
VotingPower: int64(rand.Intn(99) + 1),
}
}
// sort validators by address, which is what NewValidatorSet also does
- sort.Sort(ValidatorsByAddress(validators))
+ sort.Sort(valset.ValidatorsByAddress(validators))
+
return validators
}
func randomAddress() common.Address {
bytes := make([]byte, 32)
rand.Read(bytes)
+
return common.BytesToAddress(bytes)
}
+
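+// randomAddresses returns n distinct random addresses, re-drawing on the
+// (unlikely) collision so the resulting slice contains no duplicates.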
+func randomAddresses(n int) []common.Address {
+ if n <= 0 {
+ return []common.Address{}
+ }
+
+ addrs := make([]common.Address, 0, n)
+ addrsSet := make(map[common.Address]struct{}, n)
+
+ var (
+ addr common.Address
+ exist bool
+ )
+
+ bytes := make([]byte, 32)
+
+ for {
+ rand.Read(bytes)
+
+ addr = common.BytesToAddress(bytes)
+
+ _, exist = addrsSet[addr]
+ if !exist {
+ addrs = append(addrs, addr)
+
+ addrsSet[addr] = struct{}{}
+ }
+
+ if len(addrs) == n {
+ return addrs
+ }
+ }
+}
+
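+// TestRandomAddresses property-checks (via rapid) that randomAddresses never
+// returns duplicate entries for any requested length.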
+func TestRandomAddresses(t *testing.T) {
+ t.Parallel()
+
+ rapid.Check(t, func(t *rapid.T) {
+ length := rapid.IntMax(100).Draw(t, "length").(int)
+
+ addrs := randomAddresses(length)
+ addressSet := unique.New(addrs)
+
+ if len(addrs) != len(addressSet) {
+ t.Fatalf("length of unique addresses %d, expected %d", len(addressSet), len(addrs))
+ }
+ })
+}
diff --git a/consensus/bor/span.go b/consensus/bor/span.go
index 2fd0cf1079..4867635b8e 100644
--- a/consensus/bor/span.go
+++ b/consensus/bor/span.go
@@ -1,16 +1,17 @@
package bor
-// Span represents a current bor span
-type Span struct {
- ID uint64 `json:"span_id" yaml:"span_id"`
- StartBlock uint64 `json:"start_block" yaml:"start_block"`
- EndBlock uint64 `json:"end_block" yaml:"end_block"`
-}
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+)
-// HeimdallSpan represents span from heimdall APIs
-type HeimdallSpan struct {
- Span
- ValidatorSet ValidatorSet `json:"validator_set" yaml:"validator_set"`
- SelectedProducers []Validator `json:"selected_producers" yaml:"selected_producers"`
- ChainID string `json:"bor_chain_id" yaml:"bor_chain_id"`
+//go:generate mockgen -destination=./span_mock.go -package=bor . Spanner
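+// Spanner gives the consensus engine read access to the current span and its
+// validator set, and commits new spans received from Heimdall into state.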
+type Spanner interface {
+ GetCurrentSpan(headerHash common.Hash) (*span.Span, error)
+ GetCurrentValidators(headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error)
+ CommitSpan(heimdallSpan span.HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error
}
diff --git a/consensus/bor/span_mock.go b/consensus/bor/span_mock.go
new file mode 100644
index 0000000000..12ed945234
--- /dev/null
+++ b/consensus/bor/span_mock.go
@@ -0,0 +1,84 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/ethereum/go-ethereum/consensus/bor (interfaces: Spanner)
+
+// Package bor is a generated GoMock package.
+package bor
+
+import (
+ reflect "reflect"
+
+ common "github.com/ethereum/go-ethereum/common"
+ span "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span"
+ valset "github.com/ethereum/go-ethereum/consensus/bor/valset"
+ core "github.com/ethereum/go-ethereum/core"
+ state "github.com/ethereum/go-ethereum/core/state"
+ types "github.com/ethereum/go-ethereum/core/types"
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockSpanner is a mock of Spanner interface.
+type MockSpanner struct {
+ ctrl *gomock.Controller
+ recorder *MockSpannerMockRecorder
+}
+
+// MockSpannerMockRecorder is the mock recorder for MockSpanner.
+type MockSpannerMockRecorder struct {
+ mock *MockSpanner
+}
+
+// NewMockSpanner creates a new mock instance.
+func NewMockSpanner(ctrl *gomock.Controller) *MockSpanner {
+ mock := &MockSpanner{ctrl: ctrl}
+ mock.recorder = &MockSpannerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockSpanner) EXPECT() *MockSpannerMockRecorder {
+ return m.recorder
+}
+
+// CommitSpan mocks base method.
+func (m *MockSpanner) CommitSpan(arg0 span.HeimdallSpan, arg1 *state.StateDB, arg2 *types.Header, arg3 core.ChainContext) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CommitSpan", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CommitSpan indicates an expected call of CommitSpan.
+func (mr *MockSpannerMockRecorder) CommitSpan(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitSpan", reflect.TypeOf((*MockSpanner)(nil).CommitSpan), arg0, arg1, arg2, arg3)
+}
+
+// GetCurrentSpan mocks base method.
+func (m *MockSpanner) GetCurrentSpan(arg0 common.Hash) (*span.Span, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetCurrentSpan", arg0)
+ ret0, _ := ret[0].(*span.Span)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetCurrentSpan indicates an expected call of GetCurrentSpan.
+func (mr *MockSpannerMockRecorder) GetCurrentSpan(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSpan", reflect.TypeOf((*MockSpanner)(nil).GetCurrentSpan), arg0)
+}
+
+// GetCurrentValidators mocks base method.
+func (m *MockSpanner) GetCurrentValidators(arg0 common.Hash, arg1 uint64) ([]*valset.Validator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetCurrentValidators", arg0, arg1)
+ ret0, _ := ret[0].([]*valset.Validator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetCurrentValidators indicates an expected call of GetCurrentValidators.
+func (mr *MockSpannerMockRecorder) GetCurrentValidators(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidators", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidators), arg0, arg1)
+}
diff --git a/consensus/bor/statefull/processor.go b/consensus/bor/statefull/processor.go
new file mode 100644
index 0000000000..98ed41ed92
--- /dev/null
+++ b/consensus/bor/statefull/processor.go
@@ -0,0 +1,118 @@
+package statefull
+
+import (
+ "math"
+ "math/big"
+
+ ethereum "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/params"
+)
+
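+// systemAddress is the pseudo-sender used for system calls built via
+// GetSystemMessage below; such messages carry no value and skip nonce checks.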
+var systemAddress = common.HexToAddress("0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE")
+
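+// ChainContext bundles a header reader and the Bor engine so it can satisfy
+// core.ChainContext when executing system calls.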
+type ChainContext struct {
+ Chain consensus.ChainHeaderReader
+ Bor consensus.Engine
+}
+
+func (c ChainContext) Engine() consensus.Engine {
+ return c.Bor
+}
+
+func (c ChainContext) GetHeader(hash common.Hash, number uint64) *types.Header {
+ return c.Chain.GetHeader(hash, number)
+}
+
+// Callmsg implements core.Message to allow passing a call as a simulated transaction.
+type Callmsg struct {
+ ethereum.CallMsg
+}
+
+func (m Callmsg) From() common.Address { return m.CallMsg.From }
+func (m Callmsg) Nonce() uint64 { return 0 }
+func (m Callmsg) CheckNonce() bool { return false }
+func (m Callmsg) To() *common.Address { return m.CallMsg.To }
+func (m Callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
+func (m Callmsg) Gas() uint64 { return m.CallMsg.Gas }
+func (m Callmsg) Value() *big.Int { return m.CallMsg.Value }
+func (m Callmsg) Data() []byte { return m.CallMsg.Data }
+
+// GetSystemMessage builds a call message from the system address to toAddress,
+// carrying the given calldata with zero value and zero gas price.
+func GetSystemMessage(toAddress common.Address, data []byte) Callmsg {
+ return Callmsg{
+ ethereum.CallMsg{
+ From: systemAddress,
+ Gas: math.MaxUint64 / 2,
+ GasPrice: big.NewInt(0),
+ Value: big.NewInt(0),
+ To: &toAddress,
+ Data: data,
+ },
+ }
+}
+
+// ApplyMessage executes the given system call message against the provided
+// state and returns the gas used.
+func ApplyMessage(
+ msg Callmsg,
+ state *state.StateDB,
+ header *types.Header,
+ chainConfig *params.ChainConfig,
+ chainContext core.ChainContext,
+) (uint64, error) {
+ initialGas := msg.Gas()
+
+ // Create a new context to be used in the EVM environment
+ blockContext := core.NewEVMBlockContext(header, chainContext, &header.Coinbase)
+
+ // Create a new environment which holds all relevant information
+ // about the transaction and calling mechanisms.
+ vmenv := vm.NewEVM(blockContext, vm.TxContext{}, state, chainConfig, vm.Config{})
+
+ // Apply the transaction to the current state (included in the env)
+ _, gasLeft, err := vmenv.Call(
+ vm.AccountRef(msg.From()),
+ *msg.To(),
+ msg.Data(),
+ msg.Gas(),
+ msg.Value(),
+ )
+ // Update the state with pending changes
+ if err != nil {
+ state.Finalise(true)
+ }
+
+ gasUsed := initialGas - gasLeft
+
+ return gasUsed, nil
+}
+
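+// ApplyBorMessage runs msg on the supplied EVM and wraps the outcome in an
+// ExecutionResult, reporting gas used, return data and any call error.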
+func ApplyBorMessage(vmenv vm.EVM, msg Callmsg) (*core.ExecutionResult, error) {
+ initialGas := msg.Gas()
+
+ // Apply the transaction to the current state (included in the env)
+ ret, gasLeft, err := vmenv.Call(
+ vm.AccountRef(msg.From()),
+ *msg.To(),
+ msg.Data(),
+ msg.Gas(),
+ msg.Value(),
+ )
+ // Update the state with pending changes
+ if err != nil {
+ vmenv.StateDB.Finalise(true)
+ }
+
+ gasUsed := initialGas - gasLeft
+
+ return &core.ExecutionResult{
+ UsedGas: gasUsed,
+ Err: err,
+ ReturnData: ret,
+ }, nil
+}
diff --git a/consensus/bor/validators_getter.go b/consensus/bor/validators_getter.go
new file mode 100644
index 0000000000..90d1fccf6e
--- /dev/null
+++ b/consensus/bor/validators_getter.go
@@ -0,0 +1,11 @@
+package bor
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
+)
+
+//go:generate mockgen -destination=./validators_getter_mock.go -package=bor . ValidatorsGetter
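+// ValidatorsGetter looks up the validator set that is active at a given block.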
+type ValidatorsGetter interface {
+ GetCurrentValidators(headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error)
+}
diff --git a/consensus/bor/validators_getter_mock.go b/consensus/bor/validators_getter_mock.go
new file mode 100644
index 0000000000..ad99489d8e
--- /dev/null
+++ b/consensus/bor/validators_getter_mock.go
@@ -0,0 +1,51 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/ethereum/go-ethereum/consensus/bor (interfaces: ValidatorsGetter)
+
+// Package bor is a generated GoMock package.
+package bor
+
+import (
+ reflect "reflect"
+
+ common "github.com/ethereum/go-ethereum/common"
+ valset "github.com/ethereum/go-ethereum/consensus/bor/valset"
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockValidatorsGetter is a mock of ValidatorsGetter interface.
+type MockValidatorsGetter struct {
+ ctrl *gomock.Controller
+ recorder *MockValidatorsGetterMockRecorder
+}
+
+// MockValidatorsGetterMockRecorder is the mock recorder for MockValidatorsGetter.
+type MockValidatorsGetterMockRecorder struct {
+ mock *MockValidatorsGetter
+}
+
+// NewMockValidatorsGetter creates a new mock instance.
+func NewMockValidatorsGetter(ctrl *gomock.Controller) *MockValidatorsGetter {
+ mock := &MockValidatorsGetter{ctrl: ctrl}
+ mock.recorder = &MockValidatorsGetterMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockValidatorsGetter) EXPECT() *MockValidatorsGetterMockRecorder {
+ return m.recorder
+}
+
+// GetCurrentValidators mocks base method.
+func (m *MockValidatorsGetter) GetCurrentValidators(arg0 common.Hash, arg1 uint64) ([]*valset.Validator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetCurrentValidators", arg0, arg1)
+ ret0, _ := ret[0].([]*valset.Validator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetCurrentValidators indicates an expected call of GetCurrentValidators.
+func (mr *MockValidatorsGetterMockRecorder) GetCurrentValidators(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidators", reflect.TypeOf((*MockValidatorsGetter)(nil).GetCurrentValidators), arg0, arg1)
+}
diff --git a/consensus/bor/valset/error.go b/consensus/bor/valset/error.go
new file mode 100644
index 0000000000..37add21683
--- /dev/null
+++ b/consensus/bor/valset/error.go
@@ -0,0 +1,32 @@
+package valset
+
+import "fmt"
+
+// TotalVotingPowerExceededError is returned when the maximum allowed total voting power is exceeded
+type TotalVotingPowerExceededError struct {
+ Sum int64
+ Validators []*Validator
+}
+
+func (e *TotalVotingPowerExceededError) Error() string {
+ return fmt.Sprintf(
+ "Total voting power should be guarded to not exceed %v; got: %v; for validator set: %v",
+ MaxTotalVotingPower,
+ e.Sum,
+ e.Validators,
+ )
+}
+
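+// InvalidStartEndBlockError is returned when the requested start/end block
+// range is inconsistent with the current header.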
+type InvalidStartEndBlockError struct {
+ Start uint64
+ End uint64
+ CurrentHeader uint64
+}
+
+func (e *InvalidStartEndBlockError) Error() string {
+ return fmt.Sprintf(
+ "Invalid parameters start: %d and end block: %d params",
+ e.Start,
+ e.End,
+ )
+}
diff --git a/consensus/bor/validator.go b/consensus/bor/valset/validator.go
similarity index 99%
rename from consensus/bor/validator.go
rename to consensus/bor/valset/validator.go
index 00e9fdc645..250206c1f3 100644
--- a/consensus/bor/validator.go
+++ b/consensus/bor/valset/validator.go
@@ -1,8 +1,7 @@
-package bor
+package valset
import (
"bytes"
- // "encoding/json"
"errors"
"fmt"
"math/big"
@@ -43,9 +42,12 @@ func (v *Validator) Cmp(other *Validator) *Validator {
if v == nil {
return other
}
+
if other == nil {
return v
}
+
+ // nolint:nestif
if v.ProposerPriority > other.ProposerPriority {
return v
} else if v.ProposerPriority < other.ProposerPriority {
@@ -66,6 +68,7 @@ func (v *Validator) String() string {
if v == nil {
return "nil-Validator"
}
+
return fmt.Sprintf("Validator{%v Power:%v Priority:%v}",
v.Address.Hex(),
v.VotingPower,
@@ -87,6 +90,7 @@ func (v *Validator) HeaderBytes() []byte {
result := make([]byte, 40)
copy(result[:20], v.Address.Bytes())
copy(result[20:], v.PowerBytes())
+
return result
}
@@ -95,6 +99,7 @@ func (v *Validator) PowerBytes() []byte {
powerBytes := big.NewInt(0).SetInt64(v.VotingPower).Bytes()
result := make([]byte, 20)
copy(result[20-len(powerBytes):], powerBytes)
+
return result
}
@@ -114,6 +119,7 @@ func ParseValidators(validatorsBytes []byte) ([]*Validator, error) {
}
result := make([]*Validator, len(validatorsBytes)/40)
+
for i := 0; i < len(validatorsBytes); i += 40 {
address := make([]byte, 20)
power := make([]byte, 20)
@@ -142,6 +148,7 @@ func SortMinimalValByAddress(a []MinimalVal) []MinimalVal {
sort.Slice(a, func(i, j int) bool {
return bytes.Compare(a[i].Signer.Bytes(), a[j].Signer.Bytes()) < 0
})
+
return a
}
@@ -150,5 +157,6 @@ func ValidatorsToMinimalValidators(vals []Validator) (minVals []MinimalVal) {
for _, val := range vals {
minVals = append(minVals, val.MinimalVal())
}
+
return
}
diff --git a/consensus/bor/validator_set.go b/consensus/bor/valset/validator_set.go
similarity index 97%
rename from consensus/bor/validator_set.go
rename to consensus/bor/valset/validator_set.go
index 0b5c10ebd0..19e6c681fe 100644
--- a/consensus/bor/validator_set.go
+++ b/consensus/bor/valset/validator_set.go
@@ -1,4 +1,4 @@
-package bor
+package valset
// Tendermint leader selection algorithm
@@ -55,13 +55,16 @@ type ValidatorSet struct {
// function panics.
func NewValidatorSet(valz []*Validator) *ValidatorSet {
vals := &ValidatorSet{}
+
err := vals.updateWithChangeSet(valz, false)
if err != nil {
panic(fmt.Sprintf("cannot create validator set: %s", err))
}
+
if len(valz) > 0 {
vals.IncrementProposerPriority(1)
}
+
return vals
}
@@ -72,9 +75,10 @@ func (vals *ValidatorSet) IsNilOrEmpty() bool {
// Increment ProposerPriority and update the proposer on a copy, and return it.
func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet {
- copy := vals.Copy()
- copy.IncrementProposerPriority(times)
- return copy
+ validatorCopy := vals.Copy()
+ validatorCopy.IncrementProposerPriority(times)
+
+ return validatorCopy
}
// IncrementProposerPriority increments ProposerPriority of each validator and updates the
@@ -84,6 +88,7 @@ func (vals *ValidatorSet) IncrementProposerPriority(times int) {
if vals.IsNilOrEmpty() {
panic("empty validator set")
}
+
if times <= 0 {
panic("Cannot call IncrementProposerPriority with non-positive times")
}
@@ -120,6 +125,7 @@ func (vals *ValidatorSet) RescalePriorities(diffMax int64) {
// NOTE: This may make debugging priority issues easier as well.
diff := computeMaxMinPriorityDiff(vals)
ratio := (diff + diffMax - 1) / diffMax
+
if diff > diffMax {
for _, val := range vals.Validators {
val.ProposerPriority = val.ProposerPriority / ratio
@@ -145,10 +151,13 @@ func (vals *ValidatorSet) incrementProposerPriority() *Validator {
func (vals *ValidatorSet) computeAvgProposerPriority() int64 {
n := int64(len(vals.Validators))
sum := big.NewInt(0)
+
for _, val := range vals.Validators {
sum.Add(sum, big.NewInt(val.ProposerPriority))
}
+
avg := sum.Div(sum, big.NewInt(n))
+
if avg.IsInt64() {
return avg.Int64()
}
@@ -162,17 +171,22 @@ func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 {
if vals.IsNilOrEmpty() {
panic("empty validator set")
}
+
max := int64(math.MinInt64)
min := int64(math.MaxInt64)
+
for _, v := range vals.Validators {
if v.ProposerPriority < min {
min = v.ProposerPriority
}
+
if v.ProposerPriority > max {
max = v.ProposerPriority
}
}
+
diff := max - min
+
if diff < 0 {
return -1 * diff
} else {
@@ -185,6 +199,7 @@ func (vals *ValidatorSet) getValWithMostPriority() *Validator {
for _, val := range vals.Validators {
res = res.Cmp(val)
}
+
return res
}
@@ -192,7 +207,9 @@ func (vals *ValidatorSet) shiftByAvgProposerPriority() {
if vals.IsNilOrEmpty() {
panic("empty validator set")
}
+
avgProposerPriority := vals.computeAvgProposerPriority()
+
for _, val := range vals.Validators {
val.ProposerPriority = safeSubClip(val.ProposerPriority, avgProposerPriority)
}
@@ -203,10 +220,13 @@ func validatorListCopy(valsList []*Validator) []*Validator {
if valsList == nil {
return nil
}
+
valsCopy := make([]*Validator, len(valsList))
+
for i, val := range valsList {
valsCopy[i] = val.Copy()
}
+
return valsCopy
}
@@ -225,6 +245,7 @@ func (vals *ValidatorSet) HasAddress(address []byte) bool {
idx := sort.Search(len(vals.Validators), func(i int) bool {
return bytes.Compare(address, vals.Validators[i].Address.Bytes()) <= 0
})
+
return idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address.Bytes(), address)
}
@@ -234,9 +255,10 @@ func (vals *ValidatorSet) GetByAddress(address common.Address) (index int, val *
idx := sort.Search(len(vals.Validators), func(i int) bool {
return bytes.Compare(address.Bytes(), vals.Validators[i].Address.Bytes()) <= 0
})
- if idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address.Bytes(), address.Bytes()) {
+ if idx < len(vals.Validators) && vals.Validators[idx].Address == address {
return idx, vals.Validators[idx].Copy()
}
+
return -1, nil
}
@@ -247,7 +269,9 @@ func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator)
if index < 0 || index >= len(vals.Validators) {
return nil, nil
}
+
val = vals.Validators[index]
+
return val.Address.Bytes(), val.Copy()
}
@@ -257,8 +281,7 @@ func (vals *ValidatorSet) Size() int {
}
// Force recalculation of the set's total voting power.
-func (vals *ValidatorSet) updateTotalVotingPower() error {
-
+func (vals *ValidatorSet) UpdateTotalVotingPower() error {
sum := int64(0)
for _, val := range vals.Validators {
// mind overflow
@@ -267,7 +290,9 @@ func (vals *ValidatorSet) updateTotalVotingPower() error {
return &TotalVotingPowerExceededError{sum, vals.Validators}
}
}
+
vals.totalVotingPower = sum
+
return nil
}
@@ -276,11 +301,13 @@ func (vals *ValidatorSet) updateTotalVotingPower() error {
func (vals *ValidatorSet) TotalVotingPower() int64 {
if vals.totalVotingPower == 0 {
log.Info("invoking updateTotalVotingPower before returning it")
- if err := vals.updateTotalVotingPower(); err != nil {
+
+ if err := vals.UpdateTotalVotingPower(); err != nil {
// Can/should we do better?
panic(err)
}
}
+
return vals.totalVotingPower
}
@@ -290,9 +317,11 @@ func (vals *ValidatorSet) GetProposer() (proposer *Validator) {
if len(vals.Validators) == 0 {
return nil
}
+
if vals.Proposer == nil {
vals.Proposer = vals.findProposer()
}
+
return vals.Proposer.Copy()
}
@@ -303,6 +332,7 @@ func (vals *ValidatorSet) findProposer() *Validator {
proposer = proposer.Cmp(val)
}
}
+
return proposer
}
@@ -343,6 +373,7 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e
removals = make([]*Validator, 0, len(changes))
updates = make([]*Validator, 0, len(changes))
+
var prevAddr common.Address
// Scan changes by address and append valid validators to updates or removals lists.
@@ -351,22 +382,27 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e
err = fmt.Errorf("duplicate entry %v in %v", valUpdate, changes)
return nil, nil, err
}
+
if valUpdate.VotingPower < 0 {
err = fmt.Errorf("voting power can't be negative: %v", valUpdate)
return nil, nil, err
}
+
if valUpdate.VotingPower > MaxTotalVotingPower {
err = fmt.Errorf("to prevent clipping/ overflow, voting power can't be higher than %v: %v ",
MaxTotalVotingPower, valUpdate)
return nil, nil, err
}
+
if valUpdate.VotingPower == 0 {
removals = append(removals, valUpdate)
} else {
updates = append(updates, valUpdate)
}
+
prevAddr = valUpdate.Address
}
+
return updates, removals, err
}
@@ -382,12 +418,12 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e
// by processChanges for duplicates and invalid values.
// No changes are made to the validator set 'vals'.
func verifyUpdates(updates []*Validator, vals *ValidatorSet) (updatedTotalVotingPower int64, numNewValidators int, err error) {
-
updatedTotalVotingPower = vals.TotalVotingPower()
for _, valUpdate := range updates {
address := valUpdate.Address
_, val := vals.GetByAddress(address)
+
if val == nil {
// New validator, add its voting power to the total.
updatedTotalVotingPower += valUpdate.VotingPower
@@ -396,11 +432,14 @@ func verifyUpdates(updates []*Validator, vals *ValidatorSet) (updatedTotalVoting
// Updated validator, add the difference in power to the total.
updatedTotalVotingPower += valUpdate.VotingPower - val.VotingPower
}
+
overflow := updatedTotalVotingPower > MaxTotalVotingPower
+
if overflow {
err = fmt.Errorf(
"failed to add/update validator %v, total voting power would exceed the max allowed %v",
valUpdate, MaxTotalVotingPower)
+
return 0, 0, err
}
}
@@ -414,10 +453,10 @@ func verifyUpdates(updates []*Validator, vals *ValidatorSet) (updatedTotalVoting
// 'updates' parameter must be a list of unique validators to be added or updated.
// No changes are made to the validator set 'vals'.
func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalVotingPower int64) {
-
for _, valUpdate := range updates {
address := valUpdate.Address
_, val := vals.GetByAddress(address)
+
if val == nil {
// add val
// Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't
@@ -432,7 +471,6 @@ func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotal
valUpdate.ProposerPriority = val.ProposerPriority
}
}
-
}
// Merges the vals' validator list with the updates list.
@@ -440,7 +478,6 @@ func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotal
// Expects updates to be a list of updates sorted by address with no duplicates or errors,
// must have been validated with verifyUpdates() and priorities computed with computeNewPriorities().
func (vals *ValidatorSet) applyUpdates(updates []*Validator) {
-
existing := vals.Validators
merged := make([]*Validator, len(existing)+len(updates))
i := 0
@@ -478,24 +515,25 @@ func (vals *ValidatorSet) applyUpdates(updates []*Validator) {
// Checks that the validators to be removed are part of the validator set.
// No changes are made to the validator set 'vals'.
func verifyRemovals(deletes []*Validator, vals *ValidatorSet) error {
-
for _, valUpdate := range deletes {
address := valUpdate.Address
_, val := vals.GetByAddress(address)
+
if val == nil {
return fmt.Errorf("failed to find validator %X to remove", address)
}
}
+
if len(deletes) > len(vals.Validators) {
panic("more deletes than validators")
}
+
return nil
}
// Removes the validators specified in 'deletes' from validator set 'vals'.
// Should not fail as verification has been done before.
func (vals *ValidatorSet) applyRemovals(deletes []*Validator) {
-
existing := vals.Validators
merged := make([]*Validator, len(existing)-len(deletes))
@@ -509,6 +547,7 @@ func (vals *ValidatorSet) applyRemovals(deletes []*Validator) {
merged[i] = existing[0]
i++
}
+
existing = existing[1:]
}
@@ -526,7 +565,6 @@ func (vals *ValidatorSet) applyRemovals(deletes []*Validator) {
// are not allowed and will trigger an error if present in 'changes'.
// The 'allowDeletes' flag is set to false by NewValidatorSet() and to true by UpdateWithChangeSet().
func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes bool) error {
-
if len(changes) <= 0 {
return nil
}
@@ -564,7 +602,7 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes
vals.applyUpdates(updates)
vals.applyRemovals(deletes)
- if err := vals.updateTotalVotingPower(); err != nil {
+ if err := vals.UpdateTotalVotingPower(); err != nil {
return err
}
@@ -596,19 +634,19 @@ func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error {
func IsErrTooMuchChange(err error) bool {
switch err.(type) {
- case errTooMuchChange:
+ case tooMuchChangeError:
return true
default:
return false
}
}
-type errTooMuchChange struct {
+type tooMuchChangeError struct {
got int64
needed int64
}
-func (e errTooMuchChange) Error() string {
+func (e tooMuchChangeError) Error() string {
return fmt.Sprintf("Invalid commit -- insufficient old voting power: got %v, needed %v", e.got, e.needed)
}
@@ -622,11 +660,14 @@ func (vals *ValidatorSet) StringIndented(indent string) string {
if vals == nil {
return "nil-ValidatorSet"
}
+
var valStrings []string
+
vals.Iterate(func(index int, val *Validator) bool {
valStrings = append(valStrings, val.String())
return false
})
+
return fmt.Sprintf(`ValidatorSet{
%s Proposer: %v
%s Validators:
@@ -636,7 +677,6 @@ func (vals *ValidatorSet) StringIndented(indent string) string {
indent,
indent, strings.Join(valStrings, "\n"+indent+" "),
indent)
-
}
//-------------------------------------
@@ -668,6 +708,7 @@ func safeAdd(a, b int64) (int64, bool) {
} else if b < 0 && a < math.MinInt64-b {
return -1, true
}
+
return a + b, false
}
@@ -677,6 +718,7 @@ func safeSub(a, b int64) (int64, bool) {
} else if b < 0 && a > math.MaxInt64+b {
return -1, true
}
+
return a - b, false
}
@@ -686,8 +728,10 @@ func safeAddClip(a, b int64) int64 {
if b < 0 {
return math.MinInt64
}
+
return math.MaxInt64
}
+
return c
}
@@ -697,7 +741,9 @@ func safeSubClip(a, b int64) int64 {
if b > 0 {
return math.MinInt64
}
+
return math.MaxInt64
}
+
return c
}
diff --git a/consensus/ethash/sealer_test.go b/consensus/ethash/sealer_test.go
index c34e76aec2..a9e96af866 100644
--- a/consensus/ethash/sealer_test.go
+++ b/consensus/ethash/sealer_test.go
@@ -70,7 +70,7 @@ func TestRemoteNotify(t *testing.T) {
if want := common.BytesToHash(target.Bytes()).Hex(); work[2] != want {
t.Errorf("work packet target mismatch: have %s, want %s", work[2], want)
}
- case <-time.After(3 * time.Second):
+ case <-time.After(5 * time.Second):
t.Fatalf("notification timed out")
}
}
diff --git a/consensus/merger.go b/consensus/merger.go
index ffbcbf2b85..c435ecf91e 100644
--- a/consensus/merger.go
+++ b/consensus/merger.go
@@ -45,12 +45,14 @@ type Merger struct {
// NewMerger creates a new Merger which stores its transition status in the provided db.
func NewMerger(db ethdb.KeyValueStore) *Merger {
var status transitionStatus
+
blob := rawdb.ReadTransitionStatus(db)
if len(blob) != 0 {
if err := rlp.DecodeBytes(blob, &status); err != nil {
log.Crit("Failed to decode the transition status", "err", err)
}
}
+
return &Merger{
db: db,
status: status,
diff --git a/core/bench_test.go b/core/bench_test.go
index 959979763d..ad6179d0a8 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -305,7 +305,8 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
}
makeChainForBench(db, full, count)
db.Close()
- cacheConfig := *defaultCacheConfig
+
+ cacheConfig := *DefaultCacheConfig
cacheConfig.TrieDirtyDisabled = true
b.ReportAllocs()
diff --git a/core/blockchain.go b/core/blockchain.go
index 1b11c781c5..8a4c581f66 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -27,6 +27,8 @@ import (
"sync/atomic"
"time"
+ lru "github.com/hashicorp/golang-lru"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
@@ -43,7 +45,6 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
- lru "github.com/hashicorp/golang-lru"
)
var (
@@ -133,9 +134,9 @@ type CacheConfig struct {
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}
-// defaultCacheConfig are the default caching values if none are specified by the
+// DefaultCacheConfig are the default caching values if none are specified by the
// user (also used during testing).
-var defaultCacheConfig = &CacheConfig{
+var DefaultCacheConfig = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
@@ -222,7 +223,7 @@ type BlockChain struct {
// and Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) {
if cacheConfig == nil {
- cacheConfig = defaultCacheConfig
+ cacheConfig = DefaultCacheConfig
}
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index b93444b8cd..f61f930496 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -397,10 +398,25 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr
return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}
+// Snaps retrieves the snapshot tree.
+func (bc *BlockChain) Snaps() *snapshot.Tree {
+ return bc.snaps
+}
+
+// DB retrieves the blockchain database.
+func (bc *BlockChain) DB() ethdb.Database {
+ return bc.db
+}
+
//
// Bor related changes
//
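+// BorStateSyncer is the subset of BlockChain used to push Bor state-sync data
+// and subscribe to the corresponding state-sync events.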
+type BorStateSyncer interface {
+ SetStateSync(stateData []*types.StateSyncData)
+ SubscribeStateSyncEvent(ch chan<- StateSyncEvent) event.Subscription
+}
+
// SetStateSync set sync data in state_data
func (bc *BlockChain) SetStateSync(stateData []*types.StateSyncData) {
bc.stateSyncData = stateData
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 37a1a42d0c..de97000918 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -920,7 +920,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
archiveDb, delfn := makeDb()
defer delfn()
- archiveCaching := *defaultCacheConfig
+ archiveCaching := *DefaultCacheConfig
archiveCaching.TrieDirtyDisabled = true
archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
diff --git a/core/chain_makers.go b/core/chain_makers.go
index c7bf60a4b0..4b2d2082df 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
)
@@ -334,7 +335,10 @@ func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethd
}
type fakeChainReader struct {
- config *params.ChainConfig
+ config *params.ChainConfig
+ stateSyncData []*types.StateSyncData
+ stateSyncFeed event.Feed
+ scope event.SubscriptionScope
}
// Config returns the chain configuration.
@@ -348,3 +352,13 @@ func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header
func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil }
func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil }
func (cr *fakeChainReader) GetTd(hash common.Hash, number uint64) *big.Int { return nil }
+
+// SetStateSync set sync data in state_data
+func (cr *fakeChainReader) SetStateSync(stateData []*types.StateSyncData) {
+ cr.stateSyncData = stateData
+}
+
+// SubscribeStateSyncEvent registers a subscription of StateSyncEvent.
+func (cr *fakeChainReader) SubscribeStateSyncEvent(ch chan<- StateSyncEvent) event.Subscription {
+ return cr.scope.Track(cr.stateSyncFeed.Subscribe(ch))
+}
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index 7d6939e1dc..158e8c1eeb 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -21,6 +21,8 @@ import (
"math/big"
"testing"
+ "golang.org/x/crypto/sha3"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
@@ -32,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
- "golang.org/x/crypto/sha3"
)
// TestStateProcessorErrors tests the output from the 'core' errors
@@ -308,7 +309,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
header := &types.Header{
ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(),
- Difficulty: engine.CalcDifficulty(&fakeChainReader{config}, parent.Time()+10, &types.Header{
+ Difficulty: engine.CalcDifficulty(&fakeChainReader{config: config}, parent.Time()+10, &types.Header{
Number: parent.Number(),
Time: parent.Time(),
Difficulty: parent.Difficulty(),
diff --git a/core/blockchain_repair_test.go b/core/tests/blockchain_repair_test.go
similarity index 93%
rename from core/blockchain_repair_test.go
rename to core/tests/blockchain_repair_test.go
index 9133671796..b52bf76fd9 100644
--- a/core/blockchain_repair_test.go
+++ b/core/tests/blockchain_repair_test.go
@@ -18,7 +18,7 @@
// the database in some strange state with gaps in the chain, nor with block data
// dangling in the future.
-package core
+package tests
import (
"io/ioutil"
@@ -27,12 +27,22 @@ import (
"testing"
"time"
+ "github.com/golang/mock/gomock"
+
+ "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/bor"
+ "github.com/ethereum/go-ethereum/consensus/bor/api"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
"github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/tests/bor/mocks"
)
// Tests a recovery for a short canonical chain where a recent block was already
@@ -1750,7 +1760,17 @@ func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
}, snapshots)
}
+var (
+ testKey1, _ = crypto.GenerateKey()
+ testAddress1 = crypto.PubkeyToAddress(testKey1.PublicKey)
+
+ testKey2, _ = crypto.GenerateKey()
+ testAddress2 = crypto.PubkeyToAddress(testKey2.PublicKey) //nolint:unused,varcheck
+)
+
func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
+ t.Skip("need to add a proper signer for Bor consensus")
+
// It's hard to follow the test case, visualize the input
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump(true))
@@ -1768,47 +1788,104 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
}
defer db.Close() // Might double close, should be fine
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ ethAPIMock := api.NewMockCaller(ctrl)
+ ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+
+ spanner := bor.NewMockSpanner(ctrl)
+ spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any()).Return([]*valset.Validator{
+ {
+ ID: 0,
+ Address: common.Address{0x1},
+ VotingPower: 100,
+ ProposerPriority: 0,
+ },
+ }, nil).AnyTimes()
+
+ heimdallClientMock := mocks.NewMockIHeimdallClient(ctrl)
+ heimdallClientMock.EXPECT().Close().Times(1)
+
+ contractMock := bor.NewMockGenesisContract(ctrl)
+
// Initialize a fresh chain
var (
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
- engine = ethash.NewFullFaker()
- config = &CacheConfig{
+ gspec = &core.Genesis{
+ Config: params.BorUnittestChainConfig,
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ config = &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0, // Disable snapshot by default
}
)
+
+ engine := miner.NewFakeBor(t, db, params.BorUnittestChainConfig, ethAPIMock, spanner, heimdallClientMock, contractMock)
defer engine.Close()
+
+ engineBorInternal, ok := engine.(*bor.Bor)
+ if ok {
+ gspec.ExtraData = make([]byte, 32+common.AddressLength+crypto.SignatureLength)
+ copy(gspec.ExtraData[32:32+common.AddressLength], testAddress1.Bytes())
+
+ engineBorInternal.Authorize(testAddress1, func(account accounts.Account, s string, data []byte) ([]byte, error) {
+ return crypto.Sign(crypto.Keccak256(data), testKey1)
+ })
+ }
+
+ genesis := gspec.MustCommit(db)
+
if snapshots {
config.SnapshotLimit = 256
config.SnapshotWait = true
}
- chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+
+ chain, err := core.NewBlockChain(db, config, params.BorUnittestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
+
// If sidechain blocks are needed, make a light chain and import it
var sideblocks types.Blocks
if tt.sidechainBlocks > 0 {
- sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{0x01})
+ sideblocks, _ = core.GenerateChain(params.BorUnittestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *core.BlockGen) {
+ b.SetCoinbase(testAddress1)
+
+ if bor.IsSprintStart(b.Number().Uint64(), params.BorUnittestChainConfig.Bor.Sprint) {
+ b.SetExtra(gspec.ExtraData)
+ } else {
+ b.SetExtra(make([]byte, 32+crypto.SignatureLength))
+ }
})
if _, err := chain.InsertChain(sideblocks); err != nil {
t.Fatalf("Failed to import side chain: %v", err)
}
}
- canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
+
+ canonblocks, _ := core.GenerateChain(params.BorUnittestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *core.BlockGen) {
b.SetCoinbase(common.Address{0x02})
b.SetDifficulty(big.NewInt(1000000))
+
+ if bor.IsSprintStart(b.Number().Uint64(), params.BorUnittestChainConfig.Bor.Sprint) {
+ b.SetExtra(gspec.ExtraData)
+ } else {
+ b.SetExtra(make([]byte, 32+crypto.SignatureLength))
+ }
})
if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if tt.commitBlock > 0 {
- chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ err = chain.StateCache().TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ if err != nil {
+ t.Fatal("on trieDB.Commit", err)
+ }
+
if snapshots {
- if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
+ if err := chain.Snaps().Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}
}
@@ -1837,7 +1914,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
}
defer db.Close()
- newChain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ newChain, err := core.NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -1888,19 +1965,21 @@ func TestIssue23496(t *testing.T) {
if err != nil {
t.Fatalf("Failed to create temporary datadir: %v", err)
}
+
os.RemoveAll(datadir)
db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
}
+
defer db.Close() // Might double close, should be fine
// Initialize a fresh chain
var (
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
engine = ethash.NewFullFaker()
- config = &CacheConfig{
+ config = &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
@@ -1908,11 +1987,13 @@ func TestIssue23496(t *testing.T) {
SnapshotWait: true,
}
)
- chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+
+ chain, err := core.NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
- blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), 4, func(i int, b *BlockGen) {
+
+ blocks, _ := core.GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), 4, func(i int, b *core.BlockGen) {
b.SetCoinbase(common.Address{0x02})
b.SetDifficulty(big.NewInt(1000000))
})
@@ -1921,13 +2002,18 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[:1]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
- chain.stateCache.TrieDB().Commit(blocks[0].Root(), true, nil)
+
+ err = chain.StateCache().TrieDB().Commit(blocks[0].Root(), true, nil)
+ if err != nil {
+ t.Fatal("on trieDB.Commit", err)
+ }
// Insert block B2 and commit the snapshot into disk
if _, err := chain.InsertChain(blocks[1:2]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
- if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
+
+ if err := chain.Snaps().Cap(blocks[1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}
@@ -1935,7 +2021,11 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[2:3]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
- chain.stateCache.TrieDB().Commit(blocks[2].Root(), true, nil)
+
+ err = chain.StateCache().TrieDB().Commit(blocks[2].Root(), true, nil)
+ if err != nil {
+ t.Fatal("on trieDB.Commit", err)
+ }
// Insert the remaining blocks
if _, err := chain.InsertChain(blocks[3:]); err != nil {
@@ -1950,20 +2040,24 @@ func TestIssue23496(t *testing.T) {
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
}
+
defer db.Close()
- chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, err = core.NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
+
defer chain.Stop()
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
}
+
if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
}
+
if head := chain.CurrentBlock(); head.NumberU64() != uint64(1) {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(1))
}
@@ -1972,15 +2066,19 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[1:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
+
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
}
+
if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
}
+
if head := chain.CurrentBlock(); head.NumberU64() != uint64(4) {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
}
+
if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
t.Error("Failed to regenerate the snapshot of known state")
}
diff --git a/core/blockchain_sethead_test.go b/core/tests/blockchain_sethead_test.go
similarity index 98%
rename from core/blockchain_sethead_test.go
rename to core/tests/blockchain_sethead_test.go
index b2b3a058a4..3a2d87c4eb 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/tests/blockchain_sethead_test.go
@@ -17,7 +17,7 @@
// Tests that setting the chain head backwards doesn't leave the database in some
// strange state with gaps in the chain, nor with block data dangling in the future.
-package core
+package tests
import (
"fmt"
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -1959,79 +1960,98 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
if err != nil {
t.Fatalf("Failed to create temporary datadir: %v", err)
}
+
os.RemoveAll(datadir)
db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
}
+
defer db.Close()
// Initialize a fresh chain
var (
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
engine = ethash.NewFullFaker()
- config = &CacheConfig{
+ config = &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0, // Disable snapshot
}
)
+
if snapshots {
config.SnapshotLimit = 256
config.SnapshotWait = true
}
- chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+
+ chain, err := core.NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
+
// If sidechain blocks are needed, make a light chain and import it
var sideblocks types.Blocks
if tt.sidechainBlocks > 0 {
- sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
+ sideblocks, _ = core.GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *core.BlockGen) {
b.SetCoinbase(common.Address{0x01})
})
+
if _, err := chain.InsertChain(sideblocks); err != nil {
t.Fatalf("Failed to import side chain: %v", err)
}
}
- canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
+
+ canonblocks, _ := core.GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *core.BlockGen) {
b.SetCoinbase(common.Address{0x02})
b.SetDifficulty(big.NewInt(1000000))
})
+
if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
+
if tt.commitBlock > 0 {
- chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ err = chain.StateCache().TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ if err != nil {
+ t.Fatal("on trieDB.Commit", err)
+ }
+
if snapshots {
- if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
+ if err := chain.Snaps().Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}
}
}
+
if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
+
// Manually dereference anything not committed to not have to work with 128+ tries
for _, block := range sideblocks {
- chain.stateCache.TrieDB().Dereference(block.Root())
+ chain.StateCache().TrieDB().Dereference(block.Root())
}
+
for _, block := range canonblocks {
- chain.stateCache.TrieDB().Dereference(block.Root())
+ chain.StateCache().TrieDB().Dereference(block.Root())
}
+
// Force run a freeze cycle
type freezer interface {
Freeze(threshold uint64) error
Ancients() (uint64, error)
}
+
db.(freezer).Freeze(tt.freezeThreshold)
// Set the simulated pivot block
if tt.pivotBlock != nil {
rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
}
+
// Set the head of the chain back to the requested number
chain.SetHead(tt.setheadBlock)
@@ -2044,12 +2064,15 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
}
+
if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
}
+
if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
}
+
if frozen, err := db.(freezer).Ancients(); err != nil {
t.Errorf("Failed to retrieve ancient count: %v\n", err)
} else if int(frozen) != tt.expFrozen {
@@ -2059,7 +2082,8 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
// verifyNoGaps checks that there are no gaps after the initial set of blocks in
// the database and errors if found.
-func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks) {
+//nolint:gocognit
+func verifyNoGaps(t *testing.T, chain *core.BlockChain, canonical bool, inserted types.Blocks) {
t.Helper()
var end uint64
@@ -2111,7 +2135,8 @@ func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted type
// verifyCutoff checks that there are no chain data available in the chain after
// the specified limit, but that it is available before.
-func verifyCutoff(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks, head int) {
+//nolint:gocognit
+func verifyCutoff(t *testing.T, chain *core.BlockChain, canonical bool, inserted types.Blocks, head int) {
t.Helper()
for i := 1; i <= len(inserted); i++ {
diff --git a/core/blockchain_snapshot_test.go b/core/tests/blockchain_snapshot_test.go
similarity index 87%
rename from core/blockchain_snapshot_test.go
rename to core/tests/blockchain_snapshot_test.go
index a8044ecb4d..fb4c09867c 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/tests/blockchain_snapshot_test.go
@@ -17,7 +17,7 @@
// Tests that abnormal program termination (i.e.crash) and restart can recovery
// the snapshot properly if the snapshot is enabled.
-package core
+package tests
import (
"bytes"
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -57,7 +58,9 @@ type snapshotTestBasic struct {
engine consensus.Engine
}
-func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) {
+func (basic *snapshotTestBasic) prepare(t *testing.T) (*core.BlockChain, []*types.Block) {
+ t.Helper()
+
// Create a temporary persistent database
datadir, err := ioutil.TempDir("", "")
if err != nil {
@@ -71,20 +74,22 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
}
// Initialize a fresh chain
var (
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
engine = ethash.NewFullFaker()
gendb = rawdb.NewMemoryDatabase()
// Snapshot is enabled, the first snapshot is created from the Genesis.
// The snapshot memory allowance is 256MB, it means no snapshot flush
// will happen during the block insertion.
- cacheConfig = defaultCacheConfig
+ cacheConfig = core.DefaultCacheConfig
)
- chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+
+ chain, err := core.NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
- blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, func(i int, b *BlockGen) {})
+
+ blocks, _ := core.GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, func(i int, b *core.BlockGen) {})
// Insert the blocks with configured settings.
var breakpoints []uint64
@@ -93,27 +98,39 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
} else {
breakpoints = append(breakpoints, basic.commitBlock, basic.snapshotBlock)
}
+
var startPoint uint64
+
for _, point := range breakpoints {
if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
+
startPoint = point
if basic.commitBlock > 0 && basic.commitBlock == point {
- chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
+ err = chain.StateCache().TrieDB().Commit(blocks[point-1].Root(), true, nil)
+ if err != nil {
+ t.Fatal("on trieDB.Commit", err)
+ }
}
+
if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
// Flushing the entire snap tree into the disk, the
// relevant (a) snapshot root and (b) snapshot generator
// will be persisted atomically.
- chain.snaps.Cap(blocks[point-1].Root(), 0)
- diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
+ err = chain.Snaps().Cap(blocks[point-1].Root(), 0)
+ if err != nil {
+ t.Fatal("on Snaps.Cap", err)
+ }
+
+ diskRoot, blockRoot := chain.Snaps().DiskRoot(), blocks[point-1].Root()
if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
}
}
}
+
if _, err := chain.InsertChain(blocks[startPoint:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
@@ -123,10 +140,13 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
basic.db = db
basic.gendb = gendb
basic.engine = engine
+
return chain, blocks
}
-func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks []*types.Block) {
+func (basic *snapshotTestBasic) verify(t *testing.T, chain *core.BlockChain, blocks []*types.Block) {
+ t.Helper()
+
// Iterate over all the remaining blocks and ensure there are no gaps
verifyNoGaps(t, chain, true, blocks)
verifyCutoff(t, chain, true, blocks, basic.expCanonicalBlocks)
@@ -134,9 +154,11 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader)
}
+
if head := chain.CurrentFastBlock(); head.NumberU64() != basic.expHeadFastBlock {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadFastBlock)
}
+
if head := chain.CurrentBlock(); head.NumberU64() != basic.expHeadBlock {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadBlock)
}
@@ -145,12 +167,12 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
block := chain.GetBlockByNumber(basic.expSnapshotBottom)
if block == nil {
t.Errorf("The correspnding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
- } else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
- t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot())
+ } else if !bytes.Equal(chain.Snaps().DiskRoot().Bytes(), block.Root().Bytes()) {
+ t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.Snaps().DiskRoot())
}
// Check the snapshot, ensure it's integrated
- if err := chain.snaps.Verify(block.Root()); err != nil {
+ if err := chain.Snaps().Verify(block.Root()); err != nil {
t.Errorf("The disk layer is not integrated %v", err)
}
}
@@ -223,10 +245,12 @@ func (snaptest *snapshotTest) test(t *testing.T) {
// Restart the chain normally
chain.Stop()
- newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+
+ newchain, err := core.NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
+
defer newchain.Stop()
snaptest.verify(t, newchain, blocks)
@@ -245,7 +269,7 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
chain, blocks := snaptest.prepare(t)
// Pull the plug on the database, simulating a hard crash
- db := chain.db
+ db := chain.DB()
db.Close()
// Start a new blockchain back up and see where the repair leads us
@@ -259,13 +283,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
// the crash, we do restart twice here: one after the crash and one
// after the normal stop. It's used to ensure the broken snapshot
// can be detected all the time.
- newchain, err := NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := core.NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
newchain.Stop()
- newchain, err = NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = core.NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -292,27 +316,31 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
// Insert blocks without enabling snapshot if gapping is required.
chain.Stop()
- gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, func(i int, b *BlockGen) {})
+
+ gappedBlocks, _ := core.GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, func(i int, b *core.BlockGen) {})
// Insert a few more blocks without enabling snapshot
- var cacheConfig = &CacheConfig{
+ var cacheConfig = &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
}
- newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+
+ newchain, err := core.NewBlockChain(snaptest.db, cacheConfig, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
+
newchain.InsertChain(gappedBlocks)
newchain.Stop()
// Restart the chain with enabling the snapshot
- newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = core.NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
+
defer newchain.Stop()
snaptest.verify(t, newchain, blocks)
@@ -337,7 +365,7 @@ func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
chain.SetHead(snaptest.setHead)
chain.Stop()
- newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := core.NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -368,11 +396,12 @@ func (snaptest *restartCrashSnapshotTest) test(t *testing.T) {
// and state committed.
chain.Stop()
- newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := core.NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
- newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {})
+
+ newBlocks, _ := core.GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *core.BlockGen) {})
newchain.InsertChain(newBlocks)
// Commit the entire snapshot into the disk if requested. Note only
@@ -385,7 +414,7 @@ func (snaptest *restartCrashSnapshotTest) test(t *testing.T) {
// journal and latest state will be committed
// Restart the chain after the crash
- newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = core.NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -414,38 +443,42 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
// and state committed.
chain.Stop()
- config := &CacheConfig{
+ config := &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
}
- newchain, err := NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+
+ newchain, err := core.NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
- newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {})
+
+ newBlocks, _ := core.GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *core.BlockGen) {})
newchain.InsertChain(newBlocks)
newchain.Stop()
// Restart the chain, the wiper should starts working
- config = &CacheConfig{
+ config = &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: false, // Don't wait rebuild
}
- newchain, err = NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+
+ _, err = core.NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
// Simulate the blockchain crash.
- newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = core.NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
+
snaptest.verify(t, newchain, blocks)
}
diff --git a/core/vm/interface.go b/core/vm/interface.go
index ad9b05d666..1064adf590 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -74,6 +74,8 @@ type StateDB interface {
AddPreimage(common.Hash, []byte)
ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error
+
+ Finalise(bool)
}
// CallContext provides a basic interface for the EVM calling conventions. The EVM
diff --git a/docs/README.md b/docs/README.md
index 95ba38b0da..45021e8c7f 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,22 +1,24 @@
# Documentation
-- [Command-line-interface](./cli)
+- [The new command line interface](./cli)
-- [Configuration file](./config.md)
-
-## Deprecation notes
+## Additional notes
- The new entrypoint to run the Bor client is ```server```.
-```
-$ bor server
-```
+ ```
+ $ bor server
+ ```
+
+- The `bor dumpconfig` sub-command prints the default configuration, in TOML format, to the terminal. One can pipe (`>`) this to a file (say `config.toml`) and use it to start bor.
+
+- A TOML file can now be used instead of flags and can contain all the configuration needed for the node to run. To run bor with a configuration file, use the following command.
-- Toml files to configure nodes are being deprecated. Currently, we only allow for static and trusted nodes to be configured using toml files.
+ ```
+ $ bor server --config config.toml
+ ```
-```
-$ bor server --config ./legacy.toml
-```
+- An example config file is available [here](./cli/example_config.toml); it explains what each flag is used for, along with the default and recommended values for different networks.
-- ```Admin```, ```Personal``` and account related endpoints in ```Eth``` are being removed from the JsonRPC interface. Some of this functionality will be moved to the new GRPC server for operational tasks.
+- TOML files used earlier (with the `--config` flag) to configure additional fields (like static and trusted nodes) are being deprecated; those fields have been converted to flags.
diff --git a/docs/cli/README.md b/docs/cli/README.md
index ed8a3f5977..bf37d6ef56 100644
--- a/docs/cli/README.md
+++ b/docs/cli/README.md
@@ -1,24 +1,35 @@
-
-# Command line interface
+# Bor command line interface
## Commands
-- [```server```](./server.md)
+- [```account```](./account.md)
-- [```debug```](./debug.md)
+- [```account import```](./account_import.md)
-- [```account```](./account.md)
+- [```account list```](./account_list.md)
- [```account new```](./account_new.md)
-- [```account list```](./account_list.md)
+- [```attach```](./attach.md)
-- [```account import```](./account_import.md)
+- [```bootnode```](./bootnode.md)
- [```chain```](./chain.md)
- [```chain sethead```](./chain_sethead.md)
+- [```chain watch```](./chain_watch.md)
+
+- [```debug```](./debug.md)
+
+- [```debug block```](./debug_block.md)
+
+- [```debug pprof```](./debug_pprof.md)
+
+- [```dumpconfig```](./dumpconfig.md)
+
+- [```fingerprint```](./fingerprint.md)
+
- [```peers```](./peers.md)
- [```peers add```](./peers_add.md)
@@ -29,8 +40,10 @@
- [```peers status```](./peers_status.md)
-- [```status```](./status.md)
+- [```removedb```](./removedb.md)
-- [```chain watch```](./chain_watch.md)
+- [```server```](./server.md)
+
+- [```status```](./status.md)
-- [```version```](./version.md)
+- [```version```](./version.md)
\ No newline at end of file
diff --git a/docs/cli/account.md b/docs/cli/account.md
index 00fd6cbe65..b3659952f6 100644
--- a/docs/cli/account.md
+++ b/docs/cli/account.md
@@ -1,4 +1,3 @@
-
# Account
The ```account``` command groups actions to interact with accounts:
@@ -7,4 +6,4 @@ The ```account``` command groups actions to interact with accounts:
- [```account list```](./account_list.md): List the wallets in the Bor client.
-- [```account import```](./account_import.md): Import an account to the Bor client.
+- [```account import```](./account_import.md): Import an account to the Bor client.
\ No newline at end of file
diff --git a/docs/cli/account_import.md b/docs/cli/account_import.md
index 2a515866cf..d7b02195bc 100644
--- a/docs/cli/account_import.md
+++ b/docs/cli/account_import.md
@@ -1,4 +1,9 @@
-
# Account import
The ```account import``` command imports an account in Json format to the Bor data directory.
+
+## Options
+
+- ```datadir```: Path of the data directory to store information
+
+- ```keystore```: Path of the directory where keystores are located
\ No newline at end of file
diff --git a/docs/cli/account_list.md b/docs/cli/account_list.md
index 4a2d28de92..61ebf9e776 100644
--- a/docs/cli/account_list.md
+++ b/docs/cli/account_list.md
@@ -1,4 +1,9 @@
-
# Account list
-The ```account list``` command lists all the accounts in the Bor data directory.
+The `account list` command lists all the accounts in the Bor data directory.
+
+## Options
+
+- ```datadir```: Path of the data directory to store information
+
+- ```keystore```: Path of the directory where keystores are located
\ No newline at end of file
diff --git a/docs/cli/account_new.md b/docs/cli/account_new.md
index 41e53767a1..dd62061ba0 100644
--- a/docs/cli/account_new.md
+++ b/docs/cli/account_new.md
@@ -1,4 +1,9 @@
-
# Account new
-The ```account new``` command creates a new local account file on the Bor data directory. Bor should not be running to execute this command.
+The `account new` command creates a new local account file on the Bor data directory. Bor should not be running to execute this command.
+
+## Options
+
+- ```datadir```: Path of the data directory to store information
+
+- ```keystore```: Path of the directory where keystores are located
\ No newline at end of file
diff --git a/docs/cli/attach.md b/docs/cli/attach.md
new file mode 100644
index 0000000000..6e323b8d18
--- /dev/null
+++ b/docs/cli/attach.md
@@ -0,0 +1,11 @@
+# Attach
+
+Connect to remote Bor IPC console.
+
+## Options
+
+- ```exec```: Command to run in remote console
+
+- ```preload```: Comma separated list of JavaScript files to preload into the console
+
+- ```jspath```: JavaScript root path for `loadScript`
\ No newline at end of file
diff --git a/docs/cli/bootnode.md b/docs/cli/bootnode.md
new file mode 100644
index 0000000000..48e933a934
--- /dev/null
+++ b/docs/cli/bootnode.md
@@ -0,0 +1,17 @@
+# Bootnode
+
+## Options
+
+- ```listen-addr```: listening address of bootnode (<ip>:<port>)
+
+- ```v5```: Enable UDP v5
+
+- ```log-level```: Log level (trace|debug|info|warn|error|crit)
+
+- ```nat```: port mapping mechanism (any|none|upnp|pmp|extip:<IP>)
+
+- ```node-key```: file or hex node key
+
+- ```save-key```: path to save the ecdsa private key
+
+- ```dry-run```: validates parameters and prints bootnode configurations, but does not start bootnode
\ No newline at end of file
diff --git a/docs/cli/chain.md b/docs/cli/chain.md
index e55a90f122..8412588e37 100644
--- a/docs/cli/chain.md
+++ b/docs/cli/chain.md
@@ -1,6 +1,7 @@
-
# Chain
The ```chain``` command groups actions to interact with the blockchain in the client:
- [```chain sethead```](./chain_sethead.md): Set the current chain to a certain block.
+
+- [```chain watch```](./chain_watch.md): Watch the chainHead, reorg and fork events in real-time.
\ No newline at end of file
diff --git a/docs/cli/chain_sethead.md b/docs/cli/chain_sethead.md
index 985383988f..bf97990e62 100644
--- a/docs/cli/chain_sethead.md
+++ b/docs/cli/chain_sethead.md
@@ -1,4 +1,3 @@
-
# Chain sethead
The ```chain sethead ``` command sets the current chain to a certain block.
@@ -9,4 +8,6 @@ The ```chain sethead ``` command sets the current chain to a certain blo
## Options
-- ```yes```: Force set head.
+- ```address```: Address of the grpc endpoint
+
+- ```yes```: Force set head
\ No newline at end of file
diff --git a/docs/cli/chain_watch.md b/docs/cli/chain_watch.md
index 4844bb7618..252ac02531 100644
--- a/docs/cli/chain_watch.md
+++ b/docs/cli/chain_watch.md
@@ -1,3 +1,3 @@
# Chain watch
-The ```chain watch``` command is used to view the chainHead, reorg and fork events in real-time.
+The ```chain watch``` command is used to view the chainHead, reorg and fork events in real-time.
\ No newline at end of file
diff --git a/docs/cli/debug.md b/docs/cli/debug.md
index c75ae79e49..a59e465745 100644
--- a/docs/cli/debug.md
+++ b/docs/cli/debug.md
@@ -1,13 +1,10 @@
-
# Debug
The ```bor debug``` command takes a debug dump of the running client.
-## Options
-
-- ```seconds```: Number of seconds to trace cpu and traces.
+- [```bor debug pprof```](./debug_pprof.md): Dumps bor pprof traces.
-- ```output```: Output directory for the data dump.
+- [```bor debug block ```](./debug_block.md): Dumps bor block traces.
## Examples
@@ -15,8 +12,8 @@ By default it creates a tar.gz file with the output:
```
$ bor debug
-Starting debugger...
-
+Starting debugger...
+
Created debug archive: bor-debug-2021-10-26-073819Z.tar.gz
```
@@ -27,4 +24,4 @@ $ bor debug --output data
Starting debugger...
Created debug directory: data/bor-debug-2021-10-26-075437Z
-```
+```
\ No newline at end of file
diff --git a/docs/cli/debug_block.md b/docs/cli/debug_block.md
new file mode 100644
index 0000000000..ced7e482ee
--- /dev/null
+++ b/docs/cli/debug_block.md
@@ -0,0 +1,9 @@
+# Debug trace
+
+The ```bor debug block ``` command will create an archive containing traces of a bor block.
+
+## Options
+
+- ```address```: Address of the grpc endpoint
+
+- ```output```: Output directory
\ No newline at end of file
diff --git a/docs/cli/debug_pprof.md b/docs/cli/debug_pprof.md
new file mode 100644
index 0000000000..86a84b6065
--- /dev/null
+++ b/docs/cli/debug_pprof.md
@@ -0,0 +1,11 @@
+# Debug Pprof
+
+The ```debug pprof ``` command will create an archive containing bor pprof traces.
+
+## Options
+
+- ```address```: Address of the grpc endpoint
+
+- ```seconds```: seconds to trace
+
+- ```output```: Output directory
\ No newline at end of file
diff --git a/docs/cli/dumpconfig.md b/docs/cli/dumpconfig.md
new file mode 100644
index 0000000000..0383c47310
--- /dev/null
+++ b/docs/cli/dumpconfig.md
@@ -0,0 +1,3 @@
+# Dumpconfig
+
+The ```bor dumpconfig ``` command will export the user-provided flags into a configuration file.
\ No newline at end of file
diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml
new file mode 100644
index 0000000000..64ef60ae12
--- /dev/null
+++ b/docs/cli/example_config.toml
@@ -0,0 +1,147 @@
+# This configuration file is for reference and learning purposes only.
+# The default values of the flags are provided below (except for a few flags which have custom defaults; these are explicitly mentioned).
+# Recommended values for mainnet and/or mumbai are also provided.
+
+chain = "mainnet" # Name of the chain to sync ("mumbai", "mainnet") or path to a genesis file
+identity = "Annon-Identity" # Name/Identity of the node (default = OS hostname)
+log-level = "INFO" # Set log level for the server
+datadir = "var/lib/bor" # Path of the data directory to store information
+ancient = "" # Data directory for ancient chain segments (default = inside chaindata)
+keystore = "" # Path of the directory where keystores are located
+syncmode = "full" # Blockchain sync mode (only "full" sync supported)
+gcmode = "full" # Blockchain garbage collection mode ("full", "archive")
+snapshot = true # Enables the snapshot-database mode
+"bor.logs" = false # Enables bor log retrieval
+ethstats = "" # Reporting URL of a ethstats service (nodename:secret@host:port)
+
+["eth.requiredblocks"] # Comma separated block number-to-hash mappings to require for peering (=) (default = empty map)
+ "31000000" = "0x2087b9e2b353209c2c21e370c82daa12278efd0fe5f0febe6c29035352cf050e"
+ "32000000" = "0x875500011e5eecc0c554f95d07b31cf59df4ca2505f4dbbfffa7d4e4da917c68"
+
+[p2p]
+ maxpeers = 50 # Maximum number of network peers (network disabled if set to 0)
+ maxpendpeers = 50 # Maximum number of pending connection attempts
+ bind = "0.0.0.0" # Network binding address
+ port = 30303 # Network listening port
+ nodiscover = false # Disables the peer discovery mechanism (manual peer addition)
+ nat = "any" # NAT port mapping mechanism (any|none|upnp|pmp|extip:)
+ [p2p.discovery]
+ v5disc = false # Enables the experimental RLPx V5 (Topic Discovery) mechanism
+ bootnodes = [] # Comma separated enode URLs for P2P discovery bootstrap
+ bootnodesv4 = [] # List of initial v4 bootnodes
+ bootnodesv5 = [] # List of initial v5 bootnodes
+ static-nodes = [] # List of static nodes
+ trusted-nodes = [] # List of trusted nodes
+ dns = [] # List of enrtree:// URLs which will be queried for nodes to connect to
+
+[heimdall]
+ url = "http://localhost:1317" # URL of Heimdall service
+ "bor.without" = false # Run without Heimdall service (for testing purpose)
+ grpc-address = "" # Address of Heimdall gRPC service
+
+[txpool]
+ locals = [] # Comma separated accounts to treat as locals (no flush, priority inclusion)
+ nolocals = false # Disables price exemptions for locally submitted transactions
+ journal = "transactions.rlp" # Disk journal for local transaction to survive node restarts
+ rejournal = "1h0m0s" # Time interval to regenerate the local transaction journal
+ pricelimit = 1 # Minimum gas price limit to enforce for acceptance into the pool (mainnet = 30000000000)
+ pricebump = 10 # Price bump percentage to replace an already existing transaction
+ accountslots = 16 # Minimum number of executable transaction slots guaranteed per account
+ globalslots = 32768 # Maximum number of executable transaction slots for all accounts
+ accountqueue = 16 # Maximum number of non-executable transaction slots permitted per account
+ globalqueue = 32768 # Maximum number of non-executable transaction slots for all accounts
+ lifetime = "3h0m0s" # Maximum amount of time non-executable transaction are queued
+
+[miner]
+ mine = false # Enable mining
+ etherbase = "" # Public address for block mining rewards
+ extradata = "" # Block extra data set by the miner (default = client version)
+ gaslimit = 30000000 # Target gas ceiling for mined blocks
+ gasprice = "1000000000" # Minimum gas price for mining a transaction (recommended for mainnet = 30000000000, default suitable for mumbai/devnet)
+
+[jsonrpc]
+ ipcdisable = false # Disable the IPC-RPC server
+ ipcpath = "" # Filename for IPC socket/pipe within the datadir (explicit paths escape it)
+ gascap = 50000000 # Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)
+ txfeecap = 5.0 # Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)
+ [jsonrpc.http]
+ enabled = false # Enable the HTTP-RPC server
+ port = 8545 # http.port
+ prefix = "" # http.rpcprefix
+ host = "localhost" # HTTP-RPC server listening interface
+ api = ["eth", "net", "web3", "txpool", "bor"] # API's offered over the HTTP-RPC interface
+ vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.
+ corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced)
+ [jsonrpc.ws]
+ enabled = false # Enable the WS-RPC server
+ port = 8546 # WS-RPC server listening port
+ prefix = "" # HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.
+ host = "localhost" # ws.addr
+ api = ["net", "web3"] # API's offered over the WS-RPC interface
+ origins = ["localhost"] # Origins from which to accept websockets requests
+ [jsonrpc.graphql]
+ enabled = false # Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.
+ port = 0 #
+ prefix = "" #
+ host = "" #
+ vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.
+ corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced)
+ [jsonrpc.timeouts]
+ read = "30s"
+ write = "30s"
+ idle = "2m0s"
+
+[gpo]
+ blocks = 20 # Number of recent blocks to check for gas prices
+ percentile = 60 # Suggested gas price is the given percentile of a set of recent transaction gas prices
+ maxprice = "5000000000000" # Maximum gas price will be recommended by gpo
+ ignoreprice = "2" # Gas price below which gpo will ignore transactions (recommended for mainnet = 30000000000, default suitable for mumbai/devnet)
+
+[telemetry]
+ metrics = false # Enable metrics collection and reporting
+ expensive = false # Enable expensive metrics collection and reporting
+ prometheus-addr = "127.0.0.1:7071" # Address for Prometheus Server
+ opencollector-endpoint = "127.0.0.1:4317" # OpenCollector Endpoint (host:port)
+ [telemetry.influx]
+ influxdb = false # Enable metrics export/push to an external InfluxDB database (v1)
+ endpoint = "" # InfluxDB API endpoint to report metrics to
+ database = "" # InfluxDB database name to push reported metrics to
+ username = "" # Username to authorize access to the database
+ password = "" # Password to authorize access to the database
+ influxdbv2 = false # Enable metrics export/push to an external InfluxDB v2 database
+ token = "" # Token to authorize access to the database (v2 only)
+ bucket = "" # InfluxDB bucket name to push reported metrics to (v2 only)
+ organization = "" # InfluxDB organization name (v2 only)
+ [telemetry.influx.tags] # Comma-separated InfluxDB tags (key/values) attached to all measurements
+ cloud = "aws"
+ host = "annon-host"
+ ip = "99.911.221.66"
+ region = "us-north-1"
+
+[cache]
+ cache = 1024 # Megabytes of memory allocated to internal caching (recommended for mainnet = 4096, default suitable for mumbai/devnet)
+ gc = 25 # Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)
+ snapshot = 10 # Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode)
+ database = 50 # Percentage of cache memory allowance to use for database io
+ trie = 15 # Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)
+ journal = "triecache" # Disk journal directory for trie cache to survive node restarts
+ rejournal = "1h0m0s" # Time interval to regenerate the trie cache journal
+ noprefetch = false # Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data)
+ preimages = false # Enable recording the SHA3/keccak preimages of trie keys
+ txlookuplimit = 2350000 # Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain)
+ triesinmemory = 128 # Number of block states (tries) to keep in memory
+ timeout = "1h0m0s" # Time after which the Merkle Patricia Trie is stored to disc from memory
+
+[accounts]
+ unlock = [] # Comma separated list of accounts to unlock
+ password = "" # Password file to use for non-interactive password input
+ allow-insecure-unlock = false # Allow insecure account unlocking when account-related RPCs are exposed by http
+ lightkdf = false # Reduce key-derivation RAM & CPU usage at some expense of KDF strength
+ disable-bor-wallet = true # Disable the personal wallet endpoints
+
+[grpc]
+ addr = ":3131" # Address and port to bind the GRPC server
+
+[developer]
+ dev = false # Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled
+ period = 0 # Block period to use in developer mode (0 = mine only if transaction pending)
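
To illustrate how the sectioned layout above maps onto code, here is a minimal, self-contained sketch that decodes a few of these fields with a generic TOML library. The library choice and struct names are assumptions for illustration, not necessarily what bor uses internally.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml" // assumed generic TOML parser, not necessarily bor's
)

// Partial mirror of the example config above; only a handful of fields are
// shown, purely to demonstrate how sections map onto nested structs.
type exampleConfig struct {
	Chain   string `toml:"chain"`
	DataDir string `toml:"datadir"`

	P2P struct {
		MaxPeers int `toml:"maxpeers"`
		Port     int `toml:"port"`
	} `toml:"p2p"`

	JSONRPC struct {
		HTTP struct {
			Enabled bool `toml:"enabled"`
			Port    int  `toml:"port"`
		} `toml:"http"`
	} `toml:"jsonrpc"`
}

func main() {
	var cfg exampleConfig
	if _, err := toml.DecodeFile("config.toml", &cfg); err != nil {
		panic(err)
	}

	fmt.Println("chain:", cfg.Chain, "maxpeers:", cfg.P2P.MaxPeers, "http port:", cfg.JSONRPC.HTTP.Port)
}
```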
diff --git a/docs/cli/fingerprint.md b/docs/cli/fingerprint.md
new file mode 100644
index 0000000000..8bb7cb1542
--- /dev/null
+++ b/docs/cli/fingerprint.md
@@ -0,0 +1,3 @@
+# Fingerprint
+
+Display the system fingerprint
\ No newline at end of file
diff --git a/docs/cli/peers.md b/docs/cli/peers.md
index 57b4889a00..ac6dfd676e 100644
--- a/docs/cli/peers.md
+++ b/docs/cli/peers.md
@@ -1,4 +1,3 @@
-
# Peers
The ```peers``` command groups actions to interact with peers:
@@ -9,4 +8,4 @@ The ```peers``` command groups actions to interact with peers:
- [```peers remove```](./peers_remove.md): Disconnects the local client from a connected peer if exists.
-- [```peers status```](./peers_status.md): Display the status of a peer by its id.
+- [```peers status```](./peers_status.md): Display the status of a peer by its id.
\ No newline at end of file
diff --git a/docs/cli/peers_add.md b/docs/cli/peers_add.md
index 35e5ece450..5bc4ed1448 100644
--- a/docs/cli/peers_add.md
+++ b/docs/cli/peers_add.md
@@ -1,8 +1,9 @@
-
# Peers add
The ```peers add ``` command joins the local client to another remote peer.
-## Arguments
+## Options
+
+- ```address```: Address of the grpc endpoint
-- ```trusted```: Whether the peer is added as a trusted peer.
+- ```trusted```: Add the peer as a trusted peer
\ No newline at end of file
diff --git a/docs/cli/peers_list.md b/docs/cli/peers_list.md
index cb1ef2f599..41f398b764 100644
--- a/docs/cli/peers_list.md
+++ b/docs/cli/peers_list.md
@@ -1,4 +1,7 @@
-
-# Peers list
+# Peers list
The ```peers list``` command lists the connected peers.
+
+## Options
+
+- ```address```: Address of the grpc endpoint
\ No newline at end of file
diff --git a/docs/cli/peers_remove.md b/docs/cli/peers_remove.md
index e2fa0a71df..2cac1e7656 100644
--- a/docs/cli/peers_remove.md
+++ b/docs/cli/peers_remove.md
@@ -1,4 +1,9 @@
-
# Peers remove
The ```peers remove ``` command disconnects the local client from a connected peer if exists.
+
+## Options
+
+- ```address```: Address of the grpc endpoint
+
+- ```trusted```: Add the peer as a trusted peer
\ No newline at end of file
diff --git a/docs/cli/peers_status.md b/docs/cli/peers_status.md
index 56343c8d6b..65a0fe9d8f 100644
--- a/docs/cli/peers_status.md
+++ b/docs/cli/peers_status.md
@@ -1,4 +1,7 @@
-
# Peers status
The ```peers status ``` command displays the status of a peer by its id.
+
+## Options
+
+- ```address```: Address of the grpc endpoint
\ No newline at end of file
diff --git a/docs/cli/removedb.md b/docs/cli/removedb.md
new file mode 100644
index 0000000000..473d47ecef
--- /dev/null
+++ b/docs/cli/removedb.md
@@ -0,0 +1,9 @@
+# RemoveDB
+
+The ```bor removedb``` command will remove the blockchain and state databases at the given datadir location.
+
+## Options
+
+- ```address```: Address of the grpc endpoint
+
+- ```datadir```: Path of the data directory to store information
\ No newline at end of file
diff --git a/docs/cli/server.md b/docs/cli/server.md
index 5a3e7e1052..4c291a74b9 100644
--- a/docs/cli/server.md
+++ b/docs/cli/server.md
@@ -1,133 +1,124 @@
-
# Server
The ```bor server``` command runs the Bor client.
-## General Options
+## Options
-- ```chain```: Name of the chain to sync (mainnet or mumbai).
+- ```chain```: Name of the chain to sync ('mumbai', 'mainnet') or path to a genesis file
-- ```log-level```: Set log level for the server (info, warn, debug, trace).
+- ```identity```: Name/Identity of the node
-- ```datadir```: Path of the data directory to store information (defaults to $HOME).
+- ```log-level```: Set log level for the server
-- ```config```: List of files that contain the configuration.
+- ```datadir```: Path of the data directory to store information
-- ```syncmode```: Blockchain sync mode ("fast", "full", "snap" or "light").
+- ```keystore```: Path of the directory where keystores are located
-- ```gcmode```: Blockchain garbage collection mode ("full", "archive").
+- ```config```: Path of the config file (TOML format)
-- ```whitelist```: Comma separated block number-to-hash mappings to enforce (=).
+- ```syncmode```: Blockchain sync mode (only "full" sync supported)
-- ```snapshot```: Enables snapshot-database mode (default = enable).
+- ```gcmode```: Blockchain garbage collection mode ("full", "archive")
-- ```bor.heimdall```: URL of Heimdall service.
+- ```eth.requiredblocks```: Comma separated block number-to-hash mappings to require for peering (<number>=<hash>)
-- ```bor.withoutheimdall```: Run without Heimdall service (for testing purpose).
+- ```snapshot```: Enables the snapshot-database mode (default = true)
-- ```ethstats```: Reporting URL of a ethstats service (nodename:secret@host:port).
+- ```bor.logs```: Enables bor log retrieval (default = false)
-- ```gpo.blocks```: Number of recent blocks to check for gas prices.
+- ```bor.heimdall```: URL of Heimdall service
-- ```gpo.percentile```: Suggested gas price is the given percentile of a set of recent transaction gas prices.
+- ```bor.withoutheimdall```: Run without Heimdall service (for testing purpose)
-- ```gpo.maxprice```: Maximum gas price will be recommended by gpo.
+- ```ethstats```: Reporting URL of an ethstats service (nodename:secret@host:port)
-- ```gpo.ignoreprice```: Gas price below which gpo will ignore transactions.
+- ```gpo.blocks```: Number of recent blocks to check for gas prices
-- ```grpc.addr```: Address and port to bind the GRPC server.
+- ```gpo.percentile```: Suggested gas price is the given percentile of a set of recent transaction gas prices
-### Transaction Pool Options
+- ```gpo.maxprice```: Maximum gas price that will be recommended by gpo
-- ```txpool.locals```: Comma separated accounts to treat as locals (no flush, priority inclusion).
+- ```gpo.ignoreprice```: Gas price below which gpo will ignore transactions
-- ```txpool.nolocals```: Disables price exemptions for locally submitted transactions
+- ```disable-bor-wallet```: Disable the personal wallet endpoints
-- ```txpool.journal```: Disk journal for local transaction to survive node restarts
+- ```grpc.addr```: Address and port to bind the GRPC server
-- ```txpool.rejournal```: Time interval to regenerate the local transaction journal
+- ```dev```: Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled
-- ```txpool.pricelimit```: Minimum gas price limit to enforce for acceptance into the pool
+- ```dev.period```: Block period to use in developer mode (0 = mine only if transaction pending)
-- ```txpool.pricebump```: Price bump percentage to replace an already existing transaction
-
-- ```txpool.accountslots```: Minimum number of executable transaction slots guaranteed per account
-
-- ```txpool.globalslots```: Maximum number of executable transaction slots for all accounts
+### Account Management Options
-- ```txpool.accountqueue```: Maximum number of non-executable transaction slots permitted per account
+- ```unlock```: Comma separated list of accounts to unlock
-- ```txpool.globalqueue```: Maximum number of non-executable transaction slots for all accounts
+- ```password```: Password file to use for non-interactive password input
-- ```txpool.lifetime```: Maximum amount of time non-executable transaction are queued
+- ```allow-insecure-unlock```: Allow insecure account unlocking when account-related RPCs are exposed by http
-### Sealer Options
+- ```lightkdf```: Reduce key-derivation RAM & CPU usage at some expense of KDF strength
-- ```mine```: Enable sealing.
+### Cache Options
-- ```miner.etherbase```: Public address for block mining rewards (default = first account)
+- ```cache```: Megabytes of memory allocated to internal caching (default = 4096 mainnet full node)
-- ```miner.extradata```: Block extra data set by the miner (default = client version).
+- ```cache.database```: Percentage of cache memory allowance to use for database io
-- ```miner.gaslimit```: Target gas ceiling for mined blocks.
+- ```cache.trie```: Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)
-- ```miner.gasprice```: Minimum gas price for mining a transaction.
+- ```cache.trie.journal```: Disk journal directory for trie cache to survive node restarts
-### Cache Options
+- ```cache.trie.rejournal```: Time interval to regenerate the trie cache journal
-- ```cache```: Megabytes of memory allocated to internal caching (default = 4096 mainnet full node).
+- ```cache.gc```: Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)
-- ```cache.database```: Percentage of cache memory allowance to use for database io.
+- ```cache.snapshot```: Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode)
-- ```cache.trie```: Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode).
+- ```cache.noprefetch```: Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data)
-- ```cache.trie.journal```: Disk journal directory for trie cache to survive node restarts.
+- ```cache.preimages```: Enable recording the SHA3/keccak preimages of trie keys
-- ```cache.trie.rejournal```: Time interval to regenerate the trie cache journal.
+- ```txlookuplimit```: Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain)
-- ```cache.gc```: Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode).
+### JsonRPC Options
-- ```cache.snapshot```: Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode).
+- ```rpc.gascap```: Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)
-- ```cache.noprefetch```: Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data).
+- ```rpc.txfeecap```: Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)
-- ```cache.preimages```: Enable recording the SHA3/keccak preimages of trie keys.
+- ```ipcdisable```: Disable the IPC-RPC server
-- ```txlookuplimit```: Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain).
+- ```ipcpath```: Filename for IPC socket/pipe within the datadir (explicit paths escape it)
-### JsonRPC Options
+- ```http.corsdomain```: Comma separated list of domains from which to accept cross origin requests (browser enforced)
-- ```rpc.gascap```: Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite).
+- ```http.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.
-- ```rpc.txfeecap```: Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap).
+- ```ws.origins```: Origins from which to accept websockets requests
-- ```ipcdisable```: Disable the IPC-RPC server.
+- ```graphql.corsdomain```: Comma separated list of domains from which to accept cross origin requests (browser enforced)
-- ```ipcpath```: Filename for IPC socket/pipe within the datadir (explicit paths escape it).
+- ```graphql.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.
-- ```jsonrpc.corsdomain```: Comma separated list of domains from which to accept cross.
+- ```http```: Enable the HTTP-RPC server
-- ```jsonrpc.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.
+- ```http.addr```: HTTP-RPC server listening interface
-- ```http```: Enable the HTTP-RPC server.
+- ```http.port```: HTTP-RPC server listening port
-- ```http.addr```: HTTP-RPC server listening interface.
-
-- ```http.port```: HTTP-RPC server listening port.
-
- ```http.rpcprefix```: HTTP path path prefix on which JSON-RPC is served. Use '/' to serve on all paths.
-- ```http.modules```: API's offered over the HTTP-RPC interface.
+- ```http.api```: API's offered over the HTTP-RPC interface
-- ```ws```: Enable the WS-RPC server.
+- ```ws```: Enable the WS-RPC server
-- ```ws.addr```: WS-RPC server listening interface.
+- ```ws.addr```: WS-RPC server listening interface
-- ```ws.port```: WS-RPC server listening port.
+- ```ws.port```: WS-RPC server listening port
- ```ws.rpcprefix```: HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.
-- ```ws.modules```: API's offered over the WS-RPC interface.
+- ```ws.api```: API's offered over the WS-RPC interface
- ```graphql```: Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.
@@ -139,56 +130,78 @@ The ```bor server``` command runs the Bor client.
- ```bootnodes```: Comma separated enode URLs for P2P discovery bootstrap
-- ```maxpeers```: "Maximum number of network peers (network disabled if set to 0)
+- ```maxpeers```: Maximum number of network peers (network disabled if set to 0)
+
+- ```maxpendpeers```: Maximum number of pending connection attempts
-- ```maxpendpeers```: Maximum number of pending connection attempts (defaults used if set to 0)
+- ```nat```: NAT port mapping mechanism (any|none|upnp|pmp|extip:<IP>)
-- ```nat```: "NAT port mapping mechanism (any|none|upnp|pmp|extip:)
+- ```nodiscover```: Disables the peer discovery mechanism (manual peer addition)
-- ```nodiscover```: "Disables the peer discovery mechanism (manual peer addition)
+- ```v5disc```: Enables the experimental RLPx V5 (Topic Discovery) mechanism
+
+### Sealer Options
-- ```v5disc```: "Enables the experimental RLPx V5 (Topic Discovery) mechanism
+- ```mine```: Enable mining
+
+- ```miner.etherbase```: Public address for block mining rewards
+
+- ```miner.extradata```: Block extra data set by the miner (default = client version)
+
+- ```miner.gaslimit```: Target gas ceiling (gas limit) for mined blocks
+
+- ```miner.gasprice```: Minimum gas price for mining a transaction
### Telemetry Options
-- ```metrics```: Enable metrics collection and reporting.
+- ```metrics```: Enable metrics collection and reporting
-- ```metrics.expensive```: Enable expensive metrics collection and reporting.
+- ```metrics.expensive```: Enable expensive metrics collection and reporting
-- ```metrics.influxdb```: Enable metrics export/push to an external InfluxDB database (v1).
+- ```metrics.influxdb```: Enable metrics export/push to an external InfluxDB database (v1)
-- ```metrics.influxdb.endpoint```: InfluxDB API endpoint to report metrics to.
+- ```metrics.influxdb.endpoint```: InfluxDB API endpoint to report metrics to
-- ```metrics.influxdb.database```: InfluxDB database name to push reported metrics to.
+- ```metrics.influxdb.database```: InfluxDB database name to push reported metrics to
-- ```metrics.influxdb.username```: Username to authorize access to the database.
+- ```metrics.influxdb.username```: Username to authorize access to the database
-- ```metrics.influxdb.password```: Password to authorize access to the database.
+- ```metrics.influxdb.password```: Password to authorize access to the database
-- ```metrics.influxdb.tags```: Comma-separated InfluxDB tags (key/values) attached to all measurements.
+- ```metrics.influxdb.tags```: Comma-separated InfluxDB tags (key/values) attached to all measurements
-- ```metrics.influxdbv2```: Enable metrics export/push to an external InfluxDB v2 database.
+- ```metrics.prometheus-addr```: Address for Prometheus Server
-- ```metrics.influxdb.token```: Token to authorize access to the database (v2 only).
+- ```metrics.opencollector-endpoint```: OpenCollector Endpoint (host:port)
-- ```metrics.influxdb.bucket```: InfluxDB bucket name to push reported metrics to (v2 only).
+- ```metrics.influxdbv2```: Enable metrics export/push to an external InfluxDB v2 database
-- ```metrics.influxdb.organization```: InfluxDB organization name (v2 only).
+- ```metrics.influxdb.token```: Token to authorize access to the database (v2 only)
-### Account Management Options
+- ```metrics.influxdb.bucket```: InfluxDB bucket name to push reported metrics to (v2 only)
-- ```unlock```: "Comma separated list of accounts to unlock.
+- ```metrics.influxdb.organization```: InfluxDB organization name (v2 only)
-- ```password```: Password file to use for non-interactive password input.
+### Transaction Pool Options
-- ```allow-insecure-unlock```: Allow insecure account unlocking when account-related RPCs are exposed by http.
+- ```txpool.locals```: Comma separated accounts to treat as locals (no flush, priority inclusion)
-- ```lightkdf```: Reduce key-derivation RAM & CPU usage at some expense of KDF strength.
+- ```txpool.nolocals```: Disables price exemptions for locally submitted transactions
-## Usage
+- ```txpool.journal```: Disk journal for local transactions to survive node restarts
-Use multiple files to configure the client:
+- ```txpool.rejournal```: Time interval to regenerate the local transaction journal
+
+- ```txpool.pricelimit```: Minimum gas price limit to enforce for acceptance into the pool
+
+- ```txpool.pricebump```: Price bump percentage to replace an already existing transaction
+
+- ```txpool.accountslots```: Minimum number of executable transaction slots guaranteed per account
+
+- ```txpool.globalslots```: Maximum number of executable transaction slots for all accounts
+
+- ```txpool.accountqueue```: Maximum number of non-executable transaction slots permitted per account
+
+- ```txpool.globalqueue```: Maximum number of non-executable transaction slots for all accounts
-```
-$ bor server --config ./legacy-config.toml --config ./config2.hcl
-```
+- ```txpool.lifetime```: Maximum amount of time non-executable transactions are queued
\ No newline at end of file
diff --git a/docs/cli/status.md b/docs/cli/status.md
index fb7bddb182..9fa8e25905 100644
--- a/docs/cli/status.md
+++ b/docs/cli/status.md
@@ -1,4 +1,3 @@
-
# Status
-The ```status``` command outputs the status of the client.
+The ```status``` command outputs the status of the client.
\ No newline at end of file
diff --git a/docs/cli/version.md b/docs/cli/version.md
index 156bdf607c..87e7cf44a8 100644
--- a/docs/cli/version.md
+++ b/docs/cli/version.md
@@ -1,4 +1,3 @@
-
# Version
The ```bor version``` command outputs the version of the binary.
@@ -8,4 +7,4 @@ The ```bor version``` command outputs the version of the binary.
```
$ bor version
0.2.9-stable
-```
+```
\ No newline at end of file
diff --git a/docs/config.md b/docs/config.md
deleted file mode 100644
index 4f4dec157b..0000000000
--- a/docs/config.md
+++ /dev/null
@@ -1,133 +0,0 @@
-
-# Config
-
-Toml files format used in geth are being deprecated.
-
-Bor uses uses JSON and [HCL](https://github.com/hashicorp/hcl) formats to create configuration files. This is the format in HCL alongside the default values:
-
-```
-chain = "mainnet"
-log-level = "info"
-data-dir = ""
-sync-mode = "fast"
-gc-mode = "full"
-snapshot = true
-ethstats = ""
-whitelist = {}
-
-p2p {
- max-peers = 30
- max-pend-peers = 50
- bind = "0.0.0.0"
- port = 30303
- no-discover = false
- nat = "any"
- discovery {
- v5-enabled = false
- bootnodes = []
- bootnodesv4 = []
- bootnodesv5 = []
- staticNodes = []
- trustedNodes = []
- dns = []
- }
-}
-
-heimdall {
- url = "http://localhost:1317"
- without = false
-}
-
-txpool {
- locals = []
- no-locals = false
- journal = ""
- rejournal = "1h"
- price-limit = 1
- price-bump = 10
- account-slots = 16
- global-slots = 4096
- account-queue = 64
- global-queue = 1024
- lifetime = "3h"
-}
-
-sealer {
- enabled = false
- etherbase = ""
- gas-ceil = 8000000
- extra-data = ""
-}
-
-gpo {
- blocks = 20
- percentile = 60
-}
-
-jsonrpc {
- ipc-disable = false
- ipc-path = ""
- modules = ["web3", "net"]
- cors = ["*"]
- vhost = ["*"]
-
- http {
- enabled = false
- port = 8545
- prefix = ""
- host = "localhost"
- }
-
- ws {
- enabled = false
- port = 8546
- prefix = ""
- host = "localhost"
- }
-
- graphqh {
- enabled = false
- }
-}
-
-telemetry {
- enabled = false
- expensive = false
-
- influxdb {
- v1-enabled = false
- endpoint = ""
- database = ""
- username = ""
- password = ""
- v2-enabled = false
- token = ""
- bucket = ""
- organization = ""
- }
-}
-
-cache {
- cache = 1024
- perc-database = 50
- perc-trie = 15
- perc-gc = 25
- perc-snapshot = 10
- journal = "triecache"
- rejournal = "60m"
- no-prefetch = false
- preimages = false
- tx-lookup-limit = 2350000
-}
-
-accounts {
- unlock = []
- password-file = ""
- allow-insecure-unlock = false
- use-lightweight-kdf = false
-}
-
-grpc {
- addr = ":3131"
-}
-```
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 6577ac1e1a..c33f3cf6f2 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -359,3 +359,11 @@ func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, re
func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) {
return b.eth.stateAtTransaction(block, txIndex, reexec)
}
+
+func (b *EthAPIBackend) GetCheckpointWhitelist() map[uint64]common.Hash {
+ return b.eth.Downloader().ChainValidator.GetCheckpointWhitelist()
+}
+
+func (b *EthAPIBackend) PurgeCheckpointWhitelist() {
+ b.eth.Downloader().ChainValidator.PurgeCheckpointWhitelist()
+}
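
These two backend methods simply forward to the downloader's `ChainValidator`, whose implementation is not part of this hunk. As a rough mental model only (the real structure may well differ), a checkpoint whitelist of this kind can be sketched as a mutex-guarded number-to-hash map:

```go
package main

import (
	"fmt"
	"sync"
)

// Rough sketch of a checkpoint whitelist: a block-number -> block-hash map
// with concurrent-safe read and purge operations. Illustrative only; the
// real ChainValidator lives in the downloader and may be structured differently.
type checkpointWhitelist struct {
	mu          sync.RWMutex
	checkpoints map[uint64]string // hashes kept as hex strings here for brevity
}

func newCheckpointWhitelist() *checkpointWhitelist {
	return &checkpointWhitelist{checkpoints: make(map[uint64]string)}
}

func (w *checkpointWhitelist) Put(number uint64, hash string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.checkpoints[number] = hash
}

// GetCheckpointWhitelist returns a copy so callers cannot mutate internal state.
func (w *checkpointWhitelist) GetCheckpointWhitelist() map[uint64]string {
	w.mu.RLock()
	defer w.mu.RUnlock()

	out := make(map[uint64]string, len(w.checkpoints))
	for n, h := range w.checkpoints {
		out[n] = h
	}
	return out
}

// PurgeCheckpointWhitelist drops all whitelisted checkpoints.
func (w *checkpointWhitelist) PurgeCheckpointWhitelist() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.checkpoints = make(map[uint64]string)
}

func main() {
	w := newCheckpointWhitelist()
	w.Put(31000000, "0xabc") // placeholder hash, not a real checkpoint
	fmt.Println("entries:", len(w.GetCheckpointWhitelist()))
	w.PurgeCheckpointWhitelist()
	fmt.Println("entries after purge:", len(w.GetCheckpointWhitelist()))
}
```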
diff --git a/eth/backend.go b/eth/backend.go
index 05d6c5c927..0b8a956cfa 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -82,6 +82,7 @@ type Ethereum struct {
eventMux *event.TypeMux
engine consensus.Engine
accountManager *accounts.Manager
+ authorized bool // If consensus engine is authorized with keystore
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
bloomIndexer *core.ChainIndexer // Bloom indexer operating during block imports
@@ -100,6 +101,8 @@ type Ethereum struct {
lock sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)
+ closeCh chan struct{} // Channel to signal the background processes to exit
+
shutdownTracker *shutdowncheck.ShutdownTracker // Tracks if and when the node has shutdown ungracefully
}
@@ -153,6 +156,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
chainDb: chainDb,
eventMux: stack.EventMux(),
accountManager: stack.AccountManager(),
+ authorized: false,
engine: nil,
closeBloomHandler: make(chan struct{}),
networkID: config.NetworkId,
@@ -161,6 +165,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms),
p2pServer: stack.Server(),
+ closeCh: make(chan struct{}),
shutdownTracker: shutdowncheck.NewShutdownTracker(chainDb),
}
@@ -181,7 +186,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// END: Bor changes
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
- var dbVer = ""
+ dbVer := ""
if bcVersion != nil {
dbVer = fmt.Sprintf("%d", *bcVersion)
}
@@ -252,6 +257,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
BloomCache: uint64(cacheLimit),
EventMux: eth.eventMux,
Checkpoint: checkpoint,
+ EthAPI: ethAPI,
PeerRequiredBlocks: config.PeerRequiredBlocks,
}); err != nil {
return nil, err
@@ -469,8 +475,10 @@ func (s *Ethereum) StartMining(threads int) error {
if threads == 0 {
threads = -1 // Disable the miner from within
}
+
th.SetThreads(threads)
}
+
// If the miner was not running, initialize it
if !s.IsMining() {
// Propagate the initial price point to the transaction pool
@@ -483,31 +491,43 @@ func (s *Ethereum) StartMining(threads int) error {
eb, err := s.Etherbase()
if err != nil {
log.Error("Cannot start mining without etherbase", "err", err)
+
return fmt.Errorf("etherbase missing: %v", err)
}
- var cli *clique.Clique
- if c, ok := s.engine.(*clique.Clique); ok {
- cli = c
- } else if cl, ok := s.engine.(*beacon.Beacon); ok {
- if c, ok := cl.InnerEngine().(*clique.Clique); ok {
+
+ // If personal endpoints are disabled, the server creating
+ // this Ethereum instance has already Authorized consensus.
+ if !s.authorized {
+ var cli *clique.Clique
+ if c, ok := s.engine.(*clique.Clique); ok {
cli = c
+ } else if cl, ok := s.engine.(*beacon.Beacon); ok {
+ if c, ok := cl.InnerEngine().(*clique.Clique); ok {
+ cli = c
+ }
}
- }
- if cli != nil {
- wallet, err := s.accountManager.Find(accounts.Account{Address: eb})
- if wallet == nil || err != nil {
- log.Error("Etherbase account unavailable locally", "err", err)
- return fmt.Errorf("signer missing: %v", err)
+
+ if cli != nil {
+ wallet, err := s.accountManager.Find(accounts.Account{Address: eb})
+ if wallet == nil || err != nil {
+ log.Error("Etherbase account unavailable locally", "err", err)
+
+ return fmt.Errorf("signer missing: %v", err)
+ }
+
+ cli.Authorize(eb, wallet.SignData)
}
- cli.Authorize(eb, wallet.SignData)
- }
- if bor, ok := s.engine.(*bor.Bor); ok {
- wallet, err := s.accountManager.Find(accounts.Account{Address: eb})
- if wallet == nil || err != nil {
- log.Error("Etherbase account unavailable locally", "err", err)
- return fmt.Errorf("signer missing: %v", err)
+
+ if bor, ok := s.engine.(*bor.Bor); ok {
+ wallet, err := s.accountManager.Find(accounts.Account{Address: eb})
+ if wallet == nil || err != nil {
+ log.Error("Etherbase account unavailable locally", "err", err)
+
+ return fmt.Errorf("signer missing: %v", err)
+ }
+
+ bor.Authorize(eb, wallet.SignData)
}
- bor.Authorize(eb, wallet.SignData)
}
// If mining is started, we can disable the transaction rejection mechanism
// introduced to speed sync times.
@@ -515,6 +535,7 @@ func (s *Ethereum) StartMining(threads int) error {
go s.miner.Start(eb)
}
+
return nil
}
@@ -553,6 +574,14 @@ func (s *Ethereum) SyncMode() downloader.SyncMode {
return mode
}
+// SetAuthorized sets the authorized boolean flag
+// denoting that the consensus engine was authorized during creation
+func (s *Ethereum) SetAuthorized(authorized bool) {
+ s.lock.Lock()
+ s.authorized = authorized
+ s.lock.Unlock()
+}
+
// Protocols returns all the currently configured
// network protocols to start.
func (s *Ethereum) Protocols() []p2p.Protocol {
@@ -560,6 +589,7 @@ func (s *Ethereum) Protocols() []p2p.Protocol {
if s.config.SnapshotCache > 0 {
protos = append(protos, snap.MakeProtocols((*snapHandler)(s.handler), s.snapDialCandidates)...)
}
+
return protos
}
@@ -582,8 +612,77 @@ func (s *Ethereum) Start() error {
}
maxPeers -= s.config.LightPeers
}
+
// Start the networking layer and the light server if requested
s.handler.Start(maxPeers)
+
+ go s.startCheckpointWhitelistService()
+
+ return nil
+}
+
+// startCheckpointWhitelistService starts the goroutine to fetch checkpoints and update the
+// checkpoint whitelist map.
+func (s *Ethereum) startCheckpointWhitelistService() {
+ // a shortcut that helps with tests and allows an early exit
+ select {
+ case <-s.closeCh:
+ return
+ default:
+ }
+
+ // run the checkpoint whitelist update once before starting the ticker
+ err := s.handleWhitelistCheckpoint()
+ if err != nil {
+ if errors.Is(err, ErrBorConsensusWithoutHeimdall) || errors.Is(err, ErrNotBorConsensus) {
+ return
+ }
+
+ log.Warn("unable to whitelist checkpoint - first run", "err", err)
+ }
+
+ ticker := time.NewTicker(100 * time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ err := s.handleWhitelistCheckpoint()
+ if err != nil {
+ log.Warn("unable to whitelist checkpoint", "err", err)
+ }
+ case <-s.closeCh:
+ return
+ }
+ }
+}
+
+var (
+ ErrNotBorConsensus = errors.New("a non-bor consensus engine was given")
+ ErrBorConsensusWithoutHeimdall = errors.New("bor consensus without heimdall")
+)
+
+// handleWhitelistCheckpoint handles the checkpoint whitelist mechanism.
+func (s *Ethereum) handleWhitelistCheckpoint() error {
+ ethHandler := (*ethHandler)(s.handler)
+
+ bor, ok := ethHandler.chain.Engine().(*bor.Bor)
+ if !ok {
+ return ErrNotBorConsensus
+ }
+
+ if bor.HeimdallClient == nil {
+ return ErrBorConsensusWithoutHeimdall
+ }
+
+ endBlockNum, endBlockHash, err := ethHandler.fetchWhitelistCheckpoint(bor)
+ if err != nil {
+ return err
+ }
+
+ // Update the checkpoint whitelist map.
+ ethHandler.downloader.ProcessCheckpoint(endBlockNum, endBlockHash)
+
return nil
}
@@ -599,6 +698,9 @@ func (s *Ethereum) Stop() error {
s.bloomIndexer.Close()
close(s.closeBloomHandler)
+ // Close all bg processes
+ close(s.closeCh)
+
// closing consensus engine first, as miner has deps on it
s.engine.Close()
s.txPool.Stop()
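
For reference, a minimal, self-contained sketch of the background-loop pattern added in eth/backend.go above: run the checkpoint update once up front, then on a fixed ticker, and stop when the close channel is closed. The names whitelistLoop and fetchCheckpoint are illustrative, not part of this change.

package main

import (
	"log"
	"time"
)

// whitelistLoop mirrors startCheckpointWhitelistService: one eager run, then a
// ticker-driven loop that exits when closeCh is closed.
func whitelistLoop(closeCh <-chan struct{}, interval time.Duration, fetchCheckpoint func() error) {
	if err := fetchCheckpoint(); err != nil {
		log.Println("unable to whitelist checkpoint - first run:", err)
	}

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if err := fetchCheckpoint(); err != nil {
				log.Println("unable to whitelist checkpoint:", err)
			}
		case <-closeCh:
			return
		}
	}
}

func main() {
	closeCh := make(chan struct{})
	go whitelistLoop(closeCh, time.Second, func() error { return nil })

	time.Sleep(3 * time.Second)
	close(closeCh) // mirrors Ethereum.Stop closing closeCh to end background work
}
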
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 1e68746f97..1a9d815ccd 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth/downloader/whitelist"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -143,6 +144,8 @@ type Downloader struct {
quitCh chan struct{} // Quit channel to signal termination
quitLock sync.Mutex // Lock to prevent double closes
+ ChainValidator
+
// Testing hooks
syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run
bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch
@@ -150,6 +153,14 @@ type Downloader struct {
chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}
+// ChainValidator is the interface implemented by the whitelist service to validate an incoming chain during sync
+type ChainValidator interface {
+ IsValidChain(remoteHeader *types.Header, fetchHeadersByNumber func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error)
+ ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash)
+ GetCheckpointWhitelist() map[uint64]common.Hash
+ PurgeCheckpointWhitelist()
+}
+
// LightChain encapsulates functions required to synchronise a light chain.
type LightChain interface {
// HasHeader verifies a header's presence in the local chain.
@@ -204,7 +215,8 @@ type BlockChain interface {
}
// New creates a new downloader to fetch hashes and blocks from remote peers.
-func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader {
+//nolint: staticcheck
+func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func(), whitelistService ChainValidator) *Downloader {
if lightchain == nil {
lightchain = chain
}
@@ -221,6 +233,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain Bl
quitCh: make(chan struct{}),
SnapSyncer: snap.NewSyncer(stateDb),
stateSyncStart: make(chan *stateSync),
+ ChainValidator: whitelistService,
}
dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))
@@ -332,9 +345,11 @@ func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, m
case nil, errBusy, errCanceled:
return err
}
+
if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) ||
errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) ||
- errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) {
+ errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) ||
+ errors.Is(err, whitelist.ErrCheckpointMismatch) {
log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
if d.dropPeer == nil {
// The dropPeer method is nil when `--copydb` is used for a local copy.
@@ -345,10 +360,17 @@ func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, m
}
return err
}
+
if errors.Is(err, ErrMergeTransition) {
return err // This is an expected fault, don't keep printing it in a spin-loop
}
- log.Warn("Synchronisation failed, retrying", "err", err)
+
+ if errors.Is(err, whitelist.ErrNoRemoteCheckoint) {
+ log.Warn("Doesn't have remote checkpoint yet", "peer", id, "err", err)
+ }
+
+ log.Warn("Synchronisation failed, retrying", "peer", id, "err", err)
+
return err
}
@@ -764,12 +786,24 @@ func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, ui
return int64(from), count, span - 1, uint64(max)
}
+// getFetchHeadersByNumber returns fetchHeadersByNumber curried over the given peer
+func (d *Downloader) getFetchHeadersByNumber(p *peerConnection) func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) {
+ return func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) {
+ return d.fetchHeadersByNumber(p, number, amount, skip, reverse)
+ }
+}
+
// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peers blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
+ // Check the validity of the chain to be downloaded
+ if _, err := d.IsValidChain(remoteHeader, d.getFetchHeadersByNumber(p)); err != nil {
+ return 0, err
+ }
+
// Figure out the valid ancestor range to prevent rewrite attacks
var (
floor = int64(-1)
@@ -1346,6 +1380,7 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
frequency = 1
}
+
// Although the received headers might be all valid, a legacy
// PoW/PoA sync must not accept post-merge headers. Make sure
// that any transition is rejected at this point.
@@ -1353,13 +1388,16 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
rejected []*types.Header
td *big.Int
)
+
if !beaconMode && ttd != nil {
td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1)
if td == nil {
// This should never really happen, but handle gracefully for now
log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash)
+
return fmt.Errorf("%w: parent TD missing", errInvalidChain)
}
+
for i, header := range chunkHeaders {
td = new(big.Int).Add(td, header.Difficulty)
if td.Cmp(ttd) >= 0 {
@@ -1373,10 +1411,12 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
} else {
chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:]
}
+
break
}
}
}
+
if len(chunkHeaders) > 0 {
if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
rollbackErr = err
@@ -1385,12 +1425,15 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
rollback = chunkHeaders[0].Number.Uint64()
}
+
log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
+
return fmt.Errorf("%w: %v", errInvalidChain, err)
}
// All verifications passed, track all headers within the allowed limits
if mode == SnapSync {
head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
+
if head-rollback > uint64(fsHeaderSafetyNet) {
rollback = head - uint64(fsHeaderSafetyNet)
} else {
@@ -1398,14 +1441,26 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
}
}
}
+
+ if len(rejected) != 0 {
+ // Merge threshold reached, stop importing, but don't roll back
+ rollback = 0
+
+ log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd)
+
+ return ErrMergeTransition
+ }
+
if len(rejected) != 0 {
// Merge threshold reached, stop importing, but don't roll back
rollback = 0
log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd)
+
return ErrMergeTransition
}
}
+
// Unless we're doing light chains, schedule the headers for associated content retrieval
if mode == FullSync || mode == SnapSync {
// If we've reached the allowed number of pending headers, stall a bit
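
Illustrative only: the smallest possible ChainValidator satisfying the interface added above, accepting every remote chain and keeping no whitelist state. The concrete implementation is the whitelist service introduced later in this diff; the downloader tests below swap in a similar fake.

package noopvalidator

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
)

// allowAllValidator never rejects a peer's chain.
type allowAllValidator struct{}

func (allowAllValidator) IsValidChain(_ *types.Header, _ func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) {
	return true, nil
}

func (allowAllValidator) ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) {}

func (allowAllValidator) GetCheckpointWhitelist() map[uint64]common.Hash { return nil }

func (allowAllValidator) PurgeCheckpointWhitelist() {}

// compile-time check that the fake satisfies downloader.ChainValidator
var _ downloader.ChainValidator = allowAllValidator{}
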
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 6989252c11..37b07424dd 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -35,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/eth/downloader/whitelist"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/event"
@@ -42,6 +43,8 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+
+ "github.com/stretchr/testify/assert"
)
// downloadTester is a test simulator for mocking out local block chain.
@@ -60,25 +63,35 @@ func newTester() *downloadTester {
if err != nil {
panic(err)
}
+
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
if err != nil {
panic(err)
}
+
core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000))
chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
panic(err)
}
+
tester := &downloadTester{
freezer: freezer,
chain: chain,
peers: make(map[string]*downloadTesterPeer),
}
- tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, nil)
+
+ //nolint: staticcheck
+ tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, nil, whitelist.NewService(10))
+
return tester
}
+func (dl *downloadTester) setWhitelist(w ChainValidator) {
+ dl.downloader.ChainValidator = w
+}
+
// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
@@ -155,7 +168,7 @@ func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
}
func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
- var headers = make([]*types.Header, len(rlpdata))
+ headers := make([]*types.Header, len(rlpdata))
for i, data := range rlpdata {
var h types.Header
if err := rlp.DecodeBytes(data, &h); err != nil {
@@ -620,6 +633,7 @@ func TestBoundedHeavyForkedSync66Full(t *testing.T) {
func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync)
}
+
func TestBoundedHeavyForkedSync66Light(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
}
@@ -711,7 +725,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
// Create peers of every type
tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:])
- //tester.newPeer("peer 65", eth.ETH67, chain.blocks[1:)
+ // tester.newPeer("peer 65", eth.ETH67, chain.blocks[1:)
// Synchronise with the requested peer and make sure all blocks were retrieved
if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
@@ -916,6 +930,7 @@ func TestHighTDStarvationAttack66Full(t *testing.T) {
func TestHighTDStarvationAttack66Snap(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH66, SnapSync)
}
+
func TestHighTDStarvationAttack66Light(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH66, LightSync)
}
@@ -1268,36 +1283,45 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
expected []int
}{
// Remote is way higher. We should ask for the remote head and go backwards
- {1500, 1000,
+ {
+ 1500, 1000,
[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
},
- {15000, 13006,
+ {
+ 15000, 13006,
[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
},
// Remote is pretty close to us. We don't have to fetch as many
- {1200, 1150,
+ {
+ 1200, 1150,
[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
},
// Remote is equal to us (so on a fork with higher td)
// We should get the closest couple of ancestors
- {1500, 1500,
+ {
+ 1500, 1500,
[]int{1497, 1499},
},
// We're higher than the remote! Odd
- {1000, 1500,
+ {
+ 1000, 1500,
[]int{997, 999},
},
// Check some weird edgecases that it behaves somewhat rationally
- {0, 1500,
+ {
+ 0, 1500,
[]int{0, 2},
},
- {6000000, 0,
+ {
+ 6000000, 0,
[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
},
- {0, 0,
+ {
+ 0, 0,
[]int{0, 2},
},
}
+
reqs := func(from, count, span int) []int {
var r []int
num := from
@@ -1307,32 +1331,39 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
}
return r
}
+
for i, tt := range testCases {
- from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
- data := reqs(int(from), count, span)
+ i := i
+ tt := tt
- if max != uint64(data[len(data)-1]) {
- t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
- }
- failed := false
- if len(data) != len(tt.expected) {
- failed = true
- t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
- } else {
- for j, n := range data {
- if n != tt.expected[j] {
- failed = true
- break
+ t.Run("", func(t *testing.T) {
+ from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
+ data := reqs(int(from), count, span)
+
+ if max != uint64(data[len(data)-1]) {
+ t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
+ }
+ failed := false
+ if len(data) != len(tt.expected) {
+ failed = true
+ t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
+ } else {
+ for j, n := range data {
+ if n != tt.expected[j] {
+ failed = true
+ break
+ }
}
}
- }
- if failed {
- res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
- exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
- t.Logf("got: %v\n", res)
- t.Logf("exp: %v\n", exp)
- t.Errorf("test %d: wrong values", i)
- }
+
+ if failed {
+ res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
+ exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
+ t.Logf("got: %v\n", res)
+ t.Logf("exp: %v\n", exp)
+ t.Errorf("test %d: wrong values", i)
+ }
+ })
}
}
@@ -1359,12 +1390,134 @@ func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
if mode == SnapSync || mode == LightSync {
expect = errUnsyncedPeer
}
+
if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
}
+
if mode == SnapSync || mode == LightSync {
assertOwnChain(t, tester, 1)
} else {
assertOwnChain(t, tester, len(chain.blocks))
}
}
+
+// whitelistFake is a mock for the chain validator service
+type whitelistFake struct {
+ // count denotes the number of times the validate function was called
+ count int
+
+ // validate is the dynamic function to be called while syncing
+ validate func(count int) (bool, error)
+}
+
+// newWhitelistFake returns a new mock whitelist
+func newWhitelistFake(validate func(count int) (bool, error)) *whitelistFake {
+ return &whitelistFake{0, validate}
+}
+
+// IsValidChain is the mock function which the downloader will use to validate the chain
+// to be received from a peer.
+func (w *whitelistFake) IsValidChain(_ *types.Header, _ func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) {
+ defer func() {
+ w.count++
+ }()
+
+ return w.validate(w.count)
+}
+
+func (w *whitelistFake) ProcessCheckpoint(_ uint64, _ common.Hash) {}
+
+func (w *whitelistFake) GetCheckpointWhitelist() map[uint64]common.Hash {
+ return nil
+}
+
+func (w *whitelistFake) PurgeCheckpointWhitelist() {}
+
+// TestFakedSyncProgress66WhitelistMismatch tests that the sync fails when the
+// whitelisted checkpoint doesn't match the opposite peer's chain.
+func TestFakedSyncProgress66WhitelistMismatch(t *testing.T) {
+ t.Parallel()
+
+ protocol := uint(eth.ETH66)
+ mode := FullSync
+
+ tester := newTester()
+ validate := func(count int) (bool, error) {
+ return false, whitelist.ErrCheckpointMismatch
+ }
+ tester.downloader.ChainValidator = newWhitelistFake(validate)
+
+ defer tester.terminate()
+
+ chainA := testChainForkLightA.blocks
+ tester.newPeer("light", protocol, chainA[1:])
+
+ // Synchronise with the peer and make sure all blocks were retrieved
+ if err := tester.sync("light", nil, mode); err == nil {
+ t.Fatal("succeeded attacker synchronisation")
+ }
+}
+
+// TestFakedSyncProgress66WhitelistMatch tests that the sync succeeds when the
+// whitelisted checkpoint matches the opposite peer's chain.
+func TestFakedSyncProgress66WhitelistMatch(t *testing.T) {
+ t.Parallel()
+
+ protocol := uint(eth.ETH66)
+ mode := FullSync
+
+ tester := newTester()
+ validate := func(count int) (bool, error) {
+ return true, nil
+ }
+ tester.downloader.ChainValidator = newWhitelistFake(validate)
+
+ defer tester.terminate()
+
+ chainA := testChainForkLightA.blocks
+ tester.newPeer("light", protocol, chainA[1:])
+
+ // Synchronise with the peer and make sure all blocks were retrieved
+ if err := tester.sync("light", nil, mode); err != nil {
+ t.Fatal("succeeded attacker synchronisation")
+ }
+}
+
+// TestFakedSyncProgress66NoRemoteCheckpoint tests that when the opposite peer is
+// missing the checkpointed blocks, the sync fails on the first attempt but
+// eventually succeeds through the retry mechanism.
+func TestFakedSyncProgress66NoRemoteCheckpoint(t *testing.T) {
+ t.Parallel()
+
+ protocol := uint(eth.ETH66)
+ mode := FullSync
+
+ tester := newTester()
+ validate := func(count int) (bool, error) {
+ // only return the `ErrNoRemoteCheckoint` error for the first call
+ if count == 0 {
+ return false, whitelist.ErrNoRemoteCheckoint
+ }
+
+ return true, nil
+ }
+
+ tester.downloader.ChainValidator = newWhitelistFake(validate)
+
+ defer tester.terminate()
+
+ chainA := testChainForkLightA.blocks
+ tester.newPeer("light", protocol, chainA[1:])
+
+ // Synchronise with the peer and make sure all blocks were retrieved
+ // Should fail in first attempt
+ if err := tester.sync("light", nil, mode); err != nil {
+ assert.Equal(t, whitelist.ErrNoRemoteCheckoint, err, "failed synchronisation")
+ }
+
+ // Try syncing again, should succeed
+ if err := tester.sync("light", nil, mode); err != nil {
+ t.Fatal("succeeded attacker synchronisation")
+ }
+}
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
index f729def671..872bfcd1a9 100644
--- a/eth/downloader/queue_test.go
+++ b/eth/downloader/queue_test.go
@@ -64,8 +64,11 @@ type chainData struct {
offset int
}
-var chain *chainData
-var emptyChain *chainData
+var (
+ chain *chainData
+ chainLongerFork *chainData
+ emptyChain *chainData
+)
func init() {
// Create a chain of blocks to import
@@ -75,6 +78,9 @@ func init() {
blocks, _ = makeChain(targetBlocks, 0, genesis, true)
emptyChain = &chainData{blocks, 0}
+
+ chainLongerForkBlocks, _ := makeChain(1024, 0, blocks[len(blocks)-1], false)
+ chainLongerFork = &chainData{chainLongerForkBlocks, 0}
}
func (chain *chainData) headers() []*types.Header {
diff --git a/eth/downloader/whitelist/service.go b/eth/downloader/whitelist/service.go
new file mode 100644
index 0000000000..7036f24a8f
--- /dev/null
+++ b/eth/downloader/whitelist/service.go
@@ -0,0 +1,126 @@
+package whitelist
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// Service maintains the checkpoint whitelist used to validate incoming chains against checkpoints fetched from heimdall
+type Service struct {
+ m sync.Mutex
+ checkpointWhitelist map[uint64]common.Hash // Checkpoint whitelist, populated by reaching out to heimdall
+ checkpointOrder []uint64 // Checkpoint order, populated by reaching out to heimdall
+ maxCapacity uint
+}
+
+func NewService(maxCapacity uint) *Service {
+ return &Service{
+ checkpointWhitelist: make(map[uint64]common.Hash),
+ checkpointOrder: []uint64{},
+ maxCapacity: maxCapacity,
+ }
+}
+
+var (
+ ErrCheckpointMismatch = errors.New("checkpoint mismatch")
+ ErrNoRemoteCheckoint = errors.New("remote peer doesn't have a checkpoint")
+)
+
+// IsValidChain checks if the chain we're about to receive from this peer is valid or not
+// in terms of reorgs. We won't reorg beyond the last bor checkpoint submitted to mainchain.
+func (w *Service) IsValidChain(remoteHeader *types.Header, fetchHeadersByNumber func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) {
+ // We want to validate the chain by comparing the last checkpointed block
+ // we're storing in `checkpointWhitelist` with the peer's block.
+ //
+ // Check for availability of the last checkpointed block.
+ // This can also be empty if our heimdall is not responding
+ // or we're running without it.
+ if len(w.checkpointWhitelist) == 0 {
+ // worst case, we don't have the checkpoints in memory
+ return true, nil
+ }
+
+ // Fetch the last checkpoint entry
+ lastCheckpointBlockNum := w.checkpointOrder[len(w.checkpointOrder)-1]
+ lastCheckpointBlockHash := w.checkpointWhitelist[lastCheckpointBlockNum]
+
+ // todo: we could extract this as an interface and mock it, or test IsValidChain in isolation from the downloader by passing fake fetchHeadersByNumber functions
+ headers, hashes, err := fetchHeadersByNumber(lastCheckpointBlockNum, 1, 0, false)
+ if err != nil {
+ return false, fmt.Errorf("%w: last checkpoint %d, err %v", ErrNoRemoteCheckoint, lastCheckpointBlockNum, err)
+ }
+
+ if len(headers) == 0 {
+ return false, fmt.Errorf("%w: last checkpoint %d", ErrNoRemoteCheckoint, lastCheckpointBlockNum)
+ }
+
+ reqBlockNum := headers[0].Number.Uint64()
+ reqBlockHash := hashes[0]
+
+ // Check against the checkpointed blocks
+ if reqBlockNum == lastCheckpointBlockNum && reqBlockHash == lastCheckpointBlockHash {
+ return true, nil
+ }
+
+ return false, ErrCheckpointMismatch
+}
+
+func (w *Service) ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) {
+ w.m.Lock()
+ defer w.m.Unlock()
+
+ w.enqueueCheckpointWhitelist(endBlockNum, endBlockHash)
+ // If the size of the checkpoint whitelist map exceeds maxCapacity, remove the oldest entry.
+
+ if w.length() > int(w.maxCapacity) {
+ w.dequeueCheckpointWhitelist()
+ }
+}
+
+// GetCheckpointWhitelist returns the current whitelisted checkpoint
+// entries, mapping block number -> block hash.
+func (w *Service) GetCheckpointWhitelist() map[uint64]common.Hash {
+ w.m.Lock()
+ defer w.m.Unlock()
+
+ return w.checkpointWhitelist
+}
+
+// PurgeCheckpointWhitelist purges data from checkpoint whitelist map
+func (w *Service) PurgeCheckpointWhitelist() {
+ w.m.Lock()
+ defer w.m.Unlock()
+
+ w.checkpointWhitelist = make(map[uint64]common.Hash)
+ w.checkpointOrder = make([]uint64, 0)
+}
+
+// enqueueCheckpointWhitelist adds a blockNumber -> blockHash entry to the checkpoint whitelist map
+func (w *Service) enqueueCheckpointWhitelist(key uint64, val common.Hash) {
+ if _, ok := w.checkpointWhitelist[key]; !ok {
+ log.Debug("Enqueing new checkpoint whitelist", "block number", key, "block hash", val)
+
+ w.checkpointWhitelist[key] = val
+ w.checkpointOrder = append(w.checkpointOrder, key)
+ }
+}
+
+// dequeueCheckpointWhitelist removes the oldest block number, block hash entry from the checkpoint whitelist map
+func (w *Service) dequeueCheckpointWhitelist() {
+ if len(w.checkpointOrder) > 0 {
+ log.Debug("Dequeing checkpoint whitelist", "block number", w.checkpointOrder[0], "block hash", w.checkpointWhitelist[w.checkpointOrder[0]])
+
+ delete(w.checkpointWhitelist, w.checkpointOrder[0])
+ w.checkpointOrder = w.checkpointOrder[1:]
+ }
+}
+
+// length returns the number of entries in the whitelist.
+func (w *Service) length() int {
+ return len(w.checkpointWhitelist)
+}
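
A short usage sketch of the service defined above, grounded in the code in this file: with a capacity of 2, processing a third checkpoint evicts the oldest entry, and purging clears the map.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/downloader/whitelist"
)

func main() {
	s := whitelist.NewService(2)

	s.ProcessCheckpoint(100, common.HexToHash("0x01"))
	s.ProcessCheckpoint(200, common.HexToHash("0x02"))
	s.ProcessCheckpoint(300, common.HexToHash("0x03")) // capacity reached, block 100 is dequeued

	fmt.Println(len(s.GetCheckpointWhitelist())) // 2
	_, ok := s.GetCheckpointWhitelist()[100]
	fmt.Println(ok) // false

	s.PurgeCheckpointWhitelist()
	fmt.Println(len(s.GetCheckpointWhitelist())) // 0
}
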
diff --git a/eth/downloader/whitelist/service_test.go b/eth/downloader/whitelist/service_test.go
new file mode 100644
index 0000000000..ca202cc3ad
--- /dev/null
+++ b/eth/downloader/whitelist/service_test.go
@@ -0,0 +1,107 @@
+package whitelist
+
+import (
+ "errors"
+ "math/big"
+ "testing"
+
+ "gotest.tools/assert"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// NewMockService creates a new mock whitelist service
+func NewMockService(maxCapacity uint) *Service {
+ return &Service{
+ checkpointWhitelist: make(map[uint64]common.Hash),
+ checkpointOrder: []uint64{},
+ maxCapacity: maxCapacity,
+ }
+}
+
+// TestWhitelistCheckpoint checks the checkpoint whitelist map queue mechanism
+func TestWhitelistCheckpoint(t *testing.T) {
+ t.Parallel()
+
+ s := NewMockService(10)
+ for i := 0; i < 10; i++ {
+ s.enqueueCheckpointWhitelist(uint64(i), common.Hash{})
+ }
+ assert.Equal(t, s.length(), 10, "expected 10 items in whitelist")
+
+ s.enqueueCheckpointWhitelist(11, common.Hash{})
+ s.dequeueCheckpointWhitelist()
+ assert.Equal(t, s.length(), 10, "expected 10 items in whitelist")
+}
+
+// TestIsValidChain checks the IsValidChain function in isolation
+// for different cases by providing a mock fetchHeadersByNumber function
+func TestIsValidChain(t *testing.T) {
+ t.Parallel()
+
+ s := NewMockService(10)
+
+ // case1: no checkpoint whitelist, should consider the chain as valid
+ res, err := s.IsValidChain(nil, nil)
+ assert.NilError(t, err, "expected no error")
+ assert.Equal(t, res, true, "expected chain to be valid")
+
+ // add checkpoint entries and mock fetchHeadersByNumber function
+ s.ProcessCheckpoint(uint64(0), common.Hash{})
+ s.ProcessCheckpoint(uint64(1), common.Hash{})
+
+ assert.Equal(t, s.length(), 2, "expected 2 items in whitelist")
+
+ // create a faulty fetch function that returns nothing
+ falseFetchHeadersByNumber := func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) {
+ return nil, nil, nil
+ }
+
+ // case2: faulty fetchHeadersByNumber function provided; the chain should be considered
+ // invalid and an `ErrNoRemoteCheckoint` error returned
+ res, err = s.IsValidChain(nil, falseFetchHeadersByNumber)
+ if err == nil {
+ t.Fatal("expected error, got nil")
+ }
+
+ if !errors.Is(err, ErrNoRemoteCheckoint) {
+ t.Fatalf("expected error ErrNoRemoteCheckoint, got %v", err)
+ }
+
+ assert.Equal(t, res, false, "expected chain to be invalid")
+
+ // case3: correct fetchHeadersByNumber function provided, should consider the chain as valid
+ // create a mock function returning the required header
+ fetchHeadersByNumber := func(number uint64, _ int, _ int, _ bool) ([]*types.Header, []common.Hash, error) {
+ hash := common.Hash{}
+ header := types.Header{Number: big.NewInt(0)}
+
+ switch number {
+ case 0:
+ return []*types.Header{&header}, []common.Hash{hash}, nil
+ case 1:
+ header.Number = big.NewInt(1)
+ return []*types.Header{&header}, []common.Hash{hash}, nil
+ case 2:
+ header.Number = big.NewInt(1) // sending wrong header for mismatch
+ return []*types.Header{&header}, []common.Hash{hash}, nil
+ default:
+ return nil, nil, errors.New("invalid number")
+ }
+ }
+
+ res, err = s.IsValidChain(nil, fetchHeadersByNumber)
+ assert.NilError(t, err, "expected no error")
+ assert.Equal(t, res, true, "expected chain to be valid")
+
+ // add one more checkpoint whitelist entry
+ s.ProcessCheckpoint(uint64(2), common.Hash{})
+ assert.Equal(t, s.length(), 3, "expected 3 items in whitelist")
+
+ // case4: correct fetchHeadersByNumber function provided with wrong header
+ // for block number 2. Should consider the chain as invalid and throw an error
+ res, err = s.IsValidChain(nil, fetchHeadersByNumber)
+ assert.Equal(t, err, ErrCheckpointMismatch, "expected checkpoint mismatch error")
+ assert.Equal(t, res, false, "expected chain to be invalid")
+}
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 6ab43891f7..091600d8b5 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -29,6 +29,9 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/bor"
+ "github.com/ethereum/go-ethereum/consensus/bor/contract"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
@@ -88,13 +91,13 @@ var Defaults = Config{
Miner: miner.Config{
GasCeil: 8000000,
GasPrice: big.NewInt(params.GWei),
- Recommit: 3 * time.Second,
+ Recommit: 125 * time.Second,
},
TxPool: core.DefaultTxPoolConfig,
RPCGasCap: 50000000,
RPCEVMTimeout: 5 * time.Second,
GPO: FullNodeGPO,
- RPCTxFeeCap: 1, // 1 ether
+ RPCTxFeeCap: 5, // 5 matic
}
func init() {
@@ -237,7 +240,14 @@ func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, et
// In order to pass the ethereum transaction tests, we need to set the burn contract which is in the bor config
// Then, bor != nil will also be enabled for ethash and clique. Only enable Bor for real if there is a validator contract present.
if chainConfig.Bor != nil && chainConfig.Bor.ValidatorContract != "" {
- return bor.New(chainConfig, db, blockchainAPI, ethConfig.HeimdallURL, ethConfig.WithoutHeimdall)
+ genesisContractsClient := contract.NewGenesisContractsClient(chainConfig, chainConfig.Bor.ValidatorContract, chainConfig.Bor.StateReceiverContract, blockchainAPI)
+ spanner := span.NewChainSpanner(blockchainAPI, contract.ValidatorSet(), chainConfig, common.HexToAddress(chainConfig.Bor.ValidatorContract))
+
+ if ethConfig.WithoutHeimdall {
+ return bor.New(chainConfig, db, blockchainAPI, spanner, nil, genesisContractsClient)
+ } else {
+ return bor.New(chainConfig, db, blockchainAPI, spanner, heimdall.NewHeimdallClient(ethConfig.HeimdallURL), genesisContractsClient)
+ }
} else {
switch config.PowMode {
case ethash.ModeFake:
diff --git a/eth/handler.go b/eth/handler.go
index 40edfa2d17..3d8380412c 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -31,11 +31,13 @@ import (
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/eth/downloader/whitelist"
"github.com/ethereum/go-ethereum/eth/fetcher"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
@@ -77,15 +79,16 @@ type txPool interface {
// handlerConfig is the collection of initialization parameters to create a full
// node network handler.
type handlerConfig struct {
- Database ethdb.Database // Database for direct sync insertions
- Chain *core.BlockChain // Blockchain to serve data from
- TxPool txPool // Transaction pool to propagate from
- Merger *consensus.Merger // The manager for eth1/2 transition
- Network uint64 // Network identifier to adfvertise
- Sync downloader.SyncMode // Whether to snap or full sync
- BloomCache uint64 // Megabytes to alloc for snap sync bloom
- EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
- Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
+ Database ethdb.Database // Database for direct sync insertions
+ Chain *core.BlockChain // Blockchain to serve data from
+ TxPool txPool // Transaction pool to propagate from
+ Merger *consensus.Merger // The manager for eth1/2 transition
+ Network uint64 // Network identifier to advertise
+ Sync downloader.SyncMode // Whether to snap or full sync
+ BloomCache uint64 // Megabytes to alloc for snap sync bloom
+ EventMux *event.TypeMux //nolint:staticcheck // Legacy event mux, deprecate for `feed`
+ Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
+ EthAPI *ethapi.PublicBlockChainAPI // EthAPI to interact with the local chain
PeerRequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
}
@@ -111,6 +114,8 @@ type handler struct {
peers *peerSet
merger *consensus.Merger
+ ethAPI *ethapi.PublicBlockChainAPI // EthAPI to interact with the local chain
+
eventMux *event.TypeMux
txsCh chan core.NewTxsEvent
txsSub event.Subscription
@@ -141,6 +146,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
chain: config.Chain,
peers: newPeerSet(),
merger: config.Merger,
+ ethAPI: config.EthAPI,
peerRequiredBlocks: config.PeerRequiredBlocks,
quitSync: make(chan struct{}),
}
@@ -155,8 +161,15 @@ func newHandler(config *handlerConfig) (*handler, error) {
// In these cases however it's safe to reenable snap sync.
fullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock()
if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 {
- h.snapSync = uint32(1)
- log.Warn("Switch sync mode from full sync to snap sync")
+ // Note: Ideally this should never happen with bor, but as an extra
+ // precaution we won't allow it to roll over to snap sync until
+ // we have it working
+
+ // TODO(snap): Uncomment when we have snap sync working
+ // h.snapSync = uint32(1)
+ // log.Warn("Switch sync mode from full sync to snap sync")
+
+ log.Warn("Preventing switching sync mode from full sync to snap sync")
}
} else {
if h.chain.CurrentBlock().NumberU64() > 0 {
@@ -195,7 +208,8 @@ func newHandler(config *handlerConfig) (*handler, error) {
// Construct the downloader (long sync) and its backing state bloom if snap
// sync is requested. The downloader is responsible for deallocating the state
// bloom when it's done.
- h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer, success)
+ // todo: it'd be better to extract maxCapacity into the config
+ h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer, success, whitelist.NewService(10))
// Construct the fetcher (short sync)
validator := func(header *types.Header) error {
diff --git a/eth/handler_bor.go b/eth/handler_bor.go
new file mode 100644
index 0000000000..11896f3c47
--- /dev/null
+++ b/eth/handler_bor.go
@@ -0,0 +1,75 @@
+package eth
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/consensus/bor"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+var (
+ // errCheckpoint is returned when we are unable to fetch the
+ // latest checkpoint from the local heimdall.
+ errCheckpoint = errors.New("failed to fetch latest checkpoint")
+
+ // errMissingCheckpoint is returned when we don't have the
+ // checkpoint blocks locally, yet.
+ errMissingCheckpoint = errors.New("missing checkpoint blocks")
+
+ // errRootHash is returned when we aren't able to calculate the root hash
+ // locally for a range of blocks.
+ errRootHash = errors.New("failed to get local root hash")
+
+ // errCheckpointRootHashMismatch is returned when the local root hash
+ // doesn't match with the root hash in checkpoint.
+ errCheckpointRootHashMismatch = errors.New("checkpoint roothash mismatch")
+
+ // errEndBlock is returned when we're unable to fetch a block locally.
+ errEndBlock = errors.New("failed to get end block")
+)
+
+// fetchWhitelistCheckpoint fetches the latest checkpoint from its local heimdall
+// and verifies the data against bor data.
+func (h *ethHandler) fetchWhitelistCheckpoint(bor *bor.Bor) (uint64, common.Hash, error) {
+ // fetch the latest checkpoint from heimdall for whitelisting
+ checkpoint, err := bor.HeimdallClient.FetchLatestCheckpoint()
+ if err != nil {
+ log.Debug("Failed to fetch latest checkpoint for whitelisting")
+ return 0, common.Hash{}, errCheckpoint
+ }
+
+ // check if we have the checkpoint blocks
+ head := h.ethAPI.BlockNumber()
+ if head < hexutil.Uint64(checkpoint.EndBlock.Uint64()) {
+ log.Debug("Head block behind checkpoint block", "head", head, "checkpoint end block", checkpoint.EndBlock)
+ return 0, common.Hash{}, errMissingCheckpoint
+ }
+
+ // verify the root hash of checkpoint
+ roothash, err := h.ethAPI.GetRootHash(context.Background(), checkpoint.StartBlock.Uint64(), checkpoint.EndBlock.Uint64())
+ if err != nil {
+ log.Debug("Failed to get root hash of checkpoint while whitelisting")
+ return 0, common.Hash{}, errRootHash
+ }
+
+ if roothash != checkpoint.RootHash.String()[2:] {
+ log.Warn("Checkpoint root hash mismatch while whitelisting", "expected", checkpoint.RootHash.String()[2:], "got", roothash)
+ return 0, common.Hash{}, errCheckpointRootHashMismatch
+ }
+
+ // fetch the end checkpoint block hash
+ block, err := h.ethAPI.GetBlockByNumber(context.Background(), rpc.BlockNumber(checkpoint.EndBlock.Uint64()), false)
+ if err != nil {
+ log.Debug("Failed to get end block hash of checkpoint while whitelisting")
+ return 0, common.Hash{}, errEndBlock
+ }
+
+ hash := fmt.Sprintf("%v", block["hash"])
+
+ return checkpoint.EndBlock.Uint64(), common.HexToHash(hash), nil
+}
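
A condensed sketch of the verification order implemented above, with the heimdall and local-chain lookups abstracted behind hypothetical function values (fetchLatest, localHead, localRootHash and blockHash are illustrative, not APIs from this change): fetch the checkpoint, make sure the local head covers it, compare root hashes, then resolve the end-block hash to be whitelisted.

package checkpointsketch

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
)

type checkpoint struct {
	StartBlock, EndBlock uint64
	RootHash             common.Hash
}

func verifyCheckpoint(
	fetchLatest func() (checkpoint, error),
	localHead func() uint64,
	localRootHash func(start, end uint64) (common.Hash, error),
	blockHash func(number uint64) (common.Hash, error),
) (uint64, common.Hash, error) {
	cp, err := fetchLatest()
	if err != nil {
		return 0, common.Hash{}, errors.New("failed to fetch latest checkpoint")
	}

	// the checkpointed range must already be part of the local chain
	if localHead() < cp.EndBlock {
		return 0, common.Hash{}, errors.New("missing checkpoint blocks")
	}

	// the locally computed root hash must match the checkpoint's root hash
	root, err := localRootHash(cp.StartBlock, cp.EndBlock)
	if err != nil {
		return 0, common.Hash{}, errors.New("failed to get local root hash")
	}
	if root != cp.RootHash {
		return 0, common.Hash{}, errors.New("checkpoint roothash mismatch")
	}

	// the end block's hash is what gets whitelisted
	hash, err := blockHash(cp.EndBlock)
	if err != nil {
		return 0, common.Hash{}, errors.New("failed to get end block")
	}

	return cp.EndBlock, hash, nil
}
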
diff --git a/eth/sync.go b/eth/sync.go
index d67d2311d0..22c0c9054a 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -204,25 +204,36 @@ func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {
}
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
- // If we're in snap sync mode, return that directly
- if atomic.LoadUint32(&cs.handler.snapSync) == 1 {
- block := cs.handler.chain.CurrentFastBlock()
- td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
- return downloader.SnapSync, td
- }
- // We are probably in full sync, but we might have rewound to before the
- // snap sync pivot, check if we should reenable
- if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
- if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {
- block := cs.handler.chain.CurrentFastBlock()
- td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
- return downloader.SnapSync, td
- }
- }
- // Nope, we're really full syncing
+ // Note: Ideally this should never happen with bor, but as an extra
+ // precaution we won't allow it to roll over to snap sync until
+ // we have it working
+
+ // Handle full sync mode only
head := cs.handler.chain.CurrentBlock()
td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64())
return downloader.FullSync, td
+
+ // TODO(snap): Uncomment when we have snap sync working
+
+ // If we're in snap sync mode, return that directly
+ // if atomic.LoadUint32(&cs.handler.snapSync) == 1 {
+ // block := cs.handler.chain.CurrentFastBlock()
+ // td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
+ // return downloader.SnapSync, td
+ // }
+ // // We are probably in full sync, but we might have rewound to before the
+ // // snap sync pivot, check if we should reenable
+ // if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
+ // if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {
+ // block := cs.handler.chain.CurrentFastBlock()
+ // td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
+ // return downloader.SnapSync, td
+ // }
+ // }
+ // Nope, we're really full syncing
+ // head := cs.handler.chain.CurrentBlock()
+ // td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64())
+ // return downloader.FullSync, td
}
// startSync launches doSync in a new goroutine.
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 1e9de63891..a403e13502 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -28,9 +28,11 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/bor/statefull"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
@@ -63,6 +65,8 @@ const (
defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024)
)
+var defaultBorTraceEnabled = newBoolPtr(false)
+
// Backend interface provides the common API services (that are provided by
// both full and light clients) with access to necessary functions.
type Backend interface {
@@ -80,6 +84,9 @@ type Backend interface {
// so this method should be called with the parent.
StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error)
StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error)
+
+ // Bor related APIs
+ GetBorBlockTransactionWithBlockHash(ctx context.Context, txHash common.Hash, blockHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error)
}
// API is the collection of tracing APIs exposed over the private debugging endpoint.
@@ -164,12 +171,33 @@ func (api *API) blockByNumberAndHash(ctx context.Context, number rpc.BlockNumber
return api.blockByHash(ctx, hash)
}
+// getAllBlockTransactions returns the block's transactions along with the state-sync transaction, if present
+func (api *API) getAllBlockTransactions(ctx context.Context, block *types.Block) (types.Transactions, bool) {
+ txs := block.Transactions()
+
+ stateSyncPresent := false
+
+ borReceipt := rawdb.ReadBorReceipt(api.backend.ChainDb(), block.Hash(), block.NumberU64())
+ if borReceipt != nil {
+ txHash := types.GetDerivedBorTxHash(types.BorReceiptKey(block.Number().Uint64(), block.Hash()))
+ if txHash != (common.Hash{}) {
+ borTx, _, _, _, _ := api.backend.GetBorBlockTransactionWithBlockHash(ctx, txHash, block.Hash())
+ txs = append(txs, borTx)
+ stateSyncPresent = true
+ }
+ }
+
+ return txs, stateSyncPresent
+}
+
// TraceConfig holds extra parameters to trace functions.
type TraceConfig struct {
*logger.Config
- Tracer *string
- Timeout *string
- Reexec *uint64
+ Tracer *string
+ Timeout *string
+ Reexec *uint64
+ BorTraceEnabled *bool
+ BorTx *bool
}
// TraceCallConfig is the config for traceCall API. It holds one more
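
The getAllBlockTransactions helper above appends the synthetic bor state-sync transaction, when present, as the last element of the returned slice. A minimal sketch of the two patterns the tracing code below relies on (the names here are illustrative): dropping that trailing transaction when bor tracing is disabled, and detecting it by index when it must be executed via the state-sync path.

package tracesketch

// dropStateSync mirrors the "!BorTraceEnabled && stateSyncPresent" branches:
// the trailing state-sync transaction is sliced off before tracing.
func dropStateSync(txs []string, stateSyncPresent bool) []string {
	if stateSyncPresent && len(txs) > 0 {
		return txs[:len(txs)-1]
	}
	return txs
}

// isStateSync mirrors the "stateSyncPresent && i == len(txs)-1" checks: only the
// last index can hold the state-sync transaction.
func isStateSync(i int, txs []string, stateSyncPresent bool) bool {
	return stateSyncPresent && i == len(txs)-1
}
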
@@ -185,8 +213,9 @@ type TraceCallConfig struct {
// StdTraceConfig holds extra parameters to standard-json trace functions.
type StdTraceConfig struct {
logger.Config
- Reexec *uint64
- TxHash common.Hash
+ Reexec *uint64
+ TxHash common.Hash
+ BorTraceEnabled *bool
}
// txTraceResult is the result of a single transaction trace.
@@ -240,6 +269,16 @@ func (api *API) TraceChain(ctx context.Context, start, end rpc.BlockNumber, conf
// executes all the transactions contained within. The return value will be one item
// per transaction, dependent on the requested tracer.
func (api *API) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) {
+ if config == nil {
+ config = &TraceConfig{
+ BorTraceEnabled: defaultBorTraceEnabled,
+ BorTx: newBoolPtr(false),
+ }
+ }
+
+ if config.BorTraceEnabled == nil {
+ config.BorTraceEnabled = defaultBorTraceEnabled
+ }
// Tracing a chain is a **long** operation, only do with subscriptions
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
@@ -274,19 +313,39 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
signer := types.MakeSigner(api.backend.ChainConfig(), task.block.Number())
blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(localctx), nil)
// Trace all the transactions contained within
- for i, tx := range task.block.Transactions() {
+ txs, stateSyncPresent := api.getAllBlockTransactions(ctx, task.block)
+ if !*config.BorTraceEnabled && stateSyncPresent {
+ txs = txs[:len(txs)-1]
+ stateSyncPresent = false
+ }
+
+ for i, tx := range txs {
msg, _ := tx.AsMessage(signer, task.block.BaseFee())
txctx := &Context{
BlockHash: task.block.Hash(),
TxIndex: i,
TxHash: tx.Hash(),
}
- res, err := api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, config)
+
+ var res interface{}
+
+ var err error
+
+ if stateSyncPresent && i == len(txs)-1 {
+ if *config.BorTraceEnabled {
+ config.BorTx = newBoolPtr(true)
+ res, err = api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, config)
+ }
+ } else {
+ res, err = api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, config)
+ }
+
if err != nil {
task.results[i] = &txTraceResult{Error: err.Error()}
log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err)
break
}
+
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
task.statedb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number()))
task.results[i] = &txTraceResult{Result: res}
@@ -430,6 +489,11 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
return sub, nil
}
+func newBoolPtr(bb bool) *bool {
+ b := bb
+ return &b
+}
+
// TraceBlockByNumber returns the structured logs created during the execution of
// EVM and returns them as a JSON object.
func (api *API) TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *TraceConfig) ([]*txTraceResult, error) {
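
The tracing entry points above and below repeat the same nil checks to default the new BorTraceEnabled and BorTx fields. A small sketch of that pointer-default pattern in isolation; traceConfig here is a stand-in struct, not the real TraceConfig.

package tracecfg

type traceConfig struct {
	BorTraceEnabled *bool
	BorTx           *bool
}

func boolPtr(b bool) *bool { return &b }

var defaultBorTraceEnabled = boolPtr(false)

// withDefaults mirrors the nil checks repeated in traceChain, traceBlock,
// IntermediateRoots and TraceTransaction.
func withDefaults(config *traceConfig) *traceConfig {
	if config == nil {
		return &traceConfig{
			BorTraceEnabled: defaultBorTraceEnabled,
			BorTx:           boolPtr(false),
		}
	}
	if config.BorTraceEnabled == nil {
		config.BorTraceEnabled = defaultBorTraceEnabled
	}
	return config
}
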
@@ -492,9 +556,35 @@ func (api *API) StandardTraceBlockToFile(ctx context.Context, hash common.Hash,
return api.standardTraceBlockToFile(ctx, block, config)
}
+func prepareCallMessage(msg core.Message) statefull.Callmsg {
+ return statefull.Callmsg{
+ CallMsg: ethereum.CallMsg{
+ From: msg.From(),
+ To: msg.To(),
+ Gas: msg.Gas(),
+ GasPrice: msg.GasPrice(),
+ GasFeeCap: msg.GasFeeCap(),
+ GasTipCap: msg.GasTipCap(),
+ Value: msg.Value(),
+ Data: msg.Data(),
+ AccessList: msg.AccessList(),
+ }}
+}
+
// IntermediateRoots executes a block (bad- or canon- or side-), and returns a list
// of intermediate roots: the stateroot after each transaction.
func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config *TraceConfig) ([]common.Hash, error) {
+ if config == nil {
+ config = &TraceConfig{
+ BorTraceEnabled: defaultBorTraceEnabled,
+ BorTx: newBoolPtr(false),
+ }
+ }
+
+ if config.BorTraceEnabled == nil {
+ config.BorTraceEnabled = defaultBorTraceEnabled
+ }
+
block, _ := api.blockByHash(ctx, hash)
if block == nil {
// Check in the bad blocks
@@ -525,23 +615,47 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config
vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
deleteEmptyObjects = chainConfig.IsEIP158(block.Number())
)
- for i, tx := range block.Transactions() {
+
+ txs, stateSyncPresent := api.getAllBlockTransactions(ctx, block)
+ for i, tx := range txs {
var (
msg, _ = tx.AsMessage(signer, block.BaseFee())
txContext = core.NewEVMTxContext(msg)
vmenv = vm.NewEVM(vmctx, txContext, statedb, chainConfig, vm.Config{})
)
statedb.Prepare(tx.Hash(), i)
- if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
- log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
- // We intentionally don't return the error here: if we do, then the RPC server will not
- // return the roots. Most likely, the caller already knows that a certain transaction fails to
- // be included, but still want the intermediate roots that led to that point.
- // It may happen the tx_N causes an erroneous state, which in turn causes tx_N+M to not be
- // executable.
- // N.B: This should never happen while tracing canon blocks, only when tracing bad blocks.
- return roots, nil
+ //nolint: nestif
+ if stateSyncPresent && i == len(txs)-1 {
+ if *config.BorTraceEnabled {
+ callmsg := prepareCallMessage(msg)
+
+ if _, err := statefull.ApplyMessage(callmsg, statedb, block.Header(), api.backend.ChainConfig(), api.chainContext(ctx)); err != nil {
+ log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
+ // We intentionally don't return the error here: if we do, then the RPC server will not
+ // return the roots. Most likely, the caller already knows that a certain transaction fails to
+ // be included, but still want the intermediate roots that led to that point.
+ // It may happen the tx_N causes an erroneous state, which in turn causes tx_N+M to not be
+ // executable.
+ // N.B: This should never happen while tracing canon blocks, only when tracing bad blocks.
+ return roots, nil
+ }
+ } else {
+ break
+ }
+ } else {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
+ log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
+ // We intentionally don't return the error here: if we do, then the RPC server will not
+ // return the roots. Most likely, the caller already knows that a certain transaction fails to
+ // be included, but still want the intermediate roots that led to that point.
+ // It may happen the tx_N causes an erroneous state, which in turn causes tx_N+M to not be
+ // executable.
+ // N.B: This should never happen while tracing canon blocks, only when tracing bad blocks.
+ return roots, nil
+ }
+
}
+
// calling IntermediateRoot will internally call Finalize on the state
// so any modifications are written to the trie
roots = append(roots, statedb.IntermediateRoot(deleteEmptyObjects))
@@ -564,6 +678,18 @@ func (api *API) StandardTraceBadBlockToFile(ctx context.Context, hash common.Has
// executes all the transactions contained within. The return value will be one item
// per transaction, dependent on the requestd tracer.
func (api *API) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) {
+
+ if config == nil {
+ config = &TraceConfig{
+ BorTraceEnabled: defaultBorTraceEnabled,
+ BorTx: newBoolPtr(false),
+ }
+ }
+
+ if config.BorTraceEnabled == nil {
+ config.BorTraceEnabled = defaultBorTraceEnabled
+ }
+
if block.NumberU64() == 0 {
return nil, errors.New("genesis is not traceable")
}
@@ -581,9 +707,9 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
}
// Execute all the transaction contained within the block concurrently
var (
- signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
- txs = block.Transactions()
- results = make([]*txTraceResult, len(txs))
+ signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
+ txs, stateSyncPresent = api.getAllBlockTransactions(ctx, block)
+ results = make([]*txTraceResult, len(txs))
pend = new(sync.WaitGroup)
jobs = make(chan *txTraceTask, len(txs))
@@ -606,7 +732,21 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
TxIndex: task.index,
TxHash: txs[task.index].Hash(),
}
- res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)
+
+ var res interface{}
+
+ var err error
+
+ if stateSyncPresent && task.index == len(txs)-1 {
+ if *config.BorTraceEnabled {
+ config.BorTx = newBoolPtr(true)
+ res, err = api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)
+ } else {
+ break
+ }
+ } else {
+ res, err = api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)
+ }
if err != nil {
results[task.index] = &txTraceResult{Error: err.Error()}
continue
@@ -625,11 +765,26 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
// Generate the next state snapshot fast without tracing
msg, _ := tx.AsMessage(signer, block.BaseFee())
statedb.Prepare(tx.Hash(), i)
+
vmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, api.backend.ChainConfig(), vm.Config{})
- if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
- failed = err
- break
+ //nolint: nestif
+ if stateSyncPresent && i == len(txs)-1 {
+ if *config.BorTraceEnabled {
+ callmsg := prepareCallMessage(msg)
+ if _, err := statefull.ApplyBorMessage(*vmenv, callmsg); err != nil {
+ failed = err
+ break
+ }
+ } else {
+ break
+ }
+ } else {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
+ failed = err
+ break
+ }
}
+
// Finalize the state so any modifications are written to the trie
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
@@ -641,16 +796,30 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
if failed != nil {
return nil, failed
}
- return results, nil
+
+ if !*config.BorTraceEnabled && stateSyncPresent {
+ return results[:len(results)-1], nil
+ } else {
+ return results, nil
+ }
}
// standardTraceBlockToFile configures a new tracer which uses standard JSON output,
// and traces either a full block or an individual transaction. The return value will
// be one filename per transaction traced.
func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) {
+ if config == nil {
+ config = &StdTraceConfig{
+ BorTraceEnabled: defaultBorTraceEnabled,
+ }
+ }
+
+ if config.BorTraceEnabled == nil {
+ config.BorTraceEnabled = defaultBorTraceEnabled
+ }
// If we're tracing a single transaction, make sure it's present
if config != nil && config.TxHash != (common.Hash{}) {
- if !containsTx(block, config.TxHash) {
+ if !api.containsTx(ctx, block, config.TxHash) {
return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash)
}
}
@@ -705,7 +874,14 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
canon = false
}
}
- for i, tx := range block.Transactions() {
+
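+ // Trim the trailing state-sync transaction up front when bor tracing is disabled,
+ // so the loop below only replays regular transactions.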
+ txs, stateSyncPresent := api.getAllBlockTransactions(ctx, block)
+ if !*config.BorTraceEnabled && stateSyncPresent {
+ txs = txs[:len(txs)-1]
+ stateSyncPresent = false
+ }
+
+ for i, tx := range txs {
// Prepare the transaction for un-traced execution
var (
msg, _ = tx.AsMessage(signer, block.BaseFee())
@@ -739,10 +915,23 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
// Execute the transaction and flush any traces to disk
vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf)
statedb.Prepare(tx.Hash(), i)
- _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
- if writer != nil {
- writer.Flush()
+ //nolint: nestif
+ if stateSyncPresent && i == len(txs)-1 {
+ if *config.BorTraceEnabled {
+ callmsg := prepareCallMessage(msg)
+ _, err = statefull.ApplyBorMessage(*vmenv, callmsg)
+
+ if writer != nil {
+ writer.Flush()
+ }
+ }
+ } else {
+ _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
+ if writer != nil {
+ writer.Flush()
+ }
}
+
if dump != nil {
dump.Close()
log.Info("Wrote standard trace", "file", dump.Name())
@@ -764,8 +953,9 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
// containsTx reports whether the transaction with a certain hash
// is contained within the specified block.
-func containsTx(block *types.Block, hash common.Hash) bool {
- for _, tx := range block.Transactions() {
+func (api *API) containsTx(ctx context.Context, block *types.Block, hash common.Hash) bool {
+ txs, _ := api.getAllBlockTransactions(ctx, block)
+ for _, tx := range txs {
if tx.Hash() == hash {
return true
}
@@ -776,6 +966,17 @@ func containsTx(block *types.Block, hash common.Hash) bool {
// TraceTransaction returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) {
+ if config == nil {
+ config = &TraceConfig{
+ BorTraceEnabled: defaultBorTraceEnabled,
+ BorTx: newBoolPtr(false),
+ }
+ }
+
+ if config.BorTraceEnabled == nil {
+ config.BorTraceEnabled = defaultBorTraceEnabled
+ }
+
tx, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash)
if tx == nil {
// For BorTransaction, there will be no trace available
@@ -810,6 +1011,7 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *
TxIndex: int(index),
TxHash: hash,
}
+
return api.traceTx(ctx, msg, txctx, vmctx, statedb, config)
}
@@ -864,6 +1066,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc
Reexec: config.Reexec,
}
}
+
return api.traceTx(ctx, msg, new(Context), vmctx, statedb, traceConfig)
}
@@ -871,6 +1074,18 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc
// executes the given message in the provided environment. The return value will
// be tracer dependent.
func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) {
+
+ if config == nil {
+ config = &TraceConfig{
+ BorTraceEnabled: defaultBorTraceEnabled,
+ BorTx: newBoolPtr(false),
+ }
+ }
+
+ if config.BorTraceEnabled == nil {
+ config.BorTraceEnabled = defaultBorTraceEnabled
+ }
+
// Assemble the structured logger or the JavaScript tracer
var (
tracer vm.EVMLogger
@@ -914,9 +1129,22 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex
// Call Prepare to clear out the statedb access list
statedb.Prepare(txctx.TxHash, txctx.TxIndex)
- result, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
- if err != nil {
- return nil, fmt.Errorf("tracing failed: %w", err)
+ var result *core.ExecutionResult
+
+ if config.BorTx == nil {
+ config.BorTx = newBoolPtr(false)
+ }
+
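+ // Bor state-sync transactions are not regular EVM transactions, so they are
+ // applied through statefull.ApplyBorMessage instead of core.ApplyMessage.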
+ if *config.BorTx {
+ callmsg := prepareCallMessage(message)
+ if result, err = statefull.ApplyBorMessage(*vmenv, callmsg); err != nil {
+ return nil, fmt.Errorf("tracing failed: %w", err)
+ }
+ } else {
+ result, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
+ if err != nil {
+ return nil, fmt.Errorf("tracing failed: %w", err)
+ }
}
// Depending on the tracer type, format and return the output.
diff --git a/eth/tracers/api_bor.go b/eth/tracers/api_bor.go
new file mode 100644
index 0000000000..8993b9ae38
--- /dev/null
+++ b/eth/tracers/api_bor.go
@@ -0,0 +1,163 @@
+package tracers
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/bor/statefull"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/eth/tracers/logger"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+type BlockTraceResult struct {
+ // Trace of each transaction executed
+ Transactions []*TxTraceResult `json:"transactions,omitempty"`
+
+ // Block that the trace is executed on
+ Block interface{} `json:"block"`
+}
+
+type TxTraceResult struct {
+ // Trace results produced by the tracer
+ Result interface{} `json:"result,omitempty"`
+
+ // Trace failure produced by the tracer
+ Error string `json:"error,omitempty"`
+
+ // IntermediateHash of the state after execution, if it succeeds
+ IntermediateHash common.Hash `json:"intermediatehash"`
+}
+
+func (api *API) traceBorBlock(ctx context.Context, block *types.Block, config *TraceConfig) (*BlockTraceResult, error) {
+ // Guard against a nil config so the struct logger setup below does not dereference a nil pointer.
+ if config == nil {
+ config = &TraceConfig{}
+ }
+
+ if block.NumberU64() == 0 {
+ return nil, fmt.Errorf("genesis is not traceable")
+ }
+
+ res := &BlockTraceResult{
+ Block: block,
+ }
+
+ // The block object cannot be marshalled to JSON directly since many of its fields are non-public
+ blockFields, err := ethapi.RPCMarshalBlock(block, true, true, api.backend.ChainConfig())
+ if err != nil {
+ return nil, err
+ }
+
+ res.Block = blockFields
+
+ parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
+ if err != nil {
+ return nil, err
+ }
+
+ reexec := defaultTraceReexec
+ if config != nil && config.Reexec != nil {
+ reexec = *config.Reexec
+ }
+
+ // TODO: discuss consequences of setting preferDisk false.
+ statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
+ if err != nil {
+ return nil, err
+ }
+
+ // Execute all the transaction contained within the block concurrently
+ var (
+ signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
+ txs, stateSyncPresent = api.getAllBlockTransactions(ctx, block)
+ deleteEmptyObjects = api.backend.ChainConfig().IsEIP158(block.Number())
+ )
+
+ blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
+
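+ // traceTxn replays a single transaction with the struct logger attached; borTx
+ // selects the bor state-sync execution path instead of the regular EVM one.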
+ traceTxn := func(indx int, tx *types.Transaction, borTx bool) *TxTraceResult {
+ message, _ := tx.AsMessage(signer, block.BaseFee())
+ txContext := core.NewEVMTxContext(message)
+
+ tracer := logger.NewStructLogger(config.Config)
+
+ // Run the transaction with tracing enabled.
+ vmenv := vm.NewEVM(blockCtx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer, NoBaseFee: true})
+
+ // Call Prepare to clear out the statedb access list
+ // Not sure if we need to do this
+ statedb.Prepare(tx.Hash(), indx)
+
+ var execRes *core.ExecutionResult
+
+ if borTx {
+ callmsg := prepareCallMessage(message)
+ execRes, err = statefull.ApplyBorMessage(*vmenv, callmsg)
+ } else {
+ execRes, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
+ }
+
+ if err != nil {
+ return &TxTraceResult{
+ Error: err.Error(),
+ }
+ }
+
+ returnVal := fmt.Sprintf("%x", execRes.Return())
+ if len(execRes.Revert()) > 0 {
+ returnVal = fmt.Sprintf("%x", execRes.Revert())
+ }
+
+ result := &ethapi.ExecutionResult{
+ Gas: execRes.UsedGas,
+ Failed: execRes.Failed(),
+ ReturnValue: returnVal,
+ StructLogs: ethapi.FormatLogs(tracer.StructLogs()),
+ }
+ res := &TxTraceResult{
+ Result: result,
+ IntermediateHash: statedb.IntermediateRoot(deleteEmptyObjects),
+ }
+
+ return res
+ }
+
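+ // Only the last transaction can be the state-sync one, so flag just that index as a bor tx.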
+ for indx, tx := range txs {
+ if stateSyncPresent && indx == len(txs)-1 {
+ res.Transactions = append(res.Transactions, traceTxn(indx, tx, true))
+ } else {
+ res.Transactions = append(res.Transactions, traceTxn(indx, tx, false))
+ }
+ }
+
+ return res, nil
+}
+
+type TraceBlockRequest struct {
+ Number int64
+ Hash string
+ IsBadBlock bool
+ Config *TraceConfig
+}
+
+// If context is used as the first parameter, this function gets exposed automatically on the RPC endpoint.
+func (api *API) TraceBorBlock(req *TraceBlockRequest) (*BlockTraceResult, error) {
+ ctx := context.Background()
+
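+ // A block number of -1 selects the latest block.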
+ var blockNumber rpc.BlockNumber
+ if req.Number == -1 {
+ blockNumber = rpc.LatestBlockNumber
+ } else {
+ blockNumber = rpc.BlockNumber(req.Number)
+ }
+
+ log.Debug("Tracing Bor Block", "block number", blockNumber)
+
+ block, err := api.blockByNumber(ctx, blockNumber)
+ if err != nil {
+ return nil, err
+ }
+
+ return api.traceBorBlock(ctx, block, req.Config)
+}
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index a3c0a72494..aa9f913396 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -176,6 +176,11 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block
return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
}
+func (b *testBackend) GetBorBlockTransactionWithBlockHash(ctx context.Context, txHash common.Hash, blockHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
+ tx, blockHash, blockNumber, index := rawdb.ReadBorTransactionWithBlockHash(b.ChainDb(), txHash, blockHash)
+ return tx, blockHash, blockNumber, index, nil
+}
+
func TestTraceCall(t *testing.T) {
t.Parallel()
diff --git a/go.mod b/go.mod
index edd6d6887a..e31612cfe3 100644
--- a/go.mod
+++ b/go.mod
@@ -1,10 +1,9 @@
module github.com/ethereum/go-ethereum
-go 1.16
+go 1.18
require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0
- github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0
github.com/aws/aws-sdk-go-v2 v1.2.0
github.com/aws/aws-sdk-go-v2/config v1.1.1
@@ -16,18 +15,15 @@ require (
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set v1.8.0
- github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48
github.com/edsrzf/mmap-go v1.0.0
github.com/fatih/color v1.7.0
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
- github.com/go-kit/kit v0.9.0 // indirect
- github.com/go-logfmt/logfmt v0.5.0 // indirect
- github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-stack/stack v1.8.0
github.com/golang-jwt/jwt/v4 v4.3.0
+ github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
github.com/golang/snappy v0.0.4
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
@@ -43,19 +39,15 @@ require (
github.com/imdario/mergo v0.3.11
github.com/influxdata/influxdb v1.8.3
github.com/influxdata/influxdb-client-go/v2 v2.4.0
- github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
github.com/julienschmidt/httprouter v1.3.0
github.com/karalabe/usb v0.0.2
- github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.12
github.com/mitchellh/cli v1.1.2
github.com/mitchellh/go-grpc-net-conn v0.0.0-20200427190222-eb030e4876f0
github.com/mitchellh/go-homedir v1.1.0
- github.com/naoina/go-stringutil v0.1.0 // indirect
- github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
github.com/olekukonko/tablewriter v0.0.5
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
github.com/prometheus/tsdb v0.7.1
@@ -66,22 +58,82 @@ require (
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4
github.com/stretchr/testify v1.7.0
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
- github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
github.com/xsleonard/go-merkle v1.1.0
go.opentelemetry.io/otel v1.2.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.2.0
go.opentelemetry.io/otel/sdk v1.2.0
- golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
+ go.uber.org/goleak v1.1.12
+ golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912
+ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6
golang.org/x/text v0.3.7
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
- golang.org/x/tools v0.1.0
+ golang.org/x/tools v0.1.10
google.golang.org/grpc v1.42.0
google.golang.org/protobuf v1.27.1
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
gopkg.in/urfave/cli.v1 v1.20.0
- gotest.tools v2.2.0+incompatible // indirect
+ gotest.tools v2.2.0+incompatible
+ pgregory.net/rapid v0.4.7
+)
+
+require (
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect
+ github.com/BurntSushi/toml v1.1.0 // indirect
+ github.com/Masterminds/goutils v1.1.0 // indirect
+ github.com/Masterminds/semver v1.5.0 // indirect
+ github.com/Masterminds/sprig v2.22.0+incompatible // indirect
+ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
+ github.com/agext/levenshtein v1.2.1 // indirect
+ github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
+ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.1.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 // indirect
+ github.com/aws/smithy-go v1.1.0 // indirect
+ github.com/bgentry/speakeasy v0.1.0 // indirect
+ github.com/cenkalti/backoff/v4 v4.1.1 // indirect
+ github.com/cespare/xxhash/v2 v2.1.1 // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
+ github.com/deepmap/oapi-codegen v1.8.2 // indirect
+ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect
+ github.com/go-kit/kit v0.9.0 // indirect
+ github.com/go-logfmt/logfmt v0.5.0 // indirect
+ github.com/go-ole/go-ole v1.2.1 // indirect
+ github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
+ github.com/google/go-cmp v0.5.8 // indirect
+ github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
+ github.com/hashicorp/errwrap v1.0.0 // indirect
+ github.com/hashicorp/go-multierror v1.0.0 // indirect
+ github.com/huandu/xstrings v1.3.2 // indirect
+ github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
+ github.com/mattn/go-runewidth v0.0.9 // indirect
+ github.com/mitchellh/copystructure v1.0.0 // indirect
+ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
+ github.com/mitchellh/mapstructure v1.4.1 // indirect
+ github.com/mitchellh/pointerstructure v1.2.0 // indirect
+ github.com/mitchellh/reflectwalk v1.0.0 // indirect
+ github.com/opentracing/opentracing-go v1.1.0 // indirect
+ github.com/pelletier/go-toml v1.9.5 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/posener/complete v1.1.1 // indirect
+ github.com/tklauser/go-sysconf v0.3.5 // indirect
+ github.com/tklauser/numcpus v0.2.2 // indirect
+ github.com/zclconf/go-cty v1.8.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0 // indirect
+ go.opentelemetry.io/otel/trace v1.2.0 // indirect
+ go.opentelemetry.io/proto/otlp v0.10.0 // indirect
+ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
+ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
+ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
+ golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
+ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
diff --git a/go.sum b/go.sum
index 19a6002886..593c8e11d0 100644
--- a/go.sum
+++ b/go.sum
@@ -25,6 +25,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
+github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
@@ -49,7 +51,6 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
-github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
@@ -82,14 +83,12 @@ github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx2
github.com/btcsuite/btcd/btcec/v2 v2.1.2 h1:YoYoC9J0jwfukodSBMzZYUVQ8PTiYg4BnOWiJVzTmLs=
github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0 h1:MSskdM4/xJYcFzy0altH/C/xHopifpWzHUi1JeVI34Q=
-github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -195,6 +194,8 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -228,8 +229,9 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -372,10 +374,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
-github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
-github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0=
-github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@@ -393,6 +391,8 @@ github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mo
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
@@ -447,7 +447,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -457,6 +456,10 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -478,6 +481,7 @@ github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6Ut
github.com/xsleonard/go-merkle v1.1.0 h1:fHe1fuhJjGH22ZzVTAH0jqHLhTGhOq3wQjJN+8P0jQg=
github.com/xsleonard/go-merkle v1.1.0/go.mod h1:cW4z+UZ/4f2n9IJgIiyDCdYguchoDyDAPmpuOWGxdGg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA=
github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
@@ -499,6 +503,8 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe
go.opentelemetry.io/proto/otlp v0.10.0 h1:n7brgtEbDvXEgGyKKo8SobKT1e9FewlDtXzkVP5djoE=
go.opentelemetry.io/proto/otlp v0.10.0/go.mod h1:zG20xCK0szZ1xdokeSOwEcmlXu+x9kkdRe6N1DhKcfU=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -511,8 +517,9 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 h1:NvGWuYG8dkDHFSKksI1P9faiVJ9rayE6l0+ouWVIDs8=
+golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -533,6 +540,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -540,8 +548,9 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -567,9 +576,11 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -619,11 +630,13 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -667,13 +680,17 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
@@ -769,5 +786,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g=
+pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/integration-tests/smoke_test.sh b/integration-tests/smoke_test.sh
new file mode 100644
index 0000000000..6f41aa5c16
--- /dev/null
+++ b/integration-tests/smoke_test.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+set -e
+
+delay=600
+
+echo "Wait ${delay} seconds for state-sync..."
+sleep $delay
+
+
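+# Query the first account's balance (rounded, in whole MATIC) through the bor IPC console.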
+balance=$(docker exec bor0 bash -c "bor attach /root/.bor/data/bor.ipc -exec 'Math.round(web3.fromWei(eth.getBalance(eth.accounts[0])))'")
+
+if ! [[ "$balance" =~ ^[0-9]+$ ]]; then
+ echo "Something is wrong! Can't find the balance of first account in bor network."
+ exit 1
+fi
+
+echo "Found matic balance on account[0]: " $balance
+
+if (( $balance <= 1001 )); then
+ echo "Balance in bor network has not increased. This indicates that something is wrong with state sync."
+ exit 1
+fi
+
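+# Fetch the ID of the latest checkpoint from the local heimdall REST API.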
+checkpointID=$(curl -sL http://localhost:1317/checkpoints/latest | jq .result.id)
+
+if [ "$checkpointID" == "null" ]; then
+ echo "Something is wrong! Could not find any checkpoint."
+ exit 1
+else
+ echo "Found checkpoint ID:" $checkpointID
+fi
+
+echo "All tests have passed!"
\ No newline at end of file
diff --git a/internal/cli/account.go b/internal/cli/account.go
index b8661821d0..bb8b30b892 100644
--- a/internal/cli/account.go
+++ b/internal/cli/account.go
@@ -1,11 +1,28 @@
package cli
-import "github.com/mitchellh/cli"
+import (
+ "strings"
+
+ "github.com/mitchellh/cli"
+)
type Account struct {
UI cli.Ui
}
+// MarkDown implements cli.MarkDown interface
+func (a *Account) MarkDown() string {
+ items := []string{
+ "# Account",
+ "The ```account``` command groups actions to interact with accounts:",
+ "- [```account new```](./account_new.md): Create a new account in the Bor client.",
+ "- [```account list```](./account_list.md): List the wallets in the Bor client.",
+ "- [```account import```](./account_import.md): Import an account to the Bor client.",
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (a *Account) Help() string {
return `Usage: bor account
diff --git a/internal/cli/account_import.go b/internal/cli/account_import.go
index 8c9ff40e58..a3f65ab512 100644
--- a/internal/cli/account_import.go
+++ b/internal/cli/account_import.go
@@ -2,6 +2,7 @@ package cli
import (
"fmt"
+ "strings"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto"
@@ -12,6 +13,17 @@ type AccountImportCommand struct {
*Meta
}
+// MarkDown implements cli.MarkDown interface
+func (a *AccountImportCommand) MarkDown() string {
+ items := []string{
+ "# Account import",
+ "The ```account import``` command imports an account in Json format to the Bor data directory.",
+ a.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (a *AccountImportCommand) Help() string {
return `Usage: bor account import
@@ -47,7 +59,9 @@ func (a *AccountImportCommand) Run(args []string) int {
a.UI.Error("Expected one argument")
return 1
}
+
key, err := crypto.LoadECDSA(args[0])
+
if err != nil {
a.UI.Error(fmt.Sprintf("Failed to load the private key '%s': %v", args[0], err))
return 1
@@ -69,6 +83,8 @@ func (a *AccountImportCommand) Run(args []string) int {
if err != nil {
utils.Fatalf("Could not create the account: %v", err)
}
+
a.UI.Output(fmt.Sprintf("Account created: %s", acct.Address.String()))
+
return 0
}
diff --git a/internal/cli/account_list.go b/internal/cli/account_list.go
index 360d41b558..854934c447 100644
--- a/internal/cli/account_list.go
+++ b/internal/cli/account_list.go
@@ -2,6 +2,7 @@ package cli
import (
"fmt"
+ "strings"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/internal/cli/flagset"
@@ -11,6 +12,17 @@ type AccountListCommand struct {
*Meta
}
+// MarkDown implements cli.MarkDown interface
+func (a *AccountListCommand) MarkDown() string {
+ items := []string{
+ "# Account list",
+ "The `account list` command lists all the accounts in the Bor data directory.",
+ a.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (a *AccountListCommand) Help() string {
return `Usage: bor account list
@@ -42,7 +54,9 @@ func (a *AccountListCommand) Run(args []string) int {
a.UI.Error(fmt.Sprintf("Failed to get keystore: %v", err))
return 1
}
+
a.UI.Output(formatAccounts(keystore.Accounts()))
+
return 0
}
@@ -53,10 +67,12 @@ func formatAccounts(accts []accounts.Account) string {
rows := make([]string, len(accts)+1)
rows[0] = "Index|Address"
+
for i, d := range accts {
rows[i+1] = fmt.Sprintf("%d|%s",
i,
d.Address.String())
}
+
return formatList(rows)
}
diff --git a/internal/cli/account_new.go b/internal/cli/account_new.go
index 3334384cb0..aef272a389 100644
--- a/internal/cli/account_new.go
+++ b/internal/cli/account_new.go
@@ -2,6 +2,7 @@ package cli
import (
"fmt"
+ "strings"
"github.com/ethereum/go-ethereum/internal/cli/flagset"
)
@@ -10,6 +11,17 @@ type AccountNewCommand struct {
*Meta
}
+// MarkDown implements cli.MarkDown interface
+func (a *AccountNewCommand) MarkDown() string {
+ items := []string{
+ "# Account new",
+ "The `account new` command creates a new local account file on the Bor data directory. Bor should not be running to execute this command.",
+ a.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (a *AccountNewCommand) Help() string {
return `Usage: bor account new
diff --git a/internal/cli/attach.go b/internal/cli/attach.go
new file mode 100644
index 0000000000..134a282180
--- /dev/null
+++ b/internal/cli/attach.go
@@ -0,0 +1,193 @@
+package cli
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/console"
+ "github.com/ethereum/go-ethereum/internal/cli/flagset"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+
+ "github.com/mitchellh/cli"
+)
+
+// AttachCommand is the command to connect to a remote Bor IPC console
+type AttachCommand struct {
+ UI cli.Ui
+ Meta *Meta
+ Meta2 *Meta2
+ ExecCMD string
+ Endpoint string
+ PreloadJSFlag string
+ JSpathFlag string
+}
+
+// MarkDown implements cli.MarkDown interface
+func (c *AttachCommand) MarkDown() string {
+ items := []string{
+ "# Attach",
+ "Connect to remote Bor IPC console.",
+ c.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
+// Help implements the cli.Command interface
+func (c *AttachCommand) Help() string {
+ return `Usage: bor attach
+
+ Connect to remote Bor IPC console.`
+}
+
+// Synopsis implements the cli.Command interface
+func (c *AttachCommand) Synopsis() string {
+ return "Connect to Bor via IPC"
+}
+
+func (c *AttachCommand) Flags() *flagset.Flagset {
+ f := flagset.NewFlagSet("attach")
+
+ f.StringFlag(&flagset.StringFlag{
+ Name: "exec",
+ Usage: "Command to run in remote console",
+ Value: &c.ExecCMD,
+ })
+
+ f.StringFlag(&flagset.StringFlag{
+ Name: "preload",
+ Usage: "Comma separated list of JavaScript files to preload into the console",
+ Value: &c.PreloadJSFlag,
+ })
+
+ f.StringFlag(&flagset.StringFlag{
+ Name: "jspath",
+ Usage: "JavaScript root path for `loadScript`",
+ Value: &c.JSpathFlag,
+ })
+
+ return f
+}
+
+// Run implements the cli.Command interface
+func (c *AttachCommand) Run(args []string) int {
+ flags := c.Flags()
+
+ // check whether the first arg is a flag or the IPC endpoint location
+ if len(args) == 0 {
+ args = append(args, "")
+ }
+
+ if args[0] != "" && strings.HasPrefix(args[0], "--") {
+ if err := flags.Parse(args); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+ } else {
+ c.Endpoint = args[0]
+ if err := flags.Parse(args[1:]); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+ }
+
+ if err := c.remoteConsole(); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ return 0
+}
+
+// remoteConsole will connect to a remote bor instance, attaching a JavaScript
+// console to it.
+// nolint: unparam
+func (c *AttachCommand) remoteConsole() error {
+ // Attach to a remotely running bor instance and start the JavaScript console
+ path := node.DefaultDataDir()
+
+ if c.Endpoint == "" {
+ if c.Meta.dataDir != "" {
+ // honour the configured data directory, if any
+ path = c.Meta.dataDir
+ } else {
+ // otherwise fall back to the default bor data directory in the user's home
+ homeDir, _ := os.UserHomeDir()
+ path = filepath.Join(homeDir, ".bor", "data")
+ }
+
+ c.Endpoint = fmt.Sprintf("%s/bor.ipc", path)
+ }
+
+ client, err := dialRPC(c.Endpoint)
+
+ if err != nil {
+ utils.Fatalf("Unable to attach to remote bor: %v", err)
+ }
+
+ config := console.Config{
+ DataDir: path,
+ DocRoot: c.JSpathFlag,
+ Client: client,
+ Preload: c.makeConsolePreloads(),
+ }
+
+ console, err := console.New(config)
+ if err != nil {
+ utils.Fatalf("Failed to start the JavaScript console: %v", err)
+ }
+
+ defer func() {
+ if err := console.Stop(false); err != nil {
+ c.UI.Error(err.Error())
+ }
+ }()
+
+ if c.ExecCMD != "" {
+ console.Evaluate(c.ExecCMD)
+ return nil
+ }
+
+ // Otherwise print the welcome screen and enter interactive mode
+ console.Welcome()
+ console.Interactive()
+
+ return nil
+}
+
+// dialRPC returns a RPC client which connects to the given endpoint.
+// The check for empty endpoint implements the defaulting logic
+// for "geth attach" with no argument.
+func dialRPC(endpoint string) (*rpc.Client, error) {
+ if endpoint == "" {
+ endpoint = node.DefaultIPCEndpoint("bor")
+ } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
+ // Backwards compatibility with geth < 1.5 which required
+ // these prefixes.
+ endpoint = endpoint[4:]
+ }
+
+ return rpc.Dial(endpoint)
+}
+
+// makeConsolePreloads splits the comma-separated preload flag into the list of
+// console JavaScript scripts to preload before starting.
+func (c *AttachCommand) makeConsolePreloads() []string {
+ // Skip preloading if there's nothing to preload
+ if c.PreloadJSFlag == "" {
+ return nil
+ }
+ // Otherwise split the flag, trim whitespace and return the script paths
+ splitFlags := strings.Split(c.PreloadJSFlag, ",")
+ preloads := make([]string, 0, len(splitFlags))
+
+ for _, file := range splitFlags {
+ preloads = append(preloads, strings.TrimSpace(file))
+ }
+
+ return preloads
+}
diff --git a/internal/cli/bootnode.go b/internal/cli/bootnode.go
new file mode 100644
index 0000000000..9e1a0fcde9
--- /dev/null
+++ b/internal/cli/bootnode.go
@@ -0,0 +1,230 @@
+package cli
+
+import (
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/internal/cli/flagset"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/discover"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/nat"
+
+ "github.com/mitchellh/cli"
+)
+
+type BootnodeCommand struct {
+ UI cli.Ui
+
+ listenAddr string
+ v5 bool
+ logLevel string
+ nat string
+ nodeKey string
+ saveKey string
+ dryRun bool
+}
+
+// Help implements the cli.Command interface
+func (b *BootnodeCommand) Help() string {
+ return `Usage: bor bootnode`
+}
+
+// MarkDown implements cli.MarkDown interface
+func (b *BootnodeCommand) MarkDown() string {
+ items := []string{
+ "# Bootnode",
+ b.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
+func (b *BootnodeCommand) Flags() *flagset.Flagset {
+ flags := flagset.NewFlagSet("bootnode")
+
+ flags.StringFlag(&flagset.StringFlag{
+ Name: "listen-addr",
+ Default: "0.0.0.0:30303",
+ Usage: "listening address of bootnode (:)",
+ Value: &b.listenAddr,
+ })
+ flags.BoolFlag(&flagset.BoolFlag{
+ Name: "v5",
+ Default: false,
+ Usage: "Enable UDP v5",
+ Value: &b.v5,
+ })
+ flags.StringFlag(&flagset.StringFlag{
+ Name: "log-level",
+ Default: "info",
+ Usage: "Log level (trace|debug|info|warn|error|crit)",
+ Value: &b.logLevel,
+ })
+ flags.StringFlag(&flagset.StringFlag{
+ Name: "nat",
+ Default: "none",
+ Usage: "port mapping mechanism (any|none|upnp|pmp|extip:)",
+ Value: &b.nat,
+ })
+ flags.StringFlag(&flagset.StringFlag{
+ Name: "node-key",
+ Default: "",
+ Usage: "file or hex node key",
+ Value: &b.nodeKey,
+ })
+ flags.StringFlag(&flagset.StringFlag{
+ Name: "save-key",
+ Default: "",
+ Usage: "path to save the ecdsa private key",
+ Value: &b.saveKey,
+ })
+ flags.BoolFlag(&flagset.BoolFlag{
+ Name: "dry-run",
+ Default: false,
+ Usage: "validates parameters and prints bootnode configurations, but does not start bootnode",
+ Value: &b.dryRun,
+ })
+
+ return flags
+}
+
+// Synopsis implements the cli.Command interface
+func (b *BootnodeCommand) Synopsis() string {
+ return "Start a bootnode"
+}
+
+// Run implements the cli.Command interface
+// nolint: gocognit
+func (b *BootnodeCommand) Run(args []string) int {
+ flags := b.Flags()
+ if err := flags.Parse(args); err != nil {
+ b.UI.Error(err.Error())
+ return 1
+ }
+
+ glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
+
+ lvl, err := log.LvlFromString(strings.ToLower(b.logLevel))
+ if err == nil {
+ glogger.Verbosity(lvl)
+ } else {
+ glogger.Verbosity(log.LvlInfo)
+ }
+
+ log.Root().SetHandler(glogger)
+
+ natm, err := nat.Parse(b.nat)
+ if err != nil {
+ b.UI.Error(fmt.Sprintf("failed to parse nat: %v", err))
+ return 1
+ }
+
+ // create a one time key
+ var nodeKey *ecdsa.PrivateKey
+ // nolint: nestif
+ if b.nodeKey != "" {
+ // try to read the key either from file or command line
+ if _, err := os.Stat(b.nodeKey); errors.Is(err, os.ErrNotExist) {
+ if nodeKey, err = crypto.HexToECDSA(b.nodeKey); err != nil {
+ b.UI.Error(fmt.Sprintf("failed to parse hex address: %v", err))
+ return 1
+ }
+ } else {
+ if nodeKey, err = crypto.LoadECDSA(b.nodeKey); err != nil {
+ b.UI.Error(fmt.Sprintf("failed to load node key: %v", err))
+ return 1
+ }
+ }
+ } else {
+ // generate a new temporary key
+ if nodeKey, err = crypto.GenerateKey(); err != nil {
+ b.UI.Error(fmt.Sprintf("could not generate key: %v", err))
+ return 1
+ }
+ if b.saveKey != "" {
+ path := b.saveKey
+
+ // save the private key
+ if err = crypto.SaveECDSA(filepath.Join(path, "priv.key"), nodeKey); err != nil {
+ b.UI.Error(fmt.Sprintf("failed to write node priv key: %v", err))
+ return 1
+ }
+ // save the public key
+ pubRaw := fmt.Sprintf("%x", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
+ if err := ioutil.WriteFile(filepath.Join(path, "pub.key"), []byte(pubRaw), 0600); err != nil {
+ b.UI.Error(fmt.Sprintf("failed to write node pub key: %v", err))
+ return 1
+ }
+ }
+ }
+
+ addr, err := net.ResolveUDPAddr("udp", b.listenAddr)
+ if err != nil {
+ b.UI.Error(fmt.Sprintf("could not resolve udp addr '%s': %v", b.listenAddr, err))
+ return 1
+ }
+
+ conn, err := net.ListenUDP("udp", addr)
+
+ if err != nil {
+ b.UI.Error(fmt.Sprintf("failed to listen udp addr '%s': %v", b.listenAddr, err))
+ return 1
+ }
+
+ realaddr := conn.LocalAddr().(*net.UDPAddr)
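+ // If NAT traversal is configured, map the discovery UDP port and, when possible,
+ // advertise the externally visible IP address.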
+ if natm != nil {
+ if !realaddr.IP.IsLoopback() {
+ go nat.Map(natm, nil, "udp", realaddr.Port, realaddr.Port, "ethereum discovery")
+ }
+
+ if ext, err := natm.ExternalIP(); err == nil {
+ // nolint: govet
+ realaddr = &net.UDPAddr{IP: ext, Port: realaddr.Port}
+ }
+ }
+
+ n := enode.NewV4(&nodeKey.PublicKey, addr.IP, addr.Port, addr.Port)
+ b.UI.Info(n.String())
+
+ if b.dryRun {
+ return 0
+ }
+
+ db, _ := enode.OpenDB("")
+ ln := enode.NewLocalNode(db, nodeKey)
+ cfg := discover.Config{
+ PrivateKey: nodeKey,
+ Log: log.Root(),
+ }
+
+ if b.v5 {
+ if _, err := discover.ListenV5(conn, ln, cfg); err != nil {
+ utils.Fatalf("%v", err)
+ }
+ } else {
+ if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
+ utils.Fatalf("%v", err)
+ }
+ }
+
+ signalCh := make(chan os.Signal, 4)
+ signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
+
+ sig := <-signalCh
+
+ b.UI.Output(fmt.Sprintf("Caught signal: %v", sig))
+ b.UI.Output("Gracefully shutting down agent...")
+
+ return 0
+}
diff --git a/internal/cli/bor_fingerprint.go b/internal/cli/bor_fingerprint.go
new file mode 100644
index 0000000000..4e21f02956
--- /dev/null
+++ b/internal/cli/bor_fingerprint.go
@@ -0,0 +1,188 @@
+package cli
+
+import (
+ "fmt"
+ "math"
+ "os/exec"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/params"
+
+ "github.com/mitchellh/cli"
+ "github.com/shirou/gopsutil/cpu"
+ "github.com/shirou/gopsutil/disk"
+ "github.com/shirou/gopsutil/host"
+ "github.com/shirou/gopsutil/mem"
+)
+
+// FingerprintCommand is the command to display the system fingerprint of the host
+type FingerprintCommand struct {
+ UI cli.Ui
+}
+
+// MarkDown implements cli.MarkDown interface
+func (c *FingerprintCommand) MarkDown() string {
+ items := []string{
+ "# Fingerprint",
+ "Display the system fingerprint",
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
+// Help implements the cli.Command interface
+func (c *FingerprintCommand) Help() string {
+ return `Usage: bor fingerprint
+
+ Display the system fingerprint`
+}
+
+// Synopsis implements the cli.Command interface
+func (c *FingerprintCommand) Synopsis() string {
+ return "Display the system fingerprint"
+}
+
+func getCoresCount(cp []cpu.InfoStat) int {
+ cores := 0
+ for i := 0; i < len(cp); i++ {
+ cores += int(cp[i].Cores)
+ }
+
+ return cores
+}
+
+type MemoryDetails struct {
+ TotalMem float64 `json:"totalMem"`
+ FreeMem float64 `json:"freeMem"`
+ UsedMem float64 `json:"usedMem"`
+}
+
+type DiskDetails struct {
+ TotalDisk float64 `json:"totalDisk"`
+ FreeDisk float64 `json:"freeDisk"`
+ UsedDisk float64 `json:"usedDisk"`
+}
+
+type BorFingerprint struct {
+ CoresCount int `json:"coresCount"`
+ OsName string `json:"osName"`
+ OsVer string `json:"osVer"`
+ DiskDetails *DiskDetails `json:"diskDetails"`
+ MemoryDetails *MemoryDetails `json:"memoryDetails"`
+}
+
+func formatFingerprint(borFingerprint *BorFingerprint) string {
+ base := formatKV([]string{
+ fmt.Sprintf("Bor Version : %s", params.VersionWithMeta),
+ fmt.Sprintf("CPU : %d cores", borFingerprint.CoresCount),
+ fmt.Sprintf("OS : %s %s ", borFingerprint.OsName, borFingerprint.OsVer),
+ fmt.Sprintf("RAM :: total : %v GB, free : %v GB, used : %v GB", borFingerprint.MemoryDetails.TotalMem, borFingerprint.MemoryDetails.FreeMem, borFingerprint.MemoryDetails.UsedMem),
+ fmt.Sprintf("STORAGE :: total : %v GB, free : %v GB, used : %v GB", borFingerprint.DiskDetails.TotalDisk, borFingerprint.DiskDetails.FreeDisk, borFingerprint.DiskDetails.UsedDisk),
+ })
+
+ return base
+}
+
+func convertBytesToGB(bytesValue uint64) float64 {
+ return math.Floor(float64(bytesValue)/(1024*1024*1024)*100) / 100
+}
+
+// Checks if fio exists on the node
+func (c *FingerprintCommand) checkFio() error {
+ cmd := exec.Command("/bin/sh", "-c", "fio -v")
+
+ _, err := cmd.CombinedOutput()
+ if err != nil {
+ message := "\nFio package not installed. Install Fio for IOPS Benchmarking :\n\nDebianOS : 'sudo apt-get update && sudo apt-get install fio -y'\nAWS AMI/CentOS : 'sudo yum install fio -y'\nOracle LinuxOS : 'sudo dnf install fio -y'\n"
+ c.UI.Output(message)
+
+ return err
+ }
+
+ return nil
+}
+
+// Run the IOPS benchmark for the node
+func (c *FingerprintCommand) benchmark() error {
+ var b []byte
+
+ err := c.checkFio()
+
+ if err != nil {
+ // Missing Fio is not a fatal error. A message will be logged in console when it is missing in "checkFio()".
+ return nil //nolint:nilerr
+ }
+
+ c.UI.Output("\nRunning a 10 second test...\n")
+
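+ // Run a 10 second random read/write fio job (64k blocks, 4 jobs, iodepth 64) and
+ // keep only the read/write throughput summary lines.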
+ cmd := exec.Command("/bin/sh", "-c", "sudo fio --filename=/file --size=2GB --direct=1 --rw=randrw --bs=64k --ioengine=libaio --iodepth=64 --runtime=10 --numjobs=4 --time_based --group_reporting --name=throughput-test-job --eta-newline=1 | grep -e 'read:' -e 'write:' | awk '{print $1,$2}' ")
+
+ b, err = cmd.CombinedOutput()
+ if err != nil {
+ return err
+ }
+
+ out := string(b)
+ c.UI.Output(out)
+
+ return nil
+}
+
+// Run implements the cli.Command interface
+func (c *FingerprintCommand) Run(args []string) int {
+ v, err := mem.VirtualMemory()
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ h, err := host.Info()
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ cp, err := cpu.Info()
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ d, err := disk.Usage("/")
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ diskDetails := &DiskDetails{
+ TotalDisk: convertBytesToGB(d.Total),
+ FreeDisk: convertBytesToGB(d.Free),
+ UsedDisk: convertBytesToGB(d.Used),
+ }
+
+ memoryDetails := &MemoryDetails{
+ TotalMem: convertBytesToGB(v.Total),
+ FreeMem: convertBytesToGB(v.Available),
+ UsedMem: convertBytesToGB(v.Used),
+ }
+
+ borFingerprint := &BorFingerprint{
+ CoresCount: getCoresCount(cp),
+ OsName: h.OS,
+ OsVer: h.Platform + " - " + h.PlatformVersion + " - " + h.KernelArch,
+ DiskDetails: diskDetails,
+ MemoryDetails: memoryDetails,
+ }
+
+ c.UI.Output(formatFingerprint(borFingerprint))
+
+ if borFingerprint.OsName == "linux" {
+ err = c.benchmark()
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+ }
+
+ return 0
+}
diff --git a/internal/cli/chain.go b/internal/cli/chain.go
index b43f22f999..9a7e9e8537 100644
--- a/internal/cli/chain.go
+++ b/internal/cli/chain.go
@@ -1,6 +1,8 @@
package cli
import (
+ "strings"
+
"github.com/mitchellh/cli"
)
@@ -9,6 +11,18 @@ type ChainCommand struct {
UI cli.Ui
}
+// MarkDown implements cli.MarkDown interface
+func (c *ChainCommand) MarkDown() string {
+ items := []string{
+ "# Chain",
+ "The ```chain``` command groups actions to interact with the blockchain in the client:",
+ "- [```chain sethead```](./chain_sethead.md): Set the current chain to a certain block.",
+ "- [```chain watch```](./chain_watch.md): Watch the chainHead, reorg and fork events in real-time.",
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (c *ChainCommand) Help() string {
return `Usage: bor chain
diff --git a/internal/cli/chain_sethead.go b/internal/cli/chain_sethead.go
index 127ac38f15..718ada4648 100644
--- a/internal/cli/chain_sethead.go
+++ b/internal/cli/chain_sethead.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"strconv"
+ "strings"
"github.com/ethereum/go-ethereum/internal/cli/flagset"
"github.com/ethereum/go-ethereum/internal/cli/server/proto"
@@ -16,6 +17,19 @@ type ChainSetHeadCommand struct {
yes bool
}
+// MarkDown implements cli.MarkDown interface
+func (a *ChainSetHeadCommand) MarkDown() string {
+ items := []string{
+ "# Chain sethead",
+ "The ```chain sethead ``` command sets the current chain to a certain block.",
+ "## Arguments",
+ "- ```number```: The block number to roll back.",
+ a.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (c *ChainSetHeadCommand) Help() string {
return `Usage: bor chain sethead [--yes]
@@ -32,6 +46,7 @@ func (c *ChainSetHeadCommand) Flags() *flagset.Flagset {
Default: false,
Value: &c.yes,
})
+
return flags
}
@@ -75,6 +90,7 @@ func (c *ChainSetHeadCommand) Run(args []string) int {
c.UI.Error(err.Error())
return 1
}
+
if response != "y" {
c.UI.Output("set head aborted")
return 0
@@ -87,5 +103,6 @@ func (c *ChainSetHeadCommand) Run(args []string) int {
}
c.UI.Output("Done!")
+
return 0
}
diff --git a/internal/cli/chain_watch.go b/internal/cli/chain_watch.go
index 9469c1df00..17a65a8d99 100644
--- a/internal/cli/chain_watch.go
+++ b/internal/cli/chain_watch.go
@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"os/signal"
+ "strings"
"syscall"
"github.com/ethereum/go-ethereum/core"
@@ -17,6 +18,16 @@ type ChainWatchCommand struct {
*Meta2
}
+// MarkDown implements cli.MarkDown interface
+func (c *ChainWatchCommand) MarkDown() string {
+ items := []string{
+ "# Chain watch",
+ "The ```chain watch``` command is used to view the chainHead, reorg and fork events in real-time.",
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (c *ChainWatchCommand) Help() string {
return `Usage: bor chain watch
@@ -60,7 +71,10 @@ func (c *ChainWatchCommand) Run(args []string) int {
go func() {
<-signalCh
- sub.CloseSend()
+
+ if err := sub.CloseSend(); err != nil {
+ c.UI.Error(err.Error())
+ }
}()
for {
@@ -70,6 +84,7 @@ func (c *ChainWatchCommand) Run(args []string) int {
c.UI.Output(err.Error())
break
}
+
c.UI.Output(formatHeadEvent(msg))
}
@@ -85,5 +100,6 @@ func formatHeadEvent(msg *proto.ChainWatchResponse) string {
} else if msg.Type == core.Chain2HeadReorgEvent {
out = fmt.Sprintf("Reorg Detected \nAdded : %v \nRemoved : %v", msg.Newchain, msg.Oldchain)
}
+
return out
}
diff --git a/internal/cli/command.go b/internal/cli/command.go
index d164791f80..93dca4cb3e 100644
--- a/internal/cli/command.go
+++ b/internal/cli/command.go
@@ -9,18 +9,43 @@ import (
"github.com/ethereum/go-ethereum/internal/cli/server"
"github.com/ethereum/go-ethereum/internal/cli/server/proto"
"github.com/ethereum/go-ethereum/node"
+
"github.com/mitchellh/cli"
"github.com/ryanuber/columnize"
"google.golang.org/grpc"
)
+const (
+ emptyPlaceHolder = ""
+)
+
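+// MarkDownCommand is a cli.Command that can also render its own Markdown documentation.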
+type MarkDownCommand interface {
+ MarkDown
+ cli.Command
+}
+
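+// MarkDownCommandFactory is a factory that returns a MarkDownCommand; it mirrors cli.CommandFactory.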
+type MarkDownCommandFactory func() (MarkDownCommand, error)
+
func Run(args []string) int {
- commands := commands()
+ commands := Commands()
+
+ mappedCommands := make(map[string]cli.CommandFactory)
+
+ for k, v := range commands {
+ // Declare a new v to limit the scope of v to inside the block, so the anonymous function below
+ // can get the "current" value of v, instead of the value of last v in the loop.
+ // See this post: https://stackoverflow.com/questions/10116507/go-transfer-var-into-anonymous-function for more explanation
+ v := v
+ mappedCommands[k] = func() (cli.Command, error) {
+ cmd, err := v()
+ return cmd.(cli.Command), err
+ }
+ }
cli := &cli.CLI{
Name: "bor",
Args: args,
- Commands: commands,
+ Commands: mappedCommands,
}
exitCode, err := cli.Run()
@@ -28,10 +53,11 @@ func Run(args []string) int {
fmt.Fprintf(os.Stderr, "Error executing CLI: %s\n", err.Error())
return 1
}
+
return exitCode
}
-func commands() map[string]cli.CommandFactory {
+func Commands() map[string]MarkDownCommandFactory {
ui := &cli.BasicUi{
Reader: os.Stdin,
Writer: os.Stdout,
@@ -44,87 +70,125 @@ func commands() map[string]cli.CommandFactory {
meta := &Meta{
UI: ui,
}
- return map[string]cli.CommandFactory{
- "server": func() (cli.Command, error) {
+
+ return map[string]MarkDownCommandFactory{
+ "server": func() (MarkDownCommand, error) {
return &server.Command{
UI: ui,
}, nil
},
- "version": func() (cli.Command, error) {
+ "version": func() (MarkDownCommand, error) {
return &VersionCommand{
UI: ui,
}, nil
},
- "debug": func() (cli.Command, error) {
+ "dumpconfig": func() (MarkDownCommand, error) {
+ return &DumpconfigCommand{
+ Meta2: meta2,
+ }, nil
+ },
+ "debug": func() (MarkDownCommand, error) {
return &DebugCommand{
+ UI: ui,
+ }, nil
+ },
+ "debug pprof": func() (MarkDownCommand, error) {
+ return &DebugPprofCommand{
+ Meta2: meta2,
+ }, nil
+ },
+ "debug block": func() (MarkDownCommand, error) {
+ return &DebugBlockCommand{
Meta2: meta2,
}, nil
},
- "chain": func() (cli.Command, error) {
+ "chain": func() (MarkDownCommand, error) {
return &ChainCommand{
UI: ui,
}, nil
},
- "chain watch": func() (cli.Command, error) {
+ "chain watch": func() (MarkDownCommand, error) {
return &ChainWatchCommand{
Meta2: meta2,
}, nil
},
- "chain sethead": func() (cli.Command, error) {
+ "chain sethead": func() (MarkDownCommand, error) {
return &ChainSetHeadCommand{
Meta2: meta2,
}, nil
},
- "account": func() (cli.Command, error) {
+ "account": func() (MarkDownCommand, error) {
return &Account{
UI: ui,
}, nil
},
- "account new": func() (cli.Command, error) {
+ "account new": func() (MarkDownCommand, error) {
return &AccountNewCommand{
Meta: meta,
}, nil
},
- "account import": func() (cli.Command, error) {
+ "account import": func() (MarkDownCommand, error) {
return &AccountImportCommand{
Meta: meta,
}, nil
},
- "account list": func() (cli.Command, error) {
+ "account list": func() (MarkDownCommand, error) {
return &AccountListCommand{
Meta: meta,
}, nil
},
- "peers": func() (cli.Command, error) {
+ "peers": func() (MarkDownCommand, error) {
return &PeersCommand{
UI: ui,
}, nil
},
- "peers add": func() (cli.Command, error) {
+ "peers add": func() (MarkDownCommand, error) {
return &PeersAddCommand{
Meta2: meta2,
}, nil
},
- "peers remove": func() (cli.Command, error) {
+ "peers remove": func() (MarkDownCommand, error) {
return &PeersRemoveCommand{
Meta2: meta2,
}, nil
},
- "peers list": func() (cli.Command, error) {
+ "peers list": func() (MarkDownCommand, error) {
return &PeersListCommand{
Meta2: meta2,
}, nil
},
- "peers status": func() (cli.Command, error) {
+ "peers status": func() (MarkDownCommand, error) {
return &PeersStatusCommand{
Meta2: meta2,
}, nil
},
- "status": func() (cli.Command, error) {
+ "status": func() (MarkDownCommand, error) {
return &StatusCommand{
Meta2: meta2,
}, nil
},
+ "fingerprint": func() (MarkDownCommand, error) {
+ return &FingerprintCommand{
+ UI: ui,
+ }, nil
+ },
+ "attach": func() (MarkDownCommand, error) {
+ return &AttachCommand{
+ UI: ui,
+ Meta: meta,
+ Meta2: meta2,
+ }, nil
+ },
+ "bootnode": func() (MarkDownCommand, error) {
+ return &BootnodeCommand{
+ UI: ui,
+ }, nil
+ },
+ "removedb": func() (MarkDownCommand, error) {
+ return &RemoveDBCommand{
+ Meta2: meta2,
+ }, nil
+ },
}
}
@@ -143,6 +207,7 @@ func (m *Meta2) NewFlagSet(n string) *flagset.Flagset {
Usage: "Address of the grpc endpoint",
Default: "127.0.0.1:3131",
})
+
return f
}
@@ -151,6 +216,7 @@ func (m *Meta2) Conn() (*grpc.ClientConn, error) {
if err != nil {
return nil, fmt.Errorf("failed to connect to server: %v", err)
}
+
return conn, nil
}
@@ -159,6 +225,7 @@ func (m *Meta2) BorConn() (proto.BorClient, error) {
if err != nil {
return nil, err
}
+
return proto.NewBorClient(conn), nil
}
@@ -206,18 +273,21 @@ func (m *Meta) GetKeystore() (*keystore.KeyStore, error) {
scryptP := keystore.StandardScryptP
keys := keystore.NewKeyStore(keydir, scryptN, scryptP)
+
return keys, nil
}
func formatList(in []string) string {
columnConf := columnize.DefaultConfig()
- columnConf.Empty = ""
+ columnConf.Empty = emptyPlaceHolder
+
return columnize.Format(in, columnConf)
}
func formatKV(in []string) string {
columnConf := columnize.DefaultConfig()
- columnConf.Empty = ""
+ columnConf.Empty = emptyPlaceHolder
columnConf.Glue = " = "
+
return columnize.Format(in, columnConf)
}
diff --git a/internal/cli/debug.go b/internal/cli/debug.go
index a6b6ff7973..6e083a6974 100644
--- a/internal/cli/debug.go
+++ b/internal/cli/debug.go
@@ -1,11 +1,8 @@
package cli
-// Based on https://github.com/hashicorp/nomad/blob/main/command/operator_debug.go
-
import (
"archive/tar"
"compress/gzip"
- "context"
"fmt"
"io"
"io/ioutil"
@@ -16,194 +13,187 @@ import (
"syscall"
"time"
- "github.com/ethereum/go-ethereum/internal/cli/flagset"
+ "github.com/mitchellh/cli"
+
"github.com/ethereum/go-ethereum/internal/cli/server/proto"
- "github.com/golang/protobuf/jsonpb"
- gproto "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/empty"
+
+ "github.com/golang/protobuf/jsonpb" // nolint:staticcheck
+ gproto "github.com/golang/protobuf/proto" // nolint:staticcheck
grpc_net_conn "github.com/mitchellh/go-grpc-net-conn"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/runtime/protoiface"
)
+// DebugCommand is the command to group the debug commands
type DebugCommand struct {
- *Meta2
-
- seconds uint64
- output string
+ UI cli.Ui
}
-// Help implements the cli.Command interface
-func (d *DebugCommand) Help() string {
- return `Usage: bor debug
+// MarkDown implements cli.MarkDown interface
+func (d *DebugCommand) MarkDown() string {
+ examples := []string{
+ "## Examples",
+ "By default it creates a tar.gz file with the output:",
+ CodeBlock([]string{
+ "$ bor debug",
+ "Starting debugger...\n",
+ "Created debug archive: bor-debug-2021-10-26-073819Z.tar.gz",
+ }),
+ "Send the output to a specific directory:",
+ CodeBlock([]string{
+ "$ bor debug --output data",
+ "Starting debugger...\n",
+ "Created debug directory: data/bor-debug-2021-10-26-075437Z",
+ }),
+ }
- Build an archive containing Bor pprof traces
+ items := []string{
+ "# Debug",
+ "The ```bor debug``` command takes a debug dump of the running client.",
+ "- [```bor debug pprof```](./debug_pprof.md): Dumps bor pprof traces.",
+ "- [```bor debug block ```](./debug_block.md): Dumps bor block traces.",
+ }
+ items = append(items, examples...)
- ` + d.Flags().Help()
+ return strings.Join(items, "\n\n")
}
-func (d *DebugCommand) Flags() *flagset.Flagset {
- flags := d.NewFlagSet("debug")
+// Help implements the cli.Command interface
+func (c *DebugCommand) Help() string {
+ return `Usage: bor debug
- flags.Uint64Flag(&flagset.Uint64Flag{
- Name: "seconds",
- Usage: "seconds to trace",
- Value: &d.seconds,
- Default: 2,
- })
- flags.StringFlag(&flagset.StringFlag{
- Name: "output",
- Value: &d.output,
- Usage: "Output directory",
- })
+ This command takes a debug dump of the running client.
+
+ Get the pprof traces:
+
+ $ bor debug pprof
+
+ Get the block traces:
- return flags
+ $ bor debug block `
}
// Synopsis implements the cli.Command interface
-func (d *DebugCommand) Synopsis() string {
- return "Build an archive containing Bor pprof traces"
+func (c *DebugCommand) Synopsis() string {
+ return "Get traces of the running client"
}
// Run implements the cli.Command interface
-func (d *DebugCommand) Run(args []string) int {
- flags := d.Flags()
- if err := flags.Parse(args); err != nil {
- d.UI.Error(err.Error())
- return 1
- }
+func (c *DebugCommand) Run(args []string) int {
+ return cli.RunResultHelp
+}
- clt, err := d.BorConn()
- if err != nil {
- d.UI.Error(err.Error())
- return 1
- }
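+// debugEnv manages the output directory shared by the debug commands: it creates the
+// destination directory, writes streamed files and JSON into it and, unless an explicit
+// output directory was given, archives it as a tarball.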
+type debugEnv struct {
+ output string
+ prefix string
+
+ name string
+ dst string
+}
- stamped := "bor-debug-" + time.Now().UTC().Format("2006-01-02-150405Z")
+func (d *debugEnv) init() error {
+ d.name = d.prefix + time.Now().UTC().Format("2006-01-02-150405Z")
+
+ var err error
// Create the output directory
var tmp string
if d.output != "" {
// User specified output directory
- tmp = filepath.Join(d.output, stamped)
+ tmp = filepath.Join(d.output, d.name)
_, err := os.Stat(tmp)
+
if !os.IsNotExist(err) {
- d.UI.Error("Output directory already exists")
- return 1
+ return fmt.Errorf("output directory already exists")
}
} else {
// Generate temp directory
- tmp, err = ioutil.TempDir(os.TempDir(), stamped)
+ tmp, err = ioutil.TempDir(os.TempDir(), d.name)
if err != nil {
- d.UI.Error(fmt.Sprintf("Error creating tmp directory: %s", err.Error()))
- return 1
+ return fmt.Errorf("error creating tmp directory: %s", err.Error())
}
- defer os.RemoveAll(tmp)
}
- d.UI.Output("Starting debugger...")
- d.UI.Output("")
-
// ensure destination folder exists
if err := os.MkdirAll(tmp, os.ModePerm); err != nil {
- d.UI.Error(fmt.Sprintf("failed to create parent directory: %v", err))
- return 1
+ return fmt.Errorf("failed to create parent directory: %v", err)
}
- pprofProfile := func(ctx context.Context, profile string, filename string) error {
- req := &proto.PprofRequest{
- Seconds: int64(d.seconds),
- }
- switch profile {
- case "cpu":
- req.Type = proto.PprofRequest_CPU
- case "trace":
- req.Type = proto.PprofRequest_TRACE
- default:
- req.Type = proto.PprofRequest_LOOKUP
- req.Profile = profile
- }
- stream, err := clt.Pprof(ctx, req)
- if err != nil {
- return err
- }
- // wait for open request
- msg, err := stream.Recv()
- if err != nil {
- return err
- }
- if _, ok := msg.Event.(*proto.PprofResponse_Open_); !ok {
- return fmt.Errorf("expected open message")
- }
+ d.dst = tmp
- // create the stream
- conn := &grpc_net_conn.Conn{
- Stream: stream,
- Response: &proto.PprofResponse_Input{},
- Decode: grpc_net_conn.SimpleDecoder(func(msg gproto.Message) *[]byte {
- return &msg.(*proto.PprofResponse_Input).Data
- }),
- }
+ return nil
+}
- file, err := os.OpenFile(filepath.Join(tmp, filename+".prof"), os.O_RDWR|os.O_CREATE, 0644)
- if err != nil {
- return err
- }
- defer file.Close()
+func (d *debugEnv) tarName() string {
+ return d.name + ".tar.gz"
+}
- if _, err := io.Copy(file, conn); err != nil {
- return err
- }
+func (d *debugEnv) finish() error {
+ // Exit before archive if output directory was specified
+ if d.output != "" {
return nil
}
- ctx, cancelFn := context.WithCancel(context.Background())
- trapSignal(cancelFn)
+ // Create archive tarball
+ archiveFile := d.tarName()
+ if err := tarCZF(archiveFile, d.dst, d.name); err != nil {
+ return fmt.Errorf("error creating archive: %s", err.Error())
+ }
+
+ return nil
+}
+
+type debugStream interface {
+ Recv() (*proto.DebugFileResponse, error)
+ grpc.ClientStream
+}
- profiles := map[string]string{
- "heap": "heap",
- "cpu": "cpu",
- "trace": "trace",
+func (d *debugEnv) writeFromStream(name string, stream debugStream) error {
+ // wait for open request
+ msg, err := stream.Recv()
+ if err != nil {
+ return err
}
- for profile, filename := range profiles {
- if err := pprofProfile(ctx, profile, filename); err != nil {
- d.UI.Error(fmt.Sprintf("Error creating profile '%s': %v", profile, err))
- return 1
- }
+
+ if _, ok := msg.Event.(*proto.DebugFileResponse_Open_); !ok {
+ return fmt.Errorf("expected open message")
}
- // append the status
- {
- statusResp, err := clt.Status(ctx, &empty.Empty{})
- if err != nil {
- d.UI.Output(fmt.Sprintf("Failed to get status: %v", err))
- return 1
- }
- m := jsonpb.Marshaler{}
- data, err := m.MarshalToString(statusResp)
- if err != nil {
- d.UI.Output(err.Error())
- return 1
- }
- if err := ioutil.WriteFile(filepath.Join(tmp, "status.json"), []byte(data), 0644); err != nil {
- d.UI.Output(fmt.Sprintf("Failed to write status: %v", err))
- return 1
- }
+ // create the stream
+ conn := &grpc_net_conn.Conn{
+ Stream: stream,
+ Response: &proto.DebugFileResponse_Input{},
+ Decode: grpc_net_conn.SimpleDecoder(func(msg gproto.Message) *[]byte {
+ return &msg.(*proto.DebugFileResponse_Input).Data
+ }),
}
- // Exit before archive if output directory was specified
- if d.output != "" {
- d.UI.Output(fmt.Sprintf("Created debug directory: %s", tmp))
- return 0
+ file, err := os.OpenFile(filepath.Join(d.dst, name), os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return err
}
+ defer file.Close()
- // Create archive tarball
- archiveFile := stamped + ".tar.gz"
- if err = tarCZF(archiveFile, tmp, stamped); err != nil {
- d.UI.Error(fmt.Sprintf("Error creating archive: %s", err.Error()))
- return 1
+ if _, err := io.Copy(file, conn); err != nil {
+ return err
+ }
+
+ return nil
+}
+
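+// writeJSON marshals a proto message to JSON and writes it into the output directory.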
+func (d *debugEnv) writeJSON(name string, msg protoiface.MessageV1) error {
+ m := jsonpb.Marshaler{}
+ data, err := m.MarshalToString(msg)
+
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(d.dst, name), []byte(data), 0600); err != nil {
+ return fmt.Errorf("failed to write status: %v", err)
}
- d.UI.Output(fmt.Sprintf("Created debug archive: %s", archiveFile))
- return 0
+ return nil
}
func trapSignal(cancel func()) {
diff --git a/internal/cli/debug_block.go b/internal/cli/debug_block.go
new file mode 100644
index 0000000000..5a282cc550
--- /dev/null
+++ b/internal/cli/debug_block.go
@@ -0,0 +1,126 @@
+package cli
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/internal/cli/flagset"
+ "github.com/ethereum/go-ethereum/internal/cli/server/proto"
+)
+
+// DebugBlockCommand is the command to get the trace of a bor block
+type DebugBlockCommand struct {
+ *Meta2
+
+ output string
+}
+
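+// MarkDown implements cli.MarkDown interface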
+func (p *DebugBlockCommand) MarkDown() string {
+ items := []string{
+ "# Debug trace",
+ "The ```bor debug block ``` command will create an archive containing traces of a bor block.",
+ p.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
+// Help implements the cli.Command interface
+func (c *DebugBlockCommand) Help() string {
+ return `Usage: bor debug block
+
+ This command is used to get the traces of a bor block`
+}
+
+func (c *DebugBlockCommand) Flags() *flagset.Flagset {
+ flags := c.NewFlagSet("trace")
+
+ flags.StringFlag(&flagset.StringFlag{
+ Name: "output",
+ Value: &c.output,
+ Usage: "Output directory",
+ })
+
+ return flags
+}
+
+// Synopsis implements the cli.Command interface
+func (c *DebugBlockCommand) Synopsis() string {
+ return "Get trace of a bor block"
+}
+
+// Run implements the cli.Command interface
+func (c *DebugBlockCommand) Run(args []string) int {
+ flags := c.Flags()
+
+ var number *int64 = nil
+
+ // parse the block number (if available)
+ if len(args)%2 != 0 {
+ num, err := strconv.ParseInt(args[0], 10, 64)
+ if err == nil {
+ number = &num
+ }
+
+ args = args[1:]
+ }
+ // parse output directory
+ if err := flags.Parse(args); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ borClt, err := c.BorConn()
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ dEnv := &debugEnv{
+ output: c.output,
+ prefix: "bor-block-trace-",
+ }
+ if err := dEnv.init(); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ c.UI.Output("Starting block tracer...")
+ c.UI.Output("")
+
+ // create a debug block request
+ var debugRequest *proto.DebugBlockRequest = &proto.DebugBlockRequest{}
+ if number != nil {
+ debugRequest.Number = *number
+ } else {
+ debugRequest.Number = -1
+ }
+
+ // send the request
+ // receives a grpc stream of debug block response
+ stream, err := borClt.DebugBlock(context.Background(), debugRequest)
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ if err := dEnv.writeFromStream("block.json", stream); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ if err := dEnv.finish(); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ if c.output != "" {
+ c.UI.Output(fmt.Sprintf("Created debug directory: %s", dEnv.dst))
+ } else {
+ c.UI.Output(fmt.Sprintf("Created block trace archive: %s", dEnv.tarName()))
+ }
+
+ return 0
+}
diff --git a/internal/cli/debug_pprof.go b/internal/cli/debug_pprof.go
new file mode 100644
index 0000000000..ef15e45b58
--- /dev/null
+++ b/internal/cli/debug_pprof.go
@@ -0,0 +1,159 @@
+package cli
+
+// Based on https://github.com/hashicorp/nomad/blob/main/command/operator_debug.go
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/golang/protobuf/ptypes/empty"
+
+ "github.com/ethereum/go-ethereum/internal/cli/flagset"
+ "github.com/ethereum/go-ethereum/internal/cli/server/proto"
+)
+
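+// DebugPprofCommand is the command to build an archive containing bor pprof traces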
+type DebugPprofCommand struct {
+ *Meta2
+
+ seconds uint64
+ output string
+}
+
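+// MarkDown implements cli.MarkDown interface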
+func (p *DebugPprofCommand) MarkDown() string {
+ items := []string{
+ "# Debug Pprof",
+ "The ```debug pprof ``` command will create an archive containing bor pprof traces.",
+ p.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
+// Help implements the cli.Command interface
+func (d *DebugPprofCommand) Help() string {
+ return `Usage: bor debug pprof
+
+ Build an archive containing Bor pprof traces
+
+ ` + d.Flags().Help()
+}
+
+func (d *DebugPprofCommand) Flags() *flagset.Flagset {
+ flags := d.NewFlagSet("debug")
+
+ flags.Uint64Flag(&flagset.Uint64Flag{
+ Name: "seconds",
+ Usage: "seconds to trace",
+ Value: &d.seconds,
+ Default: 2,
+ })
+ flags.StringFlag(&flagset.StringFlag{
+ Name: "output",
+ Value: &d.output,
+ Usage: "Output directory",
+ })
+
+ return flags
+}
+
+// Synopsis implements the cli.Command interface
+func (d *DebugPprofCommand) Synopsis() string {
+ return "Build an archive containing Bor pprof traces"
+}
+
+// Run implements the cli.Command interface
+func (d *DebugPprofCommand) Run(args []string) int {
+ flags := d.Flags()
+ if err := flags.Parse(args); err != nil {
+ d.UI.Error(err.Error())
+ return 1
+ }
+
+ clt, err := d.BorConn()
+ if err != nil {
+ d.UI.Error(err.Error())
+ return 1
+ }
+
+ dEnv := &debugEnv{
+ output: d.output,
+ prefix: "bor-debug-",
+ }
+ if err := dEnv.init(); err != nil {
+ d.UI.Error(err.Error())
+ return 1
+ }
+
+ d.UI.Output("Starting debugger...")
+ d.UI.Output("")
+
+ pprofProfile := func(ctx context.Context, profile string, filename string) error {
+ req := &proto.DebugPprofRequest{
+ Seconds: int64(d.seconds),
+ }
+
+ switch profile {
+ case "cpu":
+ req.Type = proto.DebugPprofRequest_CPU
+ case "trace":
+ req.Type = proto.DebugPprofRequest_TRACE
+ default:
+ req.Type = proto.DebugPprofRequest_LOOKUP
+ req.Profile = profile
+ }
+
+ stream, err := clt.DebugPprof(ctx, req)
+
+ if err != nil {
+ return err
+ }
+
+ if err := dEnv.writeFromStream(filename+".prof", stream); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ ctx, cancelFn := context.WithCancel(context.Background())
+ trapSignal(cancelFn)
+
+ profiles := map[string]string{
+ "heap": "heap",
+ "cpu": "cpu",
+ "trace": "trace",
+ }
+ for profile, filename := range profiles {
+ if err := pprofProfile(ctx, profile, filename); err != nil {
+ d.UI.Error(fmt.Sprintf("Error creating profile '%s': %v", profile, err))
+ return 1
+ }
+ }
+
+ // append the status
+ {
+ statusResp, err := clt.Status(ctx, &empty.Empty{})
+ if err != nil {
+ d.UI.Output(fmt.Sprintf("Failed to get status: %v", err))
+ return 1
+ }
+ if err := dEnv.writeJSON("status.json", statusResp); err != nil {
+ d.UI.Error(err.Error())
+ return 1
+ }
+ }
+
+ if err := dEnv.finish(); err != nil {
+ d.UI.Error(err.Error())
+ return 1
+ }
+
+ if d.output != "" {
+ d.UI.Output(fmt.Sprintf("Created debug directory: %s", dEnv.dst))
+ } else {
+ d.UI.Output(fmt.Sprintf("Created debug archive: %s", dEnv.tarName()))
+ }
+
+ return 0
+}
diff --git a/internal/cli/debug_test.go b/internal/cli/debug_test.go
new file mode 100644
index 0000000000..f77cf839ac
--- /dev/null
+++ b/internal/cli/debug_test.go
@@ -0,0 +1,112 @@
+package cli
+
+import (
+ "os"
+ "path"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/mitchellh/cli"
+ "github.com/stretchr/testify/assert"
+
+ "github.com/ethereum/go-ethereum/internal/cli/server"
+)
+
+var currentDir string = ""
+
+func TestCommand_DebugBlock(t *testing.T) {
+ t.Parallel()
+
+ // Start a blockchain in developer mode and get trace of block
+ config := server.DefaultConfig()
+
+ // enable developer mode
+ config.Developer.Enabled = true
+ config.Developer.Period = 2 // block time
+
+ // enable archive mode for getting traces of ancient blocks
+ config.GcMode = "archive"
+
+ // start the mock server
+ srv, err := server.CreateMockServer(config)
+ assert.NoError(t, err)
+
+ defer server.CloseMockServer(srv)
+
+ // get the grpc port
+ port := srv.GetGrpcAddr()
+
+ // wait for 4 seconds (2 * block time) to mine 2 blocks
+ time.Sleep(2 * time.Duration(config.Developer.Period) * time.Second)
+
+ // add prefix for debug trace
+ prefix := "bor-block-trace-"
+
+ // output dir
+ output := "debug_block_test"
+
+ // set current directory
+ currentDir, _ = os.Getwd()
+
+ // trace 1st block
+ start := time.Now()
+ dst1 := path.Join(output, prefix+time.Now().UTC().Format("2006-01-02-150405Z"), "block.json")
+ res := traceBlock(port, 1, output)
+ assert.Equal(t, 0, res)
+ t.Logf("Completed trace of block %d in %d ms at %s", 1, time.Since(start).Milliseconds(), dst1)
+
+ // adding this to avoid debug directory name conflicts
+ time.Sleep(time.Second)
+
+ // trace last/recent block
+ start = time.Now()
+ latestBlock := srv.GetLatestBlockNumber().Int64()
+ dst2 := path.Join(output, prefix+time.Now().UTC().Format("2006-01-02-150405Z"), "block.json")
+ res = traceBlock(port, latestBlock, output)
+ assert.Equal(t, 0, res)
+ t.Logf("Completed trace of block %d in %d ms at %s", latestBlock, time.Since(start).Milliseconds(), dst2)
+
+ // verify if the trace files are created
+ done := verify(dst1)
+ assert.Equal(t, true, done)
+ done = verify(dst2)
+ assert.Equal(t, true, done)
+
+ // delete the traces
+ deleteTraces(output)
+}
+
+// traceBlock calls the cli command to trace a block
+func traceBlock(port string, number int64, output string) int {
+ ui := cli.NewMockUi()
+ command := &DebugBlockCommand{
+ Meta2: &Meta2{
+ UI: ui,
+ addr: "127.0.0.1:" + port,
+ },
+ }
+
+ // run trace (by explicitly passing the output directory and grpc address)
+ return command.Run([]string{strconv.FormatInt(number, 10), "--output", output, "--address", command.Meta2.addr})
+}
+
+// verify checks if the trace file is created at the destination
+// directory or not
+func verify(dst string) bool {
+ dst = path.Join(currentDir, dst)
+ if file, err := os.Stat(dst); err == nil {
+ // check if the file has content
+ if file.Size() > 0 {
+ return true
+ }
+ }
+
+ return false
+}
+
+// deleteTraces removes the traces created during the test
+func deleteTraces(dst string) {
+ dst = path.Join(currentDir, dst)
+ os.RemoveAll(dst)
+}
diff --git a/internal/cli/dumpconfig.go b/internal/cli/dumpconfig.go
new file mode 100644
index 0000000000..a748af3357
--- /dev/null
+++ b/internal/cli/dumpconfig.go
@@ -0,0 +1,72 @@
+package cli
+
+import (
+ "os"
+ "strings"
+
+ "github.com/BurntSushi/toml"
+
+ "github.com/ethereum/go-ethereum/internal/cli/server"
+)
+
+// DumpconfigCommand is for exporting user provided flags into a config file
+type DumpconfigCommand struct {
+ *Meta2
+}
+
+// MarkDown implements cli.MarkDown interface
+func (p *DumpconfigCommand) MarkDown() string {
+ items := []string{
+ "# Dumpconfig",
+ "The ```bor dumpconfig ``` command will export the user provided flags into a configuration file",
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
+// Help implements the cli.Command interface
+func (c *DumpconfigCommand) Help() string {
+ return `Usage: bor dumpconfig
+
+ This command will export the user provided flags into a configuration file`
+}
+
+// Synopsis implements the cli.Command interface
+func (c *DumpconfigCommand) Synopsis() string {
+ return "Export configuration file"
+}
+
+// TODO: add flags for file location and format (toml, json, hcl) of the configuration file.
+
+// Run implements the cli.Command interface
+func (c *DumpconfigCommand) Run(args []string) int {
+ // Initialize an empty command instance to get flags
+ command := server.Command{}
+ flags := command.Flags()
+
+ if err := flags.Parse(args); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ userConfig := command.GetConfig()
+
+ // convert the big.Int and time.Duration fields to their corresponding Raw fields
+ userConfig.JsonRPC.HttpTimeout.ReadTimeoutRaw = userConfig.JsonRPC.HttpTimeout.ReadTimeout.String()
+ userConfig.JsonRPC.HttpTimeout.WriteTimeoutRaw = userConfig.JsonRPC.HttpTimeout.WriteTimeout.String()
+ userConfig.JsonRPC.HttpTimeout.IdleTimeoutRaw = userConfig.JsonRPC.HttpTimeout.IdleTimeout.String()
+ userConfig.TxPool.RejournalRaw = userConfig.TxPool.Rejournal.String()
+ userConfig.TxPool.LifeTimeRaw = userConfig.TxPool.LifeTime.String()
+ userConfig.Sealer.GasPriceRaw = userConfig.Sealer.GasPrice.String()
+ userConfig.Gpo.MaxPriceRaw = userConfig.Gpo.MaxPrice.String()
+ userConfig.Gpo.IgnorePriceRaw = userConfig.Gpo.IgnorePrice.String()
+ userConfig.Cache.RejournalRaw = userConfig.Cache.Rejournal.String()
+ userConfig.Cache.TrieTimeoutRaw = userConfig.Cache.TrieTimeout.String()
+
+ if err := toml.NewEncoder(os.Stdout).Encode(userConfig); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ return 0
+}
diff --git a/internal/cli/flagset/flagset.go b/internal/cli/flagset/flagset.go
index 4388dd03fc..933fe59060 100644
--- a/internal/cli/flagset/flagset.go
+++ b/internal/cli/flagset/flagset.go
@@ -4,6 +4,7 @@ import (
"flag"
"fmt"
"math/big"
+ "sort"
"strings"
"time"
)
@@ -18,12 +19,14 @@ func NewFlagSet(name string) *Flagset {
flags: []*FlagVar{},
set: flag.NewFlagSet(name, flag.ContinueOnError),
}
+
return f
}
type FlagVar struct {
Name string
Usage string
+ Group string
}
func (f *Flagset) addFlag(fl *FlagVar) {
@@ -32,13 +35,63 @@ func (f *Flagset) addFlag(fl *FlagVar) {
func (f *Flagset) Help() string {
str := "Options:\n\n"
+
items := []string{}
for _, item := range f.flags {
items = append(items, fmt.Sprintf(" -%s\n %s", item.Name, item.Usage))
}
+
return str + strings.Join(items, "\n\n")
}
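+// GetAllFlags returns the names of all flags registered in the flagset.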
+func (f *Flagset) GetAllFlags() []string {
+ flags := []string{}
+ for _, flag := range f.flags {
+ flags = append(flags, flag.Name)
+ }
+
+ return flags
+}
+
+// MarkDown implements cli.MarkDown interface
+func (f *Flagset) MarkDown() string {
+ if len(f.flags) == 0 {
+ return ""
+ }
+
+ groups := make(map[string][]*FlagVar)
+
+ for _, item := range f.flags {
+ groups[item.Group] = append(groups[item.Group], item)
+ }
+
+ i := 0
+ keys := make([]string, len(groups))
+
+ for k := range groups {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+
+ items := []string{}
+
+ for _, k := range keys {
+ if k == "" {
+ items = append(items, "## Options")
+ } else {
+ items = append(items, fmt.Sprintf("### %s Options", k))
+ }
+
+ for _, item := range groups[k] {
+ items = append(items, fmt.Sprintf("- ```%s```: %s", item.Name, item.Usage))
+ }
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
func (f *Flagset) Parse(args []string) error {
return f.set.Parse(args)
}
@@ -52,12 +105,14 @@ type BoolFlag struct {
Usage string
Default bool
Value *bool
+ Group string
}
func (f *Flagset) BoolFlag(b *BoolFlag) {
f.addFlag(&FlagVar{
Name: b.Name,
Usage: b.Usage,
+ Group: b.Group,
})
f.set.BoolVar(b.Value, b.Name, b.Default, b.Usage)
}
@@ -67,12 +122,14 @@ type StringFlag struct {
Usage string
Default string
Value *string
+ Group string
}
func (f *Flagset) StringFlag(b *StringFlag) {
f.addFlag(&FlagVar{
Name: b.Name,
Usage: b.Usage,
+ Group: b.Group,
})
f.set.StringVar(b.Value, b.Name, b.Default, b.Usage)
}
@@ -82,12 +139,14 @@ type IntFlag struct {
Usage string
Value *int
Default int
+ Group string
}
func (f *Flagset) IntFlag(i *IntFlag) {
f.addFlag(&FlagVar{
Name: i.Name,
Usage: i.Usage,
+ Group: i.Group,
})
f.set.IntVar(i.Value, i.Name, i.Default, i.Usage)
}
@@ -97,12 +156,14 @@ type Uint64Flag struct {
Usage string
Value *uint64
Default uint64
+ Group string
}
func (f *Flagset) Uint64Flag(i *Uint64Flag) {
f.addFlag(&FlagVar{
Name: i.Name,
Usage: i.Usage,
+ Group: i.Group,
})
f.set.Uint64Var(i.Value, i.Name, i.Default, i.Usage)
}
@@ -111,12 +172,14 @@ type BigIntFlag struct {
Name string
Usage string
Value *big.Int
+ Group string
}
func (b *BigIntFlag) String() string {
if b.Value == nil {
return ""
}
+
return b.Value.String()
}
@@ -126,13 +189,16 @@ func (b *BigIntFlag) Set(value string) error {
var ok bool
if strings.HasPrefix(value, "0x") {
num, ok = num.SetString(value[2:], 16)
} else {
num, ok = num.SetString(value, 10)
}
+
if !ok {
return fmt.Errorf("failed to set big int")
}
- b.Value = num
+
+ *b.Value = *num
+
return nil
}
@@ -140,25 +206,43 @@ func (f *Flagset) BigIntFlag(b *BigIntFlag) {
f.addFlag(&FlagVar{
Name: b.Name,
Usage: b.Usage,
+ Group: b.Group,
})
f.set.Var(b, b.Name, b.Usage)
}
type SliceStringFlag struct {
- Name string
- Usage string
- Value *[]string
+ Name string
+ Usage string
+ Value *[]string
+ Default []string
+ Group string
+}
+
+// SplitAndTrim splits input separated by a comma
+// and trims excessive white space from the substrings.
+func SplitAndTrim(input string) (ret []string) {
+ l := strings.Split(input, ",")
+ for _, r := range l {
+ if r = strings.TrimSpace(r); r != "" {
+ ret = append(ret, r)
+ }
+ }
+
+ return ret
}
func (i *SliceStringFlag) String() string {
if i.Value == nil {
return ""
}
+
return strings.Join(*i.Value, ",")
}
func (i *SliceStringFlag) Set(value string) error {
- *i.Value = append(*i.Value, strings.Split(value, ",")...)
+ // overwriting instead of appending
+ *i.Value = SplitAndTrim(value)
return nil
}
@@ -166,6 +250,7 @@ func (f *Flagset) SliceStringFlag(s *SliceStringFlag) {
f.addFlag(&FlagVar{
Name: s.Name,
Usage: s.Usage,
+ Group: s.Group,
})
f.set.Var(s, s.Name, s.Usage)
}
@@ -175,12 +260,14 @@ type DurationFlag struct {
Usage string
Value *time.Duration
Default time.Duration
+ Group string
}
func (f *Flagset) DurationFlag(d *DurationFlag) {
f.addFlag(&FlagVar{
Name: d.Name,
Usage: d.Usage,
+ Group: d.Group,
})
f.set.DurationVar(d.Value, d.Name, d.Default, "")
}
@@ -189,16 +276,19 @@ type MapStringFlag struct {
Name string
Usage string
Value *map[string]string
+ Group string
}
func (m *MapStringFlag) String() string {
if m.Value == nil {
return ""
}
+
ls := []string{}
for k, v := range *m.Value {
ls = append(ls, k+"="+v)
}
+
return strings.Join(ls, ",")
}
@@ -206,6 +296,7 @@ func (m *MapStringFlag) Set(value string) error {
if m.Value == nil {
m.Value = &map[string]string{}
}
+
for _, t := range strings.Split(value, ",") {
if t != "" {
kv := strings.Split(t, "=")
@@ -215,6 +306,7 @@ func (m *MapStringFlag) Set(value string) error {
}
}
}
+
return nil
}
@@ -222,6 +314,7 @@ func (f *Flagset) MapStringFlag(m *MapStringFlag) {
f.addFlag(&FlagVar{
Name: m.Name,
Usage: m.Usage,
+ Group: m.Group,
})
f.set.Var(m, m.Name, m.Usage)
}
@@ -231,12 +324,14 @@ type Float64Flag struct {
Usage string
Value *float64
Default float64
+ Group string
}
func (f *Flagset) Float64Flag(i *Float64Flag) {
f.addFlag(&FlagVar{
Name: i.Name,
Usage: i.Usage,
+ Group: i.Group,
})
f.set.Float64Var(i.Value, i.Name, i.Default, "")
}
diff --git a/internal/cli/flagset/flagset_test.go b/internal/cli/flagset/flagset_test.go
index 2f046c3248..118361320d 100644
--- a/internal/cli/flagset/flagset_test.go
+++ b/internal/cli/flagset/flagset_test.go
@@ -23,14 +23,17 @@ func TestFlagsetBool(t *testing.T) {
func TestFlagsetSliceString(t *testing.T) {
f := NewFlagSet("")
- value := []string{}
+ value := []string{"a", "b", "c"}
f.SliceStringFlag(&SliceStringFlag{
- Name: "flag",
- Value: &value,
+ Name: "flag",
+ Value: &value,
+ Default: value,
})
- assert.NoError(t, f.Parse([]string{"--flag", "a,b", "--flag", "c"}))
+ assert.NoError(t, f.Parse([]string{}))
assert.Equal(t, value, []string{"a", "b", "c"})
+ assert.NoError(t, f.Parse([]string{"--flag", "a,b"}))
+ assert.Equal(t, value, []string{"a", "b"})
}
func TestFlagsetDuration(t *testing.T) {
diff --git a/internal/cli/markdown.go b/internal/cli/markdown.go
new file mode 100644
index 0000000000..652c98e56b
--- /dev/null
+++ b/internal/cli/markdown.go
@@ -0,0 +1,14 @@
+package cli
+
+import (
+ "strings"
+)
+
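+// MarkDown is implemented by commands and flagsets that can render themselves as Markdown documentation.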
+type MarkDown interface {
+ MarkDown() string
+}
+
+// CodeBlock creates a Markdown code block from a slice of strings, where each string is a line of code
+func CodeBlock(lines []string) string {
+ return "```\n" + strings.Join(lines, "\n") + "\n```"
+}
diff --git a/internal/cli/markdown_test.go b/internal/cli/markdown_test.go
new file mode 100644
index 0000000000..30c272a220
--- /dev/null
+++ b/internal/cli/markdown_test.go
@@ -0,0 +1,20 @@
+package cli
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCodeBlock(t *testing.T) {
+ t.Parallel()
+ assert := assert.New(t)
+
+ lines := []string{
+ "abc",
+ "bcd",
+ }
+
+ expected := "```\n" + "abc\n" + "bcd\n" + "```"
+ assert.Equal(expected, CodeBlock(lines))
+}
diff --git a/internal/cli/peers.go b/internal/cli/peers.go
index b591366222..fbbca24fad 100644
--- a/internal/cli/peers.go
+++ b/internal/cli/peers.go
@@ -1,6 +1,8 @@
package cli
import (
+ "strings"
+
"github.com/mitchellh/cli"
)
@@ -9,6 +11,20 @@ type PeersCommand struct {
UI cli.Ui
}
+// MarkDown implements cli.MarkDown interface
+func (a *PeersCommand) MarkDown() string {
+ items := []string{
+ "# Peers",
+ "The ```peers``` command groups actions to interact with peers:",
+ "- [```peers add```](./peers_add.md): Joins the local client to another remote peer.",
+ "- [```peers list```](./peers_list.md): Lists the connected peers to the Bor client.",
+ "- [```peers remove```](./peers_remove.md): Disconnects the local client from a connected peer if exists.",
+ "- [```peers status```](./peers_status.md): Display the status of a peer by its id.",
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (c *PeersCommand) Help() string {
return `Usage: bor peers
diff --git a/internal/cli/peers_add.go b/internal/cli/peers_add.go
index 2b2fe127e0..3df1a6b6cb 100644
--- a/internal/cli/peers_add.go
+++ b/internal/cli/peers_add.go
@@ -2,6 +2,7 @@ package cli
import (
"context"
+ "strings"
"github.com/ethereum/go-ethereum/internal/cli/flagset"
"github.com/ethereum/go-ethereum/internal/cli/server/proto"
@@ -14,6 +15,17 @@ type PeersAddCommand struct {
trusted bool
}
+// MarkDown implements cli.MarkDown interface
+func (p *PeersAddCommand) MarkDown() string {
+ items := []string{
+ "# Peers add",
+ "The ```peers add ``` command joins the local client to another remote peer.",
+ p.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (p *PeersAddCommand) Help() string {
return `Usage: bor peers add
@@ -68,5 +80,6 @@ func (c *PeersAddCommand) Run(args []string) int {
c.UI.Error(err.Error())
return 1
}
+
return 0
}
diff --git a/internal/cli/peers_list.go b/internal/cli/peers_list.go
index a42e0011a5..4a572447c1 100644
--- a/internal/cli/peers_list.go
+++ b/internal/cli/peers_list.go
@@ -14,6 +14,17 @@ type PeersListCommand struct {
*Meta2
}
+// MarkDown implements cli.MarkDown interface
+func (p *PeersListCommand) MarkDown() string {
+ items := []string{
+ "# Peers add",
+ "The ```peers list``` command lists the connected peers.",
+ p.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (p *PeersListCommand) Help() string {
return `Usage: bor peers list
@@ -50,12 +61,14 @@ func (c *PeersListCommand) Run(args []string) int {
req := &proto.PeersListRequest{}
resp, err := borClt.PeersList(context.Background(), req)
+
if err != nil {
c.UI.Error(err.Error())
return 1
}
c.UI.Output(formatPeers(resp.Peers))
+
return 0
}
@@ -66,6 +79,7 @@ func formatPeers(peers []*proto.Peer) string {
rows := make([]string, len(peers)+1)
rows[0] = "ID|Enode|Name|Caps|Static|Trusted"
+
for i, d := range peers {
enode := strings.TrimPrefix(d.Enode, "enode://")
@@ -77,5 +91,6 @@ func formatPeers(peers []*proto.Peer) string {
d.Static,
d.Trusted)
}
+
return formatList(rows)
}
diff --git a/internal/cli/peers_remove.go b/internal/cli/peers_remove.go
index 979f139251..f53284c40c 100644
--- a/internal/cli/peers_remove.go
+++ b/internal/cli/peers_remove.go
@@ -2,6 +2,7 @@ package cli
import (
"context"
+ "strings"
"github.com/ethereum/go-ethereum/internal/cli/flagset"
"github.com/ethereum/go-ethereum/internal/cli/server/proto"
@@ -14,6 +15,17 @@ type PeersRemoveCommand struct {
trusted bool
}
+// MarkDown implements cli.MarkDown interface
+func (p *PeersRemoveCommand) MarkDown() string {
+ items := []string{
+ "# Peers remove",
+ "The ```peers remove ``` command disconnects the local client from a connected peer if exists.",
+ p.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (p *PeersRemoveCommand) Help() string {
return `Usage: bor peers remove
@@ -68,5 +80,6 @@ func (c *PeersRemoveCommand) Run(args []string) int {
c.UI.Error(err.Error())
return 1
}
+
return 0
}
diff --git a/internal/cli/peers_status.go b/internal/cli/peers_status.go
index c1da2b2647..f5d700a273 100644
--- a/internal/cli/peers_status.go
+++ b/internal/cli/peers_status.go
@@ -14,6 +14,17 @@ type PeersStatusCommand struct {
*Meta2
}
+// MarkDown implements cli.MarkDown interface
+func (p *PeersStatusCommand) MarkDown() string {
+ items := []string{
+ "# Peers status",
+ "The ```peers status ``` command displays the status of a peer by its id.",
+ p.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (p *PeersStatusCommand) Help() string {
return `Usage: bor peers status
@@ -58,12 +69,14 @@ func (c *PeersStatusCommand) Run(args []string) int {
Enode: args[0],
}
resp, err := borClt.PeersStatus(context.Background(), req)
+
if err != nil {
c.UI.Error(err.Error())
return 1
}
c.UI.Output(formatPeer(resp.Peer))
+
return 0
}
@@ -77,5 +90,6 @@ func formatPeer(peer *proto.Peer) string {
fmt.Sprintf("Static|%v", peer.Static),
fmt.Sprintf("Trusted|%v", peer.Trusted),
})
+
return base
}
diff --git a/internal/cli/removedb.go b/internal/cli/removedb.go
new file mode 100644
index 0000000000..4a604086ed
--- /dev/null
+++ b/internal/cli/removedb.go
@@ -0,0 +1,154 @@
+package cli
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/internal/cli/flagset"
+ "github.com/ethereum/go-ethereum/internal/cli/server"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+
+ "github.com/mitchellh/cli"
+)
+
+// RemoveDBCommand is for removing blockchain and state databases
+type RemoveDBCommand struct {
+ *Meta2
+
+ datadir string
+}
+
+const (
+ chaindataPath string = "chaindata"
+ ancientPath string = "ancient"
+ lightchaindataPath string = "lightchaindata"
+)
+
+// MarkDown implements cli.MarkDown interface
+func (c *RemoveDBCommand) MarkDown() string {
+ items := []string{
+ "# RemoveDB",
+ "The ```bor removedb``` command will remove the blockchain and state databases at the given datadir location",
+ c.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
+// Help implements the cli.Command interface
+func (c *RemoveDBCommand) Help() string {
+ return `Usage: bor removedb
+
+ This command will remove the blockchain and state databases at the given datadir location`
+}
+
+// Synopsis implements the cli.Command interface
+func (c *RemoveDBCommand) Synopsis() string {
+ return "Remove blockchain and state databases"
+}
+
+func (c *RemoveDBCommand) Flags() *flagset.Flagset {
+ flags := c.NewFlagSet("removedb")
+
+ flags.StringFlag(&flagset.StringFlag{
+ Name: "datadir",
+ Value: &c.datadir,
+ Usage: "Path of the data directory to store information",
+ })
+
+ return flags
+}
+
+// Run implements the cli.Command interface
+func (c *RemoveDBCommand) Run(args []string) int {
+ flags := c.Flags()
+
+ // parse datadir
+ if err := flags.Parse(args); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ datadir := c.datadir
+ if datadir == "" {
+ datadir = server.DefaultDataDir()
+ }
+
+ // create ethereum node config with just the datadir
+ nodeCfg := &node.Config{DataDir: datadir}
+
+ // Remove the full node state database
+ path := nodeCfg.ResolvePath(chaindataPath)
+ if common.FileExist(path) {
+ confirmAndRemoveDB(c.UI, path, "full node state database")
+ } else {
+ log.Info("Full node state database missing", "path", path)
+ }
+
+ // Remove the full node ancient database
+ // Note: The old cli used the DatabaseFreezer path from the config, if provided explicitly.
+ // We don't have access to the eth config here, so we assume the ancient store lives
+ // under the "chaindata" folder.
+ path = filepath.Join(nodeCfg.ResolvePath(chaindataPath), ancientPath)
+ if common.FileExist(path) {
+ confirmAndRemoveDB(c.UI, path, "full node ancient database")
+ } else {
+ log.Info("Full node ancient database missing", "path", path)
+ }
+
+ // Remove the light node database
+ path = nodeCfg.ResolvePath(lightchaindataPath)
+ if common.FileExist(path) {
+ confirmAndRemoveDB(c.UI, path, "light node database")
+ } else {
+ log.Info("Light node database missing", "path", path)
+ }
+
+ return 0
+}
+
+// confirmAndRemoveDB prompts the user for a last confirmation and removes the
+// folder if accepted.
+func confirmAndRemoveDB(ui cli.Ui, database string, kind string) {
+ for {
+ confirm, err := ui.Ask(fmt.Sprintf("Remove %s (%s)? [y/n]", kind, database))
+
+ switch {
+ case err != nil:
+ ui.Output(err.Error())
+ return
+ case confirm != "":
+ switch strings.ToLower(confirm) {
+ case "y":
+ start := time.Now()
+ err = filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
+ // If we're at the top level folder, recurse into it
+ if path == database {
+ return nil
+ }
+ // Delete all the files, but not subfolders
+ if !info.IsDir() {
+ return os.Remove(path)
+ }
+ return filepath.SkipDir
+ })
+
+ if err != nil && err != filepath.SkipDir {
+ ui.Output(err.Error())
+ } else {
+ log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
+ }
+
+ return
+ case "n":
+ log.Info("Database deletion skipped", "path", database)
+ return
+ }
+ }
+ }
+}
diff --git a/internal/cli/server/chains/chain.go b/internal/cli/server/chains/chain.go
index 05c12ef0b0..d6717f5893 100644
--- a/internal/cli/server/chains/chain.go
+++ b/internal/cli/server/chains/chain.go
@@ -1,8 +1,15 @@
package chains
import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/log"
)
type Chain struct {
@@ -18,7 +25,61 @@ var chains = map[string]*Chain{
"mumbai": mumbaiTestnet,
}
-func GetChain(name string) (*Chain, bool) {
- chain, ok := chains[name]
- return chain, ok
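+// GetChain returns the chain definition for the given name: if name is a path to an
+// existing file, the definition is imported from that file, otherwise it is looked up
+// among the built-in chains.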
+func GetChain(name string) (*Chain, error) {
+ var (
+ chain *Chain
+ err error
+ )
+
+ if _, fileErr := os.Stat(name); fileErr == nil {
+ if chain, err = ImportFromFile(name); err != nil {
+ return nil, fmt.Errorf("error importing chain from file: %v", err)
+ }
+
+ return chain, nil
+ } else if errors.Is(fileErr, os.ErrNotExist) {
+ var ok bool
+ if chain, ok = chains[name]; !ok {
+ return nil, fmt.Errorf("chain %s not found", name)
+ }
+
+ return chain, nil
+ } else {
+ return nil, fileErr
+ }
+}
+
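+// ImportFromFile reads a chain definition from a JSON file on disk.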
+func ImportFromFile(filename string) (*Chain, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ return importChain(data)
+}
+
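+// importChain parses a chain definition from raw JSON; if no genesis is present,
+// it falls back to parsing the content as a legacy genesis file.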
+func importChain(content []byte) (*Chain, error) {
+ var chain *Chain
+
+ if err := json.Unmarshal(content, &chain); err != nil {
+ return nil, err
+ }
+
+ if chain.Genesis == nil {
+ log.Info("Try reading as legacy genesis")
+
+ var genesis core.Genesis
+ if err := json.Unmarshal(content, &genesis); err != nil {
+ return nil, err
+ }
+
+ if genesis.Config != nil {
+ chain.Genesis = &genesis
+ chain.NetworkId = genesis.Config.ChainID.Uint64()
+ } else {
+ return nil, fmt.Errorf("unable to parse chain config")
+ }
+ }
+
+ return chain, nil
}
diff --git a/internal/cli/server/chains/chain_test.go b/internal/cli/server/chains/chain_test.go
new file mode 100644
index 0000000000..b372a3c4b9
--- /dev/null
+++ b/internal/cli/server/chains/chain_test.go
@@ -0,0 +1,54 @@
+package chains
+
+import (
+ "testing"
+)
+
+func TestChain_ImportFromFile(t *testing.T) {
+ t.Parallel()
+
+ type args struct {
+ filename string
+ }
+
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "ImportFromFile correct json file",
+ args: args{filename: "test_files/chain_test.json"},
+ wantErr: false,
+ },
+ {
+ name: "ImportFromFile correct legacy json file",
+ args: args{filename: "test_files/chain_legacy_test.json"},
+ wantErr: false,
+ },
+ {
+ name: "ImportFromFile wrong json file",
+ args: args{filename: "test_files/wrong_chain.json"},
+ wantErr: true,
+ },
+ {
+ name: "ImportFromFile nonexistent json file",
+ args: args{filename: "test_files/chain_test_nonexistent.json"},
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ tt := tt
+
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ _, err := ImportFromFile(tt.args.filename)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ImportFromFile() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ })
+ }
+}
diff --git a/internal/cli/server/chains/test_files/chain_legacy_test.json b/internal/cli/server/chains/test_files/chain_legacy_test.json
new file mode 100644
index 0000000000..5702eaca40
--- /dev/null
+++ b/internal/cli/server/chains/test_files/chain_legacy_test.json
@@ -0,0 +1,83 @@
+{
+ "config":{
+ "chainId":80001,
+ "homesteadBlock":0,
+ "daoForkSupport":true,
+ "eip150Block":0,
+ "eip150Hash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "eip155Block":0,
+ "eip158Block":0,
+ "byzantiumBlock":0,
+ "constantinopleBlock":0,
+ "petersburgBlock":0,
+ "istanbulBlock":2722000,
+ "muirGlacierBlock":2722000,
+ "berlinBlock":13996000,
+ "londonBlock":13996000,
+ "bor":{
+ "period":{
+ "0":2
+ },
+ "producerDelay":6,
+ "sprint":64,
+ "backupMultiplier":{
+ "0":2
+ },
+ "validatorContract":"0x0000000000000000000000000000000000001000",
+ "stateReceiverContract":"0x0000000000000000000000000000000000001001",
+ "overrideStateSyncRecords":null,
+ "blockAlloc":{
+ "22244000":{
+ "0000000000000000000000000000000000001010":{
+ "balance":"0x0",
+ "code":"0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610caa565b005b3480156103eb57600080fd5b506103f4610dfc565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610e05565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610fc1565b005b3480156104e857600080fd5b506104f1611090565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b506105486110b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506110dc565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b506106046110fd565b005b34801561061257600080fd5b5061061b6111cd565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808
301925050505050505091929192905050506111d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b50610758611358565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af611381565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de6113d8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e611411565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061144e565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b50610964611474565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b8101908080359060200190929190505050611501565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050611521565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a65611541565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a90611548565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb61154e565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506115db565b005b348015610b2e57600080fd5b50610b376115f8565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b60006040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b60003390506000610cba826110dc565b9050610cd18360065461161e90919063ffffffff16565b600681905550600083118015610ce657508234145b610d58576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610dd4876110dc565b6040518084815260
2001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610e0d611381565b610e1657600080fd5b600081118015610e535750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b610ea8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611da96023913960400191505060405180910390fd5b6000610eb3836110dc565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f19350505050158015610f00573d6000803e3d6000fd5b50610f168360065461163e90919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f68585610f98896110dc565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611027576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611d866023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061108c8261165d565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b611105611381565b61110e57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146111ee5760009350505050611352565b602085015192506040850151915060ff6041860151169050601b8160ff16101561121957601b810190505b601b8160ff16141580156112315750601c8160ff1614155b156112425760009350505050611352565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561129f573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561134e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600381526020017f013881000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611460576000905061146e565b61146b338484611755565b90505b92915050565b6040518060800160405280605b8152602001611e1e605b91396040516020018082805190602001908083835b602083106114c357805182526020820191506020810190506020830392506114a0565b6001836020036101000a0
380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061153761153286868686611b12565b611be8565b9050949350505050565b6201388181565b60015481565b604051806080016040528060528152602001611dcc605291396040516020018082805190602001908083835b6020831061159d578051825260208201915060208101905060208303925061157a565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6115e3611381565b6115ec57600080fd5b6115f58161165d565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282111561162d57600080fd5b600082840390508091505092915050565b60008082840190508381101561165357600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141561169757600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117d557600080fd5b505afa1580156117e9573d6000803e3d6000fd5b505050506040513d60208110156117ff57600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561189157600080fd5b505afa1580156118a5573d6000803e3d6000fd5b505050506040513d60208110156118bb57600080fd5b810190808051906020019092919050505090506118d9868686611c32565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156119e157600080fd5b505afa1580156119f5573d6000803e3d6000fd5b505050506040513d6020811015611a0b57600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a9957600080fd5b505afa158015611aad573d6000803e3d6000fd5b505050506040513d6020811015611ac357600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b6000806040518060800160405280605b8152602001611e1e605b91396040516020018082805190602001908083835b60208310611b645780518252602082019150602081019050602083039250611b41565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff871660
2082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415611cd4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611d1a573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820ccd6c2a9c259832bbb367986ee06cd87af23022681b0cb22311a864b701d939564736f6c63430005100032"
+ }
+ }
+ },
+ "burntContract":{
+ "22640000":"0x70bcA57F4579f58670aB2d18Ef16e02C17553C38"
+ },
+ "jaipurBlock":22770000
+ }
+ },
+ "nonce":"0x0",
+ "timestamp":"0x5ce28211",
+ "extraData":"0x",
+ "gasLimit":"0x989680",
+ "difficulty":"0x1",
+ "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase":"0x0000000000000000000000000000000000000000",
+ "alloc":{
+ "0000000000000000000000000000000000001000":{
+ "code":"0x608060405234801561001057600080fd5b50600436106101f05760003560e01c806360c8614d1161010f578063af26aa96116100a2578063d5b844eb11610071578063d5b844eb14610666578063dcf2793a14610684578063e3b7c924146106b6578063f59cf565146106d4576101f0565b8063af26aa96146105c7578063b71d7a69146105e7578063b7ab4db514610617578063c1b3c91914610636576101f0565b806370ba5707116100de57806370ba57071461052b57806398ab2b621461055b5780639d11b80714610579578063ae756451146105a9576101f0565b806360c8614d1461049c57806365b3a1e2146104bc57806366332354146104db578063687a9bd6146104f9576101f0565b80633434735f1161018757806344d6528f1161015657806344d6528f146103ee5780634dbc959f1461041e57806355614fcc1461043c578063582a8d081461046c576101f0565b80633434735f1461035257806335ddfeea1461037057806343ee8213146103a057806344c15cb1146103be576101f0565b806323f2a73f116101c357806323f2a73f146102a45780632bc06564146102d45780632de3a180146102f25780632eddf35214610322576101f0565b8063047a6c5b146101f55780630c35b1cb146102275780631270b5741461025857806323c2a2b414610288575b600080fd5b61020f600480360361020a9190810190612b24565b610706565b60405161021e93929190613463565b60405180910390f35b610241600480360361023c9190810190612b24565b61075d565b60405161024f929190613284565b60405180910390f35b610272600480360361026d9190810190612b4d565b610939565b60405161027f91906132bb565b60405180910390f35b6102a2600480360361029d9190810190612c2c565b610a91565b005b6102be60048036036102b99190810190612b4d565b61112a565b6040516102cb91906132bb565b60405180910390f35b6102dc611281565b6040516102e99190613411565b60405180910390f35b61030c60048036036103079190810190612a81565b611286565b60405161031991906132d6565b60405180910390f35b61033c60048036036103379190810190612b24565b611307565b6040516103499190613411565b60405180910390f35b61035a611437565b6040516103679190613269565b60405180910390f35b61038a60048036036103859190810190612abd565b61144f565b60405161039791906132bb565b60405180910390f35b6103a861151a565b6040516103b591906132d6565b60405180910390f35b6103d860048036036103d39190810190612b89565b611531565b6040516103e59190613411565b60405180910390f35b61040860048036036104039190810190612b4d565b611619565b60405161041591906133f6565b60405180910390f35b610426611781565b6040516104339190613411565b60405180910390f35b61045660048036036104519190810190612a06565b611791565b60405161046391906132bb565b60405180910390f35b61048660048036036104819190810190612a2f565b6117ab565b60405161049391906132d6565b60405180910390f35b6104a4611829565b6040516104b393929190613463565b60405180910390f35b6104c461189d565b6040516104d2929190613284565b60405180910390f35b6104e3611b6e565b6040516104f09190613411565b60405180910390f35b610513600480360361050e9190810190612bf0565b611b73565b6040516105229392919061342c565b60405180910390f35b61054560048036036105409190810190612a06565b611bd7565b60405161055291906132bb565b60405180910390f35b610563611bf1565b60405161057091906132d6565b60405180910390f35b610593600480360361058e9190810190612b24565b611c08565b6040516105a09190613411565b60405180910390f35b6105b1611d39565b6040516105be91906132d6565b60405180910390f35b6105cf611d50565b6040516105de93929190613463565b60405180910390f35b61060160048036036105fc9190810190612b24565b611db1565b60405161060e9190613411565b60405180910390f35b61061f611eb1565b60405161062d929190613284565b60405180910390f35b610650600480360361064b9190810190612b24565b611ec5565b60405161065d9190613411565b60405180910390f35b61066e611ee6565b60405161067b919061349a565b60405180910390f35b61069e60048036036106999190810190612bf0565b611eeb565b6040516106ad9392919061342c565b60405180910390f35b6106be611f4f565b6040516106cb9190613411565b60405180910390f35b6106ee60048036036106e99190810190612b24565b611
f61565b6040516106fd93929190613463565b60405180910390f35b60008060006002600085815260200190815260200160002060000154600260008681526020019081526020016000206001015460026000878152602001908152602001600020600201549250925092509193909250565b60608060ff83116107795761077061189d565b91509150610934565b600061078484611db1565b9050606060016000838152602001908152602001600020805490506040519080825280602002602001820160405280156107cd5781602001602082028038833980820191505090505b509050606060016000848152602001908152602001600020805490506040519080825280602002602001820160405280156108175781602001602082028038833980820191505090505b50905060008090505b60016000858152602001908152602001600020805490508110156109295760016000858152602001908152602001600020818154811061085c57fe5b906000526020600020906003020160020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1683828151811061089a57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506001600085815260200190815260200160002081815481106108f257fe5b90600052602060002090600302016001015482828151811061091057fe5b6020026020010181815250508080600101915050610820565b508181945094505050505b915091565b6000606060016000858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015610a0c578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190610970565b50505050905060008090505b8151811015610a84578373ffffffffffffffffffffffffffffffffffffffff16828281518110610a4457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff161415610a7757600192505050610a8b565b8080600101915050610a18565b5060009150505b92915050565b73fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610b13576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b0a906133d6565b60405180910390fd5b6000610b1d611781565b90506000811415610b3157610b30611f8b565b5b610b456001826122ac90919063ffffffff16565b8814610b86576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b7d90613356565b60405180910390fd5b868611610bc8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610bbf906133b6565b60405180910390fd5b6000604060018989030181610bd957fe5b0614610c1a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c1190613396565b60405180910390fd5b8660026000838152602001908152602001600020600101541115610c73576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c6a90613336565b60405180910390fd5b6000600260008a81526020019081526020016000206000015414610ccc576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610cc390613376565b60405180910390fd5b604051806060016040528089815260200188815260200187815250600260008a8152602001908152602001600020600082015181600001556020820151816001015560408201518160020155905050600388908060018154018082558091505090600182039060005260206000200160009091929091909150555060008060008a815260200190815260200160002081610d669190612800565b506000600160008a815260200190815260200160002081610d879190612800565b506060610ddf610dda87878080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506122cb565b6122
f9565b905060008090505b8151811015610f51576060610e0e838381518110610e0157fe5b60200260200101516122f9565b90506000808c81526020019081526020016000208054809190600101610e349190612800565b506040518060600160405280610e5d83600081518110610e5057fe5b60200260200101516123d6565b8152602001610e7f83600181518110610e7257fe5b60200260200101516123d6565b8152602001610ea183600281518110610e9457fe5b6020026020010151612447565b73ffffffffffffffffffffffffffffffffffffffff168152506000808d81526020019081526020016000208381548110610ed757fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610de7565b506060610fa9610fa486868080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506122cb565b6122f9565b905060008090505b815181101561111d576060610fd8838381518110610fcb57fe5b60200260200101516122f9565b9050600160008d81526020019081526020016000208054809190600101610fff9190612800565b5060405180606001604052806110288360008151811061101b57fe5b60200260200101516123d6565b815260200161104a8360018151811061103d57fe5b60200260200101516123d6565b815260200161106c8360028151811061105f57fe5b6020026020010151612447565b73ffffffffffffffffffffffffffffffffffffffff16815250600160008e815260200190815260200160002083815481106110a357fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610fb1565b5050505050505050505050565b60006060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156111fc578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611160565b50505050905060008090505b8151811015611274578373ffffffffffffffffffffffffffffffffffffffff1682828151811061123457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff1614156112675760019250505061127b565b8080600101915050611208565b5060009150505b92915050565b604081565b60006002600160f81b84846040516020016112a3939291906131d6565b6040516020818303038152906040526040516112bf9190613213565b602060405180830381855afa1580156112dc573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506112ff9190810190612a58565b905092915050565b60006060600080848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156113d9578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815250508152602001906001019061133d565b505050509050600080905060008090505b825181101561142c5761141d83828151811061140257fe5b602002602001015160200151836122ac90919063ffffffff16565b915080806001019150506113ea565b508092505050919050565b73fffffffffffffffffffffffffffffffffffffffe81565b600080600080859050600060218087518161146657fe5b04029050600081111561147f5761147c876117ab565b91505b6000602190505b818111611509576000600182038801519050818801519550806000602081106114ab57fe5b1a60f81b9450600060f81b857efffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffff191614156114f0576114e98685611286565b93506114fd565b6114fa8487611286565b93505b50602181019050611486565b508782149450505050509392505050565b60405161152690613254565b604051809103902081565b60008060009050600080905060008090505b84518167ffffffffffffffff16101561160c57606061156e868367ffffffffffffffff16604161246a565b9050600061158582896124f690919063ffffffff16565b905061158f612832565b6115998a83611619565b90506115a58a8361112a565b80156115dc57508473ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16115b156115fe578194506115fb8160200151876122ac90919063ffffffff16565b95505b505050604181019050611543565b5081925050509392505050565b611621612832565b6060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156116f1578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611655565b50505050905060008090505b8151811015611779578373ffffffffffffffffffffffffffffffffffffffff1682828151811061172957fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff16141561176c5781818151811061175d57fe5b60200260200101519250611779565b80806001019150506116fd565b505092915050565b600061178c43611db1565b905090565b60006117a461179e611781565b8361112a565b9050919050565b60006002600060f81b836040516020016117c69291906131aa565b6040516020818303038152906040526040516117e29190613213565b602060405180830381855afa1580156117ff573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506118229190810190612a58565b9050919050565b60008060008061184a600161183c611781565b6122ac90919063ffffffff16565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b606080606060056040519080825280602002602001820160405280156118d25781602001602082028038833980820191505090505b50905073c26880a0af2ea0c7e8130e6ec47af756465452e8816000815181106118f757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073be188d6641e8b680743a4815dfa0f6208038960f8160018151811061195357fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073c275dc8be39f50d12f66b6a63629c39da5bae5bd816002815181106119af57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073f903ba9e006193c1527bfbe65fe2123704ea3f9981600381518110611a0b57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073928ed6a3e94437bbd316ccad78479f1d163a6a8c81600481518110611a6757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505060606005604051908082528060200260200182016040528015611ad35781602001602082028038833980820191505090505b50905061271081600081518110611ae657fe5b60200260200101818152505061271081600181518110611b0257fe5b60200260200101818152505061271081600281518110611b1e57fe5b60200260200101818152505061271081600381518110611b3a57fe5b60200260200101818152505061271081600481518110611b5657fe5b60200260200101818152505081819350935050509091565b60ff81565b60016020528160005260406000208181548110611b8c57fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473ffff
ffffffffffffffffffffffffffffffffffff16905083565b6000611bea611be4611781565b83610939565b9050919050565b604051611bfd9061322a565b604051809103902081565b6000606060016000848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015611cdb578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611c3f565b505050509050600080905060008090505b8251811015611d2e57611d1f838281518110611d0457fe5b602002602001015160200151836122ac90919063ffffffff16565b91508080600101915050611cec565b508092505050919050565b604051611d459061323f565b604051809103902081565b600080600080611d5e611781565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b60008060038054905090505b6000811115611e7157611dce612869565b6002600060036001850381548110611de257fe5b906000526020600020015481526020019081526020016000206040518060600160405290816000820154815260200160018201548152602001600282015481525050905083816020015111158015611e3f57506000816040015114155b8015611e4f575080604001518411155b15611e6257806000015192505050611eac565b50808060019003915050611dbd565b5060006003805490501115611ea757600360016003805490500381548110611e9557fe5b90600052602060002001549050611eac565b600090505b919050565b606080611ebd4361075d565b915091509091565b60038181548110611ed257fe5b906000526020600020016000915090505481565b600281565b60006020528160005260406000208181548110611f0457fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905083565b600060404381611f5b57fe5b04905090565b60026020528060005260406000206000915090508060000154908060010154908060020154905083565b606080611f9661189d565b8092508193505050600080905060405180606001604052808281526020016000815260200160ff81525060026000838152602001908152602001600020600082015181600001556020820151816001015560408201518160020155905050600381908060018154018082558091505090600182039060005260206000200160009091929091909150555060008060008381526020019081526020016000208161203f9190612800565b50600060016000838152602001908152602001600020816120609190612800565b5060008090505b83518110156121825760008083815260200190815260200160002080548091906001016120949190612800565b5060405180606001604052808281526020018483815181106120b257fe5b602002602001015181526020018583815181106120cb57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff16815250600080848152602001908152602001600020828154811061210957fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050612067565b5060008090505b83518110156122a6576001600083815260200190815260200160002080548091906001016121b79190612800565b5060405180606001604052808281526020018483815181106121d557fe5b602002602001015181526020018583815181106121ee57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff1681525060016000848152602001908152602001600020828154811061222d57fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050612189565b50505050565b600080828
4019050838110156122c157600080fd5b8091505092915050565b6122d361288a565b600060208301905060405180604001604052808451815260200182815250915050919050565b606061230482612600565b61230d57600080fd5b60006123188361264e565b905060608160405190808252806020026020018201604052801561235657816020015b6123436128a4565b81526020019060019003908161233b5790505b509050600061236885602001516126bf565b8560200151019050600080600090505b848110156123c95761238983612748565b91506040518060400160405280838152602001848152508482815181106123ac57fe5b602002602001018190525081830192508080600101915050612378565b5082945050505050919050565b60008082600001511180156123f057506021826000015111155b6123f957600080fd5b600061240883602001516126bf565b9050600081846000015103905060008083866020015101905080519150602083101561243b57826020036101000a820491505b81945050505050919050565b6000601582600001511461245a57600080fd5b612463826123d6565b9050919050565b60608183018451101561247c57600080fd5b6060821560008114612499576040519150602082016040526124ea565b6040519150601f8416801560200281840101858101878315602002848b0101015b818310156124d757805183526020830192506020810190506124ba565b50868552601f19601f8301166040525050505b50809150509392505050565b600080600080604185511461251157600093505050506125fa565b602085015192506040850151915060ff6041860151169050601b8160ff16101561253c57601b810190505b601b8160ff16141580156125545750601c8160ff1614155b1561256557600093505050506125fa565b60006001878386866040516000815260200160405260405161258a94939291906132f1565b6020604051602081039080840390855afa1580156125ac573d6000803e3d6000fd5b505050602060405103519050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614156125f257600080fd5b809450505050505b92915050565b600080826000015114156126175760009050612649565b60008083602001519050805160001a915060c060ff168260ff16101561264257600092505050612649565b6001925050505b919050565b6000808260000151141561266557600090506126ba565b6000809050600061267984602001516126bf565b84602001510190506000846000015185602001510190505b808210156126b3576126a282612748565b820191508280600101935050612691565b8293505050505b919050565b600080825160001a9050608060ff168110156126df576000915050612743565b60b860ff16811080612704575060c060ff168110158015612703575060f860ff1681105b5b15612713576001915050612743565b60c060ff168110156127335760018060b80360ff16820301915050612743565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff1681101561276957600191506127f6565b60b860ff16811015612786576001608060ff1682030191506127f5565b60c060ff168110156127b65760b78103600185019450806020036101000a855104600182018101935050506127f4565b60f860ff168110156127d357600160c060ff1682030191506127f3565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b81548183558181111561282d5760030281600302836000526020600020918201910161282c91906128be565b5b505050565b60405180606001604052806000815260200160008152602001600073ffffffffffffffffffffffffffffffffffffffff1681525090565b60405180606001604052806000815260200160008152602001600081525090565b604051806040016040528060008152602001600081525090565b604051806040016040528060008152602001600081525090565b61291191905b8082111561290d5760008082016000905560018201600090556002820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506003016128c4565b5090565b90565b60008135905061292381613693565b92915050565b600081359050612938816136aa565b92915050565b60008151905061294d816136aa565b92915050565b60008083601f84011261296557600080fd5b8235905067ffffffffffffffff81111561297e57600080fd5b60208301915083600182028301111561299657600080fd5b9250929050565b600082601f
8301126129ae57600080fd5b81356129c16129bc826134e2565b6134b5565b915080825260208301602083018583830111156129dd57600080fd5b6129e883828461363d565b50505092915050565b600081359050612a00816136c1565b92915050565b600060208284031215612a1857600080fd5b6000612a2684828501612914565b91505092915050565b600060208284031215612a4157600080fd5b6000612a4f84828501612929565b91505092915050565b600060208284031215612a6a57600080fd5b6000612a788482850161293e565b91505092915050565b60008060408385031215612a9457600080fd5b6000612aa285828601612929565b9250506020612ab385828601612929565b9150509250929050565b600080600060608486031215612ad257600080fd5b6000612ae086828701612929565b9350506020612af186828701612929565b925050604084013567ffffffffffffffff811115612b0e57600080fd5b612b1a8682870161299d565b9150509250925092565b600060208284031215612b3657600080fd5b6000612b44848285016129f1565b91505092915050565b60008060408385031215612b6057600080fd5b6000612b6e858286016129f1565b9250506020612b7f85828601612914565b9150509250929050565b600080600060608486031215612b9e57600080fd5b6000612bac868287016129f1565b9350506020612bbd86828701612929565b925050604084013567ffffffffffffffff811115612bda57600080fd5b612be68682870161299d565b9150509250925092565b60008060408385031215612c0357600080fd5b6000612c11858286016129f1565b9250506020612c22858286016129f1565b9150509250929050565b600080600080600080600060a0888a031215612c4757600080fd5b6000612c558a828b016129f1565b9750506020612c668a828b016129f1565b9650506040612c778a828b016129f1565b955050606088013567ffffffffffffffff811115612c9457600080fd5b612ca08a828b01612953565b9450945050608088013567ffffffffffffffff811115612cbf57600080fd5b612ccb8a828b01612953565b925092505092959891949750929550565b6000612ce88383612d0c565b60208301905092915050565b6000612d00838361317d565b60208301905092915050565b612d15816135b2565b82525050565b612d24816135b2565b82525050565b6000612d358261352e565b612d3f8185613569565b9350612d4a8361350e565b8060005b83811015612d7b578151612d628882612cdc565b9750612d6d8361354f565b925050600181019050612d4e565b5085935050505092915050565b6000612d9382613539565b612d9d818561357a565b9350612da88361351e565b8060005b83811015612dd9578151612dc08882612cf4565b9750612dcb8361355c565b925050600181019050612dac565b5085935050505092915050565b612def816135c4565b82525050565b612e06612e01826135d0565b61367f565b82525050565b612e15816135fc565b82525050565b612e2c612e27826135fc565b613689565b82525050565b6000612e3d82613544565b612e47818561358b565b9350612e5781856020860161364c565b80840191505092915050565b6000612e706004836135a7565b91507f766f7465000000000000000000000000000000000000000000000000000000006000830152600482019050919050565b6000612eb0602d83613596565b91507f537461727420626c6f636b206d7573742062652067726561746572207468616e60008301527f2063757272656e74207370616e000000000000000000000000000000000000006020830152604082019050919050565b6000612f16600f83613596565b91507f496e76616c6964207370616e20696400000000000000000000000000000000006000830152602082019050919050565b6000612f56601383613596565b91507f5370616e20616c726561647920657869737473000000000000000000000000006000830152602082019050919050565b6000612f96604583613596565b91507f446966666572656e6365206265747765656e20737461727420616e6420656e6460008301527f20626c6f636b206d75737420626520696e206d756c7469706c6573206f66207360208301527f7072696e740000000000000000000000000000000000000000000000000000006040830152606082019050919050565b6000613022602a83613596565b91507f456e6420626c6f636b206d7573742062652067726561746572207468616e207360008301527f7461727420626c6f636b000000000000000000000000000000000000000000006020830152604082019050919050565b6000613088601283613596565b91507f4e6f7420537973746
56d204164646573732100000000000000000000000000006000830152602082019050919050565b60006130c86005836135a7565b91507f38303030310000000000000000000000000000000000000000000000000000006000830152600582019050919050565b6000613108600e836135a7565b91507f6865696d64616c6c2d38303030310000000000000000000000000000000000006000830152600e82019050919050565b606082016000820151613151600085018261317d565b506020820151613164602085018261317d565b5060408201516131776040850182612d0c565b50505050565b61318681613626565b82525050565b61319581613626565b82525050565b6131a481613630565b82525050565b60006131b68285612df5565b6001820191506131c68284612e1b565b6020820191508190509392505050565b60006131e28286612df5565b6001820191506131f28285612e1b565b6020820191506132028284612e1b565b602082019150819050949350505050565b600061321f8284612e32565b915081905092915050565b600061323582612e63565b9150819050919050565b600061324a826130bb565b9150819050919050565b600061325f826130fb565b9150819050919050565b600060208201905061327e6000830184612d1b565b92915050565b6000604082019050818103600083015261329e8185612d2a565b905081810360208301526132b28184612d88565b90509392505050565b60006020820190506132d06000830184612de6565b92915050565b60006020820190506132eb6000830184612e0c565b92915050565b60006080820190506133066000830187612e0c565b613313602083018661319b565b6133206040830185612e0c565b61332d6060830184612e0c565b95945050505050565b6000602082019050818103600083015261334f81612ea3565b9050919050565b6000602082019050818103600083015261336f81612f09565b9050919050565b6000602082019050818103600083015261338f81612f49565b9050919050565b600060208201905081810360008301526133af81612f89565b9050919050565b600060208201905081810360008301526133cf81613015565b9050919050565b600060208201905081810360008301526133ef8161307b565b9050919050565b600060608201905061340b600083018461313b565b92915050565b6000602082019050613426600083018461318c565b92915050565b6000606082019050613441600083018661318c565b61344e602083018561318c565b61345b6040830184612d1b565b949350505050565b6000606082019050613478600083018661318c565b613485602083018561318c565b613492604083018461318c565b949350505050565b60006020820190506134af600083018461319b565b92915050565b6000604051905081810181811067ffffffffffffffff821117156134d857600080fd5b8060405250919050565b600067ffffffffffffffff8211156134f957600080fd5b601f19601f8301169050602081019050919050565b6000819050602082019050919050565b6000819050602082019050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b600082825260208201905092915050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600081905092915050565b60006135bd82613606565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b8381101561366a57808201518184015260208101905061364f565b83811115613679576000848401525b50505050565b6000819050919050565b6000819050919050565b61369c816135b2565b81146136a757600080fd5b50565b6136b3816135fc565b81146136be57600080fd5b50565b6136ca81613626565b81146136d557600080fd5b5056fea365627a7a723158208f52ee07630ffe523cc6ad3e15f437f973dcfa36729cd697f9b0fc4a145a48f06c6578706572696d656e74616cf564736f6c634300050b0040",
+ "balance":"0x0"
+ },
+ "0000000000000000000000000000000000001001":{
+ "code":"0x608060405234801561001057600080fd5b50600436106100415760003560e01c806319494a17146100465780633434735f146100e15780635407ca671461012b575b600080fd5b6100c76004803603604081101561005c57600080fd5b81019080803590602001909291908035906020019064010000000081111561008357600080fd5b82018360208201111561009557600080fd5b803590602001918460018302840111640100000000831117156100b757600080fd5b9091929391929390505050610149565b604051808215151515815260200191505060405180910390f35b6100e961047a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610133610492565b6040518082815260200191505060405180910390f35b600073fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610200576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4e6f742053797374656d2041646465737321000000000000000000000000000081525060200191505060405180910390fd5b606061025761025285858080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050610498565b6104c6565b905060006102788260008151811061026b57fe5b60200260200101516105a3565b905080600160005401146102f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f537461746549647320617265206e6f742073657175656e7469616c000000000081525060200191505060405180910390fd5b600080815480929190600101919050555060006103248360018151811061031757fe5b6020026020010151610614565b905060606103458460028151811061033857fe5b6020026020010151610637565b9050610350826106c3565b1561046f576000624c4b409050606084836040516024018083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156103aa57808201518184015260208101905061038f565b50505050905090810190601f1680156103d75780820380516001836020036101000a031916815260200191505b5093505050506040516020818303038152906040527f26c53bea000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060008082516020840160008887f1965050505b505050509392505050565b73fffffffffffffffffffffffffffffffffffffffe81565b60005481565b6104a0610943565b600060208301905060405180604001604052808451815260200182815250915050919050565b60606104d1826106dc565b6104da57600080fd5b60006104e58361072a565b905060608160405190808252806020026020018201604052801561052357816020015b61051061095d565b8152602001906001900390816105085790505b5090506000610535856020015161079b565b8560200151019050600080600090505b848110156105965761055683610824565b915060405180604001604052808381526020018481525084828151811061057957fe5b602002602001018190525081830192508080600101915050610545565b5082945050505050919050565b60008082600001511180156105bd57506021826000015111155b6105c657600080fd5b60006105d5836020015161079b565b9050600081846000015103905060008083866020015101905080519150602083101561060857826020036101000a820491505b81945050505050919050565b6000601582600001511461062757600080fd5b610630826105a3565b9050919050565b6060600082600001511161064a57600080fd5b6000610659836020015161079b565b905060008184600001510390506060816040519080825280601f01601f19166020018201604052801561069b5781602001600182028038833980820191505090505b50905060008160200190506106b78487602001510182856108dc565b81945050505050919050565b600080823b905060008163ffffffff1611915050919050565b600080826000015114156106f3576
0009050610725565b60008083602001519050805160001a915060c060ff168260ff16101561071e57600092505050610725565b6001925050505b919050565b600080826000015114156107415760009050610796565b60008090506000610755846020015161079b565b84602001510190506000846000015185602001510190505b8082101561078f5761077e82610824565b82019150828060010193505061076d565b8293505050505b919050565b600080825160001a9050608060ff168110156107bb57600091505061081f565b60b860ff168110806107e0575060c060ff1681101580156107df575060f860ff1681105b5b156107ef57600191505061081f565b60c060ff1681101561080f5760018060b80360ff1682030191505061081f565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff1681101561084557600191506108d2565b60b860ff16811015610862576001608060ff1682030191506108d1565b60c060ff168110156108925760b78103600185019450806020036101000a855104600182018101935050506108d0565b60f860ff168110156108af57600160c060ff1682030191506108cf565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b60008114156108ea5761093e565b5b602060ff16811061091a5782518252602060ff1683019250602060ff1682019150602060ff16810390506108eb565b6000600182602060ff16036101000a03905080198451168184511681811785525050505b505050565b604051806040016040528060008152602001600081525090565b60405180604001604052806000815260200160008152509056fea265627a7a7231582083fbdacb76f32b4112d0f7db9a596937925824798a0026ba0232322390b5263764736f6c634300050b0032",
+ "balance":"0x0"
+ },
+ "0000000000000000000000000000000000001010":{
+ "code":"0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610e06565b005b3480156103eb57600080fd5b506103f4610f58565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610f61565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061111d565b005b3480156104e857600080fd5b506104f16111ec565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b50610548611212565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611238565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b50610604611259565b005b34801561061257600080fd5b5061061b611329565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808
3019250505050505050919291929050505061132f565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b506107586114b4565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af6114dd565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de611534565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e61156d565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506115aa565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b506109646115d0565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b810190808035906020019092919050505061165d565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803590602001909291908035906020019092919050505061167d565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a6561169d565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a906116a4565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb6116aa565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611737565b005b348015610b2e57600080fd5b50610b37611754565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b6000808511610c4857600080fd5b6000831480610c575750824311155b610cc9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f5369676e6174757265206973206578706972656400000000000000000000000081525060200191505060405180910390fd5b6000610cd73387878761167d565b9050600015156005600083815260200190815260200160002060009054906101000a900460ff16151514610d73576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600f8152602001807f536967206465616374697661746564000000000000000000000000000000000081525060200191505060405180910390fd5b60016005600083815260200190815260200160002060006101000a81548160ff021916908315150217905550610ded8189898080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690
50808301925050505050505061132f565b9150610dfa82848861177a565b50509695505050505050565b60003390506000610e1682611238565b9050610e2d83600654611b3790919063ffffffff16565b600681905550600083118015610e4257508234145b610eb4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610f3087611238565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610f696114dd565b610f7257600080fd5b600081118015610faf5750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b611004576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611e636023913960400191505060405180910390fd5b600061100f83611238565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f1935050505015801561105c573d6000803e3d6000fd5b5061107283600654611b5790919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f685856110f489611238565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611183576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611e406023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506111e882611b76565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b6112616114dd565b61126a57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b600080600080604185511461134a57600093505050506114ae565b602085015192506040850151915060ff6041860151169050601b8160ff16101561137557601b810190505b601b8160ff161415801561138d5750601c8160ff1614155b1561139e57600093505050506114ae565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff1614156114aa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473fffffffffffffffffffff
fffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600381526020017f013881000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b60008134146115bc57600090506115ca565b6115c733848461177a565b90505b92915050565b6040518060800160405280605b8152602001611ed8605b91396040516020018082805190602001908083835b6020831061161f57805182526020820191506020810190506020830392506115fc565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061169361168e86868686611c6e565b611d44565b9050949350505050565b6201388181565b60015481565b604051806080016040528060528152602001611e86605291396040516020018082805190602001908083835b602083106116f957805182526020820191506020810190506020830392506116d6565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b61173f6114dd565b61174857600080fd5b61175181611b76565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117fa57600080fd5b505afa15801561180e573d6000803e3d6000fd5b505050506040513d602081101561182457600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156118b657600080fd5b505afa1580156118ca573d6000803e3d6000fd5b505050506040513d60208110156118e057600080fd5b810190808051906020019092919050505090506118fe868686611d8e565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a0657600080fd5b505afa158015611a1a573d6000803e3d6000fd5b505050506040513d6020811015611a3057600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611abe57600080fd5b505afa158015611ad2573d6000803e3d6000fd5b505050506040513d6020811015611ae857600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b600082821115611b4657600080fd5b600082840390508091505092915050565b600080828401905083811015611b6c57600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415611bb057600080fd5b8073ffffffffffffffffffffffffff
ffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000806040518060800160405280605b8152602001611ed8605b91396040516020018082805190602001908083835b60208310611cc05780518252602082019150602081019050602083039250611c9d565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611dd4573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a723158208f81700133738d766ae3d68af591ad588b0125bd91449192179f460893f79f6b64736f6c634300050b0032",
+ "balance":"0x204fcd4f31349d83b6e00000"
+ },
+ "928ed6a3e94437bbd316ccad78479f1d163a6a8c":{
+ "balance":"0x3635c9adc5dea00000"
+ },
+ "be188d6641e8b680743a4815dfa0f6208038960f":{
+ "balance":"0x3635c9adc5dea00000"
+ },
+ "c26880a0af2ea0c7e8130e6ec47af756465452e8":{
+ "balance":"0x3635c9adc5dea00000"
+ },
+ "c275dc8be39f50d12f66b6a63629c39da5bae5bd":{
+ "balance":"0x3635c9adc5dea00000"
+ },
+ "f903ba9e006193c1527bfbe65fe2123704ea3f99":{
+ "balance":"0x3635c9adc5dea00000"
+ }
+ },
+ "number":"0x0",
+ "gasUsed":"0x0",
+ "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "baseFeePerGas":null
+}
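
Note on the fixtures in this patch: the `chain_test.json` file added below mirrors the genesis layout these changes rely on — a chain `Hash` plus an embedded `Genesis` whose `config.bor` section carries the consensus parameters (block `period`, `sprint` length, `producerDelay`, per-block `blockAlloc` overrides, `burntContract`, `jaipurBlock`) and whose `alloc` section seeds account balances and system-contract code. The wei-denominated hex balances seen in these genesis files decode as plain big integers (for example `0x3635c9adc5dea00000` is 1000 × 10^18 wei). The Go snippet that follows is only a minimal, hypothetical sketch of decoding that shape for inspection; the `chainFile` struct and its field selection are assumptions for illustration and are not the repository's actual `chains` package types.

    // Hypothetical sketch: decode the top-level shape of
    // internal/cli/server/chains/test_files/chain_test.json.
    // The struct below is an assumption for illustration, not bor's real API.
    package main

    import (
        "encoding/json"
        "fmt"
        "os"
    )

    // chainFile mirrors the fixture layout: a hash plus an embedded genesis
    // with its chain config, bor consensus parameters, and account allocations.
    type chainFile struct {
        Hash    string `json:"Hash"`
        Genesis struct {
            Config struct {
                ChainID uint64 `json:"chainId"`
                Bor     struct {
                    Period        map[string]uint64 `json:"period"`
                    Sprint        uint64            `json:"sprint"`
                    ProducerDelay uint64            `json:"producerDelay"`
                    JaipurBlock   uint64            `json:"jaipurBlock"`
                } `json:"bor"`
            } `json:"config"`
            GasLimit string `json:"gasLimit"`
            Alloc    map[string]struct {
                Balance string `json:"balance"`
            } `json:"alloc"`
        } `json:"Genesis"`
    }

    func main() {
        raw, err := os.ReadFile("internal/cli/server/chains/test_files/chain_test.json")
        if err != nil {
            panic(err)
        }
        var c chainFile
        if err := json.Unmarshal(raw, &c); err != nil {
            panic(err)
        }
        // Print a few decoded fields; balances and gas limit stay as hex strings here.
        fmt.Printf("chainId=%d sprint=%d producerDelay=%d allocs=%d gasLimit=%s\n",
            c.Genesis.Config.ChainID,
            c.Genesis.Config.Bor.Sprint,
            c.Genesis.Config.Bor.ProducerDelay,
            len(c.Genesis.Alloc),
            c.Genesis.GasLimit)
    }

Run from the repository root once this patch is applied; fields not listed in the sketch (such as the per-account `code` blobs and the `blockAlloc` overrides) are simply ignored by the decoder.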
diff --git a/internal/cli/server/chains/test_files/chain_test.json b/internal/cli/server/chains/test_files/chain_test.json
new file mode 100644
index 0000000000..5bfe155d27
--- /dev/null
+++ b/internal/cli/server/chains/test_files/chain_test.json
@@ -0,0 +1,92 @@
+{
+ "Hash":"0x7b66506a9ebdbf30d32b43c5f15a3b1216269a1ec3a75aa3182b86176a2b1ca7",
+ "Genesis":{
+ "config":{
+ "chainId":80001,
+ "homesteadBlock":0,
+ "daoForkSupport":true,
+ "eip150Block":0,
+ "eip150Hash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "eip155Block":0,
+ "eip158Block":0,
+ "byzantiumBlock":0,
+ "constantinopleBlock":0,
+ "petersburgBlock":0,
+ "istanbulBlock":2722000,
+ "muirGlacierBlock":2722000,
+ "berlinBlock":13996000,
+ "londonBlock":13996000,
+ "bor":{
+ "period":{
+ "0":2
+ },
+ "producerDelay":6,
+ "sprint":64,
+ "backupMultiplier":{
+ "0":2
+ },
+ "validatorContract":"0x0000000000000000000000000000000000001000",
+ "stateReceiverContract":"0x0000000000000000000000000000000000001001",
+ "overrideStateSyncRecords":null,
+ "blockAlloc":{
+ "22244000":{
+ "0000000000000000000000000000000000001010":{
+ "balance":"0x0",
+ "code":"0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610caa565b005b3480156103eb57600080fd5b506103f4610dfc565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610e05565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610fc1565b005b3480156104e857600080fd5b506104f1611090565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b506105486110b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506110dc565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b506106046110fd565b005b34801561061257600080fd5b5061061b6111cd565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808
301925050505050505091929192905050506111d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b50610758611358565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af611381565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de6113d8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e611411565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061144e565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b50610964611474565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b8101908080359060200190929190505050611501565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050611521565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a65611541565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a90611548565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb61154e565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506115db565b005b348015610b2e57600080fd5b50610b376115f8565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b60006040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b60003390506000610cba826110dc565b9050610cd18360065461161e90919063ffffffff16565b600681905550600083118015610ce657508234145b610d58576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610dd4876110dc565b6040518084815260
2001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610e0d611381565b610e1657600080fd5b600081118015610e535750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b610ea8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611da96023913960400191505060405180910390fd5b6000610eb3836110dc565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f19350505050158015610f00573d6000803e3d6000fd5b50610f168360065461163e90919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f68585610f98896110dc565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611027576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611d866023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061108c8261165d565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b611105611381565b61110e57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146111ee5760009350505050611352565b602085015192506040850151915060ff6041860151169050601b8160ff16101561121957601b810190505b601b8160ff16141580156112315750601c8160ff1614155b156112425760009350505050611352565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561129f573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561134e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600381526020017f013881000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611460576000905061146e565b61146b338484611755565b90505b92915050565b6040518060800160405280605b8152602001611e1e605b91396040516020018082805190602001908083835b602083106114c357805182526020820191506020810190506020830392506114a0565b6001836020036101000a0
380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061153761153286868686611b12565b611be8565b9050949350505050565b6201388181565b60015481565b604051806080016040528060528152602001611dcc605291396040516020018082805190602001908083835b6020831061159d578051825260208201915060208101905060208303925061157a565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6115e3611381565b6115ec57600080fd5b6115f58161165d565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282111561162d57600080fd5b600082840390508091505092915050565b60008082840190508381101561165357600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141561169757600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117d557600080fd5b505afa1580156117e9573d6000803e3d6000fd5b505050506040513d60208110156117ff57600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561189157600080fd5b505afa1580156118a5573d6000803e3d6000fd5b505050506040513d60208110156118bb57600080fd5b810190808051906020019092919050505090506118d9868686611c32565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156119e157600080fd5b505afa1580156119f5573d6000803e3d6000fd5b505050506040513d6020811015611a0b57600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a9957600080fd5b505afa158015611aad573d6000803e3d6000fd5b505050506040513d6020811015611ac357600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b6000806040518060800160405280605b8152602001611e1e605b91396040516020018082805190602001908083835b60208310611b645780518252602082019150602081019050602083039250611b41565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff871660
2082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415611cd4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611d1a573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820ccd6c2a9c259832bbb367986ee06cd87af23022681b0cb22311a864b701d939564736f6c63430005100032"
+ }
+ }
+ },
+ "burntContract":{
+ "22640000":"0x70bcA57F4579f58670aB2d18Ef16e02C17553C38"
+ },
+ "jaipurBlock":22770000
+ }
+ },
+ "nonce":"0x0",
+ "timestamp":"0x5ce28211",
+ "extraData":"0x",
+ "gasLimit":"0x989680",
+ "difficulty":"0x1",
+ "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase":"0x0000000000000000000000000000000000000000",
+ "alloc":{
+ "0000000000000000000000000000000000001000":{
+ "code":"0x608060405234801561001057600080fd5b50600436106101f05760003560e01c806360c8614d1161010f578063af26aa96116100a2578063d5b844eb11610071578063d5b844eb14610666578063dcf2793a14610684578063e3b7c924146106b6578063f59cf565146106d4576101f0565b8063af26aa96146105c7578063b71d7a69146105e7578063b7ab4db514610617578063c1b3c91914610636576101f0565b806370ba5707116100de57806370ba57071461052b57806398ab2b621461055b5780639d11b80714610579578063ae756451146105a9576101f0565b806360c8614d1461049c57806365b3a1e2146104bc57806366332354146104db578063687a9bd6146104f9576101f0565b80633434735f1161018757806344d6528f1161015657806344d6528f146103ee5780634dbc959f1461041e57806355614fcc1461043c578063582a8d081461046c576101f0565b80633434735f1461035257806335ddfeea1461037057806343ee8213146103a057806344c15cb1146103be576101f0565b806323f2a73f116101c357806323f2a73f146102a45780632bc06564146102d45780632de3a180146102f25780632eddf35214610322576101f0565b8063047a6c5b146101f55780630c35b1cb146102275780631270b5741461025857806323c2a2b414610288575b600080fd5b61020f600480360361020a9190810190612b24565b610706565b60405161021e93929190613463565b60405180910390f35b610241600480360361023c9190810190612b24565b61075d565b60405161024f929190613284565b60405180910390f35b610272600480360361026d9190810190612b4d565b610939565b60405161027f91906132bb565b60405180910390f35b6102a2600480360361029d9190810190612c2c565b610a91565b005b6102be60048036036102b99190810190612b4d565b61112a565b6040516102cb91906132bb565b60405180910390f35b6102dc611281565b6040516102e99190613411565b60405180910390f35b61030c60048036036103079190810190612a81565b611286565b60405161031991906132d6565b60405180910390f35b61033c60048036036103379190810190612b24565b611307565b6040516103499190613411565b60405180910390f35b61035a611437565b6040516103679190613269565b60405180910390f35b61038a60048036036103859190810190612abd565b61144f565b60405161039791906132bb565b60405180910390f35b6103a861151a565b6040516103b591906132d6565b60405180910390f35b6103d860048036036103d39190810190612b89565b611531565b6040516103e59190613411565b60405180910390f35b61040860048036036104039190810190612b4d565b611619565b60405161041591906133f6565b60405180910390f35b610426611781565b6040516104339190613411565b60405180910390f35b61045660048036036104519190810190612a06565b611791565b60405161046391906132bb565b60405180910390f35b61048660048036036104819190810190612a2f565b6117ab565b60405161049391906132d6565b60405180910390f35b6104a4611829565b6040516104b393929190613463565b60405180910390f35b6104c461189d565b6040516104d2929190613284565b60405180910390f35b6104e3611b6e565b6040516104f09190613411565b60405180910390f35b610513600480360361050e9190810190612bf0565b611b73565b6040516105229392919061342c565b60405180910390f35b61054560048036036105409190810190612a06565b611bd7565b60405161055291906132bb565b60405180910390f35b610563611bf1565b60405161057091906132d6565b60405180910390f35b610593600480360361058e9190810190612b24565b611c08565b6040516105a09190613411565b60405180910390f35b6105b1611d39565b6040516105be91906132d6565b60405180910390f35b6105cf611d50565b6040516105de93929190613463565b60405180910390f35b61060160048036036105fc9190810190612b24565b611db1565b60405161060e9190613411565b60405180910390f35b61061f611eb1565b60405161062d929190613284565b60405180910390f35b610650600480360361064b9190810190612b24565b611ec5565b60405161065d9190613411565b60405180910390f35b61066e611ee6565b60405161067b919061349a565b60405180910390f35b61069e60048036036106999190810190612bf0565b611eeb565b6040516106ad9392919061342c565b60405180910390f35b6106be611f4f565b6040516106cb9190613411565b60405180910390f35b6106ee60048036036106e99190810190612b24565b611
f61565b6040516106fd93929190613463565b60405180910390f35b60008060006002600085815260200190815260200160002060000154600260008681526020019081526020016000206001015460026000878152602001908152602001600020600201549250925092509193909250565b60608060ff83116107795761077061189d565b91509150610934565b600061078484611db1565b9050606060016000838152602001908152602001600020805490506040519080825280602002602001820160405280156107cd5781602001602082028038833980820191505090505b509050606060016000848152602001908152602001600020805490506040519080825280602002602001820160405280156108175781602001602082028038833980820191505090505b50905060008090505b60016000858152602001908152602001600020805490508110156109295760016000858152602001908152602001600020818154811061085c57fe5b906000526020600020906003020160020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1683828151811061089a57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506001600085815260200190815260200160002081815481106108f257fe5b90600052602060002090600302016001015482828151811061091057fe5b6020026020010181815250508080600101915050610820565b508181945094505050505b915091565b6000606060016000858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015610a0c578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190610970565b50505050905060008090505b8151811015610a84578373ffffffffffffffffffffffffffffffffffffffff16828281518110610a4457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff161415610a7757600192505050610a8b565b8080600101915050610a18565b5060009150505b92915050565b73fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610b13576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b0a906133d6565b60405180910390fd5b6000610b1d611781565b90506000811415610b3157610b30611f8b565b5b610b456001826122ac90919063ffffffff16565b8814610b86576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b7d90613356565b60405180910390fd5b868611610bc8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610bbf906133b6565b60405180910390fd5b6000604060018989030181610bd957fe5b0614610c1a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c1190613396565b60405180910390fd5b8660026000838152602001908152602001600020600101541115610c73576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c6a90613336565b60405180910390fd5b6000600260008a81526020019081526020016000206000015414610ccc576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610cc390613376565b60405180910390fd5b604051806060016040528089815260200188815260200187815250600260008a8152602001908152602001600020600082015181600001556020820151816001015560408201518160020155905050600388908060018154018082558091505090600182039060005260206000200160009091929091909150555060008060008a815260200190815260200160002081610d669190612800565b506000600160008a815260200190815260200160002081610d879190612800565b506060610ddf610dda87878080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506122cb565b6122
f9565b905060008090505b8151811015610f51576060610e0e838381518110610e0157fe5b60200260200101516122f9565b90506000808c81526020019081526020016000208054809190600101610e349190612800565b506040518060600160405280610e5d83600081518110610e5057fe5b60200260200101516123d6565b8152602001610e7f83600181518110610e7257fe5b60200260200101516123d6565b8152602001610ea183600281518110610e9457fe5b6020026020010151612447565b73ffffffffffffffffffffffffffffffffffffffff168152506000808d81526020019081526020016000208381548110610ed757fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610de7565b506060610fa9610fa486868080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506122cb565b6122f9565b905060008090505b815181101561111d576060610fd8838381518110610fcb57fe5b60200260200101516122f9565b9050600160008d81526020019081526020016000208054809190600101610fff9190612800565b5060405180606001604052806110288360008151811061101b57fe5b60200260200101516123d6565b815260200161104a8360018151811061103d57fe5b60200260200101516123d6565b815260200161106c8360028151811061105f57fe5b6020026020010151612447565b73ffffffffffffffffffffffffffffffffffffffff16815250600160008e815260200190815260200160002083815481106110a357fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610fb1565b5050505050505050505050565b60006060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156111fc578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611160565b50505050905060008090505b8151811015611274578373ffffffffffffffffffffffffffffffffffffffff1682828151811061123457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff1614156112675760019250505061127b565b8080600101915050611208565b5060009150505b92915050565b604081565b60006002600160f81b84846040516020016112a3939291906131d6565b6040516020818303038152906040526040516112bf9190613213565b602060405180830381855afa1580156112dc573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506112ff9190810190612a58565b905092915050565b60006060600080848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156113d9578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815250508152602001906001019061133d565b505050509050600080905060008090505b825181101561142c5761141d83828151811061140257fe5b602002602001015160200151836122ac90919063ffffffff16565b915080806001019150506113ea565b508092505050919050565b73fffffffffffffffffffffffffffffffffffffffe81565b600080600080859050600060218087518161146657fe5b04029050600081111561147f5761147c876117ab565b91505b6000602190505b818111611509576000600182038801519050818801519550806000602081106114ab57fe5b1a60f81b9450600060f81b857efffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffff191614156114f0576114e98685611286565b93506114fd565b6114fa8487611286565b93505b50602181019050611486565b508782149450505050509392505050565b60405161152690613254565b604051809103902081565b60008060009050600080905060008090505b84518167ffffffffffffffff16101561160c57606061156e868367ffffffffffffffff16604161246a565b9050600061158582896124f690919063ffffffff16565b905061158f612832565b6115998a83611619565b90506115a58a8361112a565b80156115dc57508473ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16115b156115fe578194506115fb8160200151876122ac90919063ffffffff16565b95505b505050604181019050611543565b5081925050509392505050565b611621612832565b6060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156116f1578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611655565b50505050905060008090505b8151811015611779578373ffffffffffffffffffffffffffffffffffffffff1682828151811061172957fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff16141561176c5781818151811061175d57fe5b60200260200101519250611779565b80806001019150506116fd565b505092915050565b600061178c43611db1565b905090565b60006117a461179e611781565b8361112a565b9050919050565b60006002600060f81b836040516020016117c69291906131aa565b6040516020818303038152906040526040516117e29190613213565b602060405180830381855afa1580156117ff573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506118229190810190612a58565b9050919050565b60008060008061184a600161183c611781565b6122ac90919063ffffffff16565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b606080606060056040519080825280602002602001820160405280156118d25781602001602082028038833980820191505090505b50905073c26880a0af2ea0c7e8130e6ec47af756465452e8816000815181106118f757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073be188d6641e8b680743a4815dfa0f6208038960f8160018151811061195357fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073c275dc8be39f50d12f66b6a63629c39da5bae5bd816002815181106119af57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073f903ba9e006193c1527bfbe65fe2123704ea3f9981600381518110611a0b57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073928ed6a3e94437bbd316ccad78479f1d163a6a8c81600481518110611a6757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505060606005604051908082528060200260200182016040528015611ad35781602001602082028038833980820191505090505b50905061271081600081518110611ae657fe5b60200260200101818152505061271081600181518110611b0257fe5b60200260200101818152505061271081600281518110611b1e57fe5b60200260200101818152505061271081600381518110611b3a57fe5b60200260200101818152505061271081600481518110611b5657fe5b60200260200101818152505081819350935050509091565b60ff81565b60016020528160005260406000208181548110611b8c57fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473ffff
ffffffffffffffffffffffffffffffffffff16905083565b6000611bea611be4611781565b83610939565b9050919050565b604051611bfd9061322a565b604051809103902081565b6000606060016000848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015611cdb578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611c3f565b505050509050600080905060008090505b8251811015611d2e57611d1f838281518110611d0457fe5b602002602001015160200151836122ac90919063ffffffff16565b91508080600101915050611cec565b508092505050919050565b604051611d459061323f565b604051809103902081565b600080600080611d5e611781565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b60008060038054905090505b6000811115611e7157611dce612869565b6002600060036001850381548110611de257fe5b906000526020600020015481526020019081526020016000206040518060600160405290816000820154815260200160018201548152602001600282015481525050905083816020015111158015611e3f57506000816040015114155b8015611e4f575080604001518411155b15611e6257806000015192505050611eac565b50808060019003915050611dbd565b5060006003805490501115611ea757600360016003805490500381548110611e9557fe5b90600052602060002001549050611eac565b600090505b919050565b606080611ebd4361075d565b915091509091565b60038181548110611ed257fe5b906000526020600020016000915090505481565b600281565b60006020528160005260406000208181548110611f0457fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905083565b600060404381611f5b57fe5b04905090565b60026020528060005260406000206000915090508060000154908060010154908060020154905083565b606080611f9661189d565b8092508193505050600080905060405180606001604052808281526020016000815260200160ff81525060026000838152602001908152602001600020600082015181600001556020820151816001015560408201518160020155905050600381908060018154018082558091505090600182039060005260206000200160009091929091909150555060008060008381526020019081526020016000208161203f9190612800565b50600060016000838152602001908152602001600020816120609190612800565b5060008090505b83518110156121825760008083815260200190815260200160002080548091906001016120949190612800565b5060405180606001604052808281526020018483815181106120b257fe5b602002602001015181526020018583815181106120cb57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff16815250600080848152602001908152602001600020828154811061210957fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050612067565b5060008090505b83518110156122a6576001600083815260200190815260200160002080548091906001016121b79190612800565b5060405180606001604052808281526020018483815181106121d557fe5b602002602001015181526020018583815181106121ee57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff1681525060016000848152602001908152602001600020828154811061222d57fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050612189565b50505050565b600080828
4019050838110156122c157600080fd5b8091505092915050565b6122d361288a565b600060208301905060405180604001604052808451815260200182815250915050919050565b606061230482612600565b61230d57600080fd5b60006123188361264e565b905060608160405190808252806020026020018201604052801561235657816020015b6123436128a4565b81526020019060019003908161233b5790505b509050600061236885602001516126bf565b8560200151019050600080600090505b848110156123c95761238983612748565b91506040518060400160405280838152602001848152508482815181106123ac57fe5b602002602001018190525081830192508080600101915050612378565b5082945050505050919050565b60008082600001511180156123f057506021826000015111155b6123f957600080fd5b600061240883602001516126bf565b9050600081846000015103905060008083866020015101905080519150602083101561243b57826020036101000a820491505b81945050505050919050565b6000601582600001511461245a57600080fd5b612463826123d6565b9050919050565b60608183018451101561247c57600080fd5b6060821560008114612499576040519150602082016040526124ea565b6040519150601f8416801560200281840101858101878315602002848b0101015b818310156124d757805183526020830192506020810190506124ba565b50868552601f19601f8301166040525050505b50809150509392505050565b600080600080604185511461251157600093505050506125fa565b602085015192506040850151915060ff6041860151169050601b8160ff16101561253c57601b810190505b601b8160ff16141580156125545750601c8160ff1614155b1561256557600093505050506125fa565b60006001878386866040516000815260200160405260405161258a94939291906132f1565b6020604051602081039080840390855afa1580156125ac573d6000803e3d6000fd5b505050602060405103519050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614156125f257600080fd5b809450505050505b92915050565b600080826000015114156126175760009050612649565b60008083602001519050805160001a915060c060ff168260ff16101561264257600092505050612649565b6001925050505b919050565b6000808260000151141561266557600090506126ba565b6000809050600061267984602001516126bf565b84602001510190506000846000015185602001510190505b808210156126b3576126a282612748565b820191508280600101935050612691565b8293505050505b919050565b600080825160001a9050608060ff168110156126df576000915050612743565b60b860ff16811080612704575060c060ff168110158015612703575060f860ff1681105b5b15612713576001915050612743565b60c060ff168110156127335760018060b80360ff16820301915050612743565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff1681101561276957600191506127f6565b60b860ff16811015612786576001608060ff1682030191506127f5565b60c060ff168110156127b65760b78103600185019450806020036101000a855104600182018101935050506127f4565b60f860ff168110156127d357600160c060ff1682030191506127f3565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b81548183558181111561282d5760030281600302836000526020600020918201910161282c91906128be565b5b505050565b60405180606001604052806000815260200160008152602001600073ffffffffffffffffffffffffffffffffffffffff1681525090565b60405180606001604052806000815260200160008152602001600081525090565b604051806040016040528060008152602001600081525090565b604051806040016040528060008152602001600081525090565b61291191905b8082111561290d5760008082016000905560018201600090556002820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506003016128c4565b5090565b90565b60008135905061292381613693565b92915050565b600081359050612938816136aa565b92915050565b60008151905061294d816136aa565b92915050565b60008083601f84011261296557600080fd5b8235905067ffffffffffffffff81111561297e57600080fd5b60208301915083600182028301111561299657600080fd5b9250929050565b600082601f
8301126129ae57600080fd5b81356129c16129bc826134e2565b6134b5565b915080825260208301602083018583830111156129dd57600080fd5b6129e883828461363d565b50505092915050565b600081359050612a00816136c1565b92915050565b600060208284031215612a1857600080fd5b6000612a2684828501612914565b91505092915050565b600060208284031215612a4157600080fd5b6000612a4f84828501612929565b91505092915050565b600060208284031215612a6a57600080fd5b6000612a788482850161293e565b91505092915050565b60008060408385031215612a9457600080fd5b6000612aa285828601612929565b9250506020612ab385828601612929565b9150509250929050565b600080600060608486031215612ad257600080fd5b6000612ae086828701612929565b9350506020612af186828701612929565b925050604084013567ffffffffffffffff811115612b0e57600080fd5b612b1a8682870161299d565b9150509250925092565b600060208284031215612b3657600080fd5b6000612b44848285016129f1565b91505092915050565b60008060408385031215612b6057600080fd5b6000612b6e858286016129f1565b9250506020612b7f85828601612914565b9150509250929050565b600080600060608486031215612b9e57600080fd5b6000612bac868287016129f1565b9350506020612bbd86828701612929565b925050604084013567ffffffffffffffff811115612bda57600080fd5b612be68682870161299d565b9150509250925092565b60008060408385031215612c0357600080fd5b6000612c11858286016129f1565b9250506020612c22858286016129f1565b9150509250929050565b600080600080600080600060a0888a031215612c4757600080fd5b6000612c558a828b016129f1565b9750506020612c668a828b016129f1565b9650506040612c778a828b016129f1565b955050606088013567ffffffffffffffff811115612c9457600080fd5b612ca08a828b01612953565b9450945050608088013567ffffffffffffffff811115612cbf57600080fd5b612ccb8a828b01612953565b925092505092959891949750929550565b6000612ce88383612d0c565b60208301905092915050565b6000612d00838361317d565b60208301905092915050565b612d15816135b2565b82525050565b612d24816135b2565b82525050565b6000612d358261352e565b612d3f8185613569565b9350612d4a8361350e565b8060005b83811015612d7b578151612d628882612cdc565b9750612d6d8361354f565b925050600181019050612d4e565b5085935050505092915050565b6000612d9382613539565b612d9d818561357a565b9350612da88361351e565b8060005b83811015612dd9578151612dc08882612cf4565b9750612dcb8361355c565b925050600181019050612dac565b5085935050505092915050565b612def816135c4565b82525050565b612e06612e01826135d0565b61367f565b82525050565b612e15816135fc565b82525050565b612e2c612e27826135fc565b613689565b82525050565b6000612e3d82613544565b612e47818561358b565b9350612e5781856020860161364c565b80840191505092915050565b6000612e706004836135a7565b91507f766f7465000000000000000000000000000000000000000000000000000000006000830152600482019050919050565b6000612eb0602d83613596565b91507f537461727420626c6f636b206d7573742062652067726561746572207468616e60008301527f2063757272656e74207370616e000000000000000000000000000000000000006020830152604082019050919050565b6000612f16600f83613596565b91507f496e76616c6964207370616e20696400000000000000000000000000000000006000830152602082019050919050565b6000612f56601383613596565b91507f5370616e20616c726561647920657869737473000000000000000000000000006000830152602082019050919050565b6000612f96604583613596565b91507f446966666572656e6365206265747765656e20737461727420616e6420656e6460008301527f20626c6f636b206d75737420626520696e206d756c7469706c6573206f66207360208301527f7072696e740000000000000000000000000000000000000000000000000000006040830152606082019050919050565b6000613022602a83613596565b91507f456e6420626c6f636b206d7573742062652067726561746572207468616e207360008301527f7461727420626c6f636b000000000000000000000000000000000000000000006020830152604082019050919050565b6000613088601283613596565b91507f4e6f7420537973746
56d204164646573732100000000000000000000000000006000830152602082019050919050565b60006130c86005836135a7565b91507f38303030310000000000000000000000000000000000000000000000000000006000830152600582019050919050565b6000613108600e836135a7565b91507f6865696d64616c6c2d38303030310000000000000000000000000000000000006000830152600e82019050919050565b606082016000820151613151600085018261317d565b506020820151613164602085018261317d565b5060408201516131776040850182612d0c565b50505050565b61318681613626565b82525050565b61319581613626565b82525050565b6131a481613630565b82525050565b60006131b68285612df5565b6001820191506131c68284612e1b565b6020820191508190509392505050565b60006131e28286612df5565b6001820191506131f28285612e1b565b6020820191506132028284612e1b565b602082019150819050949350505050565b600061321f8284612e32565b915081905092915050565b600061323582612e63565b9150819050919050565b600061324a826130bb565b9150819050919050565b600061325f826130fb565b9150819050919050565b600060208201905061327e6000830184612d1b565b92915050565b6000604082019050818103600083015261329e8185612d2a565b905081810360208301526132b28184612d88565b90509392505050565b60006020820190506132d06000830184612de6565b92915050565b60006020820190506132eb6000830184612e0c565b92915050565b60006080820190506133066000830187612e0c565b613313602083018661319b565b6133206040830185612e0c565b61332d6060830184612e0c565b95945050505050565b6000602082019050818103600083015261334f81612ea3565b9050919050565b6000602082019050818103600083015261336f81612f09565b9050919050565b6000602082019050818103600083015261338f81612f49565b9050919050565b600060208201905081810360008301526133af81612f89565b9050919050565b600060208201905081810360008301526133cf81613015565b9050919050565b600060208201905081810360008301526133ef8161307b565b9050919050565b600060608201905061340b600083018461313b565b92915050565b6000602082019050613426600083018461318c565b92915050565b6000606082019050613441600083018661318c565b61344e602083018561318c565b61345b6040830184612d1b565b949350505050565b6000606082019050613478600083018661318c565b613485602083018561318c565b613492604083018461318c565b949350505050565b60006020820190506134af600083018461319b565b92915050565b6000604051905081810181811067ffffffffffffffff821117156134d857600080fd5b8060405250919050565b600067ffffffffffffffff8211156134f957600080fd5b601f19601f8301169050602081019050919050565b6000819050602082019050919050565b6000819050602082019050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b600082825260208201905092915050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600081905092915050565b60006135bd82613606565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b8381101561366a57808201518184015260208101905061364f565b83811115613679576000848401525b50505050565b6000819050919050565b6000819050919050565b61369c816135b2565b81146136a757600080fd5b50565b6136b3816135fc565b81146136be57600080fd5b50565b6136ca81613626565b81146136d557600080fd5b5056fea365627a7a723158208f52ee07630ffe523cc6ad3e15f437f973dcfa36729cd697f9b0fc4a145a48f06c6578706572696d656e74616cf564736f6c634300050b0040",
+ "balance":"0x0"
+ },
+ "0000000000000000000000000000000000001001":{
+ "code":"0x608060405234801561001057600080fd5b50600436106100415760003560e01c806319494a17146100465780633434735f146100e15780635407ca671461012b575b600080fd5b6100c76004803603604081101561005c57600080fd5b81019080803590602001909291908035906020019064010000000081111561008357600080fd5b82018360208201111561009557600080fd5b803590602001918460018302840111640100000000831117156100b757600080fd5b9091929391929390505050610149565b604051808215151515815260200191505060405180910390f35b6100e961047a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610133610492565b6040518082815260200191505060405180910390f35b600073fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610200576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4e6f742053797374656d2041646465737321000000000000000000000000000081525060200191505060405180910390fd5b606061025761025285858080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050610498565b6104c6565b905060006102788260008151811061026b57fe5b60200260200101516105a3565b905080600160005401146102f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f537461746549647320617265206e6f742073657175656e7469616c000000000081525060200191505060405180910390fd5b600080815480929190600101919050555060006103248360018151811061031757fe5b6020026020010151610614565b905060606103458460028151811061033857fe5b6020026020010151610637565b9050610350826106c3565b1561046f576000624c4b409050606084836040516024018083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156103aa57808201518184015260208101905061038f565b50505050905090810190601f1680156103d75780820380516001836020036101000a031916815260200191505b5093505050506040516020818303038152906040527f26c53bea000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060008082516020840160008887f1965050505b505050509392505050565b73fffffffffffffffffffffffffffffffffffffffe81565b60005481565b6104a0610943565b600060208301905060405180604001604052808451815260200182815250915050919050565b60606104d1826106dc565b6104da57600080fd5b60006104e58361072a565b905060608160405190808252806020026020018201604052801561052357816020015b61051061095d565b8152602001906001900390816105085790505b5090506000610535856020015161079b565b8560200151019050600080600090505b848110156105965761055683610824565b915060405180604001604052808381526020018481525084828151811061057957fe5b602002602001018190525081830192508080600101915050610545565b5082945050505050919050565b60008082600001511180156105bd57506021826000015111155b6105c657600080fd5b60006105d5836020015161079b565b9050600081846000015103905060008083866020015101905080519150602083101561060857826020036101000a820491505b81945050505050919050565b6000601582600001511461062757600080fd5b610630826105a3565b9050919050565b6060600082600001511161064a57600080fd5b6000610659836020015161079b565b905060008184600001510390506060816040519080825280601f01601f19166020018201604052801561069b5781602001600182028038833980820191505090505b50905060008160200190506106b78487602001510182856108dc565b81945050505050919050565b600080823b905060008163ffffffff1611915050919050565b600080826000015114156106f3576
0009050610725565b60008083602001519050805160001a915060c060ff168260ff16101561071e57600092505050610725565b6001925050505b919050565b600080826000015114156107415760009050610796565b60008090506000610755846020015161079b565b84602001510190506000846000015185602001510190505b8082101561078f5761077e82610824565b82019150828060010193505061076d565b8293505050505b919050565b600080825160001a9050608060ff168110156107bb57600091505061081f565b60b860ff168110806107e0575060c060ff1681101580156107df575060f860ff1681105b5b156107ef57600191505061081f565b60c060ff1681101561080f5760018060b80360ff1682030191505061081f565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff1681101561084557600191506108d2565b60b860ff16811015610862576001608060ff1682030191506108d1565b60c060ff168110156108925760b78103600185019450806020036101000a855104600182018101935050506108d0565b60f860ff168110156108af57600160c060ff1682030191506108cf565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b60008114156108ea5761093e565b5b602060ff16811061091a5782518252602060ff1683019250602060ff1682019150602060ff16810390506108eb565b6000600182602060ff16036101000a03905080198451168184511681811785525050505b505050565b604051806040016040528060008152602001600081525090565b60405180604001604052806000815260200160008152509056fea265627a7a7231582083fbdacb76f32b4112d0f7db9a596937925824798a0026ba0232322390b5263764736f6c634300050b0032",
+ "balance":"0x0"
+ },
+ "0000000000000000000000000000000000001010":{
+ "code":"0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610e06565b005b3480156103eb57600080fd5b506103f4610f58565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610f61565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061111d565b005b3480156104e857600080fd5b506104f16111ec565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b50610548611212565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611238565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b50610604611259565b005b34801561061257600080fd5b5061061b611329565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808
3019250505050505050919291929050505061132f565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b506107586114b4565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af6114dd565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de611534565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e61156d565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506115aa565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b506109646115d0565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b810190808035906020019092919050505061165d565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803590602001909291908035906020019092919050505061167d565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a6561169d565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a906116a4565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb6116aa565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611737565b005b348015610b2e57600080fd5b50610b37611754565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b6000808511610c4857600080fd5b6000831480610c575750824311155b610cc9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f5369676e6174757265206973206578706972656400000000000000000000000081525060200191505060405180910390fd5b6000610cd73387878761167d565b9050600015156005600083815260200190815260200160002060009054906101000a900460ff16151514610d73576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600f8152602001807f536967206465616374697661746564000000000000000000000000000000000081525060200191505060405180910390fd5b60016005600083815260200190815260200160002060006101000a81548160ff021916908315150217905550610ded8189898080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690
50808301925050505050505061132f565b9150610dfa82848861177a565b50509695505050505050565b60003390506000610e1682611238565b9050610e2d83600654611b3790919063ffffffff16565b600681905550600083118015610e4257508234145b610eb4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610f3087611238565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610f696114dd565b610f7257600080fd5b600081118015610faf5750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b611004576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611e636023913960400191505060405180910390fd5b600061100f83611238565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f1935050505015801561105c573d6000803e3d6000fd5b5061107283600654611b5790919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f685856110f489611238565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611183576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611e406023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506111e882611b76565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b6112616114dd565b61126a57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b600080600080604185511461134a57600093505050506114ae565b602085015192506040850151915060ff6041860151169050601b8160ff16101561137557601b810190505b601b8160ff161415801561138d5750601c8160ff1614155b1561139e57600093505050506114ae565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff1614156114aa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473fffffffffffffffffffff
fffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600381526020017f013881000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b60008134146115bc57600090506115ca565b6115c733848461177a565b90505b92915050565b6040518060800160405280605b8152602001611ed8605b91396040516020018082805190602001908083835b6020831061161f57805182526020820191506020810190506020830392506115fc565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061169361168e86868686611c6e565b611d44565b9050949350505050565b6201388181565b60015481565b604051806080016040528060528152602001611e86605291396040516020018082805190602001908083835b602083106116f957805182526020820191506020810190506020830392506116d6565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b61173f6114dd565b61174857600080fd5b61175181611b76565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117fa57600080fd5b505afa15801561180e573d6000803e3d6000fd5b505050506040513d602081101561182457600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156118b657600080fd5b505afa1580156118ca573d6000803e3d6000fd5b505050506040513d60208110156118e057600080fd5b810190808051906020019092919050505090506118fe868686611d8e565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a0657600080fd5b505afa158015611a1a573d6000803e3d6000fd5b505050506040513d6020811015611a3057600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611abe57600080fd5b505afa158015611ad2573d6000803e3d6000fd5b505050506040513d6020811015611ae857600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b600082821115611b4657600080fd5b600082840390508091505092915050565b600080828401905083811015611b6c57600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415611bb057600080fd5b8073ffffffffffffffffffffffffff
ffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000806040518060800160405280605b8152602001611ed8605b91396040516020018082805190602001908083835b60208310611cc05780518252602082019150602081019050602083039250611c9d565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611dd4573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a723158208f81700133738d766ae3d68af591ad588b0125bd91449192179f460893f79f6b64736f6c634300050b0032",
+ "balance":"0x204fcd4f31349d83b6e00000"
+ },
+ "928ed6a3e94437bbd316ccad78479f1d163a6a8c":{
+ "balance":"0x3635c9adc5dea00000"
+ },
+ "be188d6641e8b680743a4815dfa0f6208038960f":{
+ "balance":"0x3635c9adc5dea00000"
+ },
+ "c26880a0af2ea0c7e8130e6ec47af756465452e8":{
+ "balance":"0x3635c9adc5dea00000"
+ },
+ "c275dc8be39f50d12f66b6a63629c39da5bae5bd":{
+ "balance":"0x3635c9adc5dea00000"
+ },
+ "f903ba9e006193c1527bfbe65fe2123704ea3f99":{
+ "balance":"0x3635c9adc5dea00000"
+ }
+ },
+ "number":"0x0",
+ "gasUsed":"0x0",
+ "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "baseFeePerGas":null
+ },
+ "Bootnodes":[
+ "enode://320553cda00dfc003f499a3ce9598029f364fbb3ed1222fdc20a94d97dcc4d8ba0cd0bfa996579dcc6d17a534741fb0a5da303a90579431259150de66b597251@54.147.31.250:30303",
+ "enode://f0f48a8781629f95ff02606081e6e43e4aebd503f3d07fc931fad7dd5ca1ba52bd849a6f6c3be0e375cf13c9ae04d859c4a9ae3546dc8ed4f10aa5dbb47d4998@34.226.134.117:30303"
+ ],
+ "NetworkId":80001,
+ "DNS":null
+ }
diff --git a/internal/cli/server/chains/test_files/wrong_chain.json b/internal/cli/server/chains/test_files/wrong_chain.json
new file mode 100644
index 0000000000..9e26dfeeb6
--- /dev/null
+++ b/internal/cli/server/chains/test_files/wrong_chain.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/internal/cli/server/command.go b/internal/cli/server/command.go
index 4b2374dc42..2995f10f69 100644
--- a/internal/cli/server/command.go
+++ b/internal/cli/server/command.go
@@ -4,10 +4,12 @@ import (
"fmt"
"os"
"os/signal"
+ "strings"
"syscall"
- "github.com/ethereum/go-ethereum/log"
"github.com/mitchellh/cli"
+
+ "github.com/ethereum/go-ethereum/log"
)
// Command is the command to start the server
@@ -20,11 +22,22 @@ type Command struct {
// final configuration
config *Config
- configFile []string
+ configFile string
srv *Server
}
+// MarkDown implements cli.MarkDown interface
+func (c *Command) MarkDown() string {
+ items := []string{
+ "# Server",
+ "The ```bor server``` command runs the Bor client.",
+ c.Flags().MarkDown(),
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (c *Command) Help() string {
return `Usage: bor [options]
@@ -38,34 +51,57 @@ func (c *Command) Synopsis() string {
return "Run the Bor server"
}
-// Run implements the cli.Command interface
-func (c *Command) Run(args []string) int {
+func (c *Command) extractFlags(args []string) error {
+ config := *DefaultConfig()
+
flags := c.Flags()
if err := flags.Parse(args); err != nil {
c.UI.Error(err.Error())
- return 1
+ c.config = &config
+
+ return err
}
- // read config file
- config := DefaultConfig()
- for _, configFile := range c.configFile {
- cfg, err := readConfigFile(configFile)
+ // TODO: Check if this can be removed or not
+ // read cli flags
+ if err := config.Merge(c.cliConfig); err != nil {
+ c.UI.Error(err.Error())
+ c.config = &config
+
+ return err
+ }
+ // read the config file if provided; its values overwrite the cli flags
+ if c.configFile != "" {
+ log.Warn("Config File provided, this will overwrite the cli flags.", "configFile:", c.configFile)
+ cfg, err := readConfigFile(c.configFile)
if err != nil {
c.UI.Error(err.Error())
- return 1
+ c.config = &config
+
+ return err
}
if err := config.Merge(cfg); err != nil {
c.UI.Error(err.Error())
- return 1
+ c.config = &config
+
+ return err
}
}
- if err := config.Merge(c.cliConfig); err != nil {
+
+ c.config = &config
+
+ return nil
+}
+
+// Run implements the cli.Command interface
+func (c *Command) Run(args []string) int {
+ err := c.extractFlags(args)
+ if err != nil {
c.UI.Error(err.Error())
return 1
}
- c.config = config
- srv, err := NewServer(config)
+ srv, err := NewServer(c.config)
if err != nil {
c.UI.Error(err.Error())
return 1
@@ -100,3 +136,8 @@ func (c *Command) handleSignals() int {
}
return 1
}
+
+// GetConfig returns the user specified config
+func (c *Command) GetConfig() *Config {
+ return c.cliConfig
+}
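
With `--config` now a single string flag, `extractFlags` applies the CLI flags first and then lets a provided config file overwrite them (as the `log.Warn` above states). Below is a minimal test-style sketch of that precedence, assuming it sits next to `command_test.go` in the same `server` package; the test name, temp file and values are illustrative only:

```go
package server

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestConfigFileOverridesFlags is a hypothetical companion to TestFlags:
// it checks that a value read from --config overwrites the same value set
// via a CLI flag, matching the warning logged in extractFlags.
func TestConfigFileOverridesFlags(t *testing.T) {
	t.Parallel()

	// write a minimal TOML config into a temp dir
	cfgPath := filepath.Join(t.TempDir(), "config.toml")
	err := os.WriteFile(cfgPath, []byte("datadir = \"./file-data\"\n"), 0o600)
	require.NoError(t, err)

	var c Command

	// --datadir is also passed on the command line, but the file should win
	err = c.extractFlags([]string{
		"--config", cfgPath,
		"--datadir", "./cli-data",
	})
	require.NoError(t, err)
	require.Equal(t, "./file-data", c.config.DataDir)
}
```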
diff --git a/internal/cli/server/command_test.go b/internal/cli/server/command_test.go
new file mode 100644
index 0000000000..ab28de5ee6
--- /dev/null
+++ b/internal/cli/server/command_test.go
@@ -0,0 +1,50 @@
+package server
+
+import (
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestFlags(t *testing.T) {
+ t.Parallel()
+
+ var c Command
+
+ args := []string{
+ "--txpool.rejournal", "30m0s",
+ "--txpool.lifetime", "30m0s",
+ "--miner.gasprice", "20000000000",
+ "--gpo.maxprice", "70000000000",
+ "--gpo.ignoreprice", "1",
+ "--cache.trie.rejournal", "40m0s",
+ "--dev",
+ "--dev.period", "2",
+ "--datadir", "./data",
+ "--maxpeers", "30",
+ "--eth.requiredblocks", "a=b",
+ "--http.api", "eth,web3,bor",
+ }
+ err := c.extractFlags(args)
+
+ require.NoError(t, err)
+
+ txRe, _ := time.ParseDuration("30m0s")
+ txLt, _ := time.ParseDuration("30m0s")
+ caRe, _ := time.ParseDuration("40m0s")
+
+ require.Equal(t, c.config.DataDir, "./data")
+ require.Equal(t, c.config.Developer.Enabled, true)
+ require.Equal(t, c.config.Developer.Period, uint64(2))
+ require.Equal(t, c.config.TxPool.Rejournal, txRe)
+ require.Equal(t, c.config.TxPool.LifeTime, txLt)
+ require.Equal(t, c.config.Sealer.GasPrice, big.NewInt(20000000000))
+ require.Equal(t, c.config.Gpo.MaxPrice, big.NewInt(70000000000))
+ require.Equal(t, c.config.Gpo.IgnorePrice, big.NewInt(1))
+ require.Equal(t, c.config.Cache.Rejournal, caRe)
+ require.Equal(t, c.config.P2P.MaxPeers, uint64(30))
+ require.Equal(t, c.config.RequiredBlocks, map[string]string{"a": "b"})
+ require.Equal(t, c.config.JsonRPC.Http.API, []string{"eth", "web3", "bor"})
+}
diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go
index 06ec8c16af..5657e85b20 100644
--- a/internal/cli/server/config.go
+++ b/internal/cli/server/config.go
@@ -14,6 +14,11 @@ import (
godebug "runtime/debug"
+ "github.com/hashicorp/hcl/v2/hclsimple"
+ "github.com/imdario/mergo"
+ "github.com/mitchellh/go-homedir"
+ gopsutil "github.com/shirou/gopsutil/mem"
+
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
@@ -28,365 +33,411 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/params"
- "github.com/hashicorp/hcl/v2/hclsimple"
- "github.com/imdario/mergo"
- "github.com/mitchellh/go-homedir"
- gopsutil "github.com/shirou/gopsutil/mem"
+ "github.com/ethereum/go-ethereum/rpc"
)
type Config struct {
chain *chains.Chain
// Chain is the chain to sync with
- Chain string `hcl:"chain,optional"`
+ Chain string `hcl:"chain,optional" toml:"chain,optional"`
- // Name, or identity of the node
- Name string `hcl:"name,optional"`
+ // Identity of the node
+ Identity string `hcl:"identity,optional" toml:"identity,optional"`
- // Whitelist is a list of required (block number, hash) pairs to accept
- Whitelist map[string]string `hcl:"whitelist,optional"`
+ // RequiredBlocks is a list of required (block number, hash) pairs to accept
+ RequiredBlocks map[string]string `hcl:"eth.requiredblocks,optional" toml:"eth.requiredblocks,optional"`
// LogLevel is the level of the logs to put out
- LogLevel string `hcl:"log-level,optional"`
+ LogLevel string `hcl:"log-level,optional" toml:"log-level,optional"`
// DataDir is the directory to store the state in
- DataDir string `hcl:"data-dir,optional"`
+ DataDir string `hcl:"datadir,optional" toml:"datadir,optional"`
+
+ // Ancient is the directory to store the ancient chain segments (default = inside chaindata)
+ Ancient string `hcl:"ancient,optional" toml:"ancient,optional"`
+
+ // KeyStoreDir is the directory to store keystores
+ KeyStoreDir string `hcl:"keystore,optional" toml:"keystore,optional"`
// SyncMode selects the sync protocol
- SyncMode string `hcl:"sync-mode,optional"`
+ SyncMode string `hcl:"syncmode,optional" toml:"syncmode,optional"`
// GcMode selects the garbage collection mode for the trie
- GcMode string `hcl:"gc-mode,optional"`
+ GcMode string `hcl:"gcmode,optional" toml:"gcmode,optional"`
+
+ // Snapshot enables the snapshot database mode
+ Snapshot bool `hcl:"snapshot,optional" toml:"snapshot,optional"`
- // XXX
- Snapshot bool `hcl:"snapshot,optional"`
+ // BorLogs enables bor log retrieval
+ BorLogs bool `hcl:"bor.logs,optional" toml:"bor.logs,optional"`
// Ethstats is the address of the ethstats server to send telemetry
- Ethstats string `hcl:"ethstats,optional"`
+ Ethstats string `hcl:"ethstats,optional" toml:"ethstats,optional"`
// P2P has the p2p network related settings
- P2P *P2PConfig `hcl:"p2p,block"`
+ P2P *P2PConfig `hcl:"p2p,block" toml:"p2p,block"`
// Heimdall has the heimdall connection related settings
- Heimdall *HeimdallConfig `hcl:"heimdall,block"`
+ Heimdall *HeimdallConfig `hcl:"heimdall,block" toml:"heimdall,block"`
// TxPool has the transaction pool related settings
- TxPool *TxPoolConfig `hcl:"txpool,block"`
+ TxPool *TxPoolConfig `hcl:"txpool,block" toml:"txpool,block"`
// Sealer has the validator related settings
- Sealer *SealerConfig `hcl:"sealer,block"`
+ Sealer *SealerConfig `hcl:"miner,block" toml:"miner,block"`
// JsonRPC has the json-rpc related settings
- JsonRPC *JsonRPCConfig `hcl:"jsonrpc,block"`
+ JsonRPC *JsonRPCConfig `hcl:"jsonrpc,block" toml:"jsonrpc,block"`
// Gpo has the gas price oracle related settings
- Gpo *GpoConfig `hcl:"gpo,block"`
+ Gpo *GpoConfig `hcl:"gpo,block" toml:"gpo,block"`
// Telemetry has the telemetry related settings
- Telemetry *TelemetryConfig `hcl:"telemetry,block"`
+ Telemetry *TelemetryConfig `hcl:"telemetry,block" toml:"telemetry,block"`
// Cache has the cache related settings
- Cache *CacheConfig `hcl:"cache,block"`
+ Cache *CacheConfig `hcl:"cache,block" toml:"cache,block"`
// Account has the validator account related settings
- Accounts *AccountsConfig `hcl:"accounts,block"`
+ Accounts *AccountsConfig `hcl:"accounts,block" toml:"accounts,block"`
// GRPC has the grpc server related settings
- GRPC *GRPCConfig
+ GRPC *GRPCConfig `hcl:"grpc,block" toml:"grpc,block"`
// Developer has the developer mode related settings
- Developer *DeveloperConfig
+ Developer *DeveloperConfig `hcl:"developer,block" toml:"developer,block"`
}
type P2PConfig struct {
// MaxPeers sets the maximum number of connected peers
- MaxPeers uint64 `hcl:"max-peers,optional"`
+ MaxPeers uint64 `hcl:"maxpeers,optional" toml:"maxpeers,optional"`
// MaxPendPeers sets the maximum number of pending connected peers
- MaxPendPeers uint64 `hcl:"max-pend-peers,optional"`
+ MaxPendPeers uint64 `hcl:"maxpendpeers,optional" toml:"maxpendpeers,optional"`
// Bind is the bind address
- Bind string `hcl:"bind,optional"`
+ Bind string `hcl:"bind,optional" toml:"bind,optional"`
// Port is the port number
- Port uint64 `hcl:"port,optional"`
+ Port uint64 `hcl:"port,optional" toml:"port,optional"`
// NoDiscover is used to disable discovery
- NoDiscover bool `hcl:"no-discover,optional"`
+ NoDiscover bool `hcl:"nodiscover,optional" toml:"nodiscover,optional"`
// NAT it used to set NAT options
- NAT string `hcl:"nat,optional"`
+ NAT string `hcl:"nat,optional" toml:"nat,optional"`
// Discovery has the p2p discovery related settings
- Discovery *P2PDiscovery `hcl:"discovery,block"`
+ Discovery *P2PDiscovery `hcl:"discovery,block" toml:"discovery,block"`
}
type P2PDiscovery struct {
// V5Enabled is used to enable disc v5 discovery mode
- V5Enabled bool `hcl:"v5-enabled,optional"`
+ V5Enabled bool `hcl:"v5disc,optional" toml:"v5disc,optional"`
// Bootnodes is the list of initial bootnodes
- Bootnodes []string `hcl:"bootnodes,optional"`
+ Bootnodes []string `hcl:"bootnodes,optional" toml:"bootnodes,optional"`
// BootnodesV4 is the list of initial v4 bootnodes
- BootnodesV4 []string `hcl:"bootnodesv4,optional"`
+ BootnodesV4 []string `hcl:"bootnodesv4,optional" toml:"bootnodesv4,optional"`
// BootnodesV5 is the list of initial v5 bootnodes
- BootnodesV5 []string `hcl:"bootnodesv5,optional"`
+ BootnodesV5 []string `hcl:"bootnodesv5,optional" toml:"bootnodesv5,optional"`
// StaticNodes is the list of static nodes
- StaticNodes []string `hcl:"static-nodes,optional"`
+ StaticNodes []string `hcl:"static-nodes,optional" toml:"static-nodes,optional"`
// TrustedNodes is the list of trusted nodes
- TrustedNodes []string `hcl:"trusted-nodes,optional"`
+ TrustedNodes []string `hcl:"trusted-nodes,optional" toml:"trusted-nodes,optional"`
// DNS is the list of enrtree:// URLs which will be queried for nodes to connect to
- DNS []string `hcl:"dns,optional"`
+ DNS []string `hcl:"dns,optional" toml:"dns,optional"`
}
type HeimdallConfig struct {
// URL is the url of the heimdall server
- URL string `hcl:"url,optional"`
+ URL string `hcl:"url,optional" toml:"url,optional"`
// Without is used to disable remote heimdall during testing
- Without bool `hcl:"without,optional"`
+ Without bool `hcl:"bor.without,optional" toml:"bor.without,optional"`
}
type TxPoolConfig struct {
// Locals are the addresses that should be treated by default as local
- Locals []string `hcl:"locals,optional"`
+ Locals []string `hcl:"locals,optional" toml:"locals,optional"`
// NoLocals disables local transaction handling
- NoLocals bool `hcl:"no-locals,optional"`
+ NoLocals bool `hcl:"nolocals,optional" toml:"nolocals,optional"`
// Journal is the path to store local transactions to survive node restarts
- Journal string `hcl:"journal,optional"`
+ Journal string `hcl:"journal,optional" toml:"journal,optional"`
// Rejournal is the time interval to regenerate the local transaction journal
- Rejournal time.Duration
- RejournalRaw string `hcl:"rejournal,optional"`
+ Rejournal time.Duration `hcl:"-,optional" toml:"-"`
+ RejournalRaw string `hcl:"rejournal,optional" toml:"rejournal,optional"`
// PriceLimit is the minimum gas price to enforce for acceptance into the pool
- PriceLimit uint64 `hcl:"price-limit,optional"`
+ PriceLimit uint64 `hcl:"pricelimit,optional" toml:"pricelimit,optional"`
// PriceBump is the minimum price bump percentage to replace an already existing transaction (nonce)
- PriceBump uint64 `hcl:"price-bump,optional"`
+ PriceBump uint64 `hcl:"pricebump,optional" toml:"pricebump,optional"`
// AccountSlots is the number of executable transaction slots guaranteed per account
- AccountSlots uint64 `hcl:"account-slots,optional"`
+ AccountSlots uint64 `hcl:"accountslots,optional" toml:"accountslots,optional"`
// GlobalSlots is the maximum number of executable transaction slots for all accounts
- GlobalSlots uint64 `hcl:"global-slots,optional"`
+ GlobalSlots uint64 `hcl:"globalslots,optional" toml:"globalslots,optional"`
// AccountQueue is the maximum number of non-executable transaction slots permitted per account
- AccountQueue uint64 `hcl:"account-queue,optional"`
+ AccountQueue uint64 `hcl:"accountqueue,optional" toml:"accountqueue,optional"`
// GlobalQueue is the maximum number of non-executable transaction slots for all accounts
- GlobalQueue uint64 `hcl:"global-queue,optional"`
+ GlobalQueue uint64 `hcl:"globalqueue,optional" toml:"globalqueue,optional"`
- // Lifetime is the maximum amount of time non-executable transaction are queued
- LifeTime time.Duration
- LifeTimeRaw string `hcl:"lifetime,optional"`
+ // LifeTime is the maximum amount of time non-executable transactions are queued
+ LifeTime time.Duration `hcl:"-,optional" toml:"-"`
+ LifeTimeRaw string `hcl:"lifetime,optional" toml:"lifetime,optional"`
}
type SealerConfig struct {
// Enabled is used to enable validator mode
- Enabled bool `hcl:"enabled,optional"`
+ Enabled bool `hcl:"mine,optional" toml:"mine,optional"`
// Etherbase is the address of the validator
- Etherbase string `hcl:"etherbase,optional"`
+ Etherbase string `hcl:"etherbase,optional" toml:"etherbase,optional"`
// ExtraData is the block extra data set by the miner
- ExtraData string `hcl:"extra-data,optional"`
+ ExtraData string `hcl:"extradata,optional" toml:"extradata,optional"`
// GasCeil is the target gas ceiling for mined blocks.
- GasCeil uint64 `hcl:"gas-ceil,optional"`
+ GasCeil uint64 `hcl:"gaslimit,optional" toml:"gaslimit,optional"`
// GasPrice is the minimum gas price for mining a transaction
- GasPrice *big.Int
- GasPriceRaw string `hcl:"gas-price,optional"`
+ GasPrice *big.Int `hcl:"-,optional" toml:"-"`
+ GasPriceRaw string `hcl:"gasprice,optional" toml:"gasprice,optional"`
}
type JsonRPCConfig struct {
// IPCDisable disables the ipc endpoint
- IPCDisable bool `hcl:"ipc-disable,optional"`
+ IPCDisable bool `hcl:"ipcdisable,optional" toml:"ipcdisable,optional"`
// IPCPath is the path of the ipc endpoint
- IPCPath string `hcl:"ipc-path,optional"`
-
- // VHost is the list of valid virtual hosts
- VHost []string `hcl:"vhost,optional"`
-
- // Cors is the list of Cors endpoints
- Cors []string `hcl:"cors,optional"`
+ IPCPath string `hcl:"ipcpath,optional" toml:"ipcpath,optional"`
// GasCap is the global gas cap for eth-call variants.
- GasCap uint64 `hcl:"gas-cap,optional"`
+ GasCap uint64 `hcl:"gascap,optional" toml:"gascap,optional"`
// TxFeeCap is the global transaction fee cap for send-transaction variants
- TxFeeCap float64 `hcl:"tx-fee-cap,optional"`
+ TxFeeCap float64 `hcl:"txfeecap,optional" toml:"txfeecap,optional"`
// Http has the json-rpc http related settings
- Http *APIConfig `hcl:"http,block"`
+ Http *APIConfig `hcl:"http,block" toml:"http,block"`
+
+ // Ws has the json-rpc websocket related settings
+ Ws *APIConfig `hcl:"ws,block" toml:"ws,block"`
- // Http has the json-rpc websocket related settings
- Ws *APIConfig `hcl:"ws,block"`
+ // Graphql has the json-rpc graphql related settings
+ Graphql *APIConfig `hcl:"graphql,block" toml:"graphql,block"`
- // Http has the json-rpc graphql related settings
- Graphql *APIConfig `hcl:"graphql,block"`
+ HttpTimeout *HttpTimeouts `hcl:"timeouts,block" toml:"timeouts,block"`
}
type GRPCConfig struct {
// Addr is the bind address for the grpc rpc server
- Addr string
+ Addr string `hcl:"addr,optional" toml:"addr,optional"`
}
type APIConfig struct {
// Enabled selects whether the api is enabled
- Enabled bool `hcl:"enabled,optional"`
+ Enabled bool `hcl:"enabled,optional" toml:"enabled,optional"`
// Port is the port number for this api
- Port uint64 `hcl:"port,optional"`
+ Port uint64 `hcl:"port,optional" toml:"port,optional"`
// Prefix is the http prefix to expose this api
- Prefix string `hcl:"prefix,optional"`
+ Prefix string `hcl:"prefix,optional" toml:"prefix,optional"`
// Host is the address to bind the api
- Host string `hcl:"host,optional"`
+ Host string `hcl:"host,optional" toml:"host,optional"`
+
+ // API is the list of enabled api modules
+ API []string `hcl:"api,optional" toml:"api,optional"`
+
+ // VHost is the list of valid virtual hosts
+ VHost []string `hcl:"vhosts,optional" toml:"vhosts,optional"`
- // Modules is the list of enabled api modules
- Modules []string `hcl:"modules,optional"`
+ // Cors is the list of Cors endpoints
+ Cors []string `hcl:"corsdomain,optional" toml:"corsdomain,optional"`
+
+ // Origins is the list of endpoints to accept requests from (only consumed for websockets)
+ Origins []string `hcl:"origins,optional" toml:"origins,optional"`
+}
+
+// Used from rpc.HTTPTimeouts
+type HttpTimeouts struct {
+ // ReadTimeout is the maximum duration for reading the entire
+ // request, including the body.
+ //
+ // Because ReadTimeout does not let Handlers make per-request
+ // decisions on each request body's acceptable deadline or
+ // upload rate, most users will prefer to use
+ // ReadHeaderTimeout. It is valid to use them both.
+ ReadTimeout time.Duration `hcl:"-,optional" toml:"-"`
+ ReadTimeoutRaw string `hcl:"read,optional" toml:"read,optional"`
+
+ // WriteTimeout is the maximum duration before timing out
+ // writes of the response. It is reset whenever a new
+ // request's header is read. Like ReadTimeout, it does not
+ // let Handlers make decisions on a per-request basis.
+ WriteTimeout time.Duration `hcl:"-,optional" toml:"-"`
+ WriteTimeoutRaw string `hcl:"write,optional" toml:"write,optional"`
+
+ // IdleTimeout is the maximum amount of time to wait for the
+ // next request when keep-alives are enabled. If IdleTimeout
+ // is zero, the value of ReadTimeout is used. If both are
+ // zero, ReadHeaderTimeout is used.
+ IdleTimeout time.Duration `hcl:"-,optional" toml:"-"`
+ IdleTimeoutRaw string `hcl:"idle,optional" toml:"idle,optional"`
}
type GpoConfig struct {
// Blocks is the number of blocks to track to compute the price oracle
- Blocks uint64 `hcl:"blocks,optional"`
+ Blocks uint64 `hcl:"blocks,optional" toml:"blocks,optional"`
// Percentile sets the weights to new blocks
- Percentile uint64 `hcl:"percentile,optional"`
+ Percentile uint64 `hcl:"percentile,optional" toml:"percentile,optional"`
// MaxPrice is an upper bound gas price
- MaxPrice *big.Int
- MaxPriceRaw string `hcl:"max-price,optional"`
+ MaxPrice *big.Int `hcl:"-,optional" toml:"-"`
+ MaxPriceRaw string `hcl:"maxprice,optional" toml:"maxprice,optional"`
// IgnorePrice is a lower bound gas price
- IgnorePrice *big.Int
- IgnorePriceRaw string `hcl:"ignore-price,optional"`
+ IgnorePrice *big.Int `hcl:"-,optional" toml:"-"`
+ IgnorePriceRaw string `hcl:"ignoreprice,optional" toml:"ignoreprice,optional"`
}
type TelemetryConfig struct {
// Enabled enables metrics
- Enabled bool `hcl:"enabled,optional"`
+ Enabled bool `hcl:"metrics,optional" toml:"metrics,optional"`
// Expensive enables expensive metrics
- Expensive bool `hcl:"expensive,optional"`
+ Expensive bool `hcl:"expensive,optional" toml:"expensive,optional"`
// InfluxDB has the influxdb related settings
- InfluxDB *InfluxDBConfig `hcl:"influx,block"`
+ InfluxDB *InfluxDBConfig `hcl:"influx,block" toml:"influx,block"`
// Prometheus Address
- PrometheusAddr string `hcl:"prometheus-addr,optional"`
+ PrometheusAddr string `hcl:"prometheus-addr,optional" toml:"prometheus-addr,optional"`
// Open collector endpoint
- OpenCollectorEndpoint string `hcl:"opencollector-endpoint,optional"`
+ OpenCollectorEndpoint string `hcl:"opencollector-endpoint,optional" toml:"opencollector-endpoint,optional"`
}
type InfluxDBConfig struct {
// V1Enabled enables influx v1 mode
- V1Enabled bool `hcl:"v1-enabled,optional"`
+ V1Enabled bool `hcl:"influxdb,optional" toml:"influxdb,optional"`
// Endpoint is the url endpoint of the influxdb service
- Endpoint string `hcl:"endpoint,optional"`
+ Endpoint string `hcl:"endpoint,optional" toml:"endpoint,optional"`
// Database is the name of the database in Influxdb to store the metrics.
- Database string `hcl:"database,optional"`
+ Database string `hcl:"database,optional" toml:"database,optional"`
// Username is the username to authorize access to Influxdb
- Username string `hcl:"username,optional"`
+ Username string `hcl:"username,optional" toml:"username,optional"`
// Password is the password to authorize access to Influxdb
- Password string `hcl:"password,optional"`
+ Password string `hcl:"password,optional" toml:"password,optional"`
// Tags are tags attached to all generated metrics
- Tags map[string]string `hcl:"tags,optional"`
+ Tags map[string]string `hcl:"tags,optional" toml:"tags,optional"`
// V2Enabled enables influx v2 mode
- V2Enabled bool `hcl:"v2-enabled,optional"`
+ V2Enabled bool `hcl:"influxdbv2,optional" toml:"influxdbv2,optional"`
// Token is the token to authorize access to Influxdb V2.
- Token string `hcl:"token,optional"`
+ Token string `hcl:"token,optional" toml:"token,optional"`
// Bucket is the bucket to store metrics in Influxdb V2.
- Bucket string `hcl:"bucket,optional"`
+ Bucket string `hcl:"bucket,optional" toml:"bucket,optional"`
// Organization is the name of the organization for Influxdb V2.
- Organization string `hcl:"organization,optional"`
+ Organization string `hcl:"organization,optional" toml:"organization,optional"`
}
type CacheConfig struct {
// Cache is the amount of cache to allocate for the node
- Cache uint64 `hcl:"cache,optional"`
+ Cache uint64 `hcl:"cache,optional" toml:"cache,optional"`
// PercGc is percentage of cache used for garbage collection
- PercGc uint64 `hcl:"perc-gc,optional"`
+ PercGc uint64 `hcl:"gc,optional" toml:"gc,optional"`
// PercSnapshot is percentage of cache used for snapshots
- PercSnapshot uint64 `hcl:"perc-snapshot,optional"`
+ PercSnapshot uint64 `hcl:"snapshot,optional" toml:"snapshot,optional"`
// PercDatabase is percentage of cache used for the database
- PercDatabase uint64 `hcl:"perc-database,optional"`
+ PercDatabase uint64 `hcl:"database,optional" toml:"database,optional"`
// PercTrie is percentage of cache used for the trie
- PercTrie uint64 `hcl:"perc-trie,optional"`
+ PercTrie uint64 `hcl:"trie,optional" toml:"trie,optional"`
// Journal is the disk journal directory for trie cache to survive node restarts
- Journal string `hcl:"journal,optional"`
+ Journal string `hcl:"journal,optional" toml:"journal,optional"`
// Rejournal is the time interval to regenerate the journal for clean cache
- Rejournal time.Duration
- RejournalRaw string `hcl:"rejournal,optional"`
+ Rejournal time.Duration `hcl:"-,optional" toml:"-"`
+ RejournalRaw string `hcl:"rejournal,optional" toml:"rejournal,optional"`
// NoPrefetch is used to disable prefetch of tries
- NoPrefetch bool `hcl:"no-prefetch,optional"`
+ NoPrefetch bool `hcl:"noprefetch,optional" toml:"noprefetch,optional"`
// Preimages is used to enable the track of hash preimages
- Preimages bool `hcl:"preimages,optional"`
+ Preimages bool `hcl:"preimages,optional" toml:"preimages,optional"`
// TxLookupLimit sets the maximum number of blocks from head whose tx indices are reserved.
- TxLookupLimit uint64 `hcl:"tx-lookup-limit,optional"`
+ TxLookupLimit uint64 `hcl:"txlookuplimit,optional" toml:"txlookuplimit,optional"`
+
+ // TrieTimeout is the time after which the Merkle Patricia Trie is flushed to disk from memory
+ TrieTimeout time.Duration `hcl:"-,optional" toml:"-"`
+ TrieTimeoutRaw string `hcl:"timeout,optional" toml:"timeout,optional"`
}
type AccountsConfig struct {
// Unlock is the list of addresses to unlock in the node
- Unlock []string `hcl:"unlock,optional"`
+ Unlock []string `hcl:"unlock,optional" toml:"unlock,optional"`
// PasswordFile is the file where the account passwords are stored
- PasswordFile string `hcl:"password-file,optional"`
+ PasswordFile string `hcl:"password,optional" toml:"password,optional"`
// AllowInsecureUnlock allows user to unlock accounts in unsafe http environment.
- AllowInsecureUnlock bool `hcl:"allow-insecure-unlock,optional"`
+ AllowInsecureUnlock bool `hcl:"allow-insecure-unlock,optional" toml:"allow-insecure-unlock,optional"`
// UseLightweightKDF enables a faster but less secure encryption of accounts
- UseLightweightKDF bool `hcl:"use-lightweight-kdf,optional"`
+ UseLightweightKDF bool `hcl:"lightkdf,optional" toml:"lightkdf,optional"`
+
+ // DisableBorWallet disables the personal wallet endpoints
+ DisableBorWallet bool `hcl:"disable-bor-wallet,optional" toml:"disable-bor-wallet,optional"`
}
type DeveloperConfig struct {
// Enabled enables the developer mode
- Enabled bool `hcl:"dev,optional"`
+ Enabled bool `hcl:"dev,optional" toml:"dev,optional"`
// Period is the block period to use in developer mode
- Period uint64 `hcl:"period,optional"`
+ Period uint64 `hcl:"period,optional" toml:"period,optional"`
}
func DefaultConfig() *Config {
return &Config{
- Chain: "mainnet",
- Name: Hostname(),
- Whitelist: map[string]string{},
- LogLevel: "INFO",
- DataDir: defaultDataDir(),
+ Chain: "mainnet",
+ Identity: Hostname(),
+ RequiredBlocks: map[string]string{},
+ LogLevel: "INFO",
+ DataDir: DefaultDataDir(),
+ Ancient: "",
P2P: &P2PConfig{
- MaxPeers: 30,
+ MaxPeers: 50,
MaxPendPeers: 50,
Bind: "0.0.0.0",
Port: 30303,
@@ -409,24 +460,25 @@ func DefaultConfig() *Config {
SyncMode: "full",
GcMode: "full",
Snapshot: true,
+ BorLogs: false,
TxPool: &TxPoolConfig{
Locals: []string{},
NoLocals: false,
- Journal: "",
- Rejournal: time.Duration(1 * time.Hour),
- PriceLimit: 1,
+ Journal: "transactions.rlp",
+ Rejournal: 1 * time.Hour,
+ PriceLimit: 1, // geth's default
PriceBump: 10,
AccountSlots: 16,
- GlobalSlots: 4096,
- AccountQueue: 64,
- GlobalQueue: 1024,
- LifeTime: time.Duration(3 * time.Hour),
+ GlobalSlots: 32768,
+ AccountQueue: 16,
+ GlobalQueue: 32768,
+ LifeTime: 3 * time.Hour,
},
Sealer: &SealerConfig{
Enabled: false,
Etherbase: "",
- GasCeil: 8000000,
- GasPrice: big.NewInt(params.GWei),
+ GasCeil: 30_000_000, // geth's default
+ GasPrice: big.NewInt(1 * params.GWei), // geth's default
ExtraData: "",
},
Gpo: &GpoConfig{
@@ -438,8 +490,6 @@ func DefaultConfig() *Config {
JsonRPC: &JsonRPCConfig{
IPCDisable: false,
IPCPath: "",
- Cors: []string{"*"},
- VHost: []string{"*"},
GasCap: ethconfig.Defaults.RPCGasCap,
TxFeeCap: ethconfig.Defaults.RPCTxFeeCap,
Http: &APIConfig{
@@ -447,25 +497,35 @@ func DefaultConfig() *Config {
Port: 8545,
Prefix: "",
Host: "localhost",
- Modules: []string{"web3", "net"},
+ API: []string{"eth", "net", "web3", "txpool", "bor"},
+ Cors: []string{"localhost"},
+ VHost: []string{"localhost"},
},
Ws: &APIConfig{
Enabled: false,
Port: 8546,
Prefix: "",
Host: "localhost",
- Modules: []string{"web3", "net"},
+ API: []string{"net", "web3"},
+ Origins: []string{"localhost"},
},
Graphql: &APIConfig{
Enabled: false,
+ Cors: []string{"localhost"},
+ VHost: []string{"localhost"},
+ },
+ HttpTimeout: &HttpTimeouts{
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ IdleTimeout: 120 * time.Second,
},
},
Ethstats: "",
Telemetry: &TelemetryConfig{
Enabled: false,
Expensive: false,
- PrometheusAddr: "",
- OpenCollectorEndpoint: "",
+ PrometheusAddr: "127.0.0.1:7071",
+ OpenCollectorEndpoint: "127.0.0.1:4317",
InfluxDB: &InfluxDBConfig{
V1Enabled: false,
Endpoint: "",
@@ -480,7 +540,7 @@ func DefaultConfig() *Config {
},
},
Cache: &CacheConfig{
- Cache: 1024,
+ Cache: 1024, // geth's default (suitable for mumbai)
PercDatabase: 50,
PercTrie: 15,
PercGc: 25,
@@ -490,12 +550,14 @@ func DefaultConfig() *Config {
NoPrefetch: false,
Preimages: false,
TxLookupLimit: 2350000,
+ TrieTimeout: 60 * time.Minute,
},
Accounts: &AccountsConfig{
Unlock: []string{},
PasswordFile: "",
AllowInsecureUnlock: false,
UseLightweightKDF: false,
+ DisableBorWallet: true,
},
GRPC: &GRPCConfig{
Addr: ":3131",
@@ -515,7 +577,7 @@ func (c *Config) fillBigInt() error {
}{
{"gpo.maxprice", &c.Gpo.MaxPrice, &c.Gpo.MaxPriceRaw},
{"gpo.ignoreprice", &c.Gpo.IgnorePrice, &c.Gpo.IgnorePriceRaw},
- {"sealer.gasprice", &c.Sealer.GasPrice, &c.Sealer.GasPriceRaw},
+ {"miner.gasprice", &c.Sealer.GasPrice, &c.Sealer.GasPriceRaw},
}
for _, x := range tds {
@@ -523,18 +585,22 @@ func (c *Config) fillBigInt() error {
b := new(big.Int)
var ok bool
+
if strings.HasPrefix(*x.str, "0x") {
b, ok = b.SetString((*x.str)[2:], 16)
} else {
b, ok = b.SetString(*x.str, 10)
}
+
if !ok {
return fmt.Errorf("%s can't parse big int %s", x.path, *x.str)
}
+
*x.str = ""
*x.td = b
}
}
+
return nil
}
@@ -544,9 +610,13 @@ func (c *Config) fillTimeDurations() error {
td *time.Duration
str *string
}{
+ {"jsonrpc.timeouts.read", &c.JsonRPC.HttpTimeout.ReadTimeout, &c.JsonRPC.HttpTimeout.ReadTimeoutRaw},
+ {"jsonrpc.timeouts.write", &c.JsonRPC.HttpTimeout.WriteTimeout, &c.JsonRPC.HttpTimeout.WriteTimeoutRaw},
+ {"jsonrpc.timeouts.idle", &c.JsonRPC.HttpTimeout.IdleTimeout, &c.JsonRPC.HttpTimeout.IdleTimeoutRaw},
{"txpool.lifetime", &c.TxPool.LifeTime, &c.TxPool.LifeTimeRaw},
{"txpool.rejournal", &c.TxPool.Rejournal, &c.TxPool.RejournalRaw},
{"cache.rejournal", &c.Cache.Rejournal, &c.Cache.RejournalRaw},
+ {"cache.timeout", &c.Cache.TrieTimeout, &c.Cache.TrieTimeoutRaw},
}
for _, x := range tds {
@@ -555,22 +625,19 @@ func (c *Config) fillTimeDurations() error {
if err != nil {
return fmt.Errorf("%s can't parse time duration %s", x.path, *x.str)
}
+
*x.str = ""
*x.td = d
}
}
+
return nil
}
func readConfigFile(path string) (*Config, error) {
ext := filepath.Ext(path)
if ext == ".toml" {
- // read file and apply the legacy config
- data, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- return readLegacyConfig(data)
+ return readLegacyConfig(path)
}
config := &Config{
@@ -578,26 +645,28 @@ func readConfigFile(path string) (*Config, error) {
Cache: &CacheConfig{},
Sealer: &SealerConfig{},
}
+
if err := hclsimple.DecodeFile(path, nil, config); err != nil {
return nil, fmt.Errorf("failed to decode config file '%s': %v", path, err)
}
+
if err := config.fillBigInt(); err != nil {
return nil, err
}
+
if err := config.fillTimeDurations(); err != nil {
return nil, err
}
+
return config, nil
}
func (c *Config) loadChain() error {
- if c.Developer.Enabled {
- return nil
- }
- chain, ok := chains.GetChain(c.Chain)
- if !ok {
- return fmt.Errorf("chain '%s' not found", c.Chain)
+ chain, err := chains.GetChain(c.Chain)
+ if err != nil {
+ return err
}
+
c.chain = chain
// preload some default values that depend on the chain file
@@ -605,20 +674,16 @@ func (c *Config) loadChain() error {
c.P2P.Discovery.DNS = c.chain.DNS
}
- // depending on the chain we have different cache values
- if c.Chain == "mainnet" {
- c.Cache.Cache = 4096
- } else {
- c.Cache.Cache = 1024
- }
return nil
}
-func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
+//nolint:gocognit
+func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (*ethconfig.Config, error) {
dbHandles, err := makeDatabaseHandles()
if err != nil {
return nil, err
}
+
n := ethconfig.Defaults
// only update for non-developer mode as we don't yet
@@ -627,6 +692,7 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
n.NetworkId = c.chain.NetworkId
n.Genesis = c.chain.Genesis
}
+
n.HeimdallURL = c.Heimdall.URL
n.WithoutHeimdall = c.Heimdall.Without
@@ -662,15 +728,42 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
if !common.IsHexAddress(etherbase) {
return nil, fmt.Errorf("etherbase is not an address: %s", etherbase)
}
+
n.Miner.Etherbase = common.HexToAddress(etherbase)
}
}
+ // unlock accounts
+ if len(c.Accounts.Unlock) > 0 {
+ if !stack.Config().InsecureUnlockAllowed && stack.Config().ExtRPCEnabled() {
+ return nil, fmt.Errorf("account unlock with HTTP access is forbidden")
+ }
+
+ ks := accountManager.Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+
+ passwords, err := MakePasswordListFromFile(c.Accounts.PasswordFile)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(passwords) < len(c.Accounts.Unlock) {
+ return nil, fmt.Errorf("number of passwords provided (%v) is less than number of accounts (%v) to unlock",
+ len(passwords), len(c.Accounts.Unlock))
+ }
+
+ for i, account := range c.Accounts.Unlock {
+ err = ks.Unlock(accounts.Account{Address: common.HexToAddress(account)}, passwords[i])
+ if err != nil {
+ return nil, fmt.Errorf("could not unlock an account %q", account)
+ }
+ }
+ }
+
// update for developer mode
if c.Developer.Enabled {
// Get a keystore
var ks *keystore.KeyStore
- if keystores := stack.AccountManager().Backends(keystore.KeyStoreType); len(keystores) > 0 {
+ if keystores := accountManager.Backends(keystore.KeyStoreType); len(keystores) > 0 {
ks = keystores[0].(*keystore.KeyStore)
}
@@ -680,6 +773,7 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
passphrase string
err error
)
+
// etherbase has been set above, configuring the miner address from command line flags.
if n.Miner.Etherbase != (common.Address{}) {
developer = accounts.Account{Address: n.Miner.Etherbase}
@@ -694,8 +788,13 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
if err := ks.Unlock(developer, passphrase); err != nil {
return nil, fmt.Errorf("failed to unlock developer account: %v", err)
}
+
log.Info("Using developer account", "address", developer.Address)
+ // Set the Etherbase
+ c.Sealer.Etherbase = developer.Address.Hex()
+ n.Miner.Etherbase = developer.Address
+
// get developer mode chain config
c.chain = chains.GetDeveloperChain(c.Developer.Period, developer.Address)
@@ -721,18 +820,20 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
n.SnapDiscoveryURLs = c.P2P.Discovery.DNS
}
- // whitelist
+ // RequiredBlocks
{
n.PeerRequiredBlocks = map[uint64]common.Hash{}
- for k, v := range c.Whitelist {
+ for k, v := range c.RequiredBlocks {
number, err := strconv.ParseUint(k, 0, 64)
if err != nil {
- return nil, fmt.Errorf("invalid whitelist block number %s: %v", k, err)
+ return nil, fmt.Errorf("invalid required block number %s: %v", k, err)
}
+
var hash common.Hash
if err = hash.UnmarshalText([]byte(v)); err != nil {
- return nil, fmt.Errorf("invalid whitelist hash %s: %v", v, err)
+ return nil, fmt.Errorf("invalid required block hash %s: %v", v, err)
}
+
n.PeerRequiredBlocks[number] = hash
}
}
@@ -751,6 +852,7 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024)
mem.Total = 2 * 1024 * 1024 * 1024
}
+
allowance := uint64(mem.Total / 1024 / 1024 / 3)
if cache > allowance {
log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance)
@@ -772,6 +874,7 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
n.NoPrefetch = c.Cache.NoPrefetch
n.Preimages = c.Cache.Preimages
n.TxLookupLimit = c.Cache.TxLookupLimit
+ n.TrieTimeout = c.Cache.TrieTimeout
}
n.RPCGasCap = c.JsonRPC.GasCap
@@ -780,6 +883,7 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
} else {
log.Info("Global gas cap disabled")
}
+
n.RPCTxFeeCap = c.JsonRPC.TxFeeCap
// sync mode. It can either be "fast", "full" or "snap". We disable
@@ -788,7 +892,10 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
case "full":
n.SyncMode = downloader.FullSync
case "snap":
- n.SyncMode = downloader.SnapSync
+ // n.SyncMode = downloader.SnapSync // TODO(snap): Uncomment when we have snap sync working
+ n.SyncMode = downloader.FullSync
+
+ log.Warn("Bor doesn't support Snap Sync yet, switching to Full Sync mode")
default:
return nil, fmt.Errorf("sync mode '%s' not found", c.SyncMode)
}
@@ -808,7 +915,7 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
}
// snapshot disable check
- if c.Snapshot {
+ if !c.Snapshot {
if n.SyncMode == downloader.SnapSync {
log.Info("Snap sync requested, enabling --snapshot")
} else {
@@ -818,7 +925,13 @@ func (c *Config) buildEth(stack *node.Node) (*ethconfig.Config, error) {
}
}
+ n.BorLogs = c.BorLogs
n.DatabaseHandles = dbHandles
+
+ if c.Ancient != "" {
+ n.DatabaseFreezer = c.Ancient
+ }
+
return &n, nil
}
@@ -840,6 +953,7 @@ func (c *Config) buildNode() (*node.Config, error) {
cfg := &node.Config{
Name: clientIdentifier,
DataDir: c.DataDir,
+ KeyStoreDir: c.KeyStoreDir,
UseLightweightKDF: c.Accounts.UseLightweightKDF,
InsecureUnlockAllowed: c.Accounts.AllowInsecureUnlock,
Version: params.VersionWithCommit(gitCommit, gitDate),
@@ -850,15 +964,20 @@ func (c *Config) buildNode() (*node.Config, error) {
ListenAddr: c.P2P.Bind + ":" + strconv.Itoa(int(c.P2P.Port)),
DiscoveryV5: c.P2P.Discovery.V5Enabled,
},
- HTTPModules: c.JsonRPC.Http.Modules,
- HTTPCors: c.JsonRPC.Cors,
- HTTPVirtualHosts: c.JsonRPC.VHost,
+ HTTPModules: c.JsonRPC.Http.API,
+ HTTPCors: c.JsonRPC.Http.Cors,
+ HTTPVirtualHosts: c.JsonRPC.Http.VHost,
HTTPPathPrefix: c.JsonRPC.Http.Prefix,
- WSModules: c.JsonRPC.Ws.Modules,
- WSOrigins: c.JsonRPC.Cors,
+ WSModules: c.JsonRPC.Ws.API,
+ WSOrigins: c.JsonRPC.Ws.Origins,
WSPathPrefix: c.JsonRPC.Ws.Prefix,
- GraphQLCors: c.JsonRPC.Cors,
- GraphQLVirtualHosts: c.JsonRPC.VHost,
+ GraphQLCors: c.JsonRPC.Graphql.Cors,
+ GraphQLVirtualHosts: c.JsonRPC.Graphql.VHost,
+ HTTPTimeouts: rpc.HTTPTimeouts{
+ ReadTimeout: c.JsonRPC.HttpTimeout.ReadTimeout,
+ WriteTimeout: c.JsonRPC.HttpTimeout.WriteTimeout,
+ IdleTimeout: c.JsonRPC.HttpTimeout.IdleTimeout,
+ },
}
// dev mode
@@ -870,6 +989,10 @@ func (c *Config) buildNode() (*node.Config, error) {
cfg.P2P.ListenAddr = ""
cfg.P2P.NoDial = true
cfg.P2P.DiscoveryV5 = false
+
+ // enable JsonRPC HTTP API
+ c.JsonRPC.Http.Enabled = true
+ cfg.HTTPModules = []string{"admin", "debug", "eth", "miner", "net", "personal", "txpool", "web3", "bor"}
}
// enable jsonrpc endpoints
@@ -878,6 +1001,7 @@ func (c *Config) buildNode() (*node.Config, error) {
cfg.HTTPHost = c.JsonRPC.Http.Host
cfg.HTTPPort = int(c.JsonRPC.Http.Port)
}
+
if c.JsonRPC.Ws.Enabled {
cfg.WSHost = c.JsonRPC.Ws.Host
cfg.WSPort = int(c.JsonRPC.Ws.Port)
@@ -888,6 +1012,7 @@ func (c *Config) buildNode() (*node.Config, error) {
if err != nil {
return nil, fmt.Errorf("wrong 'nat' flag: %v", err)
}
+
cfg.P2P.NAT = natif
// only check for non-developer modes
@@ -898,34 +1023,47 @@ func (c *Config) buildNode() (*node.Config, error) {
if len(bootnodes) == 0 {
bootnodes = c.chain.Bootnodes
}
+
if cfg.P2P.BootstrapNodes, err = parseBootnodes(bootnodes); err != nil {
return nil, err
}
+
if cfg.P2P.BootstrapNodesV5, err = parseBootnodes(c.P2P.Discovery.BootnodesV5); err != nil {
return nil, err
}
+
if cfg.P2P.StaticNodes, err = parseBootnodes(c.P2P.Discovery.StaticNodes); err != nil {
return nil, err
}
+
+ if len(cfg.P2P.StaticNodes) == 0 {
+ cfg.P2P.StaticNodes = cfg.StaticNodes()
+ }
+
if cfg.P2P.TrustedNodes, err = parseBootnodes(c.P2P.Discovery.TrustedNodes); err != nil {
return nil, err
}
+
+ if len(cfg.P2P.TrustedNodes) == 0 {
+ cfg.P2P.TrustedNodes = cfg.TrustedNodes()
+ }
}
if c.P2P.NoDiscover {
- // Disable networking, for now, we will not even allow incomming connections
- cfg.P2P.MaxPeers = 0
+ // Disable peer discovery
cfg.P2P.NoDiscovery = true
}
+
return cfg, nil
}
func (c *Config) Merge(cc ...*Config) error {
for _, elem := range cc {
- if err := mergo.Merge(c, elem, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
+ if err := mergo.Merge(c, elem, mergo.WithOverwriteWithEmptyValue); err != nil {
return fmt.Errorf("failed to merge configurations: %v", err)
}
}
+
return nil
}
@@ -934,10 +1072,12 @@ func makeDatabaseHandles() (int, error) {
if err != nil {
return -1, err
}
+
raised, err := fdlimit.Raise(uint64(limit))
if err != nil {
return -1, err
}
+
return int(raised / 2), nil
}
@@ -952,16 +1092,18 @@ func parseBootnodes(urls []string) ([]*enode.Node, error) {
dst = append(dst, node)
}
}
+
return dst, nil
}
-func defaultDataDir() string {
+func DefaultDataDir() string {
// Try to place the data folder in the user's home dir
home, _ := homedir.Dir()
if home == "" {
// we cannot guess a stable location
return ""
}
+
switch runtime.GOOS {
case "darwin":
return filepath.Join(home, "Library", "Bor")
@@ -971,6 +1113,7 @@ func defaultDataDir() string {
// Windows XP and below don't have LocalAppData.
panic("environment variable LocalAppData is undefined")
}
+
return filepath.Join(appdata, "Bor")
default:
return filepath.Join(home, ".bor")
@@ -982,5 +1125,22 @@ func Hostname() string {
if err != nil {
return "bor"
}
+
return hostname
}
+
+func MakePasswordListFromFile(path string) ([]string, error) {
+ text, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read password file: %v", err)
+ }
+
+ lines := strings.Split(string(text), "\n")
+
+ // Sanitise DOS line endings.
+ for i := range lines {
+ lines[i] = strings.TrimRight(lines[i], "\r")
+ }
+
+ return lines, nil
+}
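
The `*Raw` companion fields above are how human-readable durations and big integers are accepted from HCL/TOML: the typed field is skipped by the decoder (`hcl:"-"` / `toml:"-"`), the raw string is decoded, and `fillTimeDurations` / `fillBigInt` convert it afterwards. The following standalone sketch illustrates the same pattern; the struct and keys are illustrative, not the real `server.Config`, and it reuses `BurntSushi/toml` as `config_legacy.go` does:

```go
package main

import (
	"fmt"
	"math/big"
	"time"

	"github.com/BurntSushi/toml"
)

// gpo mirrors, in miniature, the GpoConfig pattern: the typed field is
// ignored by the decoder and a *Raw string field carries the user value.
type gpo struct {
	MaxPrice    *big.Int `toml:"-"`
	MaxPriceRaw string   `toml:"maxprice"`
}

type txPool struct {
	Rejournal    time.Duration `toml:"-"`
	RejournalRaw string        `toml:"rejournal"`
}

type config struct {
	Gpo    gpo    `toml:"gpo"`
	TxPool txPool `toml:"txpool"`
}

const sample = `
[gpo]
maxprice = "5000000000"

[txpool]
rejournal = "30m0s"
`

func main() {
	var c config
	if _, err := toml.Decode(sample, &c); err != nil {
		panic(err)
	}

	// the equivalent of fillBigInt: parse the raw string into a big.Int
	c.Gpo.MaxPrice, _ = new(big.Int).SetString(c.Gpo.MaxPriceRaw, 10)

	// the equivalent of fillTimeDurations: parse the raw duration string
	d, err := time.ParseDuration(c.TxPool.RejournalRaw)
	if err != nil {
		panic(err)
	}
	c.TxPool.Rejournal = d

	fmt.Println(c.Gpo.MaxPrice, c.TxPool.Rejournal) // 5000000000 30m0s
}
```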
diff --git a/internal/cli/server/config_legacy.go b/internal/cli/server/config_legacy.go
index 0d96b2e023..9411b8290d 100644
--- a/internal/cli/server/config_legacy.go
+++ b/internal/cli/server/config_legacy.go
@@ -1,33 +1,33 @@
package server
import (
- "bytes"
+ "fmt"
+ "io/ioutil"
- "github.com/naoina/toml"
+ "github.com/BurntSushi/toml"
)
-type legacyConfig struct {
- Node struct {
- P2P struct {
- StaticNodes []string
- TrustedNodes []string
- }
+func readLegacyConfig(path string) (*Config, error) {
+ data, err := ioutil.ReadFile(path)
+ tomlData := string(data)
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to read toml config file: %v", err)
}
-}
-func (l *legacyConfig) Config() *Config {
- c := DefaultConfig()
- c.P2P.Discovery.StaticNodes = l.Node.P2P.StaticNodes
- c.P2P.Discovery.TrustedNodes = l.Node.P2P.TrustedNodes
- return c
-}
+ conf := *DefaultConfig()
+
+ if _, err := toml.Decode(tomlData, &conf); err != nil {
+ return nil, fmt.Errorf("failed to decode toml config file: %v", err)
+ }
-func readLegacyConfig(data []byte) (*Config, error) {
- var legacy legacyConfig
+ if err := conf.fillBigInt(); err != nil {
+ return nil, err
+ }
- r := toml.NewDecoder(bytes.NewReader(data))
- if err := r.Decode(&legacy); err != nil {
+ if err := conf.fillTimeDurations(); err != nil {
return nil, err
}
- return legacy.Config(), nil
+
+ return &conf, nil
}
diff --git a/internal/cli/server/config_legacy_test.go b/internal/cli/server/config_legacy_test.go
index 399481fc9b..29cefdd7bf 100644
--- a/internal/cli/server/config_legacy_test.go
+++ b/internal/cli/server/config_legacy_test.go
@@ -1,21 +1,42 @@
package server
import (
+ "math/big"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
)
func TestConfigLegacy(t *testing.T) {
- toml := `[Node.P2P]
-StaticNodes = ["node1"]
-TrustedNodes = ["node2"]`
- config, err := readLegacyConfig([]byte(toml))
- if err != nil {
- t.Fatal(err)
+ readFile := func(path string) {
+ expectedConfig, err := readLegacyConfig(path)
+ assert.NoError(t, err)
+
+ testConfig := DefaultConfig()
+
+ testConfig.DataDir = "./data"
+ testConfig.Snapshot = false
+ testConfig.RequiredBlocks = map[string]string{
+ "31000000": "0x2087b9e2b353209c2c21e370c82daa12278efd0fe5f0febe6c29035352cf050e",
+ "32000000": "0x875500011e5eecc0c554f95d07b31cf59df4ca2505f4dbbfffa7d4e4da917c68",
+ }
+ testConfig.P2P.MaxPeers = 30
+ testConfig.TxPool.Locals = []string{}
+ testConfig.TxPool.LifeTime = time.Second
+ testConfig.Sealer.Enabled = true
+ testConfig.Sealer.GasCeil = 30000000
+ testConfig.Sealer.GasPrice = big.NewInt(1000000000)
+ testConfig.Gpo.IgnorePrice = big.NewInt(4)
+ testConfig.Cache.Cache = 1024
+ testConfig.Cache.Rejournal = time.Second
+
+ assert.Equal(t, expectedConfig, testConfig)
}
- assert.Equal(t, config.P2P.Discovery.StaticNodes, []string{"node1"})
- assert.Equal(t, config.P2P.Discovery.TrustedNodes, []string{"node2"})
+ // read file in toml format
+ t.Run("toml", func(t *testing.T) {
+ readFile("./testdata/test.toml")
+ })
}
diff --git a/internal/cli/server/config_test.go b/internal/cli/server/config_test.go
index 62296d82a4..3e6bb76b59 100644
--- a/internal/cli/server/config_test.go
+++ b/internal/cli/server/config_test.go
@@ -1,7 +1,6 @@
package server
import (
- "math/big"
"testing"
"time"
@@ -16,7 +15,7 @@ func TestConfigDefault(t *testing.T) {
_, err := config.buildNode()
assert.NoError(t, err)
- _, err = config.buildEth(nil)
+ _, err = config.buildEth(nil, nil)
assert.NoError(t, err)
}
@@ -24,7 +23,7 @@ func TestConfigMerge(t *testing.T) {
c0 := &Config{
Chain: "0",
Snapshot: true,
- Whitelist: map[string]string{
+ RequiredBlocks: map[string]string{
"a": "b",
},
TxPool: &TxPoolConfig{
@@ -40,7 +39,7 @@ func TestConfigMerge(t *testing.T) {
}
c1 := &Config{
Chain: "1",
- Whitelist: map[string]string{
+ RequiredBlocks: map[string]string{
"b": "c",
},
P2P: &P2PConfig{
@@ -52,61 +51,53 @@ func TestConfigMerge(t *testing.T) {
},
},
}
+
expected := &Config{
Chain: "1",
- Snapshot: true,
- Whitelist: map[string]string{
+ Snapshot: false,
+ RequiredBlocks: map[string]string{
"a": "b",
"b": "c",
},
- TxPool: &TxPoolConfig{
- LifeTime: 5 * time.Second,
- },
P2P: &P2PConfig{
MaxPeers: 10,
Discovery: &P2PDiscovery{
StaticNodes: []string{
- "a",
"b",
},
},
},
}
+
assert.NoError(t, c0.Merge(c1))
assert.Equal(t, c0, expected)
}
-func TestConfigLoadFile(t *testing.T) {
- readFile := func(path string) {
- config, err := readConfigFile(path)
- assert.NoError(t, err)
- assert.Equal(t, config, &Config{
- DataDir: "./data",
- Whitelist: map[string]string{
- "a": "b",
- },
- P2P: &P2PConfig{
- MaxPeers: 30,
- },
- TxPool: &TxPoolConfig{
- LifeTime: time.Duration(1 * time.Second),
- },
- Gpo: &GpoConfig{
- MaxPrice: big.NewInt(100),
- },
- Sealer: &SealerConfig{},
- Cache: &CacheConfig{},
- })
+func TestDefaultDatatypeOverride(t *testing.T) {
+ t.Parallel()
+
+ // This test is specific to the `maxpeers` flag (for now) and checks that
+ // a default datatype value (0 in the case of uint64) coming from the
+ // incoming config still overrides the existing value.
+ c0 := &Config{
+ P2P: &P2PConfig{
+ MaxPeers: 30,
+ },
}
- // read file in hcl format
- t.Run("hcl", func(t *testing.T) {
- readFile("./testdata/simple.hcl")
- })
- // read file in json format
- t.Run("json", func(t *testing.T) {
- readFile("./testdata/simple.json")
- })
+ c1 := &Config{
+ P2P: &P2PConfig{
+ MaxPeers: 0,
+ },
+ }
+
+ expected := &Config{
+ P2P: &P2PConfig{
+ MaxPeers: 0,
+ },
+ }
+
+ assert.NoError(t, c0.Merge(c1))
+ assert.Equal(t, c0, expected)
}
var dummyEnodeAddr = "enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303"
@@ -131,3 +122,14 @@ func TestConfigBootnodesDefault(t *testing.T) {
assert.Len(t, cfg.P2P.BootstrapNodes, 1)
})
}
+
+func TestMakePasswordListFromFile(t *testing.T) {
+ t.Parallel()
+
+ t.Run("ReadPasswordFile", func(t *testing.T) {
+ t.Parallel()
+
+ result, _ := MakePasswordListFromFile("./testdata/password.txt")
+ assert.Equal(t, []string{"test1", "test2"}, result)
+ })
+}
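
A note on TestDefaultDatatypeOverride added above: it pins down that, at least for P2P.MaxPeers, Config.Merge lets the incoming value win even when it is the Go zero value (0 replaces 30). The toy function below only illustrates that rule under assumed semantics; it is not bor's actual Merge implementation.

// Illustrative sketch only -- not the real Config.Merge. It shows the rule
// the test checks: a zero value coming from the incoming config still
// overrides the value already present in the destination config.
func mergeP2PMaxPeers(dst, src *Config) {
	if dst.P2P == nil || src.P2P == nil {
		return
	}
	// Copy unconditionally: src.P2P.MaxPeers == 0 replaces dst.P2P.MaxPeers == 30.
	dst.P2P.MaxPeers = src.P2P.MaxPeers
}
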
diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go
index b8f6003420..a53e1c3e46 100644
--- a/internal/cli/server/flags.go
+++ b/internal/cli/server/flags.go
@@ -10,164 +10,224 @@ func (c *Command) Flags() *flagset.Flagset {
f := flagset.NewFlagSet("server")
f.StringFlag(&flagset.StringFlag{
- Name: "chain",
- Usage: "Name of the chain to sync",
- Value: &c.cliConfig.Chain,
+ Name: "chain",
+ Usage: "Name of the chain to sync ('mumbai', 'mainnet') or path to a genesis file",
+ Value: &c.cliConfig.Chain,
+ Default: c.cliConfig.Chain,
})
f.StringFlag(&flagset.StringFlag{
- Name: "name",
- Usage: "Name/Identity of the node",
- Value: &c.cliConfig.Name,
+ Name: "identity",
+ Usage: "Name/Identity of the node",
+ Value: &c.cliConfig.Identity,
+ Default: c.cliConfig.Identity,
})
f.StringFlag(&flagset.StringFlag{
- Name: "log-level",
- Usage: "Set log level for the server",
- Value: &c.cliConfig.LogLevel,
+ Name: "log-level",
+ Usage: "Set log level for the server",
+ Value: &c.cliConfig.LogLevel,
+ Default: c.cliConfig.LogLevel,
})
f.StringFlag(&flagset.StringFlag{
- Name: "datadir",
- Usage: "Path of the data directory to store information",
- Value: &c.cliConfig.DataDir,
+ Name: "datadir",
+ Usage: "Path of the data directory to store information",
+ Value: &c.cliConfig.DataDir,
+ Default: c.cliConfig.DataDir,
})
- f.SliceStringFlag(&flagset.SliceStringFlag{
+ f.StringFlag(&flagset.StringFlag{
+ Name: "datadir.ancient",
+ Usage: "Data directory for ancient chain segments (default = inside chaindata)",
+ Value: &c.cliConfig.Ancient,
+ Default: c.cliConfig.Ancient,
+ })
+ f.StringFlag(&flagset.StringFlag{
+ Name: "keystore",
+ Usage: "Path of the directory where keystores are located",
+ Value: &c.cliConfig.KeyStoreDir,
+ })
+ f.StringFlag(&flagset.StringFlag{
Name: "config",
Usage: "File for the config file",
Value: &c.configFile,
})
f.StringFlag(&flagset.StringFlag{
- Name: "syncmode",
- Usage: `Blockchain sync mode ("fast", "full", "snap" or "light")`,
- Value: &c.cliConfig.SyncMode,
+ Name: "syncmode",
+ Usage: `Blockchain sync mode (only "full" sync supported)`,
+ Value: &c.cliConfig.SyncMode,
+ Default: c.cliConfig.SyncMode,
})
f.StringFlag(&flagset.StringFlag{
- Name: "gcmode",
- Usage: `Blockchain garbage collection mode ("full", "archive")`,
- Value: &c.cliConfig.GcMode,
+ Name: "gcmode",
+ Usage: `Blockchain garbage collection mode ("full", "archive")`,
+ Value: &c.cliConfig.GcMode,
+ Default: c.cliConfig.GcMode,
})
f.MapStringFlag(&flagset.MapStringFlag{
- Name: "whitelist",
- Usage: "Comma separated block number-to-hash mappings to enforce (=)",
- Value: &c.cliConfig.Whitelist,
+ Name: "eth.requiredblocks",
+ Usage: "Comma separated block number-to-hash mappings to require for peering (=)",
+ Value: &c.cliConfig.RequiredBlocks,
+ })
+ f.BoolFlag(&flagset.BoolFlag{
+ Name: "snapshot",
+ Usage: `Enables the snapshot-database mode (default = true)`,
+ Value: &c.cliConfig.Snapshot,
+ Default: c.cliConfig.Snapshot,
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "snapshot",
- Usage: `Enables snapshot-database mode (default = enable)`,
- Value: &c.cliConfig.Snapshot,
+ Name: "bor.logs",
+ Usage: `Enables bor log retrieval (default = false)`,
+ Value: &c.cliConfig.BorLogs,
+ Default: c.cliConfig.BorLogs,
})
// heimdall
f.StringFlag(&flagset.StringFlag{
- Name: "bor.heimdall",
- Usage: "URL of Heimdall service",
- Value: &c.cliConfig.Heimdall.URL,
+ Name: "bor.heimdall",
+ Usage: "URL of Heimdall service",
+ Value: &c.cliConfig.Heimdall.URL,
+ Default: c.cliConfig.Heimdall.URL,
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "bor.withoutheimdall",
- Usage: "Run without Heimdall service (for testing purpose)",
- Value: &c.cliConfig.Heimdall.Without,
+ Name: "bor.withoutheimdall",
+ Usage: "Run without Heimdall service (for testing purpose)",
+ Value: &c.cliConfig.Heimdall.Without,
+ Default: c.cliConfig.Heimdall.Without,
})
// txpool options
f.SliceStringFlag(&flagset.SliceStringFlag{
- Name: "txpool.locals",
- Usage: "Comma separated accounts to treat as locals (no flush, priority inclusion)",
- Value: &c.cliConfig.TxPool.Locals,
+ Name: "txpool.locals",
+ Usage: "Comma separated accounts to treat as locals (no flush, priority inclusion)",
+ Value: &c.cliConfig.TxPool.Locals,
+ Default: c.cliConfig.TxPool.Locals,
+ Group: "Transaction Pool",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "txpool.nolocals",
- Usage: "Disables price exemptions for locally submitted transactions",
- Value: &c.cliConfig.TxPool.NoLocals,
+ Name: "txpool.nolocals",
+ Usage: "Disables price exemptions for locally submitted transactions",
+ Value: &c.cliConfig.TxPool.NoLocals,
+ Default: c.cliConfig.TxPool.NoLocals,
+ Group: "Transaction Pool",
})
f.StringFlag(&flagset.StringFlag{
- Name: "txpool.journal",
- Usage: "Disk journal for local transaction to survive node restarts",
- Value: &c.cliConfig.TxPool.Journal,
+ Name: "txpool.journal",
+ Usage: "Disk journal for local transaction to survive node restarts",
+ Value: &c.cliConfig.TxPool.Journal,
+ Default: c.cliConfig.TxPool.Journal,
+ Group: "Transaction Pool",
})
f.DurationFlag(&flagset.DurationFlag{
- Name: "txpool.rejournal",
- Usage: "Time interval to regenerate the local transaction journal",
- Value: &c.cliConfig.TxPool.Rejournal,
+ Name: "txpool.rejournal",
+ Usage: "Time interval to regenerate the local transaction journal",
+ Value: &c.cliConfig.TxPool.Rejournal,
+ Default: c.cliConfig.TxPool.Rejournal,
+ Group: "Transaction Pool",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "txpool.pricelimit",
- Usage: "Minimum gas price limit to enforce for acceptance into the pool",
- Value: &c.cliConfig.TxPool.PriceLimit,
+ Name: "txpool.pricelimit",
+ Usage: "Minimum gas price limit to enforce for acceptance into the pool",
+ Value: &c.cliConfig.TxPool.PriceLimit,
+ Default: c.cliConfig.TxPool.PriceLimit,
+ Group: "Transaction Pool",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "txpool.pricebump",
- Usage: "Price bump percentage to replace an already existing transaction",
- Value: &c.cliConfig.TxPool.PriceBump,
+ Name: "txpool.pricebump",
+ Usage: "Price bump percentage to replace an already existing transaction",
+ Value: &c.cliConfig.TxPool.PriceBump,
+ Default: c.cliConfig.TxPool.PriceBump,
+ Group: "Transaction Pool",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "txpool.accountslots",
- Usage: "Minimum number of executable transaction slots guaranteed per account",
- Value: &c.cliConfig.TxPool.AccountSlots,
+ Name: "txpool.accountslots",
+ Usage: "Minimum number of executable transaction slots guaranteed per account",
+ Value: &c.cliConfig.TxPool.AccountSlots,
+ Default: c.cliConfig.TxPool.AccountSlots,
+ Group: "Transaction Pool",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "txpool.globalslots",
- Usage: "Maximum number of executable transaction slots for all accounts",
- Value: &c.cliConfig.TxPool.GlobalSlots,
+ Name: "txpool.globalslots",
+ Usage: "Maximum number of executable transaction slots for all accounts",
+ Value: &c.cliConfig.TxPool.GlobalSlots,
+ Default: c.cliConfig.TxPool.GlobalSlots,
+ Group: "Transaction Pool",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "txpool.accountqueue",
- Usage: "Maximum number of non-executable transaction slots permitted per account",
- Value: &c.cliConfig.TxPool.AccountQueue,
+ Name: "txpool.accountqueue",
+ Usage: "Maximum number of non-executable transaction slots permitted per account",
+ Value: &c.cliConfig.TxPool.AccountQueue,
+ Default: c.cliConfig.TxPool.AccountQueue,
+ Group: "Transaction Pool",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "txpool.globalqueue",
- Usage: "Maximum number of non-executable transaction slots for all accounts",
- Value: &c.cliConfig.TxPool.GlobalQueue,
+ Name: "txpool.globalqueue",
+ Usage: "Maximum number of non-executable transaction slots for all accounts",
+ Value: &c.cliConfig.TxPool.GlobalQueue,
+ Default: c.cliConfig.TxPool.GlobalQueue,
+ Group: "Transaction Pool",
})
f.DurationFlag(&flagset.DurationFlag{
- Name: "txpool.lifetime",
- Usage: "Maximum amount of time non-executable transaction are queued",
- Value: &c.cliConfig.TxPool.LifeTime,
+ Name: "txpool.lifetime",
+ Usage: "Maximum amount of time non-executable transaction are queued",
+ Value: &c.cliConfig.TxPool.LifeTime,
+ Default: c.cliConfig.TxPool.LifeTime,
+ Group: "Transaction Pool",
})
// sealer options
f.BoolFlag(&flagset.BoolFlag{
- Name: "mine",
- Usage: "Enable mining",
- Value: &c.cliConfig.Sealer.Enabled,
+ Name: "mine",
+ Usage: "Enable mining",
+ Value: &c.cliConfig.Sealer.Enabled,
+ Default: c.cliConfig.Sealer.Enabled,
+ Group: "Sealer",
})
f.StringFlag(&flagset.StringFlag{
- Name: "miner.etherbase",
- Usage: "Public address for block mining rewards (default = first account)",
- Value: &c.cliConfig.Sealer.Etherbase,
+ Name: "miner.etherbase",
+ Usage: "Public address for block mining rewards",
+ Value: &c.cliConfig.Sealer.Etherbase,
+ Default: c.cliConfig.Sealer.Etherbase,
+ Group: "Sealer",
})
f.StringFlag(&flagset.StringFlag{
- Name: "miner.extradata",
- Usage: "Block extra data set by the miner (default = client version)",
- Value: &c.cliConfig.Sealer.ExtraData,
+ Name: "miner.extradata",
+ Usage: "Block extra data set by the miner (default = client version)",
+ Value: &c.cliConfig.Sealer.ExtraData,
+ Default: c.cliConfig.Sealer.ExtraData,
+ Group: "Sealer",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "miner.gaslimit",
- Usage: "Target gas ceiling for mined blocks",
- Value: &c.cliConfig.Sealer.GasCeil,
+ Name: "miner.gaslimit",
+ Usage: "Target gas ceiling (gas limit) for mined blocks",
+ Value: &c.cliConfig.Sealer.GasCeil,
+ Default: c.cliConfig.Sealer.GasCeil,
+ Group: "Sealer",
})
f.BigIntFlag(&flagset.BigIntFlag{
Name: "miner.gasprice",
Usage: "Minimum gas price for mining a transaction",
Value: c.cliConfig.Sealer.GasPrice,
+ Group: "Sealer",
})
// ethstats
f.StringFlag(&flagset.StringFlag{
- Name: "ethstats",
- Usage: "Reporting URL of a ethstats service (nodename:secret@host:port)",
- Value: &c.cliConfig.Ethstats,
+ Name: "ethstats",
+ Usage: "Reporting URL of a ethstats service (nodename:secret@host:port)",
+ Value: &c.cliConfig.Ethstats,
+ Default: c.cliConfig.Ethstats,
})
// gas price oracle
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "gpo.blocks",
- Usage: "Number of recent blocks to check for gas prices",
- Value: &c.cliConfig.Gpo.Blocks,
+ Name: "gpo.blocks",
+ Usage: "Number of recent blocks to check for gas prices",
+ Value: &c.cliConfig.Gpo.Blocks,
+ Default: c.cliConfig.Gpo.Blocks,
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "gpo.percentile",
- Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices",
- Value: &c.cliConfig.Gpo.Percentile,
+ Name: "gpo.percentile",
+ Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices",
+ Value: &c.cliConfig.Gpo.Percentile,
+ Default: c.cliConfig.Gpo.Percentile,
})
f.BigIntFlag(&flagset.BigIntFlag{
Name: "gpo.maxprice",
@@ -182,303 +242,438 @@ func (c *Command) Flags() *flagset.Flagset {
// cache options
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "cache",
- Usage: "Megabytes of memory allocated to internal caching (default = 4096 mainnet full node)",
- Value: &c.cliConfig.Cache.Cache,
+ Name: "cache",
+ Usage: "Megabytes of memory allocated to internal caching (default = 4096 mainnet full node)",
+ Value: &c.cliConfig.Cache.Cache,
+ Default: c.cliConfig.Cache.Cache,
+ Group: "Cache",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "cache.database",
- Usage: "Percentage of cache memory allowance to use for database io",
- Value: &c.cliConfig.Cache.PercDatabase,
+ Name: "cache.database",
+ Usage: "Percentage of cache memory allowance to use for database io",
+ Value: &c.cliConfig.Cache.PercDatabase,
+ Default: c.cliConfig.Cache.PercDatabase,
+ Group: "Cache",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "cache.trie",
- Usage: "Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)",
- Value: &c.cliConfig.Cache.PercTrie,
+ Name: "cache.trie",
+ Usage: "Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)",
+ Value: &c.cliConfig.Cache.PercTrie,
+ Default: c.cliConfig.Cache.PercTrie,
+ Group: "Cache",
})
f.StringFlag(&flagset.StringFlag{
- Name: "cache.trie.journal",
- Usage: "Disk journal directory for trie cache to survive node restarts",
- Value: &c.cliConfig.Cache.Journal,
+ Name: "cache.trie.journal",
+ Usage: "Disk journal directory for trie cache to survive node restarts",
+ Value: &c.cliConfig.Cache.Journal,
+ Default: c.cliConfig.Cache.Journal,
+ Group: "Cache",
})
f.DurationFlag(&flagset.DurationFlag{
- Name: "cache.trie.rejournal",
- Usage: "Time interval to regenerate the trie cache journal",
- Value: &c.cliConfig.Cache.Rejournal,
+ Name: "cache.trie.rejournal",
+ Usage: "Time interval to regenerate the trie cache journal",
+ Value: &c.cliConfig.Cache.Rejournal,
+ Default: c.cliConfig.Cache.Rejournal,
+ Group: "Cache",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "cache.gc",
- Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)",
- Value: &c.cliConfig.Cache.PercGc,
+ Name: "cache.gc",
+ Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)",
+ Value: &c.cliConfig.Cache.PercGc,
+ Default: c.cliConfig.Cache.PercGc,
+ Group: "Cache",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "cache.snapshot",
- Usage: "Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode)",
- Value: &c.cliConfig.Cache.PercSnapshot,
+ Name: "cache.snapshot",
+ Usage: "Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode)",
+ Value: &c.cliConfig.Cache.PercSnapshot,
+ Default: c.cliConfig.Cache.PercSnapshot,
+ Group: "Cache",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "cache.noprefetch",
- Usage: "Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data)",
- Value: &c.cliConfig.Cache.NoPrefetch,
+ Name: "cache.noprefetch",
+ Usage: "Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data)",
+ Value: &c.cliConfig.Cache.NoPrefetch,
+ Default: c.cliConfig.Cache.NoPrefetch,
+ Group: "Cache",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "cache.preimages",
- Usage: "Enable recording the SHA3/keccak preimages of trie keys",
- Value: &c.cliConfig.Cache.Preimages,
+ Name: "cache.preimages",
+ Usage: "Enable recording the SHA3/keccak preimages of trie keys",
+ Value: &c.cliConfig.Cache.Preimages,
+ Default: c.cliConfig.Cache.Preimages,
+ Group: "Cache",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "txlookuplimit",
- Usage: "Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain)",
- Value: &c.cliConfig.Cache.TxLookupLimit,
+ Name: "txlookuplimit",
+ Usage: "Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain)",
+ Value: &c.cliConfig.Cache.TxLookupLimit,
+ Default: c.cliConfig.Cache.TxLookupLimit,
+ Group: "Cache",
})
// rpc options
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "rpc.gascap",
- Usage: "Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)",
- Value: &c.cliConfig.JsonRPC.GasCap,
+ Name: "rpc.gascap",
+ Usage: "Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)",
+ Value: &c.cliConfig.JsonRPC.GasCap,
+ Default: c.cliConfig.JsonRPC.GasCap,
+ Group: "JsonRPC",
})
f.Float64Flag(&flagset.Float64Flag{
- Name: "rpc.txfeecap",
- Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
- Value: &c.cliConfig.JsonRPC.TxFeeCap,
+ Name: "rpc.txfeecap",
+ Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
+ Value: &c.cliConfig.JsonRPC.TxFeeCap,
+ Default: c.cliConfig.JsonRPC.TxFeeCap,
+ Group: "JsonRPC",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "ipcdisable",
- Usage: "Disable the IPC-RPC server",
- Value: &c.cliConfig.JsonRPC.IPCDisable,
+ Name: "ipcdisable",
+ Usage: "Disable the IPC-RPC server",
+ Value: &c.cliConfig.JsonRPC.IPCDisable,
+ Default: c.cliConfig.JsonRPC.IPCDisable,
+ Group: "JsonRPC",
})
f.StringFlag(&flagset.StringFlag{
- Name: "ipcpath",
- Usage: "Filename for IPC socket/pipe within the datadir (explicit paths escape it)",
- Value: &c.cliConfig.JsonRPC.IPCPath,
+ Name: "ipcpath",
+ Usage: "Filename for IPC socket/pipe within the datadir (explicit paths escape it)",
+ Value: &c.cliConfig.JsonRPC.IPCPath,
+ Default: c.cliConfig.JsonRPC.IPCPath,
+ Group: "JsonRPC",
})
f.SliceStringFlag(&flagset.SliceStringFlag{
- Name: "jsonrpc.corsdomain",
- Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
- Value: &c.cliConfig.JsonRPC.Cors,
+ Name: "http.corsdomain",
+ Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
+ Value: &c.cliConfig.JsonRPC.Http.Cors,
+ Default: c.cliConfig.JsonRPC.Http.Cors,
+ Group: "JsonRPC",
})
f.SliceStringFlag(&flagset.SliceStringFlag{
- Name: "jsonrpc.vhosts",
- Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
- Value: &c.cliConfig.JsonRPC.VHost,
+ Name: "http.vhosts",
+ Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
+ Value: &c.cliConfig.JsonRPC.Http.VHost,
+ Default: c.cliConfig.JsonRPC.Http.VHost,
+ Group: "JsonRPC",
+ })
+ f.SliceStringFlag(&flagset.SliceStringFlag{
+ Name: "ws.origins",
+ Usage: "Origins from which to accept websockets requests",
+ Value: &c.cliConfig.JsonRPC.Ws.Origins,
+ Default: c.cliConfig.JsonRPC.Ws.Origins,
+ Group: "JsonRPC",
+ })
+ f.SliceStringFlag(&flagset.SliceStringFlag{
+ Name: "graphql.corsdomain",
+ Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
+ Value: &c.cliConfig.JsonRPC.Graphql.Cors,
+ Default: c.cliConfig.JsonRPC.Graphql.Cors,
+ Group: "JsonRPC",
+ })
+ f.SliceStringFlag(&flagset.SliceStringFlag{
+ Name: "graphql.vhosts",
+ Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
+ Value: &c.cliConfig.JsonRPC.Graphql.VHost,
+ Default: c.cliConfig.JsonRPC.Graphql.VHost,
+ Group: "JsonRPC",
})
// http options
f.BoolFlag(&flagset.BoolFlag{
- Name: "http",
- Usage: "Enable the HTTP-RPC server",
- Value: &c.cliConfig.JsonRPC.Http.Enabled,
+ Name: "http",
+ Usage: "Enable the HTTP-RPC server",
+ Value: &c.cliConfig.JsonRPC.Http.Enabled,
+ Default: c.cliConfig.JsonRPC.Http.Enabled,
+ Group: "JsonRPC",
})
f.StringFlag(&flagset.StringFlag{
- Name: "http.addr",
- Usage: "HTTP-RPC server listening interface",
- Value: &c.cliConfig.JsonRPC.Http.Host,
+ Name: "http.addr",
+ Usage: "HTTP-RPC server listening interface",
+ Value: &c.cliConfig.JsonRPC.Http.Host,
+ Default: c.cliConfig.JsonRPC.Http.Host,
+ Group: "JsonRPC",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "http.port",
- Usage: "HTTP-RPC server listening port",
- Value: &c.cliConfig.JsonRPC.Http.Port,
+ Name: "http.port",
+ Usage: "HTTP-RPC server listening port",
+ Value: &c.cliConfig.JsonRPC.Http.Port,
+ Default: c.cliConfig.JsonRPC.Http.Port,
+ Group: "JsonRPC",
})
f.StringFlag(&flagset.StringFlag{
- Name: "http.rpcprefix",
- Usage: "HTTP path path prefix on which JSON-RPC is served. Use '/' to serve on all paths.",
- Value: &c.cliConfig.JsonRPC.Http.Prefix,
+ Name: "http.rpcprefix",
+ Usage: "HTTP path path prefix on which JSON-RPC is served. Use '/' to serve on all paths.",
+ Value: &c.cliConfig.JsonRPC.Http.Prefix,
+ Default: c.cliConfig.JsonRPC.Http.Prefix,
+ Group: "JsonRPC",
})
f.SliceStringFlag(&flagset.SliceStringFlag{
- Name: "http.modules",
- Usage: "API's offered over the HTTP-RPC interface",
- Value: &c.cliConfig.JsonRPC.Http.Modules,
+ Name: "http.api",
+ Usage: "API's offered over the HTTP-RPC interface",
+ Value: &c.cliConfig.JsonRPC.Http.API,
+ Default: c.cliConfig.JsonRPC.Http.API,
+ Group: "JsonRPC",
})
// ws options
f.BoolFlag(&flagset.BoolFlag{
- Name: "ws",
- Usage: "Enable the WS-RPC server",
- Value: &c.cliConfig.JsonRPC.Ws.Enabled,
+ Name: "ws",
+ Usage: "Enable the WS-RPC server",
+ Value: &c.cliConfig.JsonRPC.Ws.Enabled,
+ Default: c.cliConfig.JsonRPC.Ws.Enabled,
+ Group: "JsonRPC",
})
f.StringFlag(&flagset.StringFlag{
- Name: "ws.addr",
- Usage: "WS-RPC server listening interface",
- Value: &c.cliConfig.JsonRPC.Ws.Host,
+ Name: "ws.addr",
+ Usage: "WS-RPC server listening interface",
+ Value: &c.cliConfig.JsonRPC.Ws.Host,
+ Default: c.cliConfig.JsonRPC.Ws.Host,
+ Group: "JsonRPC",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "ws.port",
- Usage: "WS-RPC server listening port",
- Value: &c.cliConfig.JsonRPC.Ws.Port,
+ Name: "ws.port",
+ Usage: "WS-RPC server listening port",
+ Value: &c.cliConfig.JsonRPC.Ws.Port,
+ Default: c.cliConfig.JsonRPC.Ws.Port,
+ Group: "JsonRPC",
})
f.StringFlag(&flagset.StringFlag{
- Name: "ws.rpcprefix",
- Usage: "HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.",
- Value: &c.cliConfig.JsonRPC.Ws.Prefix,
+ Name: "ws.rpcprefix",
+ Usage: "HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.",
+ Value: &c.cliConfig.JsonRPC.Ws.Prefix,
+ Default: c.cliConfig.JsonRPC.Ws.Prefix,
+ Group: "JsonRPC",
})
f.SliceStringFlag(&flagset.SliceStringFlag{
- Name: "ws.modules",
- Usage: "API's offered over the WS-RPC interface",
- Value: &c.cliConfig.JsonRPC.Ws.Modules,
+ Name: "ws.api",
+ Usage: "API's offered over the WS-RPC interface",
+ Value: &c.cliConfig.JsonRPC.Ws.API,
+ Default: c.cliConfig.JsonRPC.Ws.API,
+ Group: "JsonRPC",
})
// graphql options
f.BoolFlag(&flagset.BoolFlag{
- Name: "graphql",
- Usage: "Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.",
- Value: &c.cliConfig.JsonRPC.Graphql.Enabled,
+ Name: "graphql",
+ Usage: "Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.",
+ Value: &c.cliConfig.JsonRPC.Graphql.Enabled,
+ Default: c.cliConfig.JsonRPC.Graphql.Enabled,
+ Group: "JsonRPC",
})
// p2p options
f.StringFlag(&flagset.StringFlag{
- Name: "bind",
- Usage: "Network binding address",
- Value: &c.cliConfig.P2P.Bind,
+ Name: "bind",
+ Usage: "Network binding address",
+ Value: &c.cliConfig.P2P.Bind,
+ Default: c.cliConfig.P2P.Bind,
+ Group: "P2P",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "port",
- Usage: "Network listening port",
- Value: &c.cliConfig.P2P.Port,
+ Name: "port",
+ Usage: "Network listening port",
+ Value: &c.cliConfig.P2P.Port,
+ Default: c.cliConfig.P2P.Port,
+ Group: "P2P",
})
f.SliceStringFlag(&flagset.SliceStringFlag{
- Name: "bootnodes",
- Usage: "Comma separated enode URLs for P2P discovery bootstrap",
- Value: &c.cliConfig.P2P.Discovery.Bootnodes,
+ Name: "bootnodes",
+ Usage: "Comma separated enode URLs for P2P discovery bootstrap",
+ Value: &c.cliConfig.P2P.Discovery.Bootnodes,
+ Default: c.cliConfig.P2P.Discovery.Bootnodes,
+ Group: "P2P",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "maxpeers",
- Usage: "Maximum number of network peers (network disabled if set to 0)",
- Value: &c.cliConfig.P2P.MaxPeers,
+ Name: "maxpeers",
+ Usage: "Maximum number of network peers (network disabled if set to 0)",
+ Value: &c.cliConfig.P2P.MaxPeers,
+ Default: c.cliConfig.P2P.MaxPeers,
+ Group: "P2P",
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "maxpendpeers",
- Usage: "Maximum number of pending connection attempts (defaults used if set to 0)",
- Value: &c.cliConfig.P2P.MaxPendPeers,
+ Name: "maxpendpeers",
+ Usage: "Maximum number of pending connection attempts",
+ Value: &c.cliConfig.P2P.MaxPendPeers,
+ Default: c.cliConfig.P2P.MaxPendPeers,
+ Group: "P2P",
})
f.StringFlag(&flagset.StringFlag{
- Name: "nat",
- Usage: "NAT port mapping mechanism (any|none|upnp|pmp|extip:)",
- Value: &c.cliConfig.P2P.NAT,
+ Name: "nat",
+ Usage: "NAT port mapping mechanism (any|none|upnp|pmp|extip:)",
+ Value: &c.cliConfig.P2P.NAT,
+ Default: c.cliConfig.P2P.NAT,
+ Group: "P2P",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "nodiscover",
- Usage: "Disables the peer discovery mechanism (manual peer addition)",
- Value: &c.cliConfig.P2P.NoDiscover,
+ Name: "nodiscover",
+ Usage: "Disables the peer discovery mechanism (manual peer addition)",
+ Value: &c.cliConfig.P2P.NoDiscover,
+ Default: c.cliConfig.P2P.NoDiscover,
+ Group: "P2P",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "v5disc",
- Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism",
- Value: &c.cliConfig.P2P.Discovery.V5Enabled,
+ Name: "v5disc",
+ Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism",
+ Value: &c.cliConfig.P2P.Discovery.V5Enabled,
+ Default: c.cliConfig.P2P.Discovery.V5Enabled,
+ Group: "P2P",
})
// metrics
f.BoolFlag(&flagset.BoolFlag{
- Name: "metrics",
- Usage: "Enable metrics collection and reporting",
- Value: &c.cliConfig.Telemetry.Enabled,
+ Name: "metrics",
+ Usage: "Enable metrics collection and reporting",
+ Value: &c.cliConfig.Telemetry.Enabled,
+ Default: c.cliConfig.Telemetry.Enabled,
+ Group: "Telemetry",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "metrics.expensive",
- Usage: "Enable expensive metrics collection and reporting",
- Value: &c.cliConfig.Telemetry.Expensive,
+ Name: "metrics.expensive",
+ Usage: "Enable expensive metrics collection and reporting",
+ Value: &c.cliConfig.Telemetry.Expensive,
+ Default: c.cliConfig.Telemetry.Expensive,
+ Group: "Telemetry",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "metrics.influxdb",
- Usage: "Enable metrics export/push to an external InfluxDB database (v1)",
- Value: &c.cliConfig.Telemetry.InfluxDB.V1Enabled,
+ Name: "metrics.influxdb",
+ Usage: "Enable metrics export/push to an external InfluxDB database (v1)",
+ Value: &c.cliConfig.Telemetry.InfluxDB.V1Enabled,
+ Default: c.cliConfig.Telemetry.InfluxDB.V1Enabled,
+ Group: "Telemetry",
})
f.StringFlag(&flagset.StringFlag{
- Name: "metrics.influxdb.endpoint",
- Usage: "InfluxDB API endpoint to report metrics to",
- Value: &c.cliConfig.Telemetry.InfluxDB.Endpoint,
+ Name: "metrics.influxdb.endpoint",
+ Usage: "InfluxDB API endpoint to report metrics to",
+ Value: &c.cliConfig.Telemetry.InfluxDB.Endpoint,
+ Default: c.cliConfig.Telemetry.InfluxDB.Endpoint,
+ Group: "Telemetry",
})
f.StringFlag(&flagset.StringFlag{
- Name: "metrics.influxdb.database",
- Usage: "InfluxDB database name to push reported metrics to",
- Value: &c.cliConfig.Telemetry.InfluxDB.Database,
+ Name: "metrics.influxdb.database",
+ Usage: "InfluxDB database name to push reported metrics to",
+ Value: &c.cliConfig.Telemetry.InfluxDB.Database,
+ Default: c.cliConfig.Telemetry.InfluxDB.Database,
+ Group: "Telemetry",
})
f.StringFlag(&flagset.StringFlag{
- Name: "metrics.influxdb.username",
- Usage: "Username to authorize access to the database",
- Value: &c.cliConfig.Telemetry.InfluxDB.Username,
+ Name: "metrics.influxdb.username",
+ Usage: "Username to authorize access to the database",
+ Value: &c.cliConfig.Telemetry.InfluxDB.Username,
+ Default: c.cliConfig.Telemetry.InfluxDB.Username,
+ Group: "Telemetry",
})
f.StringFlag(&flagset.StringFlag{
- Name: "metrics.influxdb.password",
- Usage: "Password to authorize access to the database",
- Value: &c.cliConfig.Telemetry.InfluxDB.Password,
+ Name: "metrics.influxdb.password",
+ Usage: "Password to authorize access to the database",
+ Value: &c.cliConfig.Telemetry.InfluxDB.Password,
+ Default: c.cliConfig.Telemetry.InfluxDB.Password,
+ Group: "Telemetry",
})
f.MapStringFlag(&flagset.MapStringFlag{
Name: "metrics.influxdb.tags",
Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements",
Value: &c.cliConfig.Telemetry.InfluxDB.Tags,
+ Group: "Telemetry",
})
f.StringFlag(&flagset.StringFlag{
- Name: "metrics.prometheus-addr",
- Usage: "Address for Prometheus Server",
- Value: &c.cliConfig.Telemetry.PrometheusAddr,
+ Name: "metrics.prometheus-addr",
+ Usage: "Address for Prometheus Server",
+ Value: &c.cliConfig.Telemetry.PrometheusAddr,
+ Default: c.cliConfig.Telemetry.PrometheusAddr,
+ Group: "Telemetry",
})
f.StringFlag(&flagset.StringFlag{
- Name: "metrics.opencollector-endpoint",
- Usage: "OpenCollector Endpoint (host:port)",
- Value: &c.cliConfig.Telemetry.OpenCollectorEndpoint,
+ Name: "metrics.opencollector-endpoint",
+ Usage: "OpenCollector Endpoint (host:port)",
+ Value: &c.cliConfig.Telemetry.OpenCollectorEndpoint,
+ Default: c.cliConfig.Telemetry.OpenCollectorEndpoint,
+ Group: "Telemetry",
})
// influx db v2
f.BoolFlag(&flagset.BoolFlag{
- Name: "metrics.influxdbv2",
- Usage: "Enable metrics export/push to an external InfluxDB v2 database",
- Value: &c.cliConfig.Telemetry.InfluxDB.V2Enabled,
+ Name: "metrics.influxdbv2",
+ Usage: "Enable metrics export/push to an external InfluxDB v2 database",
+ Value: &c.cliConfig.Telemetry.InfluxDB.V2Enabled,
+ Default: c.cliConfig.Telemetry.InfluxDB.V2Enabled,
+ Group: "Telemetry",
})
f.StringFlag(&flagset.StringFlag{
- Name: "metrics.influxdb.token",
- Usage: "Token to authorize access to the database (v2 only)",
- Value: &c.cliConfig.Telemetry.InfluxDB.Token,
+ Name: "metrics.influxdb.token",
+ Usage: "Token to authorize access to the database (v2 only)",
+ Value: &c.cliConfig.Telemetry.InfluxDB.Token,
+ Default: c.cliConfig.Telemetry.InfluxDB.Token,
+ Group: "Telemetry",
})
f.StringFlag(&flagset.StringFlag{
- Name: "metrics.influxdb.bucket",
- Usage: "InfluxDB bucket name to push reported metrics to (v2 only)",
- Value: &c.cliConfig.Telemetry.InfluxDB.Bucket,
+ Name: "metrics.influxdb.bucket",
+ Usage: "InfluxDB bucket name to push reported metrics to (v2 only)",
+ Value: &c.cliConfig.Telemetry.InfluxDB.Bucket,
+ Default: c.cliConfig.Telemetry.InfluxDB.Bucket,
+ Group: "Telemetry",
})
f.StringFlag(&flagset.StringFlag{
- Name: "metrics.influxdb.organization",
- Usage: "InfluxDB organization name (v2 only)",
- Value: &c.cliConfig.Telemetry.InfluxDB.Organization,
+ Name: "metrics.influxdb.organization",
+ Usage: "InfluxDB organization name (v2 only)",
+ Value: &c.cliConfig.Telemetry.InfluxDB.Organization,
+ Default: c.cliConfig.Telemetry.InfluxDB.Organization,
+ Group: "Telemetry",
})
// account
f.SliceStringFlag(&flagset.SliceStringFlag{
- Name: "unlock",
- Usage: "Comma separated list of accounts to unlock",
- Value: &c.cliConfig.Accounts.Unlock,
+ Name: "unlock",
+ Usage: "Comma separated list of accounts to unlock",
+ Value: &c.cliConfig.Accounts.Unlock,
+ Default: c.cliConfig.Accounts.Unlock,
+ Group: "Account Management",
})
f.StringFlag(&flagset.StringFlag{
- Name: "password",
- Usage: "Password file to use for non-interactive password input",
- Value: &c.cliConfig.Accounts.PasswordFile,
+ Name: "password",
+ Usage: "Password file to use for non-interactive password input",
+ Value: &c.cliConfig.Accounts.PasswordFile,
+ Default: c.cliConfig.Accounts.PasswordFile,
+ Group: "Account Management",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "allow-insecure-unlock",
- Usage: "Allow insecure account unlocking when account-related RPCs are exposed by http",
- Value: &c.cliConfig.Accounts.AllowInsecureUnlock,
+ Name: "allow-insecure-unlock",
+ Usage: "Allow insecure account unlocking when account-related RPCs are exposed by http",
+ Value: &c.cliConfig.Accounts.AllowInsecureUnlock,
+ Default: c.cliConfig.Accounts.AllowInsecureUnlock,
+ Group: "Account Management",
})
f.BoolFlag(&flagset.BoolFlag{
- Name: "lightkdf",
- Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
- Value: &c.cliConfig.Accounts.UseLightweightKDF,
- })
+ Name: "lightkdf",
+ Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
+ Value: &c.cliConfig.Accounts.UseLightweightKDF,
+ Default: c.cliConfig.Accounts.UseLightweightKDF,
+ Group: "Account Management",
+ })
+ f.BoolFlag((&flagset.BoolFlag{
+ Name: "disable-bor-wallet",
+ Usage: "Disable the personal wallet endpoints",
+ Value: &c.cliConfig.Accounts.DisableBorWallet,
+ Default: c.cliConfig.Accounts.DisableBorWallet,
+ }))
// grpc
f.StringFlag(&flagset.StringFlag{
- Name: "grpc.addr",
- Usage: "Address and port to bind the GRPC server",
- Value: &c.cliConfig.GRPC.Addr,
+ Name: "grpc.addr",
+ Usage: "Address and port to bind the GRPC server",
+ Value: &c.cliConfig.GRPC.Addr,
+ Default: c.cliConfig.GRPC.Addr,
})
// developer
f.BoolFlag(&flagset.BoolFlag{
- Name: "dev",
- Usage: "Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled",
- Value: &c.cliConfig.Developer.Enabled,
+ Name: "dev",
+ Usage: "Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled",
+ Value: &c.cliConfig.Developer.Enabled,
+ Default: c.cliConfig.Developer.Enabled,
})
f.Uint64Flag(&flagset.Uint64Flag{
- Name: "dev.period",
- Usage: "Block period to use in developer mode (0 = mine only if transaction pending)",
- Value: &c.cliConfig.Developer.Period,
+ Name: "dev.period",
+ Usage: "Block period to use in developer mode (0 = mine only if transaction pending)",
+ Value: &c.cliConfig.Developer.Period,
+ Default: c.cliConfig.Developer.Period,
})
return f
}
diff --git a/internal/cli/server/helper.go b/internal/cli/server/helper.go
new file mode 100644
index 0000000000..3a232d3185
--- /dev/null
+++ b/internal/cli/server/helper.go
@@ -0,0 +1,83 @@
+package server
+
+import (
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "os"
+ "sync/atomic"
+ "time"
+)
+
+var maxPortCheck int32 = 100
+
+// findAvailablePort probes ports above `from` (at most maxPortCheck attempts) and returns the first free one
+func findAvailablePort(from int32, count int32) (int32, error) {
+ if count == maxPortCheck {
+ return 0, fmt.Errorf("no available port found")
+ }
+
+ port := atomic.AddInt32(&from, 1)
+ addr := fmt.Sprintf("localhost:%d", port)
+
+ count++
+
+ lis, err := net.Listen("tcp", addr)
+ if err == nil {
+ lis.Close()
+ return port, nil
+ }
+
+ return findAvailablePort(from, count)
+}
+
+func CreateMockServer(config *Config) (*Server, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+
+ // find available port for grpc server
+ rand.Seed(time.Now().UnixNano())
+
+ var (
+ from int32 = 60000 // the min port to start checking from
+ to int32 = 61000 // the upper bound of the random starting port
+ )
+
+ //nolint: gosec
+ port, err := findAvailablePort(rand.Int31n(to-from+1)+from, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // grpc port
+ config.GRPC.Addr = fmt.Sprintf(":%d", port)
+
+ // datadir
+ datadir, err := ioutil.TempDir("/tmp", "bor-cli-test")
+ if err != nil {
+ return nil, err
+ }
+
+ config.DataDir = datadir
+
+ // find available port for http server
+ from = 8545
+ to = 9545
+
+ //nolint: gosec
+ port, err = findAvailablePort(rand.Int31n(to-from+1)+from, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ config.JsonRPC.Http.Port = uint64(port)
+
+ // start the server
+ return NewServer(config)
+}
+
+func CloseMockServer(server *Server) {
+ // remove the contents of temp data dir
+ os.RemoveAll(server.config.DataDir)
+
+ // close the server
+ server.Stop()
+}
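
A minimal usage sketch for the helpers introduced in helper.go above. TestMockServerLifecycle is a hypothetical test name used only for illustration; CreateMockServer, CloseMockServer and DefaultConfig are the names defined in this diff.

package server

import "testing"

// Hypothetical test showing how the mock-server helpers are meant to be used.
func TestMockServerLifecycle(t *testing.T) {
	t.Parallel()

	// A nil config makes CreateMockServer fall back to DefaultConfig(),
	// pick free gRPC and HTTP ports, and point DataDir at a temp directory.
	srv, err := CreateMockServer(nil)
	if err != nil {
		t.Fatal(err)
	}

	// Stops the server and removes the temporary datadir.
	defer CloseMockServer(srv)
}
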
diff --git a/internal/cli/server/proto/server.pb.go b/internal/cli/server/proto/server.pb.go
index a709c538ae..3e928ac170 100644
--- a/internal/cli/server/proto/server.pb.go
+++ b/internal/cli/server/proto/server.pb.go
@@ -1,18 +1,18 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.25.0
-// protoc v3.17.3
-// source: command/server/proto/server.proto
+// protoc-gen-go v1.27.1
+// protoc v3.19.3
+// source: internal/cli/server/proto/server.proto
package proto
import (
- proto "github.com/golang/protobuf/proto"
+ reflect "reflect"
+ sync "sync"
+
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
- reflect "reflect"
- sync "sync"
)
const (
@@ -22,57 +22,138 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
-// This is a compile-time assertion that a sufficiently up-to-date version
-// of the legacy proto package is being used.
-const _ = proto.ProtoPackageIsVersion4
-
-type PprofRequest_Type int32
+type DebugPprofRequest_Type int32
const (
- PprofRequest_LOOKUP PprofRequest_Type = 0
- PprofRequest_CPU PprofRequest_Type = 1
- PprofRequest_TRACE PprofRequest_Type = 2
+ DebugPprofRequest_LOOKUP DebugPprofRequest_Type = 0
+ DebugPprofRequest_CPU DebugPprofRequest_Type = 1
+ DebugPprofRequest_TRACE DebugPprofRequest_Type = 2
)
-// Enum value maps for PprofRequest_Type.
+// Enum value maps for DebugPprofRequest_Type.
var (
- PprofRequest_Type_name = map[int32]string{
+ DebugPprofRequest_Type_name = map[int32]string{
0: "LOOKUP",
1: "CPU",
2: "TRACE",
}
- PprofRequest_Type_value = map[string]int32{
+ DebugPprofRequest_Type_value = map[string]int32{
"LOOKUP": 0,
"CPU": 1,
"TRACE": 2,
}
)
-func (x PprofRequest_Type) Enum() *PprofRequest_Type {
- p := new(PprofRequest_Type)
+func (x DebugPprofRequest_Type) Enum() *DebugPprofRequest_Type {
+ p := new(DebugPprofRequest_Type)
*p = x
return p
}
-func (x PprofRequest_Type) String() string {
+func (x DebugPprofRequest_Type) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
-func (PprofRequest_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_command_server_proto_server_proto_enumTypes[0].Descriptor()
+func (DebugPprofRequest_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_internal_cli_server_proto_server_proto_enumTypes[0].Descriptor()
}
-func (PprofRequest_Type) Type() protoreflect.EnumType {
- return &file_command_server_proto_server_proto_enumTypes[0]
+func (DebugPprofRequest_Type) Type() protoreflect.EnumType {
+ return &file_internal_cli_server_proto_server_proto_enumTypes[0]
}
-func (x PprofRequest_Type) Number() protoreflect.EnumNumber {
+func (x DebugPprofRequest_Type) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
-// Deprecated: Use PprofRequest_Type.Descriptor instead.
-func (PprofRequest_Type) EnumDescriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{16, 0}
+// Deprecated: Use DebugPprofRequest_Type.Descriptor instead.
+func (DebugPprofRequest_Type) EnumDescriptor() ([]byte, []int) {
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{18, 0}
+}
+
+type TraceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Number int64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"`
+}
+
+func (x *TraceRequest) Reset() {
+ *x = TraceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TraceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TraceRequest) ProtoMessage() {}
+
+func (x *TraceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TraceRequest.ProtoReflect.Descriptor instead.
+func (*TraceRequest) Descriptor() ([]byte, []int) {
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TraceRequest) GetNumber() int64 {
+ if x != nil {
+ return x.Number
+ }
+ return 0
+}
+
+type TraceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *TraceResponse) Reset() {
+ *x = TraceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TraceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TraceResponse) ProtoMessage() {}
+
+func (x *TraceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TraceResponse.ProtoReflect.Descriptor instead.
+func (*TraceResponse) Descriptor() ([]byte, []int) {
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{1}
}
type ChainWatchRequest struct {
@@ -84,7 +165,7 @@ type ChainWatchRequest struct {
func (x *ChainWatchRequest) Reset() {
*x = ChainWatchRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[0]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -97,7 +178,7 @@ func (x *ChainWatchRequest) String() string {
func (*ChainWatchRequest) ProtoMessage() {}
func (x *ChainWatchRequest) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[0]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -110,7 +191,7 @@ func (x *ChainWatchRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChainWatchRequest.ProtoReflect.Descriptor instead.
func (*ChainWatchRequest) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{0}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{2}
}
type ChainWatchResponse struct {
@@ -126,7 +207,7 @@ type ChainWatchResponse struct {
func (x *ChainWatchResponse) Reset() {
*x = ChainWatchResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[1]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -139,7 +220,7 @@ func (x *ChainWatchResponse) String() string {
func (*ChainWatchResponse) ProtoMessage() {}
func (x *ChainWatchResponse) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[1]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -152,7 +233,7 @@ func (x *ChainWatchResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChainWatchResponse.ProtoReflect.Descriptor instead.
func (*ChainWatchResponse) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{1}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{3}
}
func (x *ChainWatchResponse) GetOldchain() []*BlockStub {
@@ -188,7 +269,7 @@ type BlockStub struct {
func (x *BlockStub) Reset() {
*x = BlockStub{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[2]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -201,7 +282,7 @@ func (x *BlockStub) String() string {
func (*BlockStub) ProtoMessage() {}
func (x *BlockStub) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[2]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -214,7 +295,7 @@ func (x *BlockStub) ProtoReflect() protoreflect.Message {
// Deprecated: Use BlockStub.ProtoReflect.Descriptor instead.
func (*BlockStub) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{2}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{4}
}
func (x *BlockStub) GetHash() string {
@@ -243,7 +324,7 @@ type PeersAddRequest struct {
func (x *PeersAddRequest) Reset() {
*x = PeersAddRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[3]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -256,7 +337,7 @@ func (x *PeersAddRequest) String() string {
func (*PeersAddRequest) ProtoMessage() {}
func (x *PeersAddRequest) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[3]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -269,7 +350,7 @@ func (x *PeersAddRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PeersAddRequest.ProtoReflect.Descriptor instead.
func (*PeersAddRequest) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{3}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{5}
}
func (x *PeersAddRequest) GetEnode() string {
@@ -295,7 +376,7 @@ type PeersAddResponse struct {
func (x *PeersAddResponse) Reset() {
*x = PeersAddResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[4]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -308,7 +389,7 @@ func (x *PeersAddResponse) String() string {
func (*PeersAddResponse) ProtoMessage() {}
func (x *PeersAddResponse) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[4]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -321,7 +402,7 @@ func (x *PeersAddResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PeersAddResponse.ProtoReflect.Descriptor instead.
func (*PeersAddResponse) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{4}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{6}
}
type PeersRemoveRequest struct {
@@ -336,7 +417,7 @@ type PeersRemoveRequest struct {
func (x *PeersRemoveRequest) Reset() {
*x = PeersRemoveRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[5]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -349,7 +430,7 @@ func (x *PeersRemoveRequest) String() string {
func (*PeersRemoveRequest) ProtoMessage() {}
func (x *PeersRemoveRequest) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[5]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -362,7 +443,7 @@ func (x *PeersRemoveRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PeersRemoveRequest.ProtoReflect.Descriptor instead.
func (*PeersRemoveRequest) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{5}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{7}
}
func (x *PeersRemoveRequest) GetEnode() string {
@@ -388,7 +469,7 @@ type PeersRemoveResponse struct {
func (x *PeersRemoveResponse) Reset() {
*x = PeersRemoveResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[6]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -401,7 +482,7 @@ func (x *PeersRemoveResponse) String() string {
func (*PeersRemoveResponse) ProtoMessage() {}
func (x *PeersRemoveResponse) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[6]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -414,7 +495,7 @@ func (x *PeersRemoveResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PeersRemoveResponse.ProtoReflect.Descriptor instead.
func (*PeersRemoveResponse) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{6}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{8}
}
type PeersListRequest struct {
@@ -426,7 +507,7 @@ type PeersListRequest struct {
func (x *PeersListRequest) Reset() {
*x = PeersListRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[7]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -439,7 +520,7 @@ func (x *PeersListRequest) String() string {
func (*PeersListRequest) ProtoMessage() {}
func (x *PeersListRequest) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[7]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -452,7 +533,7 @@ func (x *PeersListRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PeersListRequest.ProtoReflect.Descriptor instead.
func (*PeersListRequest) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{7}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{9}
}
type PeersListResponse struct {
@@ -466,7 +547,7 @@ type PeersListResponse struct {
func (x *PeersListResponse) Reset() {
*x = PeersListResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[8]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -479,7 +560,7 @@ func (x *PeersListResponse) String() string {
func (*PeersListResponse) ProtoMessage() {}
func (x *PeersListResponse) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[8]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -492,7 +573,7 @@ func (x *PeersListResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PeersListResponse.ProtoReflect.Descriptor instead.
func (*PeersListResponse) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{8}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{10}
}
func (x *PeersListResponse) GetPeers() []*Peer {
@@ -513,7 +594,7 @@ type PeersStatusRequest struct {
func (x *PeersStatusRequest) Reset() {
*x = PeersStatusRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[9]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -526,7 +607,7 @@ func (x *PeersStatusRequest) String() string {
func (*PeersStatusRequest) ProtoMessage() {}
func (x *PeersStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[9]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -539,7 +620,7 @@ func (x *PeersStatusRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PeersStatusRequest.ProtoReflect.Descriptor instead.
func (*PeersStatusRequest) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{9}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{11}
}
func (x *PeersStatusRequest) GetEnode() string {
@@ -560,7 +641,7 @@ type PeersStatusResponse struct {
func (x *PeersStatusResponse) Reset() {
*x = PeersStatusResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[10]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -573,7 +654,7 @@ func (x *PeersStatusResponse) String() string {
func (*PeersStatusResponse) ProtoMessage() {}
func (x *PeersStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[10]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -586,7 +667,7 @@ func (x *PeersStatusResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PeersStatusResponse.ProtoReflect.Descriptor instead.
func (*PeersStatusResponse) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{10}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{12}
}
func (x *PeersStatusResponse) GetPeer() *Peer {
@@ -613,7 +694,7 @@ type Peer struct {
func (x *Peer) Reset() {
*x = Peer{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[11]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -626,7 +707,7 @@ func (x *Peer) String() string {
func (*Peer) ProtoMessage() {}
func (x *Peer) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[11]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -639,7 +720,7 @@ func (x *Peer) ProtoReflect() protoreflect.Message {
// Deprecated: Use Peer.ProtoReflect.Descriptor instead.
func (*Peer) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{11}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{13}
}
func (x *Peer) GetId() string {
@@ -702,7 +783,7 @@ type ChainSetHeadRequest struct {
func (x *ChainSetHeadRequest) Reset() {
*x = ChainSetHeadRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[12]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -715,7 +796,7 @@ func (x *ChainSetHeadRequest) String() string {
func (*ChainSetHeadRequest) ProtoMessage() {}
func (x *ChainSetHeadRequest) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[12]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -728,7 +809,7 @@ func (x *ChainSetHeadRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChainSetHeadRequest.ProtoReflect.Descriptor instead.
func (*ChainSetHeadRequest) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{12}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{14}
}
func (x *ChainSetHeadRequest) GetNumber() uint64 {
@@ -747,7 +828,7 @@ type ChainSetHeadResponse struct {
func (x *ChainSetHeadResponse) Reset() {
*x = ChainSetHeadResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[13]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -760,7 +841,7 @@ func (x *ChainSetHeadResponse) String() string {
func (*ChainSetHeadResponse) ProtoMessage() {}
func (x *ChainSetHeadResponse) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[13]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -773,7 +854,7 @@ func (x *ChainSetHeadResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChainSetHeadResponse.ProtoReflect.Descriptor instead.
func (*ChainSetHeadResponse) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{13}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{15}
}
type StatusResponse struct {
@@ -792,7 +873,7 @@ type StatusResponse struct {
func (x *StatusResponse) Reset() {
*x = StatusResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[14]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -805,7 +886,7 @@ func (x *StatusResponse) String() string {
func (*StatusResponse) ProtoMessage() {}
func (x *StatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[14]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -818,7 +899,7 @@ func (x *StatusResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead.
func (*StatusResponse) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{14}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{16}
}
func (x *StatusResponse) GetCurrentBlock() *Header {
@@ -875,7 +956,7 @@ type Header struct {
func (x *Header) Reset() {
*x = Header{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[15]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -888,7 +969,7 @@ func (x *Header) String() string {
func (*Header) ProtoMessage() {}
func (x *Header) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[15]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -901,7 +982,7 @@ func (x *Header) ProtoReflect() protoreflect.Message {
// Deprecated: Use Header.ProtoReflect.Descriptor instead.
func (*Header) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{15}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{17}
}
func (x *Header) GetHash() string {
@@ -918,33 +999,33 @@ func (x *Header) GetNumber() uint64 {
return 0
}
-type PprofRequest struct {
+type DebugPprofRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Type PprofRequest_Type `protobuf:"varint,1,opt,name=type,proto3,enum=proto.PprofRequest_Type" json:"type,omitempty"`
- Profile string `protobuf:"bytes,2,opt,name=profile,proto3" json:"profile,omitempty"`
- Seconds int64 `protobuf:"varint,3,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ Type DebugPprofRequest_Type `protobuf:"varint,1,opt,name=type,proto3,enum=proto.DebugPprofRequest_Type" json:"type,omitempty"`
+ Profile string `protobuf:"bytes,2,opt,name=profile,proto3" json:"profile,omitempty"`
+ Seconds int64 `protobuf:"varint,3,opt,name=seconds,proto3" json:"seconds,omitempty"`
}
-func (x *PprofRequest) Reset() {
- *x = PprofRequest{}
+func (x *DebugPprofRequest) Reset() {
+ *x = DebugPprofRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[16]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *PprofRequest) String() string {
+func (x *DebugPprofRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*PprofRequest) ProtoMessage() {}
+func (*DebugPprofRequest) ProtoMessage() {}
-func (x *PprofRequest) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[16]
+func (x *DebugPprofRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -955,61 +1036,108 @@ func (x *PprofRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use PprofRequest.ProtoReflect.Descriptor instead.
-func (*PprofRequest) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{16}
+// Deprecated: Use DebugPprofRequest.ProtoReflect.Descriptor instead.
+func (*DebugPprofRequest) Descriptor() ([]byte, []int) {
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{18}
}
-func (x *PprofRequest) GetType() PprofRequest_Type {
+func (x *DebugPprofRequest) GetType() DebugPprofRequest_Type {
if x != nil {
return x.Type
}
- return PprofRequest_LOOKUP
+ return DebugPprofRequest_LOOKUP
}
-func (x *PprofRequest) GetProfile() string {
+func (x *DebugPprofRequest) GetProfile() string {
if x != nil {
return x.Profile
}
return ""
}
-func (x *PprofRequest) GetSeconds() int64 {
+func (x *DebugPprofRequest) GetSeconds() int64 {
if x != nil {
return x.Seconds
}
return 0
}
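For readers skimming the generated diff, here is a minimal same-package sketch (not part of the generated file; the "heap" profile name and the 30-second duration are purely illustrative) of how the renamed DebugPprofRequest message and the getters above are used:

package proto

// exampleDebugPprofRequest builds a DebugPprofRequest with the fields defined
// above and reads them back through the generated getters.
// DebugPprofRequest_LOOKUP is the enum's zero value, as the GetType fallback shows.
func exampleDebugPprofRequest() *DebugPprofRequest {
	req := &DebugPprofRequest{
		Type:    DebugPprofRequest_LOOKUP, // zero value of DebugPprofRequest_Type
		Profile: "heap",                   // illustrative profile name
		Seconds: 30,                       // illustrative duration
	}
	_ = req.GetType()    // DebugPprofRequest_LOOKUP
	_ = req.GetProfile() // "heap"
	_ = req.GetSeconds() // 30
	return req
}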
-type PprofResponse struct {
+type DebugBlockRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Number int64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"`
+}
+
+func (x *DebugBlockRequest) Reset() {
+ *x = DebugBlockRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DebugBlockRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DebugBlockRequest) ProtoMessage() {}
+
+func (x *DebugBlockRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DebugBlockRequest.ProtoReflect.Descriptor instead.
+func (*DebugBlockRequest) Descriptor() ([]byte, []int) {
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *DebugBlockRequest) GetNumber() int64 {
+ if x != nil {
+ return x.Number
+ }
+ return 0
+}
+
+type DebugFileResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to Event:
- // *PprofResponse_Open_
- // *PprofResponse_Input_
- // *PprofResponse_Eof
- Event isPprofResponse_Event `protobuf_oneof:"event"`
+ // *DebugFileResponse_Open_
+ // *DebugFileResponse_Input_
+ // *DebugFileResponse_Eof
+ Event isDebugFileResponse_Event `protobuf_oneof:"event"`
}
-func (x *PprofResponse) Reset() {
- *x = PprofResponse{}
+func (x *DebugFileResponse) Reset() {
+ *x = DebugFileResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[17]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *PprofResponse) String() string {
+func (x *DebugFileResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*PprofResponse) ProtoMessage() {}
+func (*DebugFileResponse) ProtoMessage() {}
-func (x *PprofResponse) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[17]
+func (x *DebugFileResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1020,60 +1148,60 @@ func (x *PprofResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use PprofResponse.ProtoReflect.Descriptor instead.
-func (*PprofResponse) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{17}
+// Deprecated: Use DebugFileResponse.ProtoReflect.Descriptor instead.
+func (*DebugFileResponse) Descriptor() ([]byte, []int) {
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{20}
}
-func (m *PprofResponse) GetEvent() isPprofResponse_Event {
+func (m *DebugFileResponse) GetEvent() isDebugFileResponse_Event {
if m != nil {
return m.Event
}
return nil
}
-func (x *PprofResponse) GetOpen() *PprofResponse_Open {
- if x, ok := x.GetEvent().(*PprofResponse_Open_); ok {
+func (x *DebugFileResponse) GetOpen() *DebugFileResponse_Open {
+ if x, ok := x.GetEvent().(*DebugFileResponse_Open_); ok {
return x.Open
}
return nil
}
-func (x *PprofResponse) GetInput() *PprofResponse_Input {
- if x, ok := x.GetEvent().(*PprofResponse_Input_); ok {
+func (x *DebugFileResponse) GetInput() *DebugFileResponse_Input {
+ if x, ok := x.GetEvent().(*DebugFileResponse_Input_); ok {
return x.Input
}
return nil
}
-func (x *PprofResponse) GetEof() *emptypb.Empty {
- if x, ok := x.GetEvent().(*PprofResponse_Eof); ok {
+func (x *DebugFileResponse) GetEof() *emptypb.Empty {
+ if x, ok := x.GetEvent().(*DebugFileResponse_Eof); ok {
return x.Eof
}
return nil
}
-type isPprofResponse_Event interface {
- isPprofResponse_Event()
+type isDebugFileResponse_Event interface {
+ isDebugFileResponse_Event()
}
-type PprofResponse_Open_ struct {
- Open *PprofResponse_Open `protobuf:"bytes,1,opt,name=open,proto3,oneof"`
+type DebugFileResponse_Open_ struct {
+ Open *DebugFileResponse_Open `protobuf:"bytes,1,opt,name=open,proto3,oneof"`
}
-type PprofResponse_Input_ struct {
- Input *PprofResponse_Input `protobuf:"bytes,2,opt,name=input,proto3,oneof"`
+type DebugFileResponse_Input_ struct {
+ Input *DebugFileResponse_Input `protobuf:"bytes,2,opt,name=input,proto3,oneof"`
}
-type PprofResponse_Eof struct {
+type DebugFileResponse_Eof struct {
Eof *emptypb.Empty `protobuf:"bytes,3,opt,name=eof,proto3,oneof"`
}
-func (*PprofResponse_Open_) isPprofResponse_Event() {}
+func (*DebugFileResponse_Open_) isDebugFileResponse_Event() {}
-func (*PprofResponse_Input_) isPprofResponse_Event() {}
+func (*DebugFileResponse_Input_) isDebugFileResponse_Event() {}
-func (*PprofResponse_Eof) isPprofResponse_Event() {}
+func (*DebugFileResponse_Eof) isDebugFileResponse_Event() {}
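A minimal same-package sketch (illustrative only, not generated code) of draining the DebugFileResponse oneof defined above: an Open event carries the header map, Input events carry payload chunks, and Eof terminates the sequence.

package proto

// collectDebugFile folds a sequence of DebugFileResponse events into the
// header map and the concatenated payload, using only the oneof getters
// generated above.
func collectDebugFile(events []*DebugFileResponse) (map[string]string, []byte) {
	var (
		headers map[string]string
		data    []byte
	)
	for _, ev := range events {
		switch {
		case ev.GetOpen() != nil:
			headers = ev.GetOpen().GetHeaders()
		case ev.GetInput() != nil:
			data = append(data, ev.GetInput().GetData()...)
		case ev.GetEof() != nil:
			return headers, data
		}
	}
	return headers, data
}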
type StatusResponse_Fork struct {
state protoimpl.MessageState
@@ -1088,7 +1216,7 @@ type StatusResponse_Fork struct {
func (x *StatusResponse_Fork) Reset() {
*x = StatusResponse_Fork{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[18]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1101,7 +1229,7 @@ func (x *StatusResponse_Fork) String() string {
func (*StatusResponse_Fork) ProtoMessage() {}
func (x *StatusResponse_Fork) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[18]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1114,7 +1242,7 @@ func (x *StatusResponse_Fork) ProtoReflect() protoreflect.Message {
// Deprecated: Use StatusResponse_Fork.ProtoReflect.Descriptor instead.
func (*StatusResponse_Fork) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{14, 0}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{16, 0}
}
func (x *StatusResponse_Fork) GetName() string {
@@ -1151,7 +1279,7 @@ type StatusResponse_Syncing struct {
func (x *StatusResponse_Syncing) Reset() {
*x = StatusResponse_Syncing{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[19]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1164,7 +1292,7 @@ func (x *StatusResponse_Syncing) String() string {
func (*StatusResponse_Syncing) ProtoMessage() {}
func (x *StatusResponse_Syncing) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[19]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1177,7 +1305,7 @@ func (x *StatusResponse_Syncing) ProtoReflect() protoreflect.Message {
// Deprecated: Use StatusResponse_Syncing.ProtoReflect.Descriptor instead.
func (*StatusResponse_Syncing) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{14, 1}
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{16, 1}
}
func (x *StatusResponse_Syncing) GetStartingBlock() int64 {
@@ -1201,32 +1329,31 @@ func (x *StatusResponse_Syncing) GetCurrentBlock() int64 {
return 0
}
-type PprofResponse_Open struct {
+type DebugFileResponse_Open struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Headers map[string]string `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
}
-func (x *PprofResponse_Open) Reset() {
- *x = PprofResponse_Open{}
+func (x *DebugFileResponse_Open) Reset() {
+ *x = DebugFileResponse_Open{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[20]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *PprofResponse_Open) String() string {
+func (x *DebugFileResponse_Open) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*PprofResponse_Open) ProtoMessage() {}
+func (*DebugFileResponse_Open) ProtoMessage() {}
-func (x *PprofResponse_Open) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[20]
+func (x *DebugFileResponse_Open) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1237,26 +1364,19 @@ func (x *PprofResponse_Open) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use PprofResponse_Open.ProtoReflect.Descriptor instead.
-func (*PprofResponse_Open) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{17, 0}
+// Deprecated: Use DebugFileResponse_Open.ProtoReflect.Descriptor instead.
+func (*DebugFileResponse_Open) Descriptor() ([]byte, []int) {
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{20, 0}
}
-func (x *PprofResponse_Open) GetHeaders() map[string]string {
+func (x *DebugFileResponse_Open) GetHeaders() map[string]string {
if x != nil {
return x.Headers
}
return nil
}
-func (x *PprofResponse_Open) GetSize() int64 {
- if x != nil {
- return x.Size
- }
- return 0
-}
-
-type PprofResponse_Input struct {
+type DebugFileResponse_Input struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -1264,23 +1384,23 @@ type PprofResponse_Input struct {
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}
-func (x *PprofResponse_Input) Reset() {
- *x = PprofResponse_Input{}
+func (x *DebugFileResponse_Input) Reset() {
+ *x = DebugFileResponse_Input{}
if protoimpl.UnsafeEnabled {
- mi := &file_command_server_proto_server_proto_msgTypes[21]
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *PprofResponse_Input) String() string {
+func (x *DebugFileResponse_Input) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*PprofResponse_Input) ProtoMessage() {}
+func (*DebugFileResponse_Input) ProtoMessage() {}
-func (x *PprofResponse_Input) ProtoReflect() protoreflect.Message {
- mi := &file_command_server_proto_server_proto_msgTypes[21]
+func (x *DebugFileResponse_Input) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_cli_server_proto_server_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1291,263 +1411,305 @@ func (x *PprofResponse_Input) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use PprofResponse_Input.ProtoReflect.Descriptor instead.
-func (*PprofResponse_Input) Descriptor() ([]byte, []int) {
- return file_command_server_proto_server_proto_rawDescGZIP(), []int{17, 1}
+// Deprecated: Use DebugFileResponse_Input.ProtoReflect.Descriptor instead.
+func (*DebugFileResponse_Input) Descriptor() ([]byte, []int) {
+ return file_internal_cli_server_proto_server_proto_rawDescGZIP(), []int{20, 1}
}
-func (x *PprofResponse_Input) GetData() []byte {
+func (x *DebugFileResponse_Input) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
-var File_command_server_proto_server_proto protoreflect.FileDescriptor
-
-var file_command_server_proto_server_proto_rawDesc = []byte{
- 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74,
- 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x13, 0x0a, 0x11, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x84, 0x01, 0x0a,
- 0x12, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x53, 0x74, 0x75, 0x62, 0x52, 0x08, 0x6f, 0x6c, 0x64, 0x63, 0x68, 0x61, 0x69,
- 0x6e, 0x12, 0x2c, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20,
+var File_internal_cli_server_proto_server_proto protoreflect.FileDescriptor
+
+var file_internal_cli_server_proto_server_proto_rawDesc = []byte{
+ 0x0a, 0x26, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6c, 0x69, 0x2f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x26, 0x0a, 0x0c,
+ 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06,
+ 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, 0x75,
+ 0x6d, 0x62, 0x65, 0x72, 0x22, 0x0f, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x57, 0x61,
+ 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x43,
+ 0x68, 0x61, 0x69, 0x6e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x42, 0x6c, 0x6f, 0x63,
- 0x6b, 0x53, 0x74, 0x75, 0x62, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12,
- 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74,
- 0x79, 0x70, 0x65, 0x22, 0x37, 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x74, 0x75, 0x62,
- 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x68, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x41, 0x0a, 0x0f,
- 0x50, 0x65, 0x65, 0x72, 0x73, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x22,
- 0x12, 0x0a, 0x10, 0x50, 0x65, 0x65, 0x72, 0x73, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x44, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f,
- 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12,
- 0x18, 0x0a, 0x07, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x07, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x50, 0x65, 0x65,
- 0x72, 0x73, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x12, 0x0a, 0x10, 0x50, 0x65, 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x22, 0x36, 0x0a, 0x11, 0x50, 0x65, 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73,
- 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x05, 0x70, 0x65, 0x65,
- 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x22, 0x2a, 0x0a, 0x12,
+ 0x6b, 0x53, 0x74, 0x75, 0x62, 0x52, 0x08, 0x6f, 0x6c, 0x64, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12,
+ 0x2c, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53,
+ 0x74, 0x75, 0x62, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x22, 0x37, 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x74, 0x75, 0x62, 0x12, 0x12,
+ 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61,
+ 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x41, 0x0a, 0x0f, 0x50, 0x65,
+ 0x65, 0x72, 0x73, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a,
+ 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e,
+ 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x22, 0x12, 0x0a,
+ 0x10, 0x50, 0x65, 0x65, 0x72, 0x73, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x44, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a,
+ 0x07, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
+ 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x73,
+ 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x12,
+ 0x0a, 0x10, 0x50, 0x65, 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x22, 0x36, 0x0a, 0x11, 0x50, 0x65, 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50,
+ 0x65, 0x65, 0x72, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x22, 0x2a, 0x0a, 0x12, 0x50, 0x65,
+ 0x65, 0x72, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x36, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x73, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a,
+ 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0x98,
+ 0x01, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a,
+ 0x03, 0x65, 0x6e, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x72, 0x12,
+ 0x12, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x63,
+ 0x61, 0x70, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x75, 0x73, 0x74,
+ 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65,
+ 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x22, 0x2d, 0x0a, 0x13, 0x43, 0x68, 0x61,
+ 0x69, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x16, 0x0a, 0x14, 0x43, 0x68, 0x61, 0x69,
+ 0x6e, 0x53, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0xe2, 0x03, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
+ 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x33, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
+ 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x75,
+ 0x72, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6e,
+ 0x75, 0x6d, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e,
+ 0x75, 0x6d, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x4d,
+ 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x4d,
+ 0x6f, 0x64, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x79, 0x6e, 0x63,
+ 0x69, 0x6e, 0x67, 0x52, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x05,
+ 0x66, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x2e, 0x46, 0x6f, 0x72, 0x6b, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x6b, 0x73, 0x1a, 0x4c,
+ 0x0a, 0x04, 0x46, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x77, 0x0a, 0x07,
+ 0x53, 0x79, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x22, 0x0a,
+ 0x0c, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x0c, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
+ 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x34, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12,
+ 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68,
+ 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xa2, 0x01, 0x0a, 0x11,
+ 0x44, 0x65, 0x62, 0x75, 0x67, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x31, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x50, 0x70, 0x72,
+ 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x18,
+ 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x26, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03,
+ 0x43, 0x50, 0x55, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x02,
+ 0x22, 0x2b, 0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xdd, 0x02,
+ 0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x04, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x46,
+ 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x6e,
+ 0x48, 0x00, 0x52, 0x04, 0x6f, 0x70, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x44, 0x65, 0x62, 0x75, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74,
+ 0x12, 0x2a, 0x0a, 0x03, 0x65, 0x6f, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6f, 0x66, 0x1a, 0x88, 0x01, 0x0a,
+ 0x04, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x44, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44,
+ 0x65, 0x62, 0x75, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x6e, 0x70, 0x75, 0x74,
+ 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
+ 0x64, 0x61, 0x74, 0x61, 0x42, 0x07, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x32, 0xdd, 0x04,
+ 0x0a, 0x03, 0x42, 0x6f, 0x72, 0x12, 0x3b, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x73, 0x41, 0x64,
+ 0x64, 0x12, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x41,
+ 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x6d, 0x6f, 0x76,
+ 0x65, 0x12, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52,
+ 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72,
+ 0x73, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65,
+ 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72,
+ 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
0x50, 0x65, 0x65, 0x72, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x36, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72,
- 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x1f, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72,
- 0x22, 0x98, 0x01, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f,
- 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12,
- 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e,
- 0x72, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x04, 0x63, 0x61, 0x70, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x75,
- 0x73, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x74, 0x72, 0x75, 0x73,
- 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x22, 0x2d, 0x0a, 0x13, 0x43,
- 0x68, 0x61, 0x69, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x16, 0x0a, 0x14, 0x43, 0x68,
- 0x61, 0x69, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0xe2, 0x03, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
- 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72,
- 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x33, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72,
- 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0d,
- 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1a, 0x0a,
- 0x08, 0x6e, 0x75, 0x6d, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x08, 0x6e, 0x75, 0x6d, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x79, 0x6e,
- 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, 0x6e,
- 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x69, 0x6e, 0x67,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x79,
- 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x30,
- 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x6f, 0x72, 0x6b, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x6b, 0x73,
- 0x1a, 0x4c, 0x0a, 0x04, 0x46, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x62, 0x6c, 0x6f,
- 0x63, 0x6b, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x77,
- 0x0a, 0x07, 0x53, 0x79, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x74, 0x61,
- 0x72, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12,
- 0x22, 0x0a, 0x0c, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65,
- 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x34, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65,
- 0x72, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x98, 0x01,
- 0x0a, 0x0c, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c,
- 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07,
- 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
- 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64,
- 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
- 0x22, 0x26, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x4f, 0x4b,
- 0x55, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x10, 0x01, 0x12, 0x09, 0x0a,
- 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x02, 0x22, 0xe1, 0x02, 0x0a, 0x0d, 0x50, 0x70, 0x72,
- 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x6f, 0x70,
- 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4f,
- 0x70, 0x65, 0x6e, 0x48, 0x00, 0x52, 0x04, 0x6f, 0x70, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x05, 0x69,
- 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12,
- 0x2a, 0x0a, 0x03, 0x65, 0x6f, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6f, 0x66, 0x1a, 0x98, 0x01, 0x0a, 0x04,
- 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x40, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70,
- 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x6e,
- 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68,
- 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65,
- 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12,
- 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64,
- 0x61, 0x74, 0x61, 0x42, 0x07, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x32, 0x8b, 0x04, 0x0a,
- 0x03, 0x42, 0x6f, 0x72, 0x12, 0x34, 0x0a, 0x05, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x12, 0x13, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x3b, 0x0a, 0x08, 0x50, 0x65,
- 0x65, 0x72, 0x73, 0x41, 0x64, 0x64, 0x12, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50,
- 0x65, 0x65, 0x72, 0x73, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x41, 0x64, 0x64, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x73,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50,
- 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52,
- 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a,
- 0x09, 0x50, 0x65, 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x17, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72,
- 0x73, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a,
- 0x0b, 0x50, 0x65, 0x65, 0x72, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x50, 0x65, 0x65, 0x72, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0c, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x53, 0x65, 0x74, 0x48,
- 0x65, 0x61, 0x64, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x53, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x53, 0x65, 0x74,
- 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x06,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x0a, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x57, 0x61,
- 0x74, 0x63, 0x68, 0x12, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x57, 0x61, 0x74, 0x63, 0x68,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x17, 0x5a, 0x15, 0x2f, 0x63,
- 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47,
+ 0x0a, 0x0c, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x12, 0x1a,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x53, 0x65, 0x74, 0x48,
+ 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x43, 0x0a, 0x0a, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x18,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x57, 0x61, 0x74, 0x63,
+ 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x50, 0x70,
+ 0x72, 0x6f, 0x66, 0x12, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x62, 0x75,
+ 0x67, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0a, 0x44, 0x65, 0x62,
+ 0x75, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x44, 0x65, 0x62, 0x75, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x46,
+ 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x1c, 0x5a,
+ 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6c, 0x69, 0x2f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
var (
- file_command_server_proto_server_proto_rawDescOnce sync.Once
- file_command_server_proto_server_proto_rawDescData = file_command_server_proto_server_proto_rawDesc
+ file_internal_cli_server_proto_server_proto_rawDescOnce sync.Once
+ file_internal_cli_server_proto_server_proto_rawDescData = file_internal_cli_server_proto_server_proto_rawDesc
)
-func file_command_server_proto_server_proto_rawDescGZIP() []byte {
- file_command_server_proto_server_proto_rawDescOnce.Do(func() {
- file_command_server_proto_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_command_server_proto_server_proto_rawDescData)
+func file_internal_cli_server_proto_server_proto_rawDescGZIP() []byte {
+ file_internal_cli_server_proto_server_proto_rawDescOnce.Do(func() {
+ file_internal_cli_server_proto_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_cli_server_proto_server_proto_rawDescData)
})
- return file_command_server_proto_server_proto_rawDescData
-}
-
-var file_command_server_proto_server_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_command_server_proto_server_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
-var file_command_server_proto_server_proto_goTypes = []interface{}{
- (PprofRequest_Type)(0), // 0: proto.PprofRequest.Type
- (*ChainWatchRequest)(nil), // 1: proto.ChainWatchRequest
- (*ChainWatchResponse)(nil), // 2: proto.ChainWatchResponse
- (*BlockStub)(nil), // 3: proto.BlockStub
- (*PeersAddRequest)(nil), // 4: proto.PeersAddRequest
- (*PeersAddResponse)(nil), // 5: proto.PeersAddResponse
- (*PeersRemoveRequest)(nil), // 6: proto.PeersRemoveRequest
- (*PeersRemoveResponse)(nil), // 7: proto.PeersRemoveResponse
- (*PeersListRequest)(nil), // 8: proto.PeersListRequest
- (*PeersListResponse)(nil), // 9: proto.PeersListResponse
- (*PeersStatusRequest)(nil), // 10: proto.PeersStatusRequest
- (*PeersStatusResponse)(nil), // 11: proto.PeersStatusResponse
- (*Peer)(nil), // 12: proto.Peer
- (*ChainSetHeadRequest)(nil), // 13: proto.ChainSetHeadRequest
- (*ChainSetHeadResponse)(nil), // 14: proto.ChainSetHeadResponse
- (*StatusResponse)(nil), // 15: proto.StatusResponse
- (*Header)(nil), // 16: proto.Header
- (*PprofRequest)(nil), // 17: proto.PprofRequest
- (*PprofResponse)(nil), // 18: proto.PprofResponse
- (*StatusResponse_Fork)(nil), // 19: proto.StatusResponse.Fork
- (*StatusResponse_Syncing)(nil), // 20: proto.StatusResponse.Syncing
- (*PprofResponse_Open)(nil), // 21: proto.PprofResponse.Open
- (*PprofResponse_Input)(nil), // 22: proto.PprofResponse.Input
- nil, // 23: proto.PprofResponse.Open.HeadersEntry
- (*emptypb.Empty)(nil), // 24: google.protobuf.Empty
-}
-var file_command_server_proto_server_proto_depIdxs = []int32{
- 3, // 0: proto.ChainWatchResponse.oldchain:type_name -> proto.BlockStub
- 3, // 1: proto.ChainWatchResponse.newchain:type_name -> proto.BlockStub
- 12, // 2: proto.PeersListResponse.peers:type_name -> proto.Peer
- 12, // 3: proto.PeersStatusResponse.peer:type_name -> proto.Peer
- 16, // 4: proto.StatusResponse.currentBlock:type_name -> proto.Header
- 16, // 5: proto.StatusResponse.currentHeader:type_name -> proto.Header
- 20, // 6: proto.StatusResponse.syncing:type_name -> proto.StatusResponse.Syncing
- 19, // 7: proto.StatusResponse.forks:type_name -> proto.StatusResponse.Fork
- 0, // 8: proto.PprofRequest.type:type_name -> proto.PprofRequest.Type
- 21, // 9: proto.PprofResponse.open:type_name -> proto.PprofResponse.Open
- 22, // 10: proto.PprofResponse.input:type_name -> proto.PprofResponse.Input
- 24, // 11: proto.PprofResponse.eof:type_name -> google.protobuf.Empty
- 23, // 12: proto.PprofResponse.Open.headers:type_name -> proto.PprofResponse.Open.HeadersEntry
- 17, // 13: proto.Bor.Pprof:input_type -> proto.PprofRequest
- 4, // 14: proto.Bor.PeersAdd:input_type -> proto.PeersAddRequest
- 6, // 15: proto.Bor.PeersRemove:input_type -> proto.PeersRemoveRequest
- 8, // 16: proto.Bor.PeersList:input_type -> proto.PeersListRequest
- 10, // 17: proto.Bor.PeersStatus:input_type -> proto.PeersStatusRequest
- 13, // 18: proto.Bor.ChainSetHead:input_type -> proto.ChainSetHeadRequest
- 24, // 19: proto.Bor.Status:input_type -> google.protobuf.Empty
- 1, // 20: proto.Bor.ChainWatch:input_type -> proto.ChainWatchRequest
- 18, // 21: proto.Bor.Pprof:output_type -> proto.PprofResponse
- 5, // 22: proto.Bor.PeersAdd:output_type -> proto.PeersAddResponse
- 7, // 23: proto.Bor.PeersRemove:output_type -> proto.PeersRemoveResponse
- 9, // 24: proto.Bor.PeersList:output_type -> proto.PeersListResponse
- 11, // 25: proto.Bor.PeersStatus:output_type -> proto.PeersStatusResponse
- 14, // 26: proto.Bor.ChainSetHead:output_type -> proto.ChainSetHeadResponse
- 15, // 27: proto.Bor.Status:output_type -> proto.StatusResponse
- 2, // 28: proto.Bor.ChainWatch:output_type -> proto.ChainWatchResponse
- 21, // [21:29] is the sub-list for method output_type
- 13, // [13:21] is the sub-list for method input_type
+ return file_internal_cli_server_proto_server_proto_rawDescData
+}
+
+var file_internal_cli_server_proto_server_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_internal_cli_server_proto_server_proto_msgTypes = make([]protoimpl.MessageInfo, 26)
+var file_internal_cli_server_proto_server_proto_goTypes = []interface{}{
+ (DebugPprofRequest_Type)(0), // 0: proto.DebugPprofRequest.Type
+ (*TraceRequest)(nil), // 1: proto.TraceRequest
+ (*TraceResponse)(nil), // 2: proto.TraceResponse
+ (*ChainWatchRequest)(nil), // 3: proto.ChainWatchRequest
+ (*ChainWatchResponse)(nil), // 4: proto.ChainWatchResponse
+ (*BlockStub)(nil), // 5: proto.BlockStub
+ (*PeersAddRequest)(nil), // 6: proto.PeersAddRequest
+ (*PeersAddResponse)(nil), // 7: proto.PeersAddResponse
+ (*PeersRemoveRequest)(nil), // 8: proto.PeersRemoveRequest
+ (*PeersRemoveResponse)(nil), // 9: proto.PeersRemoveResponse
+ (*PeersListRequest)(nil), // 10: proto.PeersListRequest
+ (*PeersListResponse)(nil), // 11: proto.PeersListResponse
+ (*PeersStatusRequest)(nil), // 12: proto.PeersStatusRequest
+ (*PeersStatusResponse)(nil), // 13: proto.PeersStatusResponse
+ (*Peer)(nil), // 14: proto.Peer
+ (*ChainSetHeadRequest)(nil), // 15: proto.ChainSetHeadRequest
+ (*ChainSetHeadResponse)(nil), // 16: proto.ChainSetHeadResponse
+ (*StatusResponse)(nil), // 17: proto.StatusResponse
+ (*Header)(nil), // 18: proto.Header
+ (*DebugPprofRequest)(nil), // 19: proto.DebugPprofRequest
+ (*DebugBlockRequest)(nil), // 20: proto.DebugBlockRequest
+ (*DebugFileResponse)(nil), // 21: proto.DebugFileResponse
+ (*StatusResponse_Fork)(nil), // 22: proto.StatusResponse.Fork
+ (*StatusResponse_Syncing)(nil), // 23: proto.StatusResponse.Syncing
+ (*DebugFileResponse_Open)(nil), // 24: proto.DebugFileResponse.Open
+ (*DebugFileResponse_Input)(nil), // 25: proto.DebugFileResponse.Input
+ nil, // 26: proto.DebugFileResponse.Open.HeadersEntry
+ (*emptypb.Empty)(nil), // 27: google.protobuf.Empty
+}
+var file_internal_cli_server_proto_server_proto_depIdxs = []int32{
+ 5, // 0: proto.ChainWatchResponse.oldchain:type_name -> proto.BlockStub
+ 5, // 1: proto.ChainWatchResponse.newchain:type_name -> proto.BlockStub
+ 14, // 2: proto.PeersListResponse.peers:type_name -> proto.Peer
+ 14, // 3: proto.PeersStatusResponse.peer:type_name -> proto.Peer
+ 18, // 4: proto.StatusResponse.currentBlock:type_name -> proto.Header
+ 18, // 5: proto.StatusResponse.currentHeader:type_name -> proto.Header
+ 23, // 6: proto.StatusResponse.syncing:type_name -> proto.StatusResponse.Syncing
+ 22, // 7: proto.StatusResponse.forks:type_name -> proto.StatusResponse.Fork
+ 0, // 8: proto.DebugPprofRequest.type:type_name -> proto.DebugPprofRequest.Type
+ 24, // 9: proto.DebugFileResponse.open:type_name -> proto.DebugFileResponse.Open
+ 25, // 10: proto.DebugFileResponse.input:type_name -> proto.DebugFileResponse.Input
+ 27, // 11: proto.DebugFileResponse.eof:type_name -> google.protobuf.Empty
+ 26, // 12: proto.DebugFileResponse.Open.headers:type_name -> proto.DebugFileResponse.Open.HeadersEntry
+ 6, // 13: proto.Bor.PeersAdd:input_type -> proto.PeersAddRequest
+ 8, // 14: proto.Bor.PeersRemove:input_type -> proto.PeersRemoveRequest
+ 10, // 15: proto.Bor.PeersList:input_type -> proto.PeersListRequest
+ 12, // 16: proto.Bor.PeersStatus:input_type -> proto.PeersStatusRequest
+ 15, // 17: proto.Bor.ChainSetHead:input_type -> proto.ChainSetHeadRequest
+ 27, // 18: proto.Bor.Status:input_type -> google.protobuf.Empty
+ 3, // 19: proto.Bor.ChainWatch:input_type -> proto.ChainWatchRequest
+ 19, // 20: proto.Bor.DebugPprof:input_type -> proto.DebugPprofRequest
+ 20, // 21: proto.Bor.DebugBlock:input_type -> proto.DebugBlockRequest
+ 7, // 22: proto.Bor.PeersAdd:output_type -> proto.PeersAddResponse
+ 9, // 23: proto.Bor.PeersRemove:output_type -> proto.PeersRemoveResponse
+ 11, // 24: proto.Bor.PeersList:output_type -> proto.PeersListResponse
+ 13, // 25: proto.Bor.PeersStatus:output_type -> proto.PeersStatusResponse
+ 16, // 26: proto.Bor.ChainSetHead:output_type -> proto.ChainSetHeadResponse
+ 17, // 27: proto.Bor.Status:output_type -> proto.StatusResponse
+ 4, // 28: proto.Bor.ChainWatch:output_type -> proto.ChainWatchResponse
+ 21, // 29: proto.Bor.DebugPprof:output_type -> proto.DebugFileResponse
+ 21, // 30: proto.Bor.DebugBlock:output_type -> proto.DebugFileResponse
+ 22, // [22:31] is the sub-list for method output_type
+ 13, // [13:22] is the sub-list for method input_type
13, // [13:13] is the sub-list for extension type_name
13, // [13:13] is the sub-list for extension extendee
0, // [0:13] is the sub-list for field type_name
}
-func init() { file_command_server_proto_server_proto_init() }
-func file_command_server_proto_server_proto_init() {
- if File_command_server_proto_server_proto != nil {
+func init() { file_internal_cli_server_proto_server_proto_init() }
+func file_internal_cli_server_proto_server_proto_init() {
+ if File_internal_cli_server_proto_server_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
- file_command_server_proto_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TraceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_internal_cli_server_proto_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TraceResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_internal_cli_server_proto_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChainWatchRequest); i {
case 0:
return &v.state
@@ -1559,7 +1721,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChainWatchResponse); i {
case 0:
return &v.state
@@ -1571,7 +1733,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BlockStub); i {
case 0:
return &v.state
@@ -1583,7 +1745,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PeersAddRequest); i {
case 0:
return &v.state
@@ -1595,7 +1757,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PeersAddResponse); i {
case 0:
return &v.state
@@ -1607,7 +1769,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PeersRemoveRequest); i {
case 0:
return &v.state
@@ -1619,7 +1781,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PeersRemoveResponse); i {
case 0:
return &v.state
@@ -1631,7 +1793,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PeersListRequest); i {
case 0:
return &v.state
@@ -1643,7 +1805,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PeersListResponse); i {
case 0:
return &v.state
@@ -1655,7 +1817,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PeersStatusRequest); i {
case 0:
return &v.state
@@ -1667,7 +1829,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PeersStatusResponse); i {
case 0:
return &v.state
@@ -1679,7 +1841,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Peer); i {
case 0:
return &v.state
@@ -1691,7 +1853,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChainSetHeadRequest); i {
case 0:
return &v.state
@@ -1703,7 +1865,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChainSetHeadResponse); i {
case 0:
return &v.state
@@ -1715,7 +1877,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StatusResponse); i {
case 0:
return &v.state
@@ -1727,7 +1889,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Header); i {
case 0:
return &v.state
@@ -1739,8 +1901,20 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PprofRequest); i {
+ file_internal_cli_server_proto_server_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DebugPprofRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_internal_cli_server_proto_server_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DebugBlockRequest); i {
case 0:
return &v.state
case 1:
@@ -1751,8 +1925,8 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PprofResponse); i {
+ file_internal_cli_server_proto_server_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DebugFileResponse); i {
case 0:
return &v.state
case 1:
@@ -1763,7 +1937,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StatusResponse_Fork); i {
case 0:
return &v.state
@@ -1775,7 +1949,7 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_internal_cli_server_proto_server_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StatusResponse_Syncing); i {
case 0:
return &v.state
@@ -1787,8 +1961,8 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PprofResponse_Open); i {
+ file_internal_cli_server_proto_server_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DebugFileResponse_Open); i {
case 0:
return &v.state
case 1:
@@ -1799,8 +1973,8 @@ func file_command_server_proto_server_proto_init() {
return nil
}
}
- file_command_server_proto_server_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PprofResponse_Input); i {
+ file_internal_cli_server_proto_server_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DebugFileResponse_Input); i {
case 0:
return &v.state
case 1:
@@ -1812,28 +1986,28 @@ func file_command_server_proto_server_proto_init() {
}
}
}
- file_command_server_proto_server_proto_msgTypes[17].OneofWrappers = []interface{}{
- (*PprofResponse_Open_)(nil),
- (*PprofResponse_Input_)(nil),
- (*PprofResponse_Eof)(nil),
+ file_internal_cli_server_proto_server_proto_msgTypes[20].OneofWrappers = []interface{}{
+ (*DebugFileResponse_Open_)(nil),
+ (*DebugFileResponse_Input_)(nil),
+ (*DebugFileResponse_Eof)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_command_server_proto_server_proto_rawDesc,
+ RawDescriptor: file_internal_cli_server_proto_server_proto_rawDesc,
NumEnums: 1,
- NumMessages: 23,
+ NumMessages: 26,
NumExtensions: 0,
NumServices: 1,
},
- GoTypes: file_command_server_proto_server_proto_goTypes,
- DependencyIndexes: file_command_server_proto_server_proto_depIdxs,
- EnumInfos: file_command_server_proto_server_proto_enumTypes,
- MessageInfos: file_command_server_proto_server_proto_msgTypes,
+ GoTypes: file_internal_cli_server_proto_server_proto_goTypes,
+ DependencyIndexes: file_internal_cli_server_proto_server_proto_depIdxs,
+ EnumInfos: file_internal_cli_server_proto_server_proto_enumTypes,
+ MessageInfos: file_internal_cli_server_proto_server_proto_msgTypes,
}.Build()
- File_command_server_proto_server_proto = out.File
- file_command_server_proto_server_proto_rawDesc = nil
- file_command_server_proto_server_proto_goTypes = nil
- file_command_server_proto_server_proto_depIdxs = nil
+ File_internal_cli_server_proto_server_proto = out.File
+ file_internal_cli_server_proto_server_proto_rawDesc = nil
+ file_internal_cli_server_proto_server_proto_goTypes = nil
+ file_internal_cli_server_proto_server_proto_depIdxs = nil
}
diff --git a/internal/cli/server/proto/server.proto b/internal/cli/server/proto/server.proto
index 2dc84bff78..1520ab6536 100644
--- a/internal/cli/server/proto/server.proto
+++ b/internal/cli/server/proto/server.proto
@@ -4,11 +4,9 @@ package proto;
import "google/protobuf/empty.proto";
-option go_package = "/command/server/proto";
+option go_package = "/internal/cli/server/proto";
service Bor {
- rpc Pprof(PprofRequest) returns (stream PprofResponse);
-
rpc PeersAdd(PeersAddRequest) returns (PeersAddResponse);
rpc PeersRemove(PeersRemoveRequest) returns (PeersRemoveResponse);
@@ -20,8 +18,20 @@ service Bor {
rpc ChainSetHead(ChainSetHeadRequest) returns (ChainSetHeadResponse);
rpc Status(google.protobuf.Empty) returns (StatusResponse);
-
+
rpc ChainWatch(ChainWatchRequest) returns (stream ChainWatchResponse);
+
+ rpc DebugPprof(DebugPprofRequest) returns (stream DebugFileResponse);
+
+ rpc DebugBlock(DebugBlockRequest) returns (stream DebugFileResponse);
+}
+
+message TraceRequest {
+ int64 number = 1;
+}
+
+message TraceResponse {
+
}
message ChainWatchRequest {
@@ -113,7 +123,7 @@ message Header {
uint64 number = 2;
}
-message PprofRequest {
+message DebugPprofRequest {
Type type = 1;
string profile = 2;
@@ -127,7 +137,11 @@ message PprofRequest {
}
}
-message PprofResponse {
+message DebugBlockRequest {
+ int64 number = 1;
+}
+
+message DebugFileResponse {
oneof event {
Open open = 1;
Input input = 2;
@@ -136,7 +150,6 @@ message PprofResponse {
message Open {
    map<string, string> headers = 1;
- int64 size = 2;
}
message Input {
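The proto changes above drop the old Pprof RPC and introduce two server-streaming debug RPCs, DebugPprof and DebugBlock, both of which answer with a stream of DebugFileResponse messages (an Open event with headers, Input chunks with raw bytes, and an Empty eof marker). A minimal client-side sketch against the regenerated bindings, assuming a *grpc.ClientConn obtained elsewhere; the requestCPUProfile name and the 10-second duration are illustrative, not taken from this diff:

package debugclient

import (
	"context"

	"google.golang.org/grpc"

	"github.com/ethereum/go-ethereum/internal/cli/server/proto"
)

// requestCPUProfile asks the node for a CPU profile over the Bor gRPC API.
// The reply is a stream of DebugFileResponse events (Open, Input chunks, Eof).
func requestCPUProfile(conn *grpc.ClientConn) (proto.Bor_DebugPprofClient, error) {
	client := proto.NewBorClient(conn)

	return client.DebugPprof(context.Background(), &proto.DebugPprofRequest{
		Type:    proto.DebugPprofRequest_CPU,
		Seconds: 10, // profile duration; LOOKUP profiles use the Profile field instead
	})
}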
diff --git a/internal/cli/server/proto/server_grpc.pb.go b/internal/cli/server/proto/server_grpc.pb.go
index 9ed2593e47..bd4ecb660d 100644
--- a/internal/cli/server/proto/server_grpc.pb.go
+++ b/internal/cli/server/proto/server_grpc.pb.go
@@ -1,13 +1,14 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.17.3
-// source: command/server/proto/server.proto
+// - protoc v3.19.3
+// source: internal/cli/server/proto/server.proto
package proto
import (
context "context"
+
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
@@ -23,7 +24,6 @@ const _ = grpc.SupportPackageIsVersion7
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type BorClient interface {
- Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (Bor_PprofClient, error)
PeersAdd(ctx context.Context, in *PeersAddRequest, opts ...grpc.CallOption) (*PeersAddResponse, error)
PeersRemove(ctx context.Context, in *PeersRemoveRequest, opts ...grpc.CallOption) (*PeersRemoveResponse, error)
PeersList(ctx context.Context, in *PeersListRequest, opts ...grpc.CallOption) (*PeersListResponse, error)
@@ -31,6 +31,8 @@ type BorClient interface {
ChainSetHead(ctx context.Context, in *ChainSetHeadRequest, opts ...grpc.CallOption) (*ChainSetHeadResponse, error)
Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StatusResponse, error)
ChainWatch(ctx context.Context, in *ChainWatchRequest, opts ...grpc.CallOption) (Bor_ChainWatchClient, error)
+ DebugPprof(ctx context.Context, in *DebugPprofRequest, opts ...grpc.CallOption) (Bor_DebugPprofClient, error)
+ DebugBlock(ctx context.Context, in *DebugBlockRequest, opts ...grpc.CallOption) (Bor_DebugBlockClient, error)
}
type borClient struct {
@@ -41,38 +43,6 @@ func NewBorClient(cc grpc.ClientConnInterface) BorClient {
return &borClient{cc}
}
-func (c *borClient) Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (Bor_PprofClient, error) {
- stream, err := c.cc.NewStream(ctx, &Bor_ServiceDesc.Streams[0], "/proto.Bor/Pprof", opts...)
- if err != nil {
- return nil, err
- }
- x := &borPprofClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Bor_PprofClient interface {
- Recv() (*PprofResponse, error)
- grpc.ClientStream
-}
-
-type borPprofClient struct {
- grpc.ClientStream
-}
-
-func (x *borPprofClient) Recv() (*PprofResponse, error) {
- m := new(PprofResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
func (c *borClient) PeersAdd(ctx context.Context, in *PeersAddRequest, opts ...grpc.CallOption) (*PeersAddResponse, error) {
out := new(PeersAddResponse)
err := c.cc.Invoke(ctx, "/proto.Bor/PeersAdd", in, out, opts...)
@@ -128,7 +98,7 @@ func (c *borClient) Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.
}
func (c *borClient) ChainWatch(ctx context.Context, in *ChainWatchRequest, opts ...grpc.CallOption) (Bor_ChainWatchClient, error) {
- stream, err := c.cc.NewStream(ctx, &Bor_ServiceDesc.Streams[1], "/proto.Bor/ChainWatch", opts...)
+ stream, err := c.cc.NewStream(ctx, &Bor_ServiceDesc.Streams[0], "/proto.Bor/ChainWatch", opts...)
if err != nil {
return nil, err
}
@@ -159,11 +129,74 @@ func (x *borChainWatchClient) Recv() (*ChainWatchResponse, error) {
return m, nil
}
+func (c *borClient) DebugPprof(ctx context.Context, in *DebugPprofRequest, opts ...grpc.CallOption) (Bor_DebugPprofClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Bor_ServiceDesc.Streams[1], "/proto.Bor/DebugPprof", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &borDebugPprofClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Bor_DebugPprofClient interface {
+ Recv() (*DebugFileResponse, error)
+ grpc.ClientStream
+}
+
+type borDebugPprofClient struct {
+ grpc.ClientStream
+}
+
+func (x *borDebugPprofClient) Recv() (*DebugFileResponse, error) {
+ m := new(DebugFileResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *borClient) DebugBlock(ctx context.Context, in *DebugBlockRequest, opts ...grpc.CallOption) (Bor_DebugBlockClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Bor_ServiceDesc.Streams[2], "/proto.Bor/DebugBlock", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &borDebugBlockClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Bor_DebugBlockClient interface {
+ Recv() (*DebugFileResponse, error)
+ grpc.ClientStream
+}
+
+type borDebugBlockClient struct {
+ grpc.ClientStream
+}
+
+func (x *borDebugBlockClient) Recv() (*DebugFileResponse, error) {
+ m := new(DebugFileResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
// BorServer is the server API for Bor service.
// All implementations must embed UnimplementedBorServer
// for forward compatibility
type BorServer interface {
- Pprof(*PprofRequest, Bor_PprofServer) error
PeersAdd(context.Context, *PeersAddRequest) (*PeersAddResponse, error)
PeersRemove(context.Context, *PeersRemoveRequest) (*PeersRemoveResponse, error)
PeersList(context.Context, *PeersListRequest) (*PeersListResponse, error)
@@ -171,6 +204,8 @@ type BorServer interface {
ChainSetHead(context.Context, *ChainSetHeadRequest) (*ChainSetHeadResponse, error)
Status(context.Context, *emptypb.Empty) (*StatusResponse, error)
ChainWatch(*ChainWatchRequest, Bor_ChainWatchServer) error
+ DebugPprof(*DebugPprofRequest, Bor_DebugPprofServer) error
+ DebugBlock(*DebugBlockRequest, Bor_DebugBlockServer) error
mustEmbedUnimplementedBorServer()
}
@@ -178,9 +213,6 @@ type BorServer interface {
type UnimplementedBorServer struct {
}
-func (UnimplementedBorServer) Pprof(*PprofRequest, Bor_PprofServer) error {
- return status.Errorf(codes.Unimplemented, "method Pprof not implemented")
-}
func (UnimplementedBorServer) PeersAdd(context.Context, *PeersAddRequest) (*PeersAddResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method PeersAdd not implemented")
}
@@ -202,6 +234,12 @@ func (UnimplementedBorServer) Status(context.Context, *emptypb.Empty) (*StatusRe
func (UnimplementedBorServer) ChainWatch(*ChainWatchRequest, Bor_ChainWatchServer) error {
return status.Errorf(codes.Unimplemented, "method ChainWatch not implemented")
}
+func (UnimplementedBorServer) DebugPprof(*DebugPprofRequest, Bor_DebugPprofServer) error {
+ return status.Errorf(codes.Unimplemented, "method DebugPprof not implemented")
+}
+func (UnimplementedBorServer) DebugBlock(*DebugBlockRequest, Bor_DebugBlockServer) error {
+ return status.Errorf(codes.Unimplemented, "method DebugBlock not implemented")
+}
func (UnimplementedBorServer) mustEmbedUnimplementedBorServer() {}
// UnsafeBorServer may be embedded to opt out of forward compatibility for this service.
@@ -215,27 +253,6 @@ func RegisterBorServer(s grpc.ServiceRegistrar, srv BorServer) {
s.RegisterService(&Bor_ServiceDesc, srv)
}
-func _Bor_Pprof_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(PprofRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(BorServer).Pprof(m, &borPprofServer{stream})
-}
-
-type Bor_PprofServer interface {
- Send(*PprofResponse) error
- grpc.ServerStream
-}
-
-type borPprofServer struct {
- grpc.ServerStream
-}
-
-func (x *borPprofServer) Send(m *PprofResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
func _Bor_PeersAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PeersAddRequest)
if err := dec(in); err != nil {
@@ -365,6 +382,48 @@ func (x *borChainWatchServer) Send(m *ChainWatchResponse) error {
return x.ServerStream.SendMsg(m)
}
+func _Bor_DebugPprof_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(DebugPprofRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(BorServer).DebugPprof(m, &borDebugPprofServer{stream})
+}
+
+type Bor_DebugPprofServer interface {
+ Send(*DebugFileResponse) error
+ grpc.ServerStream
+}
+
+type borDebugPprofServer struct {
+ grpc.ServerStream
+}
+
+func (x *borDebugPprofServer) Send(m *DebugFileResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Bor_DebugBlock_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(DebugBlockRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(BorServer).DebugBlock(m, &borDebugBlockServer{stream})
+}
+
+type Bor_DebugBlockServer interface {
+ Send(*DebugFileResponse) error
+ grpc.ServerStream
+}
+
+type borDebugBlockServer struct {
+ grpc.ServerStream
+}
+
+func (x *borDebugBlockServer) Send(m *DebugFileResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
// Bor_ServiceDesc is the grpc.ServiceDesc for Bor service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -399,15 +458,20 @@ var Bor_ServiceDesc = grpc.ServiceDesc{
},
Streams: []grpc.StreamDesc{
{
- StreamName: "Pprof",
- Handler: _Bor_Pprof_Handler,
+ StreamName: "ChainWatch",
+ Handler: _Bor_ChainWatch_Handler,
ServerStreams: true,
},
{
- StreamName: "ChainWatch",
- Handler: _Bor_ChainWatch_Handler,
+ StreamName: "DebugPprof",
+ Handler: _Bor_DebugPprof_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "DebugBlock",
+ Handler: _Bor_DebugBlock_Handler,
ServerStreams: true,
},
},
- Metadata: "command/server/proto/server.proto",
+ Metadata: "internal/cli/server/proto/server.proto",
}
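With the regenerated service descriptor, ChainWatch now sits at Streams[0] and the two new debug streams at Streams[1] and Streams[2]; registration still goes through RegisterBorServer. A standalone sketch of that entry point, assuming srv is any implementation of proto.BorServer (the in-tree Server wires this up in its own setupGRPCServer, which appears further down in this diff):

package debugserver

import (
	"net"

	"google.golang.org/grpc"

	"github.com/ethereum/go-ethereum/internal/cli/server/proto"
)

// serveBor registers a BorServer implementation and blocks serving requests.
// Embedding proto.UnimplementedBorServer in srv keeps it forward compatible
// when new RPCs such as DebugPprof and DebugBlock are added.
func serveBor(addr string, srv proto.BorServer) error {
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	g := grpc.NewServer()
	proto.RegisterBorServer(g, srv)

	return g.Serve(lis)
}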
diff --git a/internal/cli/server/server.go b/internal/cli/server/server.go
index e882429e86..70187d6985 100644
--- a/internal/cli/server/server.go
+++ b/internal/cli/server/server.go
@@ -4,13 +4,28 @@ import (
"context"
"fmt"
"io"
+ "math/big"
"net"
"net/http"
"os"
"strings"
"time"
+ "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
+ "google.golang.org/grpc"
+
+ "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
+ "github.com/ethereum/go-ethereum/consensus/beacon"
+ "github.com/ethereum/go-ethereum/consensus/bor"
+ "github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethstats"
@@ -21,15 +36,10 @@ import (
"github.com/ethereum/go-ethereum/metrics/influxdb"
"github.com/ethereum/go-ethereum/metrics/prometheus"
"github.com/ethereum/go-ethereum/node"
- "github.com/mattn/go-colorable"
- "github.com/mattn/go-isatty"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/sdk/resource"
- sdktrace "go.opentelemetry.io/otel/sdk/trace"
- semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
- "google.golang.org/grpc"
+
+ // Force-load the tracer engines to trigger registration
+ _ "github.com/ethereum/go-ethereum/eth/tracers/js"
+ _ "github.com/ethereum/go-ethereum/eth/tracers/native"
)
type Server struct {
@@ -39,6 +49,9 @@ type Server struct {
grpcServer *grpc.Server
tracer *sdktrace.TracerProvider
config *Config
+
+ // tracerAPI to trace block executions
+ tracerAPI *tracers.API
}
func NewServer(config *Config) (*Server, error) {
@@ -63,71 +76,153 @@ func NewServer(config *Config) (*Server, error) {
if err != nil {
return nil, err
}
+
stack, err := node.New(nodeCfg)
if err != nil {
return nil, err
}
- srv.node = stack
// setup account manager (only keystore)
- {
- keydir := stack.KeyStoreDir()
- n, p := keystore.StandardScryptN, keystore.StandardScryptP
- if config.Accounts.UseLightweightKDF {
- n, p = keystore.LightScryptN, keystore.LightScryptP
- }
- stack.AccountManager().AddBackend(keystore.NewKeyStore(keydir, n, p))
- }
+ // create a new account manager, only for the scope of this function
+ accountManager := accounts.NewManager(&accounts.Config{})
- // register the ethereum backend
- ethCfg, err := config.buildEth(stack)
- if err != nil {
- return nil, err
+ // register backend to account manager with keystore for signing
+ keydir := stack.KeyStoreDir()
+
+ n, p := keystore.StandardScryptN, keystore.StandardScryptP
+ if config.Accounts.UseLightweightKDF {
+ n, p = keystore.LightScryptN, keystore.LightScryptP
}
- backend, err := eth.New(stack, ethCfg)
- if err != nil {
- return nil, err
+ // proceed to authorize the local account manager in any case
+ accountManager.AddBackend(keystore.NewKeyStore(keydir, n, p))
+
+ // flag to set if we're authorizing consensus here
+ authorized := false
+
+ // check if personal wallet endpoints are disabled or not
+ // nolint:nestif
+ if !config.Accounts.DisableBorWallet {
+ // add keystore globally to the node's account manager if personal wallet is enabled
+ stack.AccountManager().AddBackend(keystore.NewKeyStore(keydir, n, p))
+
+ // register the ethereum backend
+ ethCfg, err := config.buildEth(stack, stack.AccountManager())
+ if err != nil {
+ return nil, err
+ }
+
+ backend, err := eth.New(stack, ethCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ srv.backend = backend
+ } else {
+ // register the ethereum backend (with temporary created account manager)
+ ethCfg, err := config.buildEth(stack, accountManager)
+ if err != nil {
+ return nil, err
+ }
+
+ backend, err := eth.New(stack, ethCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ srv.backend = backend
+
+ // authorize only if mining or in developer mode
+ if config.Sealer.Enabled || config.Developer.Enabled {
+ // get the etherbase
+ eb, err := srv.backend.Etherbase()
+ if err != nil {
+ log.Error("Cannot start mining without etherbase", "err", err)
+
+ return nil, fmt.Errorf("etherbase missing: %v", err)
+ }
+
+ // Authorize the clique consensus (if chosen) to sign using wallet signer
+ var cli *clique.Clique
+ if c, ok := srv.backend.Engine().(*clique.Clique); ok {
+ cli = c
+ } else if cl, ok := srv.backend.Engine().(*beacon.Beacon); ok {
+ if c, ok := cl.InnerEngine().(*clique.Clique); ok {
+ cli = c
+ }
+ }
+ if cli != nil {
+ wallet, err := accountManager.Find(accounts.Account{Address: eb})
+ if wallet == nil || err != nil {
+ log.Error("Etherbase account unavailable locally", "err", err)
+
+ return nil, fmt.Errorf("signer missing: %v", err)
+ }
+
+ cli.Authorize(eb, wallet.SignData)
+ authorized = true
+ }
+
+ // Authorize the bor consensus (if chosen) to sign using wallet signer
+ if bor, ok := srv.backend.Engine().(*bor.Bor); ok {
+ wallet, err := accountManager.Find(accounts.Account{Address: eb})
+ if wallet == nil || err != nil {
+ log.Error("Etherbase account unavailable locally", "err", err)
+ return nil, fmt.Errorf("signer missing: %v", err)
+ }
+
+ bor.Authorize(eb, wallet.SignData)
+ authorized = true
+ }
+ }
}
- srv.backend = backend
+
+ // set the auth status in backend
+ srv.backend.SetAuthorized(authorized)
// debug tracing is enabled by default
- stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
+ stack.RegisterAPIs(tracers.APIs(srv.backend.APIBackend))
+ srv.tracerAPI = tracers.NewAPI(srv.backend.APIBackend)
// graphql is started from another place
if config.JsonRPC.Graphql.Enabled {
- if err := graphql.New(stack, backend.APIBackend, config.JsonRPC.Cors, config.JsonRPC.VHost); err != nil {
+ if err := graphql.New(stack, srv.backend.APIBackend, config.JsonRPC.Graphql.Cors, config.JsonRPC.Graphql.VHost); err != nil {
return nil, fmt.Errorf("failed to register the GraphQL service: %v", err)
}
}
// register ethash service
if config.Ethstats != "" {
- if err := ethstats.New(stack, backend.APIBackend, backend.Engine(), config.Ethstats); err != nil {
+ if err := ethstats.New(stack, srv.backend.APIBackend, srv.backend.Engine(), config.Ethstats); err != nil {
return nil, err
}
}
// sealing (if enabled) or in dev mode
if config.Sealer.Enabled || config.Developer.Enabled {
- if err := backend.StartMining(1); err != nil {
+ if err := srv.backend.StartMining(1); err != nil {
return nil, err
}
}
- if err := srv.setupMetrics(config.Telemetry, config.Name); err != nil {
+ if err := srv.setupMetrics(config.Telemetry, config.Identity); err != nil {
return nil, err
}
+ // Set the node instance
+ srv.node = stack
+
// start the node
if err := srv.node.Start(); err != nil {
return nil, err
}
+
return srv, nil
}
func (s *Server) Stop() {
s.node.Close()
+ s.grpcServer.Stop()
// shutdown the tracer
if s.tracer != nil {
@@ -138,6 +233,18 @@ func (s *Server) Stop() {
}
func (s *Server) setupMetrics(config *TelemetryConfig, serviceName string) error {
+ // Check the global metrics if they're matching with the provided config
+ if metrics.Enabled != config.Enabled || metrics.EnabledExpensive != config.Expensive {
+ log.Warn(
+ "Metric misconfiguration, some of them might not be visible",
+ "metrics", metrics.Enabled,
+ "config.metrics", config.Enabled,
+ "expensive", metrics.EnabledExpensive,
+ "config.expensive", config.Expensive,
+ )
+ }
+
+ // Update the values anyway (for services which don't need immediate attention)
metrics.Enabled = config.Enabled
metrics.EnabledExpensive = config.Expensive
@@ -148,6 +255,10 @@ func (s *Server) setupMetrics(config *TelemetryConfig, serviceName string) error
log.Info("Enabling metrics collection")
+ if metrics.EnabledExpensive {
+ log.Info("Enabling expensive metrics collection")
+ }
+
// influxdb
if v1Enabled, v2Enabled := config.InfluxDB.V1Enabled, config.InfluxDB.V2Enabled; v1Enabled || v2Enabled {
if v1Enabled && v2Enabled {
@@ -160,10 +271,12 @@ func (s *Server) setupMetrics(config *TelemetryConfig, serviceName string) error
if v1Enabled {
log.Info("Enabling metrics export to InfluxDB (v1)")
+
go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, cfg.Database, cfg.Username, cfg.Password, "geth.", tags)
}
if v2Enabled {
log.Info("Enabling metrics export to InfluxDB (v2)")
+
go influxdb.InfluxDBV2WithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, cfg.Token, cfg.Bucket, cfg.Organization, "geth.", tags)
}
}
@@ -175,9 +288,7 @@ func (s *Server) setupMetrics(config *TelemetryConfig, serviceName string) error
prometheusMux := http.NewServeMux()
- prometheusMux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
- prometheus.Handler(metrics.DefaultRegistry)
- })
+ prometheusMux.Handle("/debug/metrics/prometheus", prometheus.Handler(metrics.DefaultRegistry))
promServer := &http.Server{
Addr: config.PrometheusAddr,
@@ -190,6 +301,8 @@ func (s *Server) setupMetrics(config *TelemetryConfig, serviceName string) error
}
}()
+ log.Info("Enabling metrics export to prometheus", "path", fmt.Sprintf("http://%s/debug/metrics/prometheus", config.PrometheusAddr))
+
}
if config.OpenCollectorEndpoint != "" {
@@ -231,6 +344,8 @@ func (s *Server) setupMetrics(config *TelemetryConfig, serviceName string) error
// set the tracer
s.tracer = tracerProvider
+
+ log.Info("Open collector tracing started", "address", config.OpenCollectorEndpoint)
}
return nil
@@ -252,6 +367,7 @@ func (s *Server) setupGRPCServer(addr string) error {
}()
log.Info("GRPC Server started", "addr", addr)
+
return nil
}
@@ -262,16 +378,20 @@ func (s *Server) withLoggingUnaryInterceptor() grpc.ServerOption {
func (s *Server) loggingServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
start := time.Now()
h, err := handler(ctx, req)
+
log.Trace("Request", "method", info.FullMethod, "duration", time.Since(start), "error", err)
+
return h, err
}
func setupLogger(logLevel string) {
output := io.Writer(os.Stderr)
+
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if usecolor {
output = colorable.NewColorableStderr()
}
+
ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
glogger := log.NewGlogHandler(ostream)
@@ -282,5 +402,14 @@ func setupLogger(logLevel string) {
} else {
glogger.Verbosity(log.LvlInfo)
}
+
log.Root().SetHandler(glogger)
}
+
+func (s *Server) GetLatestBlockNumber() *big.Int {
+ return s.backend.BlockChain().CurrentBlock().Number()
+}
+
+func (s *Server) GetGrpcAddr() string {
+ return s.config.GRPC.Addr[1:]
+}
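GetGrpcAddr returns s.config.GRPC.Addr[1:], which quietly assumes the gRPC address is configured in the ":port" form; slicing off the first byte then yields just the port. A tiny illustration of that assumption (the ":3131" value is a placeholder, not a default taken from this diff):

package main

import "fmt"

func main() {
	// GetGrpcAddr drops only the leading character, so it expects ":port".
	addr := ":3131" // placeholder; the real value comes from config.GRPC.Addr
	fmt.Println(addr[1:]) // prints "3131"
}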
diff --git a/internal/cli/server/server_test.go b/internal/cli/server/server_test.go
index 070739c687..c88c656b10 100644
--- a/internal/cli/server/server_test.go
+++ b/internal/cli/server/server_test.go
@@ -1,13 +1,37 @@
package server
import (
+ "fmt"
+ "net"
+ "sync/atomic"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/log"
+
"github.com/stretchr/testify/assert"
)
+var initialPort uint64 = 61000
+
+// nextPort gives the next available port starting from 61000
+func nextPort() uint64 {
+ log.Info("Checking for new port", "current", initialPort)
+ port := atomic.AddUint64(&initialPort, 1)
+ addr := fmt.Sprintf("localhost:%d", port)
+
+ lis, err := net.Listen("tcp", addr)
+ if err == nil {
+ lis.Close()
+
+ return port
+ } else {
+ return nextPort()
+ }
+}
+
func TestServer_DeveloperMode(t *testing.T) {
+ t.Parallel()
// get the default config
config := DefaultConfig()
@@ -16,17 +40,17 @@ func TestServer_DeveloperMode(t *testing.T) {
config.Developer.Enabled = true
config.Developer.Period = 2 // block time
- // start the server
- server, err1 := NewServer(config)
- if err1 != nil {
- t.Fatalf("failed to start server: %v", err1)
- }
+ // start the mock server
+ server, err := CreateMockServer(config)
+ assert.NoError(t, err)
+
+ defer CloseMockServer(server)
// record the initial block number
blockNumber := server.backend.BlockChain().CurrentBlock().Header().Number.Int64()
var i int64 = 0
- for i = 0; i < 10; i++ {
+ for i = 0; i < 3; i++ {
// We expect the node to mine blocks every `config.Developer.Period` time period
time.Sleep(time.Duration(config.Developer.Period) * time.Second)
currBlock := server.backend.BlockChain().CurrentBlock().Header().Number.Int64()
@@ -35,7 +59,4 @@ func TestServer_DeveloperMode(t *testing.T) {
break
}
}
-
- // stop the server
- server.Stop()
}
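nextPort probes increasing localhost ports until net.Listen succeeds and then releases the listener, so callers should expect a small window in which the port could be re-taken. A hedged sketch of how a test in this package might reuse it for a scratch listener (listenOnFreePort is an illustrative name; CreateMockServer's own port handling is not shown in this excerpt):

package server

import (
	"fmt"
	"net"
	"testing"
)

// listenOnFreePort grabs a port via nextPort and binds to it immediately,
// failing the test if the port was taken between probe and bind.
func listenOnFreePort(t *testing.T) net.Listener {
	addr := fmt.Sprintf("localhost:%d", nextPort())

	lis, err := net.Listen("tcp", addr)
	if err != nil {
		t.Fatalf("port %s was taken between probe and bind: %v", addr, err)
	}

	return lis
}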
diff --git a/internal/cli/server/service.go b/internal/cli/server/service.go
index e8e3928581..de240d5461 100644
--- a/internal/cli/server/service.go
+++ b/internal/cli/server/service.go
@@ -2,46 +2,34 @@ package server
import (
"context"
+ "encoding/json"
"fmt"
"math/big"
"reflect"
"strings"
+ gproto "github.com/golang/protobuf/proto" //nolint:staticcheck,typecheck
+ "github.com/golang/protobuf/ptypes/empty"
+ grpc_net_conn "github.com/mitchellh/go-grpc-net-conn"
+
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/cli/server/pprof"
"github.com/ethereum/go-ethereum/internal/cli/server/proto"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
- gproto "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/empty"
- grpc_net_conn "github.com/mitchellh/go-grpc-net-conn"
)
-func (s *Server) Pprof(req *proto.PprofRequest, stream proto.Bor_PprofServer) error {
- var payload []byte
- var headers map[string]string
- var err error
-
- ctx := context.Background()
- switch req.Type {
- case proto.PprofRequest_CPU:
- payload, headers, err = pprof.CPUProfile(ctx, int(req.Seconds))
- case proto.PprofRequest_TRACE:
- payload, headers, err = pprof.Trace(ctx, int(req.Seconds))
- case proto.PprofRequest_LOOKUP:
- payload, headers, err = pprof.Profile(req.Profile, 0, 0)
- }
- if err != nil {
- return err
- }
+const chunkSize = 1024 * 1024 * 1024
+func sendStreamDebugFile(stream proto.Bor_DebugPprofServer, headers map[string]string, data []byte) error {
// open the stream and send the headers
- err = stream.Send(&proto.PprofResponse{
- Event: &proto.PprofResponse_Open_{
- Open: &proto.PprofResponse_Open{
+ err := stream.Send(&proto.DebugFileResponse{
+ Event: &proto.DebugFileResponse_Open_{
+ Open: &proto.DebugFileResponse_Open{
Headers: headers,
- Size: int64(len(payload)),
},
},
})
@@ -50,24 +38,57 @@ func (s *Server) Pprof(req *proto.PprofRequest, stream proto.Bor_PprofServer) er
}
// Wrap our conn around the response.
+ encoder := grpc_net_conn.SimpleEncoder(func(msg gproto.Message) *[]byte {
+ return &msg.(*proto.DebugFileResponse_Input).Data
+ })
conn := &grpc_net_conn.Conn{
Stream: stream,
- Request: &proto.PprofResponse_Input{},
- Encode: grpc_net_conn.SimpleEncoder(func(msg gproto.Message) *[]byte {
- return &msg.(*proto.PprofResponse_Input).Data
- }),
+ Request: &proto.DebugFileResponse_Input{},
+ Encode: grpc_net_conn.ChunkedEncoder(encoder, chunkSize),
}
- if _, err := conn.Write(payload); err != nil {
+
+ if _, err := conn.Write(data); err != nil {
return err
}
// send the eof
- err = stream.Send(&proto.PprofResponse{
- Event: &proto.PprofResponse_Eof{},
+ err = stream.Send(&proto.DebugFileResponse{
+ Event: &proto.DebugFileResponse_Eof{},
})
if err != nil {
return err
}
+
+ return nil
+}
+
+func (s *Server) DebugPprof(req *proto.DebugPprofRequest, stream proto.Bor_DebugPprofServer) error {
+ var (
+ payload []byte
+ headers map[string]string
+ err error
+ )
+
+ ctx := context.Background()
+
+ switch req.Type {
+ case proto.DebugPprofRequest_CPU:
+ payload, headers, err = pprof.CPUProfile(ctx, int(req.Seconds))
+ case proto.DebugPprofRequest_TRACE:
+ payload, headers, err = pprof.Trace(ctx, int(req.Seconds))
+ case proto.DebugPprofRequest_LOOKUP:
+ payload, headers, err = pprof.Profile(req.Profile, 0, 0)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ // send the file on a grpc stream
+ if err := sendStreamDebugFile(stream, headers, payload); err != nil {
+ return err
+ }
+
return nil
}
@@ -169,6 +190,34 @@ func headerToProtoHeader(h *types.Header) *proto.Header {
}
}
+func (s *Server) DebugBlock(req *proto.DebugBlockRequest, stream proto.Bor_DebugBlockServer) error {
+ traceReq := &tracers.TraceBlockRequest{
+ Number: req.Number,
+ Config: &tracers.TraceConfig{
+ Config: &logger.Config{
+ EnableMemory: true,
+ },
+ },
+ }
+
+ res, err := s.tracerAPI.TraceBorBlock(traceReq)
+ if err != nil {
+ return err
+ }
+
+ // this is memory heavy
+ data, err := json.Marshal(res)
+ if err != nil {
+ return err
+ }
+
+ if err := sendStreamDebugFile(stream, map[string]string{}, data); err != nil {
+ return err
+ }
+
+ return nil
+}
+
var bigIntT = reflect.TypeOf(new(big.Int)).Kind()
// gatherForks gathers all the fork numbers via reflection
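sendStreamDebugFile frames every debug payload the same way: one Open event carrying the headers, a run of Input chunks produced by the chunked encoder (with chunkSize set to 1 GiB here, most payloads arrive as a single Input message), and a closing Eof event. A client draining either stream can therefore switch on the oneof wrappers from the generated code; a minimal sketch, noting that the Input wrapper's field name is assumed to follow the same protoc convention as the Open wrapper used above:

package debugclient

import (
	"github.com/ethereum/go-ethereum/internal/cli/server/proto"
)

// debugFileStream is satisfied by both Bor_DebugPprofClient and
// Bor_DebugBlockClient, since both receive DebugFileResponse messages.
type debugFileStream interface {
	Recv() (*proto.DebugFileResponse, error)
}

// readDebugStream records the headers from the Open event, appends every
// Input chunk in order and returns once the Eof event arrives.
func readDebugStream(stream debugFileStream) (map[string]string, []byte, error) {
	var (
		headers map[string]string
		data    []byte
	)

	for {
		resp, err := stream.Recv()
		if err != nil {
			return nil, nil, err
		}

		switch ev := resp.Event.(type) {
		case *proto.DebugFileResponse_Open_:
			headers = ev.Open.Headers
		case *proto.DebugFileResponse_Input_:
			data = append(data, ev.Input.Data...)
		case *proto.DebugFileResponse_Eof:
			return headers, data, nil
		}
	}
}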
diff --git a/internal/cli/server/service_test.go b/internal/cli/server/service_test.go
index 7850525686..86cf68e75e 100644
--- a/internal/cli/server/service_test.go
+++ b/internal/cli/server/service_test.go
@@ -4,8 +4,9 @@ import (
"math/big"
"testing"
- "github.com/ethereum/go-ethereum/internal/cli/server/proto"
"github.com/stretchr/testify/assert"
+
+ "github.com/ethereum/go-ethereum/internal/cli/server/proto"
)
func TestGatherBlocks(t *testing.T) {
diff --git a/internal/cli/server/testdata/password.txt b/internal/cli/server/testdata/password.txt
new file mode 100644
index 0000000000..1827ffa521
--- /dev/null
+++ b/internal/cli/server/testdata/password.txt
@@ -0,0 +1,2 @@
+test1
+test2
\ No newline at end of file
diff --git a/internal/cli/server/testdata/simple.hcl b/internal/cli/server/testdata/simple.hcl
deleted file mode 100644
index e276960e6d..0000000000
--- a/internal/cli/server/testdata/simple.hcl
+++ /dev/null
@@ -1,17 +0,0 @@
-data-dir = "./data"
-
-whitelist = {
- a = "b"
-}
-
-p2p {
- max-peers = 30
-}
-
-txpool {
- lifetime = "1s"
-}
-
-gpo {
- max-price = "100"
-}
diff --git a/internal/cli/server/testdata/simple.json b/internal/cli/server/testdata/simple.json
deleted file mode 100644
index 277f05d105..0000000000
--- a/internal/cli/server/testdata/simple.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "data-dir": "./data",
- "whitelist": {
- "a": "b"
- },
- "p2p": {
- "max-peers": 30
- },
- "txpool": {
- "lifetime": "1s"
- },
- "gpo": {
- "max-price": "100"
- }
-}
\ No newline at end of file
diff --git a/internal/cli/server/testdata/test.toml b/internal/cli/server/testdata/test.toml
new file mode 100644
index 0000000000..4ccc644ee9
--- /dev/null
+++ b/internal/cli/server/testdata/test.toml
@@ -0,0 +1,25 @@
+datadir = "./data"
+snapshot = false
+
+["eth.requiredblocks"]
+"31000000" = "0x2087b9e2b353209c2c21e370c82daa12278efd0fe5f0febe6c29035352cf050e"
+"32000000" = "0x875500011e5eecc0c554f95d07b31cf59df4ca2505f4dbbfffa7d4e4da917c68"
+
+[p2p]
+maxpeers = 30
+
+[txpool]
+locals = []
+lifetime = "1s"
+
+[miner]
+mine = true
+gaslimit = 30000000
+gasprice = "1000000000"
+
+[gpo]
+ignoreprice = "4"
+
+[cache]
+cache = 1024
+rejournal = "1s"
diff --git a/internal/cli/status.go b/internal/cli/status.go
index ef0a42172b..2a8b7d7470 100644
--- a/internal/cli/status.go
+++ b/internal/cli/status.go
@@ -6,6 +6,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/internal/cli/server/proto"
+
"github.com/golang/protobuf/ptypes/empty"
)
@@ -14,6 +15,16 @@ type StatusCommand struct {
*Meta2
}
+// MarkDown implements cli.MarkDown interface
+func (p *StatusCommand) MarkDown() string {
+ items := []string{
+ "# Status",
+ "The ```status``` command outputs the status of the client.",
+ }
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (p *StatusCommand) Help() string {
return `Usage: bor status
@@ -47,6 +58,7 @@ func (c *StatusCommand) Run(args []string) int {
}
c.UI.Output(printStatus(status))
+
return 0
}
@@ -60,6 +72,7 @@ func printStatus(status *proto.StatusResponse) string {
forks := make([]string, len(status.Forks)+1)
forks[0] = "Name|Block|Enabled"
+
for i, d := range status.Forks {
forks[i+1] = fmt.Sprintf("%s|%d|%v", d.Name, d.Block, !d.Disabled)
}
@@ -83,5 +96,6 @@ func printStatus(status *proto.StatusResponse) string {
"\nForks",
formatList(forks),
}
+
return strings.Join(full, "\n")
}
diff --git a/internal/cli/version.go b/internal/cli/version.go
index 7433f208b2..cd155f43a7 100644
--- a/internal/cli/version.go
+++ b/internal/cli/version.go
@@ -1,7 +1,10 @@
package cli
import (
+ "strings"
+
"github.com/ethereum/go-ethereum/params"
+
"github.com/mitchellh/cli"
)
@@ -10,6 +13,25 @@ type VersionCommand struct {
UI cli.Ui
}
+// MarkDown implements cli.MarkDown interface
+func (d *VersionCommand) MarkDown() string {
+ examples := []string{
+ "## Usage",
+ CodeBlock([]string{
+ "$ bor version",
+ "0.2.9-stable",
+ }),
+ }
+
+ items := []string{
+ "# Version",
+ "The ```bor version``` command outputs the version of the binary.",
+ }
+ items = append(items, examples...)
+
+ return strings.Join(items, "\n\n")
+}
+
// Help implements the cli.Command interface
func (c *VersionCommand) Help() string {
return `Usage: bor version
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 15536d5d72..59b6feba52 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -2156,6 +2156,17 @@ func (api *PrivateDebugAPI) SetHead(number hexutil.Uint64) {
api.b.SetHead(uint64(number))
}
+// GetCheckpointWhitelist retrieves the current checkpoint whitelist
+// entries (of the form block number -> block hash)
+func (api *PrivateDebugAPI) GetCheckpointWhitelist() map[uint64]common.Hash {
+ return api.b.GetCheckpointWhitelist()
+}
+
+// PurgeCheckpointWhitelist purges the current checkpoint whitelist entries
+func (api *PrivateDebugAPI) PurgeCheckpointWhitelist() {
+ api.b.PurgeCheckpointWhitelist()
+}
+
// PublicNetAPI offers network related RPC methods
type PublicNetAPI struct {
net *p2p.Server
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 3251879870..f69f596faa 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -100,6 +100,8 @@ type Backend interface {
GetBorBlockTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error)
GetBorBlockTransactionWithBlockHash(ctx context.Context, txHash common.Hash, blockHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error)
SubscribeChain2HeadEvent(ch chan<- core.Chain2HeadEvent) event.Subscription
+ GetCheckpointWhitelist() map[uint64]common.Hash
+ PurgeCheckpointWhitelist()
ChainConfig() *params.ChainConfig
Engine() consensus.Engine
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index 9c5950af58..aa2596fe81 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -59,6 +59,7 @@ func (args *TransactionArgs) from() common.Address {
if args.From == nil {
return common.Address{}
}
+
return *args.From
}
@@ -67,9 +68,11 @@ func (args *TransactionArgs) data() []byte {
if args.Input != nil {
return *args.Input
}
+
if args.Data != nil {
return *args.Data
}
+
return nil
}
@@ -78,8 +81,10 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) {
return errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
+
// After london, default to 1559 unless gasPrice is set
head := b.CurrentHeader()
+
// If user specifies both maxPriorityfee and maxFee, then we do not
// need to consult the chain for defaults. It's definitely a London tx.
if args.MaxPriorityFeePerGas == nil || args.MaxFeePerGas == nil {
@@ -90,6 +95,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
if err != nil {
return err
}
+
args.MaxPriorityFeePerGas = (*hexutil.Big)(tip)
}
if args.MaxFeePerGas == nil {
@@ -97,8 +103,10 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
(*big.Int)(args.MaxPriorityFeePerGas),
new(big.Int).Mul(head.BaseFee, big.NewInt(2)),
)
+
args.MaxFeePerGas = (*hexutil.Big)(gasFeeCap)
}
+
if args.MaxFeePerGas.ToInt().Cmp(args.MaxPriorityFeePerGas.ToInt()) < 0 {
return fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", args.MaxFeePerGas, args.MaxPriorityFeePerGas)
}
@@ -106,17 +114,20 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
if args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil {
return errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet")
}
+
if args.GasPrice == nil {
price, err := b.SuggestGasTipCap(ctx)
if err != nil {
return err
}
+
if b.ChainConfig().IsLondon(head.Number) {
// The legacy tx gas price suggestion should not add 2x base fee
// because all fees are consumed, so it would result in a spiral
// upwards.
price.Add(price, head.BaseFee)
}
+
args.GasPrice = (*hexutil.Big)(price)
}
}
@@ -126,22 +137,28 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
return fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", args.MaxFeePerGas, args.MaxPriorityFeePerGas)
}
}
+
if args.Value == nil {
args.Value = new(hexutil.Big)
}
+
if args.Nonce == nil {
nonce, err := b.GetPoolNonce(ctx, args.from())
if err != nil {
return err
}
+
args.Nonce = (*hexutil.Uint64)(&nonce)
}
+
if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) {
return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`)
}
+
if args.To == nil && len(args.data()) == 0 {
return errors.New(`contract creation without any data provided`)
}
+
// Estimate the gas usage if necessary.
if args.Gas == nil {
// These fields are immutable during the estimation, safe to
@@ -157,18 +174,23 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
Data: (*hexutil.Bytes)(&data),
AccessList: args.AccessList,
}
+
pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
+
estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap())
if err != nil {
return err
}
+
args.Gas = &estimated
log.Trace("Estimate gas usage automatically", "gas", args.Gas)
}
+
if args.ChainID == nil {
id := (*hexutil.Big)(b.ChainConfig().ChainID)
args.ChainID = id
}
+
return nil
}
@@ -180,32 +202,41 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (t
if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) {
return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
+
// Set sender address or use zero address if none specified.
addr := args.from()
+ // Gas set for system calls
+ systemCallGas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
+
// Set default gas & gas price if none were set
gas := globalGasCap
if gas == 0 {
gas = uint64(math.MaxUint64 / 2)
}
- if args.Gas != nil {
+
+ if args.Gas != nil && *args.Gas != systemCallGas {
gas = uint64(*args.Gas)
}
+
if globalGasCap != 0 && globalGasCap < gas {
log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
gas = globalGasCap
}
+
var (
gasPrice *big.Int
gasFeeCap *big.Int
gasTipCap *big.Int
)
+
if baseFee == nil {
// If there's no basefee, then it must be a non-1559 execution
gasPrice = new(big.Int)
if args.GasPrice != nil {
gasPrice = args.GasPrice.ToInt()
}
+
gasFeeCap, gasTipCap = gasPrice, gasPrice
} else {
// A basefee is provided, necessitating 1559-type execution
@@ -216,30 +247,40 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (t
} else {
// User specified 1559 gas feilds (or none), use those
gasFeeCap = new(big.Int)
+
if args.MaxFeePerGas != nil {
gasFeeCap = args.MaxFeePerGas.ToInt()
}
+
gasTipCap = new(big.Int)
+
if args.MaxPriorityFeePerGas != nil {
gasTipCap = args.MaxPriorityFeePerGas.ToInt()
}
+
// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
gasPrice = new(big.Int)
+
if gasFeeCap.BitLen() > 0 || gasTipCap.BitLen() > 0 {
gasPrice = math.BigMin(new(big.Int).Add(gasTipCap, baseFee), gasFeeCap)
}
}
}
+
value := new(big.Int)
if args.Value != nil {
value = args.Value.ToInt()
}
+
data := args.data()
+
var accessList types.AccessList
if args.AccessList != nil {
accessList = *args.AccessList
}
+
msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, true)
+
return msg, nil
}
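The new guard in ToMessage skips a caller-supplied gas value when it equals exactly math.MaxUint64/2, the same sentinel the function uses for uncapped system calls, so such requests fall back to the global gas cap (or the default) instead of being treated as an explicit limit. A standalone worked illustration of that branch (the 50M cap is a placeholder, not a value from this diff):

package main

import (
	"fmt"
	"math"
)

func main() {
	systemCallGas := uint64(math.MaxUint64 / 2) // 9223372036854775807

	requested := systemCallGas         // a system call forwarding the sentinel
	globalGasCap := uint64(50_000_000) // placeholder cap

	gas := globalGasCap
	if gas == 0 {
		gas = uint64(math.MaxUint64 / 2)
	}

	// Mirrors the changed condition: an explicit gas value is honoured only
	// when it differs from the system-call sentinel.
	if requested != systemCallGas {
		gas = requested
	}

	fmt.Println(gas) // 50000000: the sentinel is ignored and the cap applies
}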
@@ -247,6 +288,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (t
// This assumes that setDefaults has been called.
func (args *TransactionArgs) toTransaction() *types.Transaction {
var data types.TxData
+
switch {
case args.MaxFeePerGas != nil:
al := types.AccessList{}
@@ -285,6 +327,7 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
Data: args.data(),
}
}
+
return types.NewTx(data)
}
diff --git a/internal/web3ext/bor_ext.go b/internal/web3ext/bor_ext.go
index b7fe9b4886..fe8b01da19 100644
--- a/internal/web3ext/bor_ext.go
+++ b/internal/web3ext/bor_ext.go
@@ -17,6 +17,18 @@ web3._extend({
params: 1,
inputFormatter: [null]
}),
+ new web3._extend.Method({
+ name: 'getSnapshotProposer',
+ call: 'bor_getSnapshotProposer',
+ params: 1,
+ inputFormatter: [null]
+ }),
+ new web3._extend.Method({
+ name: 'getSnapshotProposerSequence',
+ call: 'bor_getSnapshotProposerSequence',
+ params: 1,
+ inputFormatter: [null]
+ }),
new web3._extend.Method({
name: 'getSnapshotAtHash',
call: 'bor_getSnapshotAtHash',
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index dd8b34e025..dcdd5baf23 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -474,6 +474,16 @@ web3._extend({
params: 2,
inputFormatter:[web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter],
}),
+ new web3._extend.Method({
+ name: 'getCheckpointWhitelist',
+ call: 'debug_getCheckpointWhitelist',
+ params: 0,
+ }),
+ new web3._extend.Method({
+ name: 'purgeCheckpointWhitelist',
+ call: 'debug_purgeCheckpointWhitelist',
+ params: 0,
+ }),
],
properties: []
});
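The console extensions above register debug_getCheckpointWhitelist and debug_purgeCheckpointWhitelist (alongside the bor_getSnapshotProposer and bor_getSnapshotProposerSequence methods added in bor_ext.go). Beyond the JS console, the same endpoints can be hit from Go with the rpc client; a hedged sketch where the HTTP endpoint URL is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; point this at the node's HTTP-RPC or IPC path.
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// debug_getCheckpointWhitelist returns block number -> block hash entries.
	var whitelist map[uint64]common.Hash
	if err := client.Call(&whitelist, "debug_getCheckpointWhitelist"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(whitelist)

	// debug_purgeCheckpointWhitelist clears the tracked entries.
	if err := client.Call(nil, "debug_purgeCheckpointWhitelist"); err != nil {
		log.Fatal(err)
	}
}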
diff --git a/les/api_backend.go b/les/api_backend.go
index e9e51c4ecc..c716a3967f 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -337,17 +337,24 @@ func (b *LesApiBackend) StateAtTransaction(ctx context.Context, block *types.Blo
//
func (b *LesApiBackend) GetBorBlockReceipt(ctx context.Context, hash common.Hash) (*types.Receipt, error) {
- return nil, errors.New("Not implemented")
+ return nil, errors.New("not implemented")
}
func (b *LesApiBackend) GetBorBlockLogs(ctx context.Context, hash common.Hash) ([]*types.Log, error) {
- return nil, errors.New("Not implemented")
+ return nil, errors.New("not implemented")
}
func (b *LesApiBackend) GetBorBlockTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
- return nil, common.Hash{}, 0, 0, errors.New("Not implemented")
+ return nil, common.Hash{}, 0, 0, errors.New("not implemented")
}
func (b *LesApiBackend) GetBorBlockTransactionWithBlockHash(ctx context.Context, txHash common.Hash, blockHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
- return nil, common.Hash{}, 0, 0, errors.New("Not implemented")
+ return nil, common.Hash{}, 0, 0, errors.New("not implemented")
+}
+
+func (b *LesApiBackend) GetCheckpointWhitelist() map[uint64]common.Hash {
+ return nil
+}
+
+func (b *LesApiBackend) PurgeCheckpointWhitelist() {
}
diff --git a/metrics/metrics.go b/metrics/metrics.go
index 747d6471a7..a22f99feaa 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -11,7 +11,7 @@ import (
"strings"
"time"
- "github.com/ethereum/go-ethereum/log"
+ "github.com/BurntSushi/toml"
)
// Enabled is checked by the constructor functions for all of the
@@ -32,26 +32,75 @@ var enablerFlags = []string{"metrics"}
// expensiveEnablerFlags is the CLI flag names to use to enable expensive metrics collections.
var expensiveEnablerFlags = []string{"metrics.expensive"}
+// configFlag is the CLI flag name used to start the node with a TOML-based config file
+var configFlag = "config"
+
// Init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
// and peek into the command line args for the metrics flag.
func init() {
- for _, arg := range os.Args {
+ var configFile string
+
+ for i := 0; i < len(os.Args); i++ {
+ arg := os.Args[i]
+
flag := strings.TrimLeft(arg, "-")
+ // check for existence of `config` flag
+ if flag == configFlag && i < len(os.Args)-1 {
+ configFile = strings.TrimLeft(os.Args[i+1], "-") // find the value of flag
+ } else if len(flag) > 6 && flag[:6] == configFlag {
+ // Checks for `=` separated flag (e.g. config=path)
+ configFile = strings.TrimLeft(flag[6:], "=")
+ }
+
for _, enabler := range enablerFlags {
if !Enabled && flag == enabler {
- log.Info("Enabling metrics collection")
Enabled = true
}
}
+
for _, enabler := range expensiveEnablerFlags {
if !EnabledExpensive && flag == enabler {
- log.Info("Enabling expensive metrics collection")
EnabledExpensive = true
}
}
}
+
+ // Update the global metrics value, if they're provided in the config file
+ updateMetricsFromConfig(configFile)
+}
+
+func updateMetricsFromConfig(path string) {
+ // Don't act upon any errors here. They're already taken into
+ // consideration when the toml config file is parsed by the CLI.
+ data, err := os.ReadFile(path)
+ tomlData := string(data)
+
+ if err != nil {
+ return
+ }
+
+ // Create a minimal config to decode
+ type TelemetryConfig struct {
+ Enabled bool `hcl:"metrics,optional" toml:"metrics,optional"`
+ Expensive bool `hcl:"expensive,optional" toml:"expensive,optional"`
+ }
+
+ type CliConfig struct {
+ Telemetry *TelemetryConfig `hcl:"telemetry,block" toml:"telemetry,block"`
+ }
+
+ conf := &CliConfig{}
+
+ _, err = toml.Decode(tomlData, &conf)
+ if err != nil || conf == nil || conf.Telemetry == nil {
+ return
+ }
+
+ // We have the values now, update them
+ Enabled = conf.Telemetry.Enabled
+ EnabledExpensive = conf.Telemetry.Expensive
}
// CollectProcessMetrics periodically collects various metrics about the running
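The decode in updateMetricsFromConfig only looks at the [telemetry] block of the node's TOML config. A self-contained sketch of that path, using the same BurntSushi/toml import and hypothetical config contents:

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

func main() {
    // Hypothetical config file contents; only the [telemetry] block matters here.
    data := `
[telemetry]
  metrics = true
  expensive = false
`

    type telemetryConfig struct {
        Enabled   bool `toml:"metrics"`
        Expensive bool `toml:"expensive"`
    }

    type cliConfig struct {
        Telemetry *telemetryConfig `toml:"telemetry"`
    }

    var conf cliConfig
    if _, err := toml.Decode(data, &conf); err != nil || conf.Telemetry == nil {
        // Errors are ignored here too; the real CLI reports them when it parses the file.
        return
    }

    fmt.Println(conf.Telemetry.Enabled, conf.Telemetry.Expensive) // true false
}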
diff --git a/metrics/sample_test.go b/metrics/sample_test.go
index c9168d3e82..d675862e84 100644
--- a/metrics/sample_test.go
+++ b/metrics/sample_test.go
@@ -244,6 +244,7 @@ func TestUniformSampleIncludesTail(t *testing.T) {
}
func TestUniformSampleSnapshot(t *testing.T) {
+ rand.Seed(1)
s := NewUniformSample(100)
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
@@ -339,6 +340,9 @@ func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
+
+ rand.Seed(1)
+
s := NewUniformSample(100)
for i := 0; i < 100; i++ {
s.Update(int64(i))
diff --git a/miner/fake_miner.go b/miner/fake_miner.go
new file mode 100644
index 0000000000..cbe0d9f54c
--- /dev/null
+++ b/miner/fake_miner.go
@@ -0,0 +1,233 @@
+package miner
+
+import (
+ "errors"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/golang/mock/gomock"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/bor"
+ "github.com/ethereum/go-ethereum/consensus/bor/api"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/tests/bor/mocks"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+type DefaultBorMiner struct {
+ Miner *Miner
+ Mux *event.TypeMux //nolint:staticcheck
+ Cleanup func(skipMiner bool)
+
+ Ctrl *gomock.Controller
+ EthAPIMock api.Caller
+ HeimdallClientMock bor.IHeimdallClient
+ ContractMock bor.GenesisContract
+}
+
+func NewBorDefaultMiner(t *testing.T) *DefaultBorMiner {
+ t.Helper()
+
+ ctrl := gomock.NewController(t)
+
+ ethAPI := api.NewMockCaller(ctrl)
+ ethAPI.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+
+ spanner := bor.NewMockSpanner(ctrl)
+ spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any()).Return([]*valset.Validator{
+ {
+ ID: 0,
+ Address: common.Address{0x1},
+ VotingPower: 100,
+ ProposerPriority: 0,
+ },
+ }, nil).AnyTimes()
+
+ heimdallClient := mocks.NewMockIHeimdallClient(ctrl)
+ heimdallClient.EXPECT().Close().Times(1)
+
+ genesisContracts := bor.NewMockGenesisContract(ctrl)
+
+ miner, mux, cleanup := createBorMiner(t, ethAPI, spanner, heimdallClient, genesisContracts)
+
+ return &DefaultBorMiner{
+ Miner: miner,
+ Mux: mux,
+ Cleanup: cleanup,
+ Ctrl: ctrl,
+ EthAPIMock: ethAPI,
+ HeimdallClientMock: heimdallClient,
+ ContractMock: genesisContracts,
+ }
+}
+
+//nolint:staticcheck
+func createBorMiner(t *testing.T, ethAPIMock api.Caller, spanner bor.Spanner, heimdallClientMock bor.IHeimdallClient, contractMock bor.GenesisContract) (*Miner, *event.TypeMux, func(skipMiner bool)) {
+ t.Helper()
+
+ // Create Ethash config
+ chainDB, _, chainConfig := NewDBForFakes(t)
+
+ engine := NewFakeBor(t, chainDB, chainConfig, ethAPIMock, spanner, heimdallClientMock, contractMock)
+
+ // Create Ethereum backend
+ bc, err := core.NewBlockChain(chainDB, nil, chainConfig, engine, vm.Config{}, nil, nil)
+ if err != nil {
+ t.Fatalf("can't create new chain %v", err)
+ }
+
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(chainDB), nil)
+ blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}
+
+ pool := core.NewTxPool(testTxPoolConfig, chainConfig, blockchain)
+ backend := NewMockBackend(bc, pool)
+
+ // Create event Mux
+ mux := new(event.TypeMux)
+
+ config := Config{
+ Etherbase: common.HexToAddress("123456789"),
+ }
+
+ // Create Miner
+ miner := New(backend, &config, chainConfig, mux, engine, nil)
+
+ cleanup := func(skipMiner bool) {
+ bc.Stop()
+ engine.Close()
+ pool.Stop()
+
+ if !skipMiner {
+ miner.Close()
+ }
+ }
+
+ return miner, mux, cleanup
+}
+
+func NewDBForFakes(t *testing.T) (ethdb.Database, *core.Genesis, *params.ChainConfig) {
+ t.Helper()
+
+ memdb := memorydb.New()
+ chainDB := rawdb.NewDatabase(memdb)
+ genesis := core.DeveloperGenesisBlock(2, 11_500_000, common.HexToAddress("12345"))
+
+ chainConfig, _, err := core.SetupGenesisBlock(chainDB, genesis)
+ if err != nil {
+ t.Fatalf("can't create new chain config: %v", err)
+ }
+
+ chainConfig.Bor.Period = map[string]uint64{
+ "0": 1,
+ }
+
+ return chainDB, genesis, chainConfig
+}
+
+func NewFakeBor(t *testing.T, chainDB ethdb.Database, chainConfig *params.ChainConfig, ethAPIMock api.Caller, spanner bor.Spanner, heimdallClientMock bor.IHeimdallClient, contractMock bor.GenesisContract) consensus.Engine {
+ t.Helper()
+
+ if chainConfig.Bor == nil {
+ chainConfig.Bor = params.BorUnittestChainConfig.Bor
+ }
+
+ return bor.New(chainConfig, chainDB, ethAPIMock, spanner, heimdallClientMock, contractMock)
+}
+
+type mockBackend struct {
+ bc *core.BlockChain
+ txPool *core.TxPool
+}
+
+func NewMockBackend(bc *core.BlockChain, txPool *core.TxPool) *mockBackend {
+ return &mockBackend{
+ bc: bc,
+ txPool: txPool,
+ }
+}
+
+func (m *mockBackend) BlockChain() *core.BlockChain {
+ return m.bc
+}
+
+func (m *mockBackend) TxPool() *core.TxPool {
+ return m.txPool
+}
+
+func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
+ return nil, errors.New("not supported")
+}
+
+type testBlockChain struct {
+ statedb *state.StateDB
+ gasLimit uint64
+ chainHeadFeed *event.Feed
+}
+
+func (bc *testBlockChain) CurrentBlock() *types.Block {
+ return types.NewBlock(&types.Header{
+ GasLimit: bc.gasLimit,
+ }, nil, nil, nil, trie.NewStackTrie(nil))
+}
+
+func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+ return bc.CurrentBlock()
+}
+
+func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
+ return bc.statedb, nil
+}
+
+func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
+ return bc.chainHeadFeed.Subscribe(ch)
+}
+
+var (
+ // Test chain configurations
+ testTxPoolConfig core.TxPoolConfig
+ ethashChainConfig *params.ChainConfig
+ cliqueChainConfig *params.ChainConfig
+
+ // Test accounts
+ testBankKey, _ = crypto.GenerateKey()
+ testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
+ testBankFunds = big.NewInt(1000000000000000000)
+
+ testUserKey, _ = crypto.GenerateKey()
+ testUserAddress = crypto.PubkeyToAddress(testUserKey.PublicKey)
+
+ // Test transactions
+ pendingTxs []*types.Transaction
+ newTxs []*types.Transaction
+
+ testConfig = &Config{
+ Recommit: time.Second,
+ GasCeil: params.GenesisGasLimit,
+ }
+)
+
+func init() {
+ testTxPoolConfig = core.DefaultTxPoolConfig
+ testTxPoolConfig.Journal = ""
+ ethashChainConfig = new(params.ChainConfig)
+ *ethashChainConfig = *params.TestChainConfig
+ cliqueChainConfig = new(params.ChainConfig)
+ *cliqueChainConfig = *params.TestChainConfig
+ cliqueChainConfig.Clique = &params.CliqueConfig{
+ Period: 10,
+ Epoch: 30000,
+ }
+}
diff --git a/miner/miner_test.go b/miner/miner_test.go
index 03c79cbbd5..76f61fd0d4 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -18,80 +18,32 @@
package miner
import (
- "errors"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/clique"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/trie"
)
-type mockBackend struct {
- bc *core.BlockChain
- txPool *core.TxPool
-}
-
-func NewMockBackend(bc *core.BlockChain, txPool *core.TxPool) *mockBackend {
- return &mockBackend{
- bc: bc,
- txPool: txPool,
- }
-}
-
-func (m *mockBackend) BlockChain() *core.BlockChain {
- return m.bc
-}
-
-func (m *mockBackend) TxPool() *core.TxPool {
- return m.txPool
-}
-
-func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
- return nil, errors.New("not supported")
-}
-
-type testBlockChain struct {
- statedb *state.StateDB
- gasLimit uint64
- chainHeadFeed *event.Feed
-}
-
-func (bc *testBlockChain) CurrentBlock() *types.Block {
- return types.NewBlock(&types.Header{
- GasLimit: bc.gasLimit,
- }, nil, nil, nil, trie.NewStackTrie(nil))
-}
-
-func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
- return bc.CurrentBlock()
-}
+func TestMiner(t *testing.T) {
+ t.Parallel()
-func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
- return bc.statedb, nil
-}
+ minerBor := NewBorDefaultMiner(t)
+ defer func() {
+ minerBor.Cleanup(false)
+ minerBor.Ctrl.Finish()
+ }()
-func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
- return bc.chainHeadFeed.Subscribe(ch)
-}
+ miner := minerBor.Miner
+ mux := minerBor.Mux
-func TestMiner(t *testing.T) {
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
+
// Start the downloader
mux.Post(downloader.StartEvent{})
waitForMiningState(t, miner, false)
+
// Stop the downloader and wait for the update loop to run
mux.Post(downloader.DoneEvent{})
waitForMiningState(t, miner, true)
@@ -113,10 +65,20 @@ func TestMiner(t *testing.T) {
// An initial FailedEvent should allow mining to stop on a subsequent
// downloader StartEvent.
func TestMinerDownloaderFirstFails(t *testing.T) {
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
+ t.Parallel()
+
+ minerBor := NewBorDefaultMiner(t)
+ defer func() {
+ minerBor.Cleanup(false)
+ minerBor.Ctrl.Finish()
+ }()
+
+ miner := minerBor.Miner
+ mux := minerBor.Mux
+
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
+
// Start the downloader
mux.Post(downloader.StartEvent{})
waitForMiningState(t, miner, false)
@@ -145,10 +107,20 @@ func TestMinerDownloaderFirstFails(t *testing.T) {
}
func TestMinerStartStopAfterDownloaderEvents(t *testing.T) {
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
+ t.Parallel()
+
+ minerBor := NewBorDefaultMiner(t)
+ defer func() {
+ minerBor.Cleanup(false)
+ minerBor.Ctrl.Finish()
+ }()
+
+ miner := minerBor.Miner
+ mux := minerBor.Mux
+
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
+
// Start the downloader
mux.Post(downloader.StartEvent{})
waitForMiningState(t, miner, false)
@@ -168,58 +140,104 @@ func TestMinerStartStopAfterDownloaderEvents(t *testing.T) {
}
func TestStartWhileDownload(t *testing.T) {
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
+ t.Parallel()
+
+ minerBor := NewBorDefaultMiner(t)
+ defer func() {
+ minerBor.Cleanup(false)
+ minerBor.Ctrl.Finish()
+ }()
+
+ miner := minerBor.Miner
+ mux := minerBor.Mux
+
waitForMiningState(t, miner, false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
+
// Start the downloader and wait for the update loop to run
mux.Post(downloader.StartEvent{})
waitForMiningState(t, miner, false)
+
// Starting the miner after the downloader should not work
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, false)
}
func TestStartStopMiner(t *testing.T) {
- miner, _, cleanup := createMiner(t)
- defer cleanup(false)
+ t.Parallel()
+
+ minerBor := NewBorDefaultMiner(t)
+ defer func() {
+ minerBor.Cleanup(false)
+ minerBor.Ctrl.Finish()
+ }()
+
+ miner := minerBor.Miner
+
waitForMiningState(t, miner, false)
miner.Start(common.HexToAddress("0x12345"))
+
waitForMiningState(t, miner, true)
+
miner.Stop()
- waitForMiningState(t, miner, false)
+ waitForMiningState(t, miner, false)
}
func TestCloseMiner(t *testing.T) {
- miner, _, cleanup := createMiner(t)
- defer cleanup(true)
+ t.Parallel()
+
+ minerBor := NewBorDefaultMiner(t)
+ defer func() {
+ minerBor.Cleanup(true)
+ minerBor.Ctrl.Finish()
+ }()
+
+ miner := minerBor.Miner
+
waitForMiningState(t, miner, false)
+
miner.Start(common.HexToAddress("0x12345"))
+
waitForMiningState(t, miner, true)
+
// Terminate the miner and wait for the update loop to run
miner.Close()
+
waitForMiningState(t, miner, false)
}
// TestMinerSetEtherbase checks that etherbase becomes set even if mining isn't
// possible at the moment
func TestMinerSetEtherbase(t *testing.T) {
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
+ t.Parallel()
+
+ minerBor := NewBorDefaultMiner(t)
+ defer func() {
+ minerBor.Cleanup(false)
+ minerBor.Ctrl.Finish()
+ }()
+
+ miner := minerBor.Miner
+ mux := minerBor.Mux
+
// Start with a 'bad' mining address
miner.Start(common.HexToAddress("0xdead"))
waitForMiningState(t, miner, true)
+
// Start the downloader
mux.Post(downloader.StartEvent{})
waitForMiningState(t, miner, false)
+
// Now user tries to configure proper mining address
miner.Start(common.HexToAddress("0x1337"))
+
// Stop the downloader and wait for the update loop to run
mux.Post(downloader.DoneEvent{})
waitForMiningState(t, miner, true)
+
// The miner should now be using the good address
if got, exp := miner.coinbase, common.HexToAddress("0x1337"); got != exp {
t.Fatalf("Wrong coinbase, got %x expected %x", got, exp)
@@ -239,46 +257,6 @@ func waitForMiningState(t *testing.T, m *Miner, mining bool) {
return
}
}
- t.Fatalf("Mining() == %t, want %t", state, mining)
-}
-func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
- // Create Ethash config
- config := Config{
- Etherbase: common.HexToAddress("123456789"),
- }
- // Create chainConfig
- memdb := memorydb.New()
- chainDB := rawdb.NewDatabase(memdb)
- genesis := core.DeveloperGenesisBlock(15, 11_500_000, common.HexToAddress("12345"))
- chainConfig, _, err := core.SetupGenesisBlock(chainDB, genesis)
- if err != nil {
- t.Fatalf("can't create new chain config: %v", err)
- }
- // Create consensus engine
- engine := clique.New(chainConfig.Clique, chainDB)
- // Create Ethereum backend
- consensus.NewMerger(rawdb.NewMemoryDatabase())
- bc, err := core.NewBlockChain(chainDB, nil, chainConfig, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("can't create new chain %v", err)
- }
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(chainDB), nil)
- blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}
-
- pool := core.NewTxPool(testTxPoolConfig, chainConfig, blockchain)
- backend := NewMockBackend(bc, pool)
- // Create event Mux
- mux := new(event.TypeMux)
- // Create Miner
- miner := New(backend, &config, chainConfig, mux, engine, nil)
- cleanup := func(skipMiner bool) {
- bc.Stop()
- engine.Close()
- pool.Stop()
- if !skipMiner {
- miner.Close()
- }
- }
- return miner, mux, cleanup
+ t.Fatalf("Mining() == %t, want %t", state, mining)
}
diff --git a/miner/unconfirmed_test.go b/miner/unconfirmed_test.go
index dc83cb9265..70d3325506 100644
--- a/miner/unconfirmed_test.go
+++ b/miner/unconfirmed_test.go
@@ -44,6 +44,7 @@ func TestUnconfirmedInsertBounds(t *testing.T) {
for i := 0; i < int(depth); i++ {
pool.Insert(depth, [32]byte{byte(depth), byte(i)})
}
+
// Validate that no blocks below the depth allowance are left in
pool.blocks.Do(func(block interface{}) {
if block := block.(*unconfirmedBlock); block.index+uint64(limit) <= depth {
@@ -64,23 +65,29 @@ func TestUnconfirmedShifts(t *testing.T) {
for depth := start; depth < start+uint64(limit); depth++ {
pool.Insert(depth, [32]byte{byte(depth)})
}
+
// Try to shift below the limit and ensure no blocks are dropped
pool.Shift(start + uint64(limit) - 1)
if n := pool.blocks.Len(); n != int(limit) {
t.Errorf("unconfirmed count mismatch: have %d, want %d", n, limit)
}
+
// Try to shift half the blocks out and verify remainder
pool.Shift(start + uint64(limit) - 1 + uint64(limit/2))
if n := pool.blocks.Len(); n != int(limit)/2 {
t.Errorf("unconfirmed count mismatch: have %d, want %d", n, limit/2)
}
+
// Try to shift all the remaining blocks out and verify emptiness
pool.Shift(start + 2*uint64(limit))
+
if n := pool.blocks.Len(); n != 0 {
t.Errorf("unconfirmed count mismatch: have %d, want %d", n, 0)
}
+
// Try to shift out from the empty set and make sure it doesn't break
pool.Shift(start + 3*uint64(limit))
+
if n := pool.blocks.Len(); n != 0 {
t.Errorf("unconfirmed count mismatch: have %d, want %d", n, 0)
}
diff --git a/miner/worker.go b/miner/worker.go
index c3a5e1c9ba..9fcb2140ca 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -25,6 +25,7 @@ import (
"time"
mapset "github.com/deckarep/golang-set"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
@@ -389,24 +390,31 @@ func (w *worker) close() {
// recalcRecommit recalculates the resubmitting interval upon feedback.
func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
- var (
- prevF = float64(prev.Nanoseconds())
- next float64
- )
- if inc {
- next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
- max := float64(maxRecommitInterval.Nanoseconds())
- if next > max {
- next = max
- }
- } else {
- next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
- min := float64(minRecommit.Nanoseconds())
- if next < min {
- next = min
- }
- }
- return time.Duration(int64(next))
+ // var (
+ // prevF = float64(prev.Nanoseconds())
+ // next float64
+ // )
+ //
+ // if inc {
+ // next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
+ // max := float64(maxRecommitInterval.Nanoseconds())
+ //
+ // if next > max {
+ // next = max
+ // }
+ // } else {
+ // next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
+ // min := float64(minRecommit.Nanoseconds())
+ //
+ // if next < min {
+ // next = min
+ // }
+ // }
+ //
+ // log.Info("Recalc Commit", "Prev", prev, "Next", next)
+
+ // Return the same prev value to keep the recommit interval constant
+ return prev
}
// newWorkLoop is a standalone goroutine to submit new sealing work upon received events.
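With the adjustment logic commented out, recalcRecommit simply echoes the previous interval. A hypothetical regression test, not part of this diff and placed in package miner, would pin that down:

package miner

import (
    "testing"
    "time"
)

// TestRecalcRecommitIsConstant checks that feedback no longer changes the
// recommit interval, regardless of the direction of the adjustment.
func TestRecalcRecommitIsConstant(t *testing.T) {
    prev := 2 * time.Second

    for _, inc := range []bool{true, false} {
        if got := recalcRecommit(time.Second, prev, float64(time.Second.Nanoseconds()), inc); got != prev {
            t.Fatalf("recommit interval changed: have %v, want %v", got, prev)
        }
    }
}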
diff --git a/miner/worker_test.go b/miner/worker_test.go
index dd029433b8..6dae391c88 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -24,9 +24,14 @@ import (
"testing"
"time"
+ "github.com/golang/mock/gomock"
+
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/bor"
+ "github.com/ethereum/go-ethereum/consensus/bor/api"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
@@ -38,6 +43,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/tests/bor/mocks"
)
const (
@@ -49,43 +55,9 @@ const (
testGas = 144109
)
-var (
- // Test chain configurations
- testTxPoolConfig core.TxPoolConfig
- ethashChainConfig *params.ChainConfig
- cliqueChainConfig *params.ChainConfig
-
- // Test accounts
- testBankKey, _ = crypto.GenerateKey()
- testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
- testBankFunds = big.NewInt(1000000000000000000)
-
- testUserKey, _ = crypto.GenerateKey()
- testUserAddress = crypto.PubkeyToAddress(testUserKey.PublicKey)
-
- // Test transactions
- pendingTxs []*types.Transaction
- newTxs []*types.Transaction
-
- testConfig = &Config{
- Recommit: time.Second,
- GasCeil: params.GenesisGasLimit,
- }
-)
-
func init() {
- testTxPoolConfig = core.DefaultTxPoolConfig
- testTxPoolConfig.Journal = ""
- ethashChainConfig = new(params.ChainConfig)
- *ethashChainConfig = *params.TestChainConfig
- cliqueChainConfig = new(params.ChainConfig)
- *cliqueChainConfig = *params.TestChainConfig
- cliqueChainConfig.Clique = &params.CliqueConfig{
- Period: 10,
- Epoch: 30000,
- }
-
signer := types.LatestSigner(params.TestChainConfig)
+
tx1 := types.MustSignNewTx(testBankKey, signer, &types.AccessListTx{
ChainID: params.TestChainConfig.ChainID,
Nonce: 0,
@@ -94,6 +66,7 @@ func init() {
Gas: params.TxGas,
GasPrice: big.NewInt(params.InitialBaseFee),
})
+
pendingTxs = append(pendingTxs, tx1)
tx2 := types.MustSignNewTx(testBankKey, signer, &types.LegacyTx{
@@ -103,6 +76,7 @@ func init() {
Gas: params.TxGas,
GasPrice: big.NewInt(params.InitialBaseFee),
})
+
newTxs = append(newTxs, tx2)
rand.Seed(time.Now().UnixNano())
@@ -125,6 +99,12 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
}
switch e := engine.(type) {
+ case *bor.Bor:
+ gspec.ExtraData = make([]byte, 32+common.AddressLength+crypto.SignatureLength)
+ copy(gspec.ExtraData[32:32+common.AddressLength], testBankAddress.Bytes())
+ e.Authorize(testBankAddress, func(account accounts.Account, s string, data []byte) ([]byte, error) {
+ return crypto.Sign(crypto.Keccak256(data), testBankKey)
+ })
case *clique.Clique:
gspec.ExtraData = make([]byte, 32+common.AddressLength+crypto.SignatureLength)
copy(gspec.ExtraData[32:32+common.AddressLength], testBankAddress.Bytes())
@@ -135,6 +115,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
default:
t.Fatalf("unexpected consensus engine type: %T", engine)
}
+
genesis := gspec.MustCommit(db)
chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec.Config, engine, vm.Config{}, nil, nil)
@@ -149,10 +130,12 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
t.Fatalf("failed to insert origin chain: %v", err)
}
}
+
parent := genesis
if n > 0 {
parent = chain.GetBlockByHash(chain.CurrentBlock().ParentHash())
}
+
blocks, _ := core.GenerateChain(chainConfig, parent, engine, db, 1, func(i int, gen *core.BlockGen) {
gen.SetCoinbase(testUserAddress)
})
@@ -174,28 +157,36 @@ func (b *testWorkerBackend) StateAtBlock(block *types.Block, reexec uint64, base
func (b *testWorkerBackend) newRandomUncle() *types.Block {
var parent *types.Block
+
cur := b.chain.CurrentBlock()
+
if cur.NumberU64() == 0 {
parent = b.chain.Genesis()
} else {
parent = b.chain.GetBlockByHash(b.chain.CurrentBlock().ParentHash())
}
+
blocks, _ := core.GenerateChain(b.chain.Config(), parent, b.chain.Engine(), b.db, 1, func(i int, gen *core.BlockGen) {
var addr = make([]byte, common.AddressLength)
+
rand.Read(addr)
gen.SetCoinbase(common.BytesToAddress(addr))
})
+
return blocks[0]
}
func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction {
var tx *types.Transaction
+
gasPrice := big.NewInt(10 * params.InitialBaseFee)
+
if creation {
tx, _ = types.SignTx(types.NewContractCreation(b.txPool.Nonce(testBankAddress), big.NewInt(0), testGas, gasPrice, common.FromHex(testCode)), types.HomesteadSigner{}, testBankKey)
} else {
tx, _ = types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress), testUserAddress, big.NewInt(1000), params.TxGas, gasPrice, nil), types.HomesteadSigner{}, testBankKey)
}
+
return tx
}
@@ -204,39 +195,85 @@ func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consens
backend.txPool.AddLocals(pendingTxs)
w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false)
w.setEtherbase(testBankAddress)
+
return w, backend
}
func TestGenerateBlockAndImportEthash(t *testing.T) {
- testGenerateBlockAndImport(t, false)
+ t.Parallel()
+
+ testGenerateBlockAndImport(t, false, false)
}
func TestGenerateBlockAndImportClique(t *testing.T) {
- testGenerateBlockAndImport(t, true)
+ t.Parallel()
+
+ testGenerateBlockAndImport(t, true, false)
+}
+
+func TestGenerateBlockAndImportBor(t *testing.T) {
+ t.Parallel()
+
+ testGenerateBlockAndImport(t, false, true)
}
-func testGenerateBlockAndImport(t *testing.T, isClique bool) {
+//nolint:thelper
+func testGenerateBlockAndImport(t *testing.T, isClique bool, isBor bool) {
var (
engine consensus.Engine
chainConfig *params.ChainConfig
db = rawdb.NewMemoryDatabase()
)
- if isClique {
- chainConfig = params.AllCliqueProtocolChanges
- chainConfig.Clique = &params.CliqueConfig{Period: 1, Epoch: 30000}
- engine = clique.New(chainConfig.Clique, db)
+
+ if isBor {
+ chainConfig = params.BorUnittestChainConfig
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ ethAPIMock := api.NewMockCaller(ctrl)
+ ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+
+ spanner := bor.NewMockSpanner(ctrl)
+ spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any()).Return([]*valset.Validator{
+ {
+ ID: 0,
+ Address: testBankAddress,
+ VotingPower: 100,
+ ProposerPriority: 0,
+ },
+ }, nil).AnyTimes()
+
+ heimdallClientMock := mocks.NewMockIHeimdallClient(ctrl)
+ heimdallClientMock.EXPECT().Close().Times(1)
+
+ contractMock := bor.NewMockGenesisContract(ctrl)
+
+ db, _, _ = NewDBForFakes(t)
+
+ engine = NewFakeBor(t, db, chainConfig, ethAPIMock, spanner, heimdallClientMock, contractMock)
} else {
- chainConfig = params.AllEthashProtocolChanges
- engine = ethash.NewFaker()
+ if isClique {
+ chainConfig = params.AllCliqueProtocolChanges
+ chainConfig.Clique = &params.CliqueConfig{Period: 1, Epoch: 30000}
+ engine = clique.New(chainConfig.Clique, db)
+ } else {
+ chainConfig = params.AllEthashProtocolChanges
+ engine = ethash.NewFaker()
+ }
}
+ defer engine.Close()
+
chainConfig.LondonBlock = big.NewInt(0)
+
w, b := newTestWorker(t, chainConfig, engine, db, 0)
defer w.close()
// This test chain imports the mined blocks.
db2 := rawdb.NewMemoryDatabase()
b.genesis.MustCommit(db2)
+
chain, _ := core.NewBlockChain(db2, nil, b.chain.Config(), engine, vm.Config{}, nil, nil)
defer chain.Stop()
@@ -287,20 +324,25 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens
taskIndex int
taskCh = make(chan struct{}, 2)
)
+
checkEqual := func(t *testing.T, task *task, index int) {
// The first empty work without any txs included
receiptLen, balance := 0, big.NewInt(0)
+
if index == 1 {
// The second full work with 1 tx included
receiptLen, balance = 1, big.NewInt(1000)
}
+
if len(task.receipts) != receiptLen {
t.Fatalf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen)
}
+
if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 {
t.Fatalf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance)
}
}
+
w.newTaskHook = func(task *task) {
if task.block.NumberU64() == 1 {
checkEqual(t, task, taskIndex)
@@ -308,11 +350,14 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens
taskCh <- struct{}{}
}
}
+
w.skipSealHook = func(task *task) bool { return true }
w.fullTaskHook = func() {
time.Sleep(100 * time.Millisecond)
}
+
w.start() // Start mining!
+
for i := 0; i < 2; i += 1 {
select {
case <-taskCh:
@@ -344,16 +389,20 @@ func TestStreamUncleBlock(t *testing.T) {
t.Errorf("uncle hash mismatch: have %s, want %s", have.Hex(), want.Hex())
}
}
+
taskCh <- struct{}{}
taskIndex += 1
}
}
+
w.skipSealHook = func(task *task) bool {
return true
}
+
w.fullTaskHook = func() {
time.Sleep(100 * time.Millisecond)
}
+
w.start()
for i := 0; i < 2; i += 1 {
@@ -396,20 +445,25 @@ func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, en
// one has 1 pending tx, the third one has 2 txs
if taskIndex == 2 {
receiptLen, balance := 2, big.NewInt(2000)
+
if len(task.receipts) != receiptLen {
t.Errorf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen)
}
+
if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 {
t.Errorf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance)
}
}
+
taskCh <- struct{}{}
taskIndex += 1
}
}
+
w.skipSealHook = func(task *task) bool {
return true
}
+
w.fullTaskHook = func() {
time.Sleep(100 * time.Millisecond)
}
@@ -423,6 +477,7 @@ func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, en
t.Error("new task timeout")
}
}
+
b.txPool.AddLocals(newTxs)
time.Sleep(time.Second)
@@ -434,10 +489,15 @@ func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, en
}
func TestAdjustIntervalEthash(t *testing.T) {
+ // Skipping this test as the recommit interval now remains constant
+ t.Skip()
testAdjustInterval(t, ethashChainConfig, ethash.NewFaker())
}
func TestAdjustIntervalClique(t *testing.T) {
+
+ // Skipping this test as the recommit interval now remains constant
+ t.Skip()
testAdjustInterval(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase()))
}
@@ -459,6 +519,7 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
index = 0
start uint32
)
+
w.resubmitHook = func(minInterval time.Duration, recommitInterval time.Duration) {
// Short circuit if interval checking hasn't started.
if atomic.LoadUint32(&start) == 0 {
@@ -489,16 +550,19 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
if recommitInterval != wantRecommitInterval {
t.Errorf("resubmit interval mismatch: have %v, want %v", recommitInterval, wantRecommitInterval)
}
+
result = append(result, float64(recommitInterval.Nanoseconds()))
index += 1
progress <- struct{}{}
}
+
w.start()
time.Sleep(time.Second) // Ensure two tasks have been submitted due to start opt
atomic.StoreUint32(&start, 1)
w.setRecommitInterval(3 * time.Second)
+
select {
case <-progress:
case <-time.NewTimer(time.Second).C:
@@ -506,6 +570,7 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
}
w.resubmitAdjustCh <- &intervalAdjust{inc: true, ratio: 0.8}
+
select {
case <-progress:
case <-time.NewTimer(time.Second).C:
@@ -513,6 +578,7 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
}
w.resubmitAdjustCh <- &intervalAdjust{inc: false}
+
select {
case <-progress:
case <-time.NewTimer(time.Second).C:
@@ -520,6 +586,7 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
}
w.setRecommitInterval(500 * time.Millisecond)
+
select {
case <-progress:
case <-time.NewTimer(time.Second).C:
@@ -554,9 +621,11 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co
w.skipSealHook = func(task *task) bool {
return true
}
+
w.fullTaskHook = func() {
time.Sleep(100 * time.Millisecond)
}
+
timestamp := uint64(time.Now().Unix())
assertBlock := func(block *types.Block, number uint64, coinbase common.Address, random common.Hash) {
if block.Time() != timestamp {
@@ -564,14 +633,17 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co
// is even smaller than parent block's. It's OK.
t.Logf("Invalid timestamp, want %d, get %d", timestamp, block.Time())
}
+
if len(block.Uncles()) != 0 {
t.Error("Unexpected uncle block")
}
+
_, isClique := engine.(*clique.Clique)
if !isClique {
if len(block.Extra()) != 0 {
t.Error("Unexpected extra field")
}
+
if block.Coinbase() != coinbase {
t.Errorf("Unexpected coinbase got %x want %x", block.Coinbase(), coinbase)
}
@@ -580,18 +652,22 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co
t.Error("Unexpected coinbase")
}
}
+
if !isClique {
if block.MixDigest() != random {
t.Error("Unexpected mix digest")
}
}
+
if block.Nonce() != 0 {
t.Error("Unexpected block nonce")
}
+
if block.NumberU64() != number {
t.Errorf("Mismatched block number, want %d got %d", number, block.NumberU64())
}
}
+
var cases = []struct {
parent common.Hash
coinbase common.Address
@@ -639,6 +715,7 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co
// This API should work even when the automatic sealing is not enabled
for _, c := range cases {
block, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random)
+
if c.expectErr {
if err == nil {
t.Error("Expect error but get nil")
@@ -647,12 +724,14 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co
if err != nil {
t.Errorf("Unexpected error %v", err)
}
+
assertBlock(block, c.expectNumber, c.coinbase, c.random)
}
}
// This API should work even when the automatic sealing is enabled
w.start()
+
for _, c := range cases {
block, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random)
if c.expectErr {
@@ -663,6 +742,7 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co
if err != nil {
t.Errorf("Unexpected error %v", err)
}
+
assertBlock(block, c.expectNumber, c.coinbase, c.random)
}
}
diff --git a/packaging/deb/README.md b/packaging/deb/README.md
new file mode 100644
index 0000000000..7e84275f38
--- /dev/null
+++ b/packaging/deb/README.md
@@ -0,0 +1,23 @@
+# Debian
+
+
+
+For Debian packages you will need to add the following layout during the build:
+
+
+
+bor/
+ DEBIAN/control
+ DEBIAN/postinst
+ usr/local/bin/bor
+ lib/systemd/system/bor.service
+
+This layout will be wrapped up during the package build process.
+
+
+Note this is still a work in progress:
+
+TODO: removal/purge on removal using dpkg
+ cleanup of control files to list what we want
+ copyright inclusion
+
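Once this layout is in place, the package is typically assembled with dpkg-deb --build; the exact invocation here depends on the repo's build scripts.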
diff --git a/packaging/deb/bor/DEBIAN/changelog b/packaging/deb/bor/DEBIAN/changelog
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/packaging/deb/bor/DEBIAN/control b/packaging/deb/bor/DEBIAN/control
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/packaging/deb/bor/DEBIAN/postinst b/packaging/deb/bor/DEBIAN/postinst
new file mode 100755
index 0000000000..e23f4d6897
--- /dev/null
+++ b/packaging/deb/bor/DEBIAN/postinst
@@ -0,0 +1,4 @@
+#!/bin/bash
+# This is a postinstallation script so the service can be configured and started when requested
+#
+sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent bor
diff --git a/packaging/deb/bor/DEBIAN/postrm b/packaging/deb/bor/DEBIAN/postrm
new file mode 100755
index 0000000000..7602789a01
--- /dev/null
+++ b/packaging/deb/bor/DEBIAN/postrm
@@ -0,0 +1,6 @@
+#!/bin/bash
+#
+###############
+# Remove bor installs
+##############
+sudo rm -rf /usr/bin/bor
diff --git a/packaging/deb/bor/DEBIAN/prerm b/packaging/deb/bor/DEBIAN/prerm
new file mode 100755
index 0000000000..e40aed2c80
--- /dev/null
+++ b/packaging/deb/bor/DEBIAN/prerm
@@ -0,0 +1,9 @@
+#!/bin/bash
+#
+#
+##############
+# Stop bor before removal
+##############
+#sudo systemctl stop bor.service
+#############
+
diff --git a/packaging/requirements/README.md b/packaging/requirements/README.md
new file mode 100644
index 0000000000..48cdce8528
--- /dev/null
+++ b/packaging/requirements/README.md
@@ -0,0 +1 @@
+placeholder
diff --git a/packaging/rpm/TODO b/packaging/rpm/TODO
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml
new file mode 100644
index 0000000000..9eaafd3bee
--- /dev/null
+++ b/packaging/templates/mainnet-v1/archive/config.toml
@@ -0,0 +1,135 @@
+chain = "mainnet"
+# identity = "node_name"
+# log-level = "INFO"
+datadir = "/var/lib/bor/data"
+# ancient = ""
+# keystore = ""
+syncmode = "full"
+gcmode = "archive"
+# snapshot = true
+# ethstats = ""
+
+# ["eth.requiredblocks"]
+
+[p2p]
+ maxpeers = 50
+ port = 30303
+ # maxpendpeers = 50
+ # bind = "0.0.0.0"
+ # nodiscover = false
+ # nat = "any"
+ # [p2p.discovery]
+ # v5disc = false
+ # bootnodes = []
+ # bootnodesv4 = []
+ # bootnodesv5 = []
+ # static-nodes = []
+ # trusted-nodes = []
+ # dns = []
+
+# [heimdall]
+ # url = "http://localhost:1317"
+ # "bor.without" = false
+ # grpc-address = ""
+
+[txpool]
+ nolocals = true
+ pricelimit = 30000000000
+ accountslots = 16
+ globalslots = 32768
+ accountqueue = 16
+ globalqueue = 32768
+ lifetime = "1h30m0s"
+ # locals = []
+ # journal = ""
+ # rejournal = "1h0m0s"
+ # pricebump = 10
+
+[miner]
+ gaslimit = 30000000
+ gasprice = "30000000000"
+ # mine = false
+ # etherbase = ""
+ # extradata = ""
+
+[jsonrpc]
+ ipcpath = "/var/lib/bor/bor.ipc"
+ # ipcdisable = false
+ # gascap = 50000000
+ # txfeecap = 5.0
+ [jsonrpc.http]
+ enabled = true
+ port = 8545
+ host = "127.0.0.1"
+ api = ["eth", "net", "web3", "txpool", "bor"]
+ vhosts = ["*"]
+ corsdomain = ["*"]
+ # prefix = ""
+ [jsonrpc.ws]
+ enabled = true
+ port = 8546
+ # prefix = ""
+ # host = "localhost"
+ # api = ["web3", "net"]
+ origins = ["*"]
+ # [jsonrpc.graphql]
+ # enabled = false
+ # port = 0
+ # prefix = ""
+ # host = ""
+ # vhosts = ["*"]
+ # corsdomain = ["*"]
+ # [jsonrpc.timeouts]
+ # read = "30s"
+ # write = "30s"
+ # idle = "2m0s"
+
+[gpo]
+ # blocks = 20
+ # percentile = 60
+ # maxprice = "5000000000000"
+ ignoreprice = "30000000000"
+
+[telemetry]
+ metrics = true
+ # expensive = false
+ # prometheus-addr = ""
+ # opencollector-endpoint = ""
+ # [telemetry.influx]
+ # influxdb = false
+ # endpoint = ""
+ # database = ""
+ # username = ""
+ # password = ""
+ # influxdbv2 = false
+ # token = ""
+ # bucket = ""
+ # organization = ""
+ # [telemetry.influx.tags]
+
+[cache]
+ cache = 4096
+ gc = 0
+ snapshot = 20
+ # database = 50
+ trie = 30
+ # journal = "triecache"
+ # rejournal = "1h0m0s"
+ # noprefetch = false
+ # preimages = false
+ # txlookuplimit = 2350000
+ # timeout = "1h0m0s"
+
+# [accounts]
+ # unlock = []
+ # password = ""
+ # allow-insecure-unlock = false
+ # lightkdf = false
+ # disable-bor-wallet = false
+
+# [grpc]
+ # addr = ":3131"
+
+# [developer]
+ # dev = false
+ # period = 0
diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml
new file mode 100644
index 0000000000..94dd6634f0
--- /dev/null
+++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml
@@ -0,0 +1,135 @@
+chain = "mainnet"
+# identity = "node_name"
+# log-level = "INFO"
+datadir = "/var/lib/bor/data"
+# ancient = ""
+# keystore = ""
+syncmode = "full"
+# gcmode = "full"
+# snapshot = true
+# ethstats = ""
+
+# ["eth.requiredblocks"]
+
+[p2p]
+ maxpeers = 50
+ port = 30303
+ # maxpendpeers = 50
+ # bind = "0.0.0.0"
+ # nodiscover = false
+ # nat = "any"
+ # [p2p.discovery]
+ # v5disc = false
+ # bootnodes = []
+ # bootnodesv4 = []
+ # bootnodesv5 = []
+ # static-nodes = []
+ # trusted-nodes = []
+ # dns = []
+
+# [heimdall]
+ # url = "http://localhost:1317"
+ # "bor.without" = false
+ # grpc-address = ""
+
+[txpool]
+ nolocals = true
+ pricelimit = 30000000000
+ accountslots = 16
+ globalslots = 32768
+ accountqueue = 16
+ globalqueue = 32768
+ lifetime = "1h30m0s"
+ # locals = []
+ # journal = ""
+ # rejournal = "1h0m0s"
+ # pricebump = 10
+
+[miner]
+ gaslimit = 30000000
+ gasprice = "30000000000"
+ # mine = false
+ # etherbase = ""
+ # extradata = ""
+
+[jsonrpc]
+ ipcpath = "/var/lib/bor/bor.ipc"
+ # ipcdisable = false
+ # gascap = 50000000
+ # txfeecap = 5.0
+ [jsonrpc.http]
+ enabled = true
+ port = 8545
+ host = "127.0.0.1"
+ api = ["eth", "net", "web3", "txpool", "bor"]
+ vhosts = ["*"]
+ corsdomain = ["*"]
+ # prefix = ""
+ # [jsonrpc.ws]
+ # enabled = false
+ # port = 8546
+ # prefix = ""
+ # host = "localhost"
+ # api = ["web3", "net"]
+ # origins = ["*"]
+ # [jsonrpc.graphql]
+ # enabled = false
+ # port = 0
+ # prefix = ""
+ # host = ""
+ # vhosts = ["*"]
+ # corsdomain = ["*"]
+ # [jsonrpc.timeouts]
+ # read = "30s"
+ # write = "30s"
+ # idle = "2m0s"
+
+[gpo]
+ # blocks = 20
+ # percentile = 60
+ # maxprice = "5000000000000"
+ ignoreprice = "30000000000"
+
+[telemetry]
+ metrics = true
+ # expensive = false
+ # prometheus-addr = ""
+ # opencollector-endpoint = ""
+ # [telemetry.influx]
+ # influxdb = false
+ # endpoint = ""
+ # database = ""
+ # username = ""
+ # password = ""
+ # influxdbv2 = false
+ # token = ""
+ # bucket = ""
+ # organization = ""
+ # [telemetry.influx.tags]
+
+[cache]
+ cache = 4096
+ # gc = 25
+ # snapshot = 10
+ # database = 50
+ # trie = 15
+ # journal = "triecache"
+ # rejournal = "1h0m0s"
+ # noprefetch = false
+ # preimages = false
+ # txlookuplimit = 2350000
+ # timeout = "1h0m0s"
+
+# [accounts]
+ # unlock = []
+ # password = ""
+ # allow-insecure-unlock = false
+ # lightkdf = false
+ # disable-bor-wallet = false
+
+# [grpc]
+ # addr = ":3131"
+
+# [developer]
+ # dev = false
+ # period = 0
diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml
new file mode 100644
index 0000000000..9c55683c96
--- /dev/null
+++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml
@@ -0,0 +1,137 @@
+# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields.
+
+chain = "mainnet"
+# identity = "node_name"
+# log-level = "INFO"
+datadir = "/var/lib/bor/data"
+# ancient = ""
+# keystore = "$BOR_DIR/keystore"
+syncmode = "full"
+# gcmode = "full"
+# snapshot = true
+# ethstats = ""
+
+# ["eth.requiredblocks"]
+
+[p2p]
+ maxpeers = 1
+ port = 30303
+ nodiscover = true
+ # maxpendpeers = 50
+ # bind = "0.0.0.0"
+ # nat = "any"
+ # [p2p.discovery]
+ # v5disc = false
+ # bootnodes = []
+ # bootnodesv4 = []
+ # bootnodesv5 = []
+ # static-nodes = []
+ # trusted-nodes = []
+ # dns = []
+
+# [heimdall]
+ # url = "http://localhost:1317"
+ # "bor.without" = false
+ # grpc-address = ""
+
+[txpool]
+ nolocals = true
+ pricelimit = 30000000000
+ accountslots = 16
+ globalslots = 32768
+ accountqueue = 16
+ globalqueue = 32768
+ lifetime = "1h30m0s"
+ # locals = []
+ # journal = ""
+ # rejournal = "1h0m0s"
+ # pricebump = 10
+
+[miner]
+ mine = true
+ gaslimit = 30000000
+ gasprice = "30000000000"
+ # etherbase = ""
+ # extradata = ""
+
+[jsonrpc]
+ ipcpath = "/var/lib/bor/bor.ipc"
+ # ipcdisable = false
+ # gascap = 50000000
+ # txfeecap = 5.0
+ [jsonrpc.http]
+ enabled = true
+ port = 8545
+ host = "127.0.0.1"
+ api = ["eth", "net", "web3", "txpool", "bor"]
+ vhosts = ["*"]
+ corsdomain = ["*"]
+ # prefix = ""
+ # [jsonrpc.ws]
+ # enabled = false
+ # port = 8546
+ # prefix = ""
+ # host = "localhost"
+ # api = ["web3", "net"]
+ # origins = ["*"]
+ # [jsonrpc.graphql]
+ # enabled = false
+ # port = 0
+ # prefix = ""
+ # host = ""
+ # vhosts = ["*"]
+ # corsdomain = ["*"]
+ # [jsonrpc.timeouts]
+ # read = "30s"
+ # write = "30s"
+ # idle = "2m0s"
+
+[gpo]
+ # blocks = 20
+ # percentile = 60
+ # maxprice = "5000000000000"
+ ignoreprice = "30000000000"
+
+[telemetry]
+ metrics = true
+ # expensive = false
+ # prometheus-addr = ""
+ # opencollector-endpoint = ""
+ # [telemetry.influx]
+ # influxdb = false
+ # endpoint = ""
+ # database = ""
+ # username = ""
+ # password = ""
+ # influxdbv2 = false
+ # token = ""
+ # bucket = ""
+ # organization = ""
+ # [telemetry.influx.tags]
+
+[cache]
+ cache = 4096
+ # gc = 25
+ # snapshot = 10
+ # database = 50
+ # trie = 15
+ # journal = "triecache"
+ # rejournal = "1h0m0s"
+ # noprefetch = false
+ # preimages = false
+ # txlookuplimit = 2350000
+ # timeout = "1h0m0s"
+
+[accounts]
+ allow-insecure-unlock = true
+ # password = "$BOR_DIR/password.txt"
+ # unlock = ["$ADDRESS"]
+ # lightkdf = false
+ # disable-bor-wallet = false
+
+# [grpc]
+ # addr = ":3131"
+
+# [developer]
+ # dev = false
+ # period = 0
diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml
new file mode 100644
index 0000000000..573f1f3be8
--- /dev/null
+++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml
@@ -0,0 +1,137 @@
+# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields.
+
+chain = "mainnet"
+# identity = "node_name"
+# log-level = "INFO"
+datadir = "/var/lib/bor/data"
+# ancient = ""
+# keystore = "$BOR_DIR/keystore"
+syncmode = "full"
+# gcmode = "full"
+# snapshot = true
+# ethstats = ""
+
+# ["eth.requiredblocks"]
+
+[p2p]
+ maxpeers = 50
+ port = 30303
+ # maxpendpeers = 50
+ # bind = "0.0.0.0"
+ # nodiscover = false
+ # nat = "any"
+ # [p2p.discovery]
+ # v5disc = false
+ # bootnodes = []
+ # bootnodesv4 = []
+ # bootnodesv5 = []
+ # static-nodes = []
+ # trusted-nodes = []
+ # dns = []
+
+# [heimdall]
+ # url = "http://localhost:1317"
+ # "bor.without" = false
+ # grpc-address = ""
+
+[txpool]
+ nolocals = true
+ pricelimit = 30000000000
+ accountslots = 16
+ globalslots = 32768
+ accountqueue = 16
+ globalqueue = 32768
+ lifetime = "1h30m0s"
+ # locals = []
+ # journal = ""
+ # rejournal = "1h0m0s"
+ # pricebump = 10
+
+[miner]
+ mine = true
+ gaslimit = 30000000
+ gasprice = "30000000000"
+ # etherbase = ""
+ # extradata = ""
+
+[jsonrpc]
+ ipcpath = "/var/lib/bor/bor.ipc"
+ # ipcdisable = false
+ # gascap = 50000000
+ # txfeecap = 5.0
+ [jsonrpc.http]
+ enabled = true
+ port = 8545
+ host = "127.0.0.1"
+ api = ["eth", "net", "web3", "txpool", "bor"]
+ vhosts = ["*"]
+ corsdomain = ["*"]
+ # prefix = ""
+ # [jsonrpc.ws]
+ # enabled = false
+ # port = 8546
+ # prefix = ""
+ # host = "localhost"
+ # api = ["web3", "net"]
+ # origins = ["*"]
+ # [jsonrpc.graphql]
+ # enabled = false
+ # port = 0
+ # prefix = ""
+ # host = ""
+ # vhosts = ["*"]
+ # corsdomain = ["*"]
+ # [jsonrpc.timeouts]
+ # read = "30s"
+ # write = "30s"
+ # idle = "2m0s"
+
+[gpo]
+# blocks = 20
+# percentile = 60
+# maxprice = "5000000000000"
+ ignoreprice = "30000000000"
+
+[telemetry]
+ metrics = true
+ # expensive = false
+ # prometheus-addr = ""
+ # opencollector-endpoint = ""
+ # [telemetry.influx]
+ # influxdb = false
+ # endpoint = ""
+ # database = ""
+ # username = ""
+ # password = ""
+ # influxdbv2 = false
+ # token = ""
+ # bucket = ""
+ # organization = ""
+ # [telemetry.influx.tags]
+
+[cache]
+ cache = 4096
+# gc = 25
+# snapshot = 10
+# database = 50
+# trie = 15
+# journal = "triecache"
+# rejournal = "1h0m0s"
+# noprefetch = false
+# preimages = false
+# txlookuplimit = 2350000
+# timeout = "1h0m0s"
+
+[accounts]
+ allow-insecure-unlock = true
+ # password = "$BOR_DIR/password.txt"
+ # unlock = ["$ADDRESS"]
+ # lightkdf = false
+ # disable-bor-wallet = false
+
+# [grpc]
+ # addr = ":3131"
+
+# [developer]
+ # dev = false
+ # period = 0
diff --git a/packaging/templates/package_scripts/changelog b/packaging/templates/package_scripts/changelog
new file mode 100644
index 0000000000..2395bcaef1
--- /dev/null
+++ b/packaging/templates/package_scripts/changelog
@@ -0,0 +1,3 @@
+bor (2.10.11) unstable; urgency=low
+
+-- Polygon Team Mon, 10 Nov 2022 00:37:31 +0100
\ No newline at end of file
diff --git a/packaging/templates/package_scripts/changelog.profile b/packaging/templates/package_scripts/changelog.profile
new file mode 100644
index 0000000000..b84fa22646
--- /dev/null
+++ b/packaging/templates/package_scripts/changelog.profile
@@ -0,0 +1,3 @@
+bor-profile (2.10.11) unstable; urgency=low
+
+-- Polygon Team Mon, 10 Nov 2022 00:37:31 +0100
\ No newline at end of file
diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control
new file mode 100644
index 0000000000..ed0ff46c06
--- /dev/null
+++ b/packaging/templates/package_scripts/control
@@ -0,0 +1,12 @@
+Source: bor
+Version: 0.3.0
+Section: develop
+Priority: standard
+Maintainer: Polygon
+Build-Depends: debhelper-compat (= 13)
+Package: bor
+Rules-Requires-Root: yes
+Architecture: amd64
+Multi-Arch: foreign
+Depends:
+Description: This is the bor package from Polygon Technology.
diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64
new file mode 100644
index 0000000000..2c624a4c45
--- /dev/null
+++ b/packaging/templates/package_scripts/control.arm64
@@ -0,0 +1,13 @@
+Source: bor
+Version: 0.3.0
+Section: develop
+Priority: standard
+Maintainer: Polygon
+Build-Depends: debhelper-compat (= 13)
+Rules-Requires-Root: yes
+Package: bor
+Architecture: arm64
+Multi-Arch: foreign
+Depends:
+Description: This is the bor package from Polygon Technology.
+
diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64
new file mode 100644
index 0000000000..087dabb1f6
--- /dev/null
+++ b/packaging/templates/package_scripts/control.profile.amd64
@@ -0,0 +1,14 @@
+Source: bor-profile
+Version: 0.3.0
+Section: develop
+Priority: standard
+Maintainer: Polygon
+Build-Depends: debhelper-compat (= 13)
+Rules-Requires-Root: yes
+Package: bor-profile
+Architecture: amd64
+Multi-Arch: foreign
+Depends:
+Description: This is the bor package from Polygon Technology.
+
+
diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64
new file mode 100644
index 0000000000..9de0c50253
--- /dev/null
+++ b/packaging/templates/package_scripts/control.profile.arm64
@@ -0,0 +1,12 @@
+Source: bor-profile
+Version: 0.3.0
+Section: develop
+Priority: standard
+Maintainer: Polygon
+Build-Depends: debhelper-compat (= 13)
+Rules-Requires-Root: yes
+Package: bor-profile
+Architecture: arm64
+Multi-Arch: foreign
+Depends:
+Description: This is the bor package from Polygon Technology.
diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator
new file mode 100644
index 0000000000..f3f5652a31
--- /dev/null
+++ b/packaging/templates/package_scripts/control.validator
@@ -0,0 +1,12 @@
+Source: bor-profile
+Version: 0.3.0
+Section: develop
+Priority: standard
+Maintainer: Polygon
+Build-Depends: debhelper-compat (= 13)
+Package: bor-profile
+Rules-Requires-Root: yes
+Architecture: amd64
+Multi-Arch: foreign
+Depends:
+Description: This is the bor package from Polygon Technology.
diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64
new file mode 100644
index 0000000000..97712830ff
--- /dev/null
+++ b/packaging/templates/package_scripts/control.validator.arm64
@@ -0,0 +1,13 @@
+Source: bor-profile
+Version: 0.3.0
+Section: develop
+Priority: standard
+Maintainer: Polygon
+Build-Depends: debhelper-compat (= 13)
+Rules-Requires-Root: yes
+Package: bor-profile
+Architecture: arm64
+Multi-Arch: foreign
+Depends:
+Description: This is the bor package from Polygon Technology.
+
diff --git a/packaging/templates/package_scripts/postinst b/packaging/templates/package_scripts/postinst
new file mode 100755
index 0000000000..7272b4b1aa
--- /dev/null
+++ b/packaging/templates/package_scripts/postinst
@@ -0,0 +1,12 @@
+#!/bin/bash
+# This is a postinstallation script so the service can be configured and started when requested
+#
+sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent bor
+if [ -d "/var/lib/bor" ]
+then
+ echo "Directory /var/lib/bor exists."
+else
+ mkdir -p /var/lib/bor
+ sudo chown -R bor /var/lib/bor
+fi
+sudo systemctl daemon-reload
diff --git a/packaging/templates/package_scripts/postinst.profile b/packaging/templates/package_scripts/postinst.profile
new file mode 100755
index 0000000000..e9a497906d
--- /dev/null
+++ b/packaging/templates/package_scripts/postinst.profile
@@ -0,0 +1,11 @@
+#!/bin/bash
+# This is a postinstallation script so the service can be configured and started when requested
+#
+if [ -d "/var/lib/bor" ]
+then
+ echo "Directory /var/lib/bor exists."
+else
+ mkdir -p /var/lib/bor
+ sudo chown -R bor /var/lib/bor
+fi
+sudo systemctl daemon-reload
diff --git a/packaging/templates/package_scripts/postrm b/packaging/templates/package_scripts/postrm
new file mode 100755
index 0000000000..55bbb87a4f
--- /dev/null
+++ b/packaging/templates/package_scripts/postrm
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+###############
+# Remove bor installs
+##############
+sudo rm -rf /var/lib/bor/config.toml
+sudo rm -rf /lib/systemd/system/bor.service
+sudo systemctl daemon-reload
diff --git a/packaging/templates/package_scripts/preinst b/packaging/templates/package_scripts/preinst
new file mode 100755
index 0000000000..b9efb0091d
--- /dev/null
+++ b/packaging/templates/package_scripts/preinst
@@ -0,0 +1,7 @@
+#!/bin/bash
+#
+#################
+# Stop existing bor in case of upgrade
+################
+#sudo systemctl stop bor.service
+######################
diff --git a/packaging/templates/package_scripts/prerm b/packaging/templates/package_scripts/prerm
new file mode 100755
index 0000000000..b2b2b4fce9
--- /dev/null
+++ b/packaging/templates/package_scripts/prerm
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+#
+##############
+# Stop bor before removal
+##############
+#sudo systemctl stop bor.service
+#############
\ No newline at end of file
diff --git a/packaging/templates/systemd/bor.service b/packaging/templates/systemd/bor.service
new file mode 100644
index 0000000000..b92bdd3cc5
--- /dev/null
+++ b/packaging/templates/systemd/bor.service
@@ -0,0 +1,16 @@
+[Unit]
+ Description=bor
+ StartLimitIntervalSec=500
+ StartLimitBurst=5
+
+[Service]
+ Restart=on-failure
+ RestartSec=5s
+ ExecStart=/usr/bin/bor server -config "/var/lib/bor/config.toml"
+ Type=simple
+ KillSignal=SIGINT
+ User=bor
+ TimeoutStopSec=120
+
+[Install]
+ WantedBy=multi-user.target
diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml
new file mode 100644
index 0000000000..1762fdf117
--- /dev/null
+++ b/packaging/templates/testnet-v4/archive/config.toml
@@ -0,0 +1,135 @@
+chain = "mumbai"
+# identity = "node_name"
+# log-level = "INFO"
+datadir = "/var/lib/bor/data"
+# ancient = ""
+# keystore = ""
+syncmode = "full"
+gcmode = "archive"
+# snapshot = true
+# ethstats = ""
+
+# ["eth.requiredblocks"]
+
+[p2p]
+ maxpeers = 50
+ port = 30303
+ # maxpendpeers = 50
+ # bind = "0.0.0.0"
+ # nodiscover = false
+ # nat = "any"
+ # [p2p.discovery]
+ # v5disc = false
+ # bootnodes = []
+ # bootnodesv4 = []
+ # bootnodesv5 = []
+ # static-nodes = []
+ # trusted-nodes = []
+ # dns = []
+
+# [heimdall]
+ # url = "http://localhost:1317"
+ # "bor.without" = false
+ # grpc-address = ""
+
+[txpool]
+ nolocals = true
+ accountslots = 16
+ globalslots = 131072
+ accountqueue = 64
+ globalqueue = 131072
+ lifetime = "1h30m0s"
+ # locals = []
+ # journal = ""
+ # rejournal = "1h0m0s"
+ # pricelimit = 1
+ # pricebump = 10
+
+[miner]
+ gaslimit = 30000000
+ # gasprice = "1000000000"
+ # mine = false
+ # etherbase = ""
+ # extradata = ""
+
+[jsonrpc]
+ ipcpath = "/var/lib/bor/bor.ipc"
+ # ipcdisable = false
+ # gascap = 50000000
+ # txfeecap = 5.0
+ [jsonrpc.http]
+ enabled = true
+ port = 8545
+ host = "0.0.0.0"
+ api = ["eth", "net", "web3", "txpool", "bor"]
+ vhosts = ["*"]
+ corsdomain = ["*"]
+ # prefix = ""
+ [jsonrpc.ws]
+ enabled = true
+ port = 8546
+ # prefix = ""
+ # host = "localhost"
+ # api = ["web3", "net"]
+ origins = ["*"]
+ # [jsonrpc.graphql]
+ # enabled = false
+ # port = 0
+ # prefix = ""
+ # host = ""
+ # vhosts = ["*"]
+ # corsdomain = ["*"]
+ # [jsonrpc.timeouts]
+ # read = "30s"
+ # write = "30s"
+ # idle = "2m0s"
+
+# [gpo]
+ # blocks = 20
+ # percentile = 60
+ # maxprice = "5000000000000"
+ # ignoreprice = "2"
+
+[telemetry]
+ metrics = true
+ # expensive = false
+ # prometheus-addr = ""
+ # opencollector-endpoint = ""
+ # [telemetry.influx]
+ # influxdb = false
+ # endpoint = ""
+ # database = ""
+ # username = ""
+ # password = ""
+ # influxdbv2 = false
+ # token = ""
+ # bucket = ""
+ # organization = ""
+ # [telemetry.influx.tags]
+
+[cache]
+ # cache = 1024
+ gc = 0
+ snapshot = 20
+ # database = 50
+ trie = 30
+ # journal = "triecache"
+ # rejournal = "1h0m0s"
+ # noprefetch = false
+ # preimages = false
+ # txlookuplimit = 2350000
+ # timeout = "1h0m0s"
+
+# [accounts]
+ # unlock = []
+ # password = ""
+ # allow-insecure-unlock = false
+ # lightkdf = false
+ # disable-bor-wallet = false
+
+# [grpc]
+ # addr = ":3131"
+
+# [developer]
+ # dev = false
+ # period = 0
diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml
new file mode 100644
index 0000000000..ae191cec2c
--- /dev/null
+++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml
@@ -0,0 +1,135 @@
+chain = "mumbai"
+# identity = "node_name"
+# log-level = "INFO"
+datadir = "/var/lib/bor/data"
+# ancient = ""
+# keystore = ""
+syncmode = "full"
+# gcmode = "full"
+# snapshot = true
+# ethstats = ""
+
+# ["eth.requiredblocks"]
+
+[p2p]
+ maxpeers = 50
+ port = 30303
+ # maxpendpeers = 50
+ # bind = "0.0.0.0"
+ # nodiscover = false
+ # nat = "any"
+ # [p2p.discovery]
+ # v5disc = false
+ # bootnodes = []
+ # bootnodesv4 = []
+ # bootnodesv5 = []
+ # static-nodes = []
+ # trusted-nodes = []
+ # dns = []
+
+# [heimdall]
+ # url = "http://localhost:1317"
+ # "bor.without" = false
+ # grpc-address = ""
+
+[txpool]
+ nolocals = true
+ accountslots = 16
+ globalslots = 131072
+ accountqueue = 64
+ globalqueue = 131072
+ lifetime = "1h30m0s"
+ # locals = []
+ # journal = ""
+ # rejournal = "1h0m0s"
+ # pricelimit = 1
+ # pricebump = 10
+
+[miner]
+ gaslimit = 30000000
+ # gasprice = "1000000000"
+ # mine = false
+ # etherbase = ""
+ # extradata = ""
+
+[jsonrpc]
+ ipcpath = "/var/lib/bor/bor.ipc"
+ # ipcdisable = false
+ # gascap = 50000000
+ # txfeecap = 5.0
+ [jsonrpc.http]
+ enabled = true
+ port = 8545
+ host = "0.0.0.0"
+ api = ["eth", "net", "web3", "txpool", "bor"]
+ vhosts = ["*"]
+ corsdomain = ["*"]
+ # prefix = ""
+ # [jsonrpc.ws]
+ # enabled = false
+ # port = 8546
+ # prefix = ""
+ # host = "localhost"
+ # api = ["web3", "net"]
+ # origins = ["*"]
+ # [jsonrpc.graphql]
+ # enabled = false
+ # port = 0
+ # prefix = ""
+ # host = ""
+ # vhosts = ["*"]
+ # corsdomain = ["*"]
+ # [jsonrpc.timeouts]
+ # read = "30s"
+ # write = "30s"
+ # idle = "2m0s"
+
+# [gpo]
+ # blocks = 20
+ # percentile = 60
+ # maxprice = "5000000000000"
+ # ignoreprice = "2"
+
+[telemetry]
+ metrics = true
+ # expensive = false
+ # prometheus-addr = ""
+ # opencollector-endpoint = ""
+ # [telemetry.influx]
+ # influxdb = false
+ # endpoint = ""
+ # database = ""
+ # username = ""
+ # password = ""
+ # influxdbv2 = false
+ # token = ""
+ # bucket = ""
+ # organization = ""
+ # [telemetry.influx.tags]
+
+# [cache]
+ # cache = 1024
+ # gc = 25
+ # snapshot = 10
+ # database = 50
+ # trie = 15
+ # journal = "triecache"
+ # rejournal = "1h0m0s"
+ # noprefetch = false
+ # preimages = false
+ # txlookuplimit = 2350000
+ # timeout = "1h0m0s"
+
+# [accounts]
+ # unlock = []
+ # password = ""
+ # allow-insecure-unlock = false
+ # lightkdf = false
+ # disable-bor-wallet = false
+
+# [grpc]
+ # addr = ":3131"
+
+# [developer]
+ # dev = false
+ # period = 0
diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml
new file mode 100644
index 0000000000..b441cc137d
--- /dev/null
+++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml
@@ -0,0 +1,137 @@
+# NOTE: Update and uncomment the `keystore`, `password`, and `unlock` fields.
+
+chain = "mumbai"
+# identity = "node_name"
+# log-level = "INFO"
+datadir = "/var/lib/bor/data"
+# ancient = ""
+# keystore = "$BOR_DIR/keystore"
+syncmode = "full"
+# gcmode = "full"
+# snapshot = true
+# ethstats = ""
+
+# ["eth.requiredblocks"]
+
+[p2p]
+ maxpeers = 1
+ port = 30303
+ nodiscover = true
+ # maxpendpeers = 50
+ # bind = "0.0.0.0"
+ # nat = "any"
+ # [p2p.discovery]
+ # v5disc = false
+ # bootnodes = []
+ # bootnodesv4 = []
+ # bootnodesv5 = []
+ # static-nodes = []
+ # trusted-nodes = []
+ # dns = []
+
+# [heimdall]
+ # url = "http://localhost:1317"
+ # "bor.without" = false
+ # grpc-address = ""
+
+[txpool]
+ nolocals = true
+ accountslots = 16
+ globalslots = 131072
+ accountqueue = 64
+ globalqueue = 131072
+ lifetime = "1h30m0s"
+ # locals = []
+ # journal = ""
+ # rejournal = "1h0m0s"
+ # pricelimit = 1
+ # pricebump = 10
+
+[miner]
+ mine = true
+ gaslimit = 30000000
+ # gasprice = "1000000000"
+ # etherbase = ""
+ # extradata = ""
+
+[jsonrpc]
+ ipcpath = "/var/lib/bor/bor.ipc"
+ # ipcdisable = false
+ # gascap = 50000000
+ # txfeecap = 5.0
+ [jsonrpc.http]
+ enabled = true
+ port = 8545
+ host = "0.0.0.0"
+ api = ["eth", "net", "web3", "txpool", "bor"]
+ vhosts = ["*"]
+ corsdomain = ["*"]
+ # prefix = ""
+ # [jsonrpc.ws]
+ # enabled = false
+ # port = 8546
+ # prefix = ""
+ # host = "localhost"
+ # api = ["web3", "net"]
+ # origins = ["*"]
+ # [jsonrpc.graphql]
+ # enabled = false
+ # port = 0
+ # prefix = ""
+ # host = ""
+ # vhosts = ["*"]
+ # corsdomain = ["*"]
+ # [jsonrpc.timeouts]
+ # read = "30s"
+ # write = "30s"
+ # idle = "2m0s"
+
+# [gpo]
+ # blocks = 20
+ # percentile = 60
+ # maxprice = "5000000000000"
+ # ignoreprice = "2"
+
+[telemetry]
+ metrics = true
+ # expensive = false
+ # prometheus-addr = ""
+ # opencollector-endpoint = ""
+ # [telemetry.influx]
+ # influxdb = false
+ # endpoint = ""
+ # database = ""
+ # username = ""
+ # password = ""
+ # influxdbv2 = false
+ # token = ""
+ # bucket = ""
+ # organization = ""
+ # [telemetry.influx.tags]
+
+# [cache]
+ # cache = 1024
+ # gc = 25
+ # snapshot = 10
+ # database = 50
+ # trie = 15
+ # journal = "triecache"
+ # rejournal = "1h0m0s"
+ # noprefetch = false
+ # preimages = false
+ # txlookuplimit = 2350000
+ # timeout = "1h0m0s"
+
+[accounts]
+ allow-insecure-unlock = true
+ # password = "$BOR_DIR/password.txt"
+ # unlock = ["$ADDRESS"]
+ # lightkdf = false
+ # disable-bor-wallet = false
+
+# [grpc]
+ # addr = ":3131"
+
+# [developer]
+ # dev = false
+ # period = 0
diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml
new file mode 100644
index 0000000000..05a254e184
--- /dev/null
+++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml
@@ -0,0 +1,137 @@
+# NOTE: Update and uncomment the `keystore`, `password`, and `unlock` fields.
+
+chain = "mumbai"
+# identity = "node_name"
+# log-level = "INFO"
+datadir = "/var/lib/bor/data"
+# ancient = ""
+# keystore = "$BOR_DIR/keystore"
+syncmode = "full"
+# gcmode = "full"
+# snapshot = true
+# ethstats = ""
+
+# ["eth.requiredblocks"]
+
+[p2p]
+ maxpeers = 50
+ port = 30303
+ # maxpendpeers = 50
+ # bind = "0.0.0.0"
+ # nodiscover = false
+ # nat = "any"
+ # [p2p.discovery]
+ # v5disc = false
+ # bootnodes = []
+ # bootnodesv4 = []
+ # bootnodesv5 = []
+ # static-nodes = []
+ # trusted-nodes = []
+ # dns = []
+
+# [heimdall]
+ # url = "http://localhost:1317"
+ # "bor.without" = false
+ # grpc-address = ""
+
+[txpool]
+ nolocals = true
+ accountslots = 16
+ globalslots = 131072
+ accountqueue = 64
+ globalqueue = 131072
+ lifetime = "1h30m0s"
+ # locals = []
+ # journal = ""
+ # rejournal = "1h0m0s"
+ # pricelimit = 1
+ # pricebump = 10
+
+[miner]
+ mine = true
+ gaslimit = 30000000
+ # gasprice = "1000000000"
+ # etherbase = ""
+ # extradata = ""
+
+[jsonrpc]
+ ipcpath = "/var/lib/bor/bor.ipc"
+ # ipcdisable = false
+ # gascap = 50000000
+ # txfeecap = 5.0
+ [jsonrpc.http]
+ enabled = true
+ port = 8545
+ host = "0.0.0.0"
+ api = ["eth", "net", "web3", "txpool", "bor"]
+ vhosts = ["*"]
+ corsdomain = ["*"]
+ # prefix = ""
+ # [jsonrpc.ws]
+ # enabled = false
+ # port = 8546
+ # prefix = ""
+ # host = "localhost"
+ # api = ["web3", "net"]
+ # origins = ["*"]
+ # [jsonrpc.graphql]
+ # enabled = false
+ # port = 0
+ # prefix = ""
+ # host = ""
+ # vhosts = ["*"]
+ # corsdomain = ["*"]
+ # [jsonrpc.timeouts]
+ # read = "30s"
+ # write = "30s"
+ # idle = "2m0s"
+
+# [gpo]
+# blocks = 20
+# percentile = 60
+# maxprice = "5000000000000"
+# ignoreprice = "2"
+
+[telemetry]
+ metrics = true
+ # expensive = false
+ # prometheus-addr = ""
+ # opencollector-endpoint = ""
+ # [telemetry.influx]
+ # influxdb = false
+ # endpoint = ""
+ # database = ""
+ # username = ""
+ # password = ""
+ # influxdbv2 = false
+ # token = ""
+ # bucket = ""
+ # organization = ""
+ # [telemetry.influx.tags]
+
+# [cache]
+# cache = 1024
+# gc = 25
+# snapshot = 10
+# database = 50
+# trie = 15
+# journal = "triecache"
+# rejournal = "1h0m0s"
+# noprefetch = false
+# preimages = false
+# txlookuplimit = 2350000
+# timeout = "1h0m0s"
+
+[accounts]
+ allow-insecure-unlock = true
+ # password = "$BOR_DIR/password.txt"
+ # unlock = ["$ADDRESS"]
+ # lightkdf = false
+ # disable-bor-wallet = false
+
+# [grpc]
+ # addr = ":3131"
+
+# [developer]
+ # dev = false
+ # period = 0
diff --git a/params/config.go b/params/config.go
index 47e22f986c..980250a1ec 100644
--- a/params/config.go
+++ b/params/config.go
@@ -23,8 +23,9 @@ import (
"sort"
"strconv"
- "github.com/ethereum/go-ethereum/common"
"golang.org/x/crypto/sha3"
+
+ "github.com/ethereum/go-ethereum/common"
)
// Genesis hashes to enforce below configs on.
@@ -289,6 +290,38 @@ var (
},
},
}
+ BorUnittestChainConfig = &ChainConfig{
+ ChainID: big.NewInt(80001),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ DAOForkSupport: true,
+ EIP150Hash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ Bor: &BorConfig{
+ Period: map[string]uint64{
+ "0": 1,
+ },
+ ProducerDelay: 3,
+ Sprint: 32,
+ BackupMultiplier: map[string]uint64{
+ "0": 2,
+ },
+ ValidatorContract: "0x0000000000000000000000000000000000001000",
+ StateReceiverContract: "0x0000000000000000000000000000000000001001",
+ BurntContract: map[string]string{
+ "0": "0x00000000000000000000000000000000000000000",
+ },
+ },
+ }
// MumbaiChainConfig contains the chain parameters to run a node on the Mumbai test network.
MumbaiChainConfig = &ChainConfig{
@@ -550,7 +583,9 @@ func (c *BorConfig) calculateBorConfigHelper(field map[string]uint64, number uin
for k := range field {
keys = append(keys, k)
}
+
sort.Strings(keys)
+
for i := 0; i < len(keys)-1; i++ {
valUint, _ := strconv.ParseUint(keys[i], 10, 64)
valUintNext, _ := strconv.ParseUint(keys[i+1], 10, 64)
@@ -558,6 +593,7 @@ func (c *BorConfig) calculateBorConfigHelper(field map[string]uint64, number uin
return field[keys[i]]
}
}
+
return field[keys[len(keys)-1]]
}
diff --git a/params/version.go b/params/version.go
index 4b608b1a5f..d2746b37ee 100644
--- a/params/version.go
+++ b/params/version.go
@@ -22,8 +22,8 @@ import (
const (
VersionMajor = 0 // Major version component of the current release
- VersionMinor = 2 // Minor version component of the current release
- VersionPatch = 16 // Patch version component of the current release
+ VersionMinor = 3 // Minor version component of the current release
+ VersionPatch = 0 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
@@ -43,7 +43,8 @@ var VersionWithMeta = func() string {
// ArchiveVersion holds the textual version string used for Geth archives.
// e.g. "1.8.11-dea1ce05" for stable releases, or
-// "1.8.13-unstable-21c059b6" for unstable releases
+//
+// "1.8.13-unstable-21c059b6" for unstable releases
func ArchiveVersion(gitCommit string) string {
vsn := Version
if VersionMeta != "stable" {
diff --git a/scripts/getconfig.go b/scripts/getconfig.go
new file mode 100644
index 0000000000..59e3ff749d
--- /dev/null
+++ b/scripts/getconfig.go
@@ -0,0 +1,715 @@
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/pelletier/go-toml"
+
+ "github.com/ethereum/go-ethereum/internal/cli/server"
+)
+
+// YesFV: Both the flag and its value have changed
+// YesF: Only the flag has changed, not its value
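+// Example: `--networkid 80001` is "YesFV" because both the flag and its value are
+// rewritten (it becomes `--chain mumbai`), whereas `--jsonrpc.corsdomain` is "YesF"
+// because only the flag name changes and its value is carried over as-is.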
+var flagMap = map[string][]string{
+ "networkid": {"notABoolFlag", "YesFV"},
+ "miner.gastarget": {"notABoolFlag", "No"},
+ "pprof": {"BoolFlag", "No"},
+ "pprof.port": {"notABoolFlag", "No"},
+ "pprof.addr": {"notABoolFlag", "No"},
+ "pprof.memprofilerate": {"notABoolFlag", "No"},
+ "pprof.blockprofilerate": {"notABoolFlag", "No"},
+ "pprof.cpuprofile": {"notABoolFlag", "No"},
+ "jsonrpc.corsdomain": {"notABoolFlag", "YesF"},
+ "jsonrpc.vhosts": {"notABoolFlag", "YesF"},
+ "http.modules": {"notABoolFlag", "YesF"},
+ "ws.modules": {"notABoolFlag", "YesF"},
+ "config": {"notABoolFlag", "No"},
+ "datadir.ancient": {"notABoolFlag", "No"},
+ "datadir.minfreedisk": {"notABoolFlag", "No"},
+ "usb": {"BoolFlag", "No"},
+ "pcscdpath": {"notABoolFlag", "No"},
+ "mainnet": {"BoolFlag", "No"},
+ "goerli": {"BoolFlag", "No"},
+ "bor-mumbai": {"BoolFlag", "No"},
+ "bor-mainnet": {"BoolFlag", "No"},
+ "rinkeby": {"BoolFlag", "No"},
+ "ropsten": {"BoolFlag", "No"},
+ "sepolia": {"BoolFlag", "No"},
+ "kiln": {"BoolFlag", "No"},
+ "exitwhensynced": {"BoolFlag", "No"},
+ "light.serve": {"notABoolFlag", "No"},
+ "light.ingress": {"notABoolFlag", "No"},
+ "light.egress": {"notABoolFlag", "No"},
+ "light.maxpeers": {"notABoolFlag", "No"},
+ "ulc.servers": {"notABoolFlag", "No"},
+ "ulc.fraction": {"notABoolFlag", "No"},
+ "ulc.onlyannounce": {"BoolFlag", "No"},
+ "light.nopruning": {"BoolFlag", "No"},
+ "light.nosyncserve": {"BoolFlag", "No"},
+ "dev.gaslimit": {"notABoolFlag", "No"},
+ "ethash.cachedir": {"notABoolFlag", "No"},
+ "ethash.cachesinmem": {"notABoolFlag", "No"},
+ "ethash.cachesondisk": {"notABoolFlag", "No"},
+ "ethash.cacheslockmmap": {"BoolFlag", "No"},
+ "ethash.dagdir": {"notABoolFlag", "No"},
+ "ethash.dagsinmem": {"notABoolFlag", "No"},
+ "ethash.dagsondisk": {"notABoolFlag", "No"},
+ "ethash.dagslockmmap": {"BoolFlag", "No"},
+ "fdlimit": {"notABoolFlag", "No"},
+ "signer": {"notABoolFlag", "No"},
+ "authrpc.jwtsecret": {"notABoolFlag", "No"},
+ "authrpc.addr": {"notABoolFlag", "No"},
+ "authrpc.port": {"notABoolFlag", "No"},
+ "authrpc.vhosts": {"notABoolFlag", "No"},
+ "graphql.corsdomain": {"notABoolFlag", "No"},
+ "graphql.vhosts": {"notABoolFlag", "No"},
+ "rpc.evmtimeout": {"notABoolFlag", "No"},
+ "rpc.allow-unprotected-txs": {"BoolFlag", "No"},
+ "jspath": {"notABoolFlag", "No"},
+ "exec": {"notABoolFlag", "No"},
+ "preload": {"notABoolFlag", "No"},
+ "discovery.dns": {"notABoolFlag", "No"},
+ "netrestrict": {"notABoolFlag", "No"},
+ "nodekey": {"notABoolFlag", "No"},
+ "nodekeyhex": {"notABoolFlag", "No"},
+ "miner.threads": {"notABoolFlag", "No"},
+ "miner.notify": {"notABoolFlag", "No"},
+ "miner.notify.full": {"BoolFlag", "No"},
+ "miner.recommit": {"notABoolFlag", "No"},
+ "miner.noverify": {"BoolFlag", "No"},
+ "vmdebug": {"BoolFlag", "No"},
+ "fakepow": {"BoolFlag", "No"},
+ "nocompaction": {"BoolFlag", "No"},
+ "metrics.addr": {"notABoolFlag", "No"},
+ "metrics.port": {"notABoolFlag", "No"},
+ "whitelist": {"notABoolFlag", "No"},
+ "snapshot": {"BoolFlag", "YesF"},
+ "bloomfilter.size": {"notABoolFlag", "No"},
+ "bor.logs": {"BoolFlag", "No"},
+ "override.arrowglacier": {"notABoolFlag", "No"},
+ "override.terminaltotaldifficulty": {"notABoolFlag", "No"},
+ "verbosity": {"notABoolFlag", "YesFV"},
+ "ws.origins": {"notABoolFlag", "No"},
+}
+
+// map from cli flags to corresponding toml tags
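+// Keys with a numeric prefix (e.g. "0-snapshot", "1-database") disambiguate TOML keys
+// that appear under more than one section; commentFlags derives the prefix from
+// per-section counters while walking the generated config file.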
+var nameTagMap = map[string]string{
+ "chain": "chain",
+ "identity": "identity",
+ "log-level": "log-level",
+ "datadir": "datadir",
+ "keystore": "keystore",
+ "syncmode": "syncmode",
+ "gcmode": "gcmode",
+ "eth.requiredblocks": "eth.requiredblocks",
+ "0-snapshot": "snapshot",
+ "\"bor.logs\"": "bor.logs",
+ "url": "bor.heimdall",
+ "\"bor.without\"": "bor.withoutheimdall",
+ "grpc-address": "bor.heimdallgRPC",
+ "locals": "txpool.locals",
+ "nolocals": "txpool.nolocals",
+ "journal": "txpool.journal",
+ "rejournal": "txpool.rejournal",
+ "pricelimit": "txpool.pricelimit",
+ "pricebump": "txpool.pricebump",
+ "accountslots": "txpool.accountslots",
+ "globalslots": "txpool.globalslots",
+ "accountqueue": "txpool.accountqueue",
+ "globalqueue": "txpool.globalqueue",
+ "lifetime": "txpool.lifetime",
+ "mine": "mine",
+ "etherbase": "miner.etherbase",
+ "extradata": "miner.extradata",
+ "gaslimit": "miner.gaslimit",
+ "gasprice": "miner.gasprice",
+ "ethstats": "ethstats",
+ "blocks": "gpo.blocks",
+ "percentile": "gpo.percentile",
+ "maxprice": "gpo.maxprice",
+ "ignoreprice": "gpo.ignoreprice",
+ "cache": "cache",
+ "1-database": "cache.database",
+ "trie": "cache.trie",
+ "trie.journal": "cache.journal",
+ "trie.rejournal": "cache.rejournal",
+ "gc": "cache.gc",
+ "1-snapshot": "cache.snapshot",
+ "noprefetch": "cache.noprefetch",
+ "preimages": "cache.preimages",
+ "txlookuplimit": "txlookuplimit",
+ "gascap": "rpc.gascap",
+ "txfeecap": "rpc.txfeecap",
+ "ipcdisable": "ipcdisable",
+ "ipcpath": "ipcpath",
+ "1-corsdomain": "http.corsdomain",
+ "1-vhosts": "http.vhosts",
+ "origins": "ws.origins",
+ "3-corsdomain": "graphql.corsdomain",
+ "3-vhosts": "graphql.vhosts",
+ "1-enabled": "http",
+ "1-host": "http.addr",
+ "1-port": "http.port",
+ "1-prefix": "http.rpcprefix",
+ "1-api": "http.api",
+ "2-enabled": "ws",
+ "2-host": "ws.addr",
+ "2-port": "ws.port",
+ "2-prefix": "ws.rpcprefix",
+ "2-api": "ws.api",
+ "3-enabled": "graphql",
+ "bind": "bind",
+ "0-port": "port",
+ "bootnodes": "bootnodes",
+ "maxpeers": "maxpeers",
+ "maxpendpeers": "maxpendpeers",
+ "nat": "nat",
+ "nodiscover": "nodiscover",
+ "v5disc": "v5disc",
+ "metrics": "metrics",
+ "expensive": "metrics.expensive",
+ "influxdb": "metrics.influxdb",
+ "endpoint": "metrics.influxdb.endpoint",
+ "0-database": "metrics.influxdb.database",
+ "username": "metrics.influxdb.username",
+ "0-password": "metrics.influxdb.password",
+ "tags": "metrics.influxdb.tags",
+ "prometheus-addr": "metrics.prometheus-addr",
+ "opencollector-endpoint": "metrics.opencollector-endpoint",
+ "influxdbv2": "metrics.influxdbv2",
+ "token": "metrics.influxdb.token",
+ "bucket": "metrics.influxdb.bucket",
+ "organization": "metrics.influxdb.organization",
+ "unlock": "unlock",
+ "1-password": "password",
+ "allow-insecure-unlock": "allow-insecure-unlock",
+ "lightkdf": "lightkdf",
+ "disable-bor-wallet": "disable-bor-wallet",
+ "addr": "grpc.addr",
+ "dev": "dev",
+ "period": "dev.period",
+}
+
+var removedFlagsAndValues = map[string]string{}
+
+var replacedFlagsMapFlagAndValue = map[string]map[string]map[string]string{
+ "networkid": {
+ "flag": {
+ "networkid": "chain",
+ },
+ "value": {
+ "'137'": "mainnet",
+ "137": "mainnet",
+ "'80001'": "mumbai",
+ "80001": "mumbai",
+ },
+ },
+ "verbosity": {
+ "flag": {
+ "verbosity": "log-level",
+ },
+ "value": {
+ "0": "SILENT",
+ "1": "ERROR",
+ "2": "WARN",
+ "3": "INFO",
+ "4": "DEBUG",
+ "5": "DETAIL",
+ },
+ },
+}
+
+// Do not remove
+var replacedFlagsMapFlag = map[string]string{}
+
+var currentBoolFlags = []string{
+ "snapshot",
+ "bor.logs",
+ "bor.withoutheimdall",
+ "txpool.nolocals",
+ "mine",
+ "cache.noprefetch",
+ "cache.preimages",
+ "ipcdisable",
+ "http",
+ "ws",
+ "graphql",
+ "nodiscover",
+ "v5disc",
+ "metrics",
+ "metrics.expensive",
+ "metrics.influxdb",
+ "metrics.influxdbv2",
+ "allow-insecure-unlock",
+ "lightkdf",
+ "disable-bor-wallet",
+ "dev",
+}
+
+func contains(s []string, str string) (bool, int) {
+ for ind, v := range s {
+ if v == str || v == "-"+str || v == "--"+str {
+ return true, ind
+ }
+ }
+
+ return false, -1
+}
+
+func indexOf(s []string, str string) int {
+ for k, v := range s {
+ if v == str || v == "-"+str || v == "--"+str {
+ return k
+ }
+ }
+
+ return -1
+}
+
+func remove1(s []string, idx int) []string {
+ removedFlagsAndValues[s[idx]] = ""
+ return append(s[:idx], s[idx+1:]...)
+}
+
+func remove2(s []string, idx int) []string {
+ removedFlagsAndValues[s[idx]] = s[idx+1]
+ return append(s[:idx], s[idx+2:]...)
+}
+
+func checkFlag(allFlags []string, checkFlags []string) []string {
+ outOfDateFlags := []string{}
+
+ for _, flag := range checkFlags {
+ t1, _ := contains(allFlags, flag)
+ if !t1 {
+ outOfDateFlags = append(outOfDateFlags, flag)
+ }
+ }
+
+ return outOfDateFlags
+}
+
+func checkFileExists(path string) bool {
+ _, err := os.Stat(path)
+ if errors.Is(err, os.ErrNotExist) {
+ fmt.Println("WARN: File does not exist", path)
+ return false
+ } else {
+ return true
+ }
+}
+
+func writeTempStaticJSON(path string) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var conf interface{}
+ if err := json.Unmarshal(data, &conf); err != nil {
+ log.Fatal(err)
+ }
+
+ temparr := []string{}
+ for _, item := range conf.([]interface{}) {
+ temparr = append(temparr, item.(string))
+ }
+
+ // write to a temp file
+ err = os.WriteFile("./tempStaticNodes.json", []byte(strings.Join(temparr, "\", \"")), 0600)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func writeTempStaticTrustedTOML(path string) {
+ data, err := toml.LoadFile(path)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if data.Has("Node.P2P.StaticNodes") {
+ temparr := []string{}
+ for _, item := range data.Get("Node.P2P.StaticNodes").([]interface{}) {
+ temparr = append(temparr, item.(string))
+ }
+
+ err = os.WriteFile("./tempStaticNodes.toml", []byte(strings.Join(temparr, "\", \"")), 0600)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if data.Has("Node.P2P.TrustedNodes") {
+ temparr := []string{}
+ for _, item := range data.Get("Node.P2P.TrustedNodes").([]interface{}) {
+ temparr = append(temparr, item.(string))
+ }
+
+ err = os.WriteFile("./tempTrustedNodes.toml", []byte(strings.Join(temparr, "\", \"")), 0600)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if data.Has("Node.HTTPTimeouts.ReadTimeout") {
+ err = os.WriteFile("./tempHTTPTimeoutsReadTimeout.toml", []byte(data.Get("Node.HTTPTimeouts.ReadTimeout").(string)), 0600)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if data.Has("Node.HTTPTimeouts.WriteTimeout") {
+ err = os.WriteFile("./tempHTTPTimeoutsWriteTimeout.toml", []byte(data.Get("Node.HTTPTimeouts.WriteTimeout").(string)), 0600)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if data.Has("Node.HTTPTimeouts.IdleTimeout") {
+ err = os.WriteFile("./tempHTTPTimeoutsIdleTimeout.toml", []byte(data.Get("Node.HTTPTimeouts.IdleTimeout").(string)), 0600)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if data.Has("Eth.TrieTimeout") {
+ err = os.WriteFile("./tempHTTPTimeoutsTrieTimeout.toml", []byte(data.Get("Eth.TrieTimeout").(string)), 0600)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+func getStaticTrustedNodes(args []string) {
+ // if config flag is passed, it should be only a .toml file
+ t1, t2 := contains(args, "config")
+ // nolint: nestif
+ if t1 {
+ path := args[t2+1]
+ if !checkFileExists(path) {
+ return
+ }
+
+ if path[len(path)-4:] == "toml" {
+ writeTempStaticTrustedTOML(path)
+ } else {
+ fmt.Println("only TOML config file is supported through CLI")
+ }
+ } else {
+ path := "./static-nodes.json"
+ if !checkFileExists(path) {
+ return
+ }
+ writeTempStaticJSON(path)
+ }
+}
+
+func getFlagsToCheck(args []string) []string {
+ flagsToCheck := []string{}
+
+ for _, item := range args {
+ if strings.HasPrefix(item, "-") {
+ if item[1] == '-' {
+ temp := item[2:]
+ flagsToCheck = append(flagsToCheck, temp)
+ } else {
+ temp := item[1:]
+ flagsToCheck = append(flagsToCheck, temp)
+ }
+ }
+ }
+
+ return flagsToCheck
+}
+
+func getFlagType(flag string) string {
+ return flagMap[flag][0]
+}
+
+func updateArgsClean(args []string, outOfDateFlags []string) []string {
+ updatedArgs := []string{}
+ updatedArgs = append(updatedArgs, args...)
+
+ // iterate through outOfDateFlags and remove the flags from updatedArgs along with their value (if any)
+ for _, item := range outOfDateFlags {
+ idx := indexOf(updatedArgs, item)
+
+ if getFlagType(item) == "BoolFlag" {
+ // remove the element at index idx
+ updatedArgs = remove1(updatedArgs, idx)
+ } else {
+ // remove the element at index idx and idx + 1
+ updatedArgs = remove2(updatedArgs, idx)
+ }
+ }
+
+ return updatedArgs
+}
+
+func updateArgsAdd(args []string) []string {
+ for flag, value := range removedFlagsAndValues {
+ if strings.HasPrefix(flag, "--") {
+ flag = flag[2:]
+ } else {
+ flag = flag[1:]
+ }
+
+ if flagMap[flag][1] == "YesFV" {
+ temp := "--" + replacedFlagsMapFlagAndValue[flag]["flag"][flag] + " " + replacedFlagsMapFlagAndValue[flag]["value"][value]
+ args = append(args, temp)
+ } else if flagMap[flag][1] == "YesF" {
+ temp := "--" + replacedFlagsMapFlag[flag] + " " + value
+ args = append(args, temp)
+ }
+ }
+
+ return args
+}
+
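+// handlePrometheus appends --metrics.prometheus-addr built from the legacy
+// metrics.addr/metrics.port pair (or, failing that, pprof.addr/pprof.port) so the
+// Prometheus endpoint stays reachable after those flags are dropped.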
+func handlePrometheus(args []string, updatedArgs []string) []string {
+ var newUpdatedArgs []string
+
+ mAddr := ""
+ mPort := ""
+
+ pAddr := ""
+ pPort := ""
+
+ newUpdatedArgs = append(newUpdatedArgs, updatedArgs...)
+
+ for i, val := range args {
+ if strings.Contains(val, "metrics.addr") && strings.HasPrefix(val, "-") {
+ mAddr = args[i+1]
+ }
+
+ if strings.Contains(val, "metrics.port") && strings.HasPrefix(val, "-") {
+ mPort = args[i+1]
+ }
+
+ if strings.Contains(val, "pprof.addr") && strings.HasPrefix(val, "-") {
+ pAddr = args[i+1]
+ }
+
+ if strings.Contains(val, "pprof.port") && strings.HasPrefix(val, "-") {
+ pPort = args[i+1]
+ }
+ }
+
+ if mAddr != "" && mPort != "" {
+ newUpdatedArgs = append(newUpdatedArgs, "--metrics.prometheus-addr")
+ newUpdatedArgs = append(newUpdatedArgs, mAddr+":"+mPort)
+ } else if pAddr != "" && pPort != "" {
+ newUpdatedArgs = append(newUpdatedArgs, "--metrics.prometheus-addr")
+ newUpdatedArgs = append(newUpdatedArgs, pAddr+":"+pPort)
+ }
+
+ return newUpdatedArgs
+}
+
+func dumpFlags(args []string) {
+ err := os.WriteFile("./temp", []byte(strings.Join(args, " ")), 0600)
+ if err != nil {
+ fmt.Println("Error in WriteFile")
+ } else {
+ fmt.Println("WriteFile Done")
+ }
+}
+
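+// commentFlags walks the freshly generated TOML file at `path` and comments out every
+// entry whose flag was not passed on the original command line (updatedArgs), so the
+// resulting config only pins values the operator actually set.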
+// nolint: gocognit
+func commentFlags(path string, updatedArgs []string) {
+ const cache = "[cache]"
+
+ const telemetry = "[telemetry]"
+
+ // snapshot: "[cache]"
+ cacheFlag := 0
+
+ // corsdomain, vhosts, enabled, host, api, port, prefix: "[p2p]", " [jsonrpc.http]", " [jsonrpc.ws]", " [jsonrpc.graphql]"
+ p2pHttpWsGraphFlag := -1
+
+ // database: "[telemetry]", "[cache]"
+ databaseFlag := -1
+
+ // password: "[telemetry]", "[accounts]"
+ passwordFlag := -1
+
+ ignoreLineFlag := false
+
+ input, err := os.ReadFile(path)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ lines := strings.Split(string(input), "\n")
+
+ var newLines []string
+ newLines = append(newLines, lines...)
+
+ for i, line := range lines {
+ if line == cache {
+ cacheFlag += 1
+ }
+
+ if line == "[p2p]" || line == " [jsonrpc.http]" || line == " [jsonrpc.ws]" || line == " [jsonrpc.graphql]" {
+ p2pHttpWsGraphFlag += 1
+ }
+
+ if line == telemetry || line == cache {
+ databaseFlag += 1
+ }
+
+ if line == telemetry || line == "[accounts]" {
+ passwordFlag += 1
+ }
+
+ if line == "[\"eth.requiredblocks\"]" || line == " [telemetry.influx.tags]" {
+ ignoreLineFlag = true
+ } else if line != "" {
+ if strings.HasPrefix(strings.Fields(line)[0], "[") {
+ ignoreLineFlag = false
+ }
+ }
+
+ // nolint: nestif
+ if !(strings.HasPrefix(line, "[") || strings.HasPrefix(line, " [") || strings.HasPrefix(line, " [") || line == "" || ignoreLineFlag) {
+ flag := strings.Fields(line)[0]
+ if flag == "snapshot" {
+ flag = strconv.Itoa(cacheFlag) + "-" + flag
+ } else if flag == "corsdomain" {
+ flag = strconv.Itoa(p2pHttpWsGraphFlag) + "-" + flag
+ } else if flag == "vhosts" {
+ flag = strconv.Itoa(p2pHttpWsGraphFlag) + "-" + flag
+ } else if flag == "enabled" {
+ flag = strconv.Itoa(p2pHttpWsGraphFlag) + "-" + flag
+ } else if flag == "host" {
+ flag = strconv.Itoa(p2pHttpWsGraphFlag) + "-" + flag
+ } else if flag == "api" {
+ flag = strconv.Itoa(p2pHttpWsGraphFlag) + "-" + flag
+ } else if flag == "port" {
+ flag = strconv.Itoa(p2pHttpWsGraphFlag) + "-" + flag
+ } else if flag == "prefix" {
+ flag = strconv.Itoa(p2pHttpWsGraphFlag) + "-" + flag
+ } else if flag == "database" {
+ flag = strconv.Itoa(databaseFlag) + "-" + flag
+ } else if flag == "password" {
+ flag = strconv.Itoa(passwordFlag) + "-" + flag
+ }
+
+ if flag != "static-nodes" && flag != "trusted-nodes" && flag != "read" && flag != "write" && flag != "idle" && flag != "timeout" {
+ flag = nameTagMap[flag]
+
+ tempFlag := false
+
+ for _, val := range updatedArgs {
+ if strings.Contains(val, flag) && (strings.Contains(val, "-") || strings.Contains(val, "--")) {
+ tempFlag = true
+ }
+ }
+
+ if !tempFlag || flag == "" {
+ newLines[i] = "# " + line
+ }
+ }
+ }
+ }
+
+ output := strings.Join(newLines, "\n")
+
+ err = os.WriteFile(path, []byte(output), 0600)
+ if err != nil {
+ log.Fatalln(err)
+ }
+}
+
+func checkBoolFlags(val string) bool {
+ returnFlag := false
+
+ if strings.Contains(val, "=") {
+ val = strings.Split(val, "=")[0]
+ }
+
+ for _, flag := range currentBoolFlags {
+ if val == "-"+flag || val == "--"+flag {
+ returnFlag = true
+ }
+ }
+
+ return returnFlag
+}
+
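+// beautifyArgs normalizes the raw argument list: `--flag=value` is split into two
+// tokens, known boolean flags are set aside (returned separately so the caller can
+// re-append them untouched), and `--unlock <address>` additionally implies
+// `--miner.etherbase <address>`.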
+func beautifyArgs(args []string) ([]string, []string) {
+ newArgs := []string{}
+
+ ignoreForNow := []string{}
+
+ temp := []string{}
+
+ for _, val := range args {
+ // nolint: nestif
+ if !(checkBoolFlags(val)) {
+ if strings.HasPrefix(val, "-") {
+ if strings.Contains(val, "=") {
+ temparr := strings.Split(val, "=")
+ newArgs = append(newArgs, temparr...)
+ } else {
+ newArgs = append(newArgs, val)
+ }
+ } else {
+ newArgs = append(newArgs, val)
+ }
+ } else {
+ ignoreForNow = append(ignoreForNow, val)
+ }
+ }
+
+ for j, val := range newArgs {
+ if val == "-unlock" || val == "--unlock" {
+ temp = append(temp, "--miner.etherbase")
+ temp = append(temp, newArgs[j+1])
+ }
+ }
+
+ newArgs = append(newArgs, temp...)
+
+ return newArgs, ignoreForNow
+}
+
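+// main runs in two passes selected by os.Args[1]: with the sentinel "notYet" it cleans
+// and rewrites the CLI flags taken from start.sh and dumps them to ./temp (plus any
+// static or trusted node data); with a config file path it comments out the unused
+// TOML entries in that file.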
+func main() {
+ const notYet = "notYet"
+
+ temp := os.Args[1]
+ args := os.Args[2:]
+
+ args, ignoreForNow := beautifyArgs(args)
+
+ c := server.Command{}
+ flags := c.Flags()
+ allFlags := flags.GetAllFlags()
+ flagsToCheck := getFlagsToCheck(args)
+
+ if temp == notYet {
+ getStaticTrustedNodes(args)
+ }
+
+ outOfDateFlags := checkFlag(allFlags, flagsToCheck)
+ updatedArgs := updateArgsClean(args, outOfDateFlags)
+ updatedArgs = updateArgsAdd(updatedArgs)
+ updatedArgs = handlePrometheus(args, updatedArgs)
+
+ if temp == notYet {
+ updatedArgs = append(updatedArgs, ignoreForNow...)
+ dumpFlags(updatedArgs)
+ }
+
+ if temp != notYet {
+ updatedArgs = append(updatedArgs, ignoreForNow...)
+ commentFlags(temp, updatedArgs)
+ }
+}
diff --git a/scripts/getconfig.sh b/scripts/getconfig.sh
new file mode 100755
index 0000000000..d00bf35ec8
--- /dev/null
+++ b/scripts/getconfig.sh
@@ -0,0 +1,177 @@
+#!/bin/bash
+set -e
+
+# Instructions:
+# Execute `./getconfig.sh` and follow the prompts shown on the terminal
+# The `*-config.toml` file will be created in the same directory as start.sh
+# It is recommended to review the generated flags in the resulting config file before using it
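+#
+# Example run (paths are illustrative):
+#   $ ./getconfig.sh
+#   * Path to start.sh: ~/node/bor/start.sh
+#   * Your validator address (...): 0xca67a8D767e45056DC92384b488E9Af654d78DE2
+# This produces ~/node/bor/start-config.toml next to start.sh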
+
+# Some checks to make commands OS independent
+OS="$(uname -s)"
+MKTEMPOPTION=
+SEDOPTION= ## Not used as of now (TODO)
+shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then
+ SEDOPTION="''"
+ MKTEMPOPTION="-t"
+fi
+
+read -p "* Path to start.sh: " startPath
+# check if start.sh is present
+if [[ ! -f $startPath ]]
+then
+ echo "Error: start.sh does not exist."
+ exit 1
+fi
+read -p "* Your validator address (e.g. 0xca67a8D767e45056DC92384b488E9Af654d78DE2), or press Enter to skip if running a sentry node: " ADD
+
+if [[ -f $HOME/.bor/data/bor/static-nodes.json ]]
+then
+cp $HOME/.bor/data/bor/static-nodes.json ./static-nodes.json
+else
+read -p "* You don't have a '~/.bor/data/bor/static-nodes.json' file. If you want to use static nodes, enter the path to 'static-nodes.json' here (press Enter to skip): " STAT
+if [[ -f $STAT ]]; then cp $STAT ./static-nodes.json; fi
+fi
+
+printf "\nThank you, your inputs are:\n"
+echo "Path to start.sh: "$startPath
+echo "Address: "$ADD
+
+confPath=${startPath%.sh}"-config.toml"
+echo "Path to the config file: "$confPath
+# check if config.toml is present
+if [[ -f $confPath ]]
+then
+ echo "WARN: config.toml exists, data will be overwritten."
+fi
+printf "\n"
+
+tmpDir="$(mktemp -d $MKTEMPOPTION ./temp-dir-XXXXXXXXXXX)" || { echo "Error: can't create temporary directory" >&2; exit 1; }
+cleanup() {
+ rm -rf "$tmpDir"
+}
+trap cleanup EXIT INT QUIT TERM
+
+# SHA1 hash of `tempStart` -> `3305fe263dd4a999d58f96deb064e21bb70123d9`
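+# Rewrite the `bor --` invocation in start.sh so it runs getconfig.go (with the "notYet"
+# sentinel) instead; this captures the existing flags into ./temp rather than starting bor.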
+sed 's/bor --/go run getconfig.go notYet --/g' $startPath > $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh
+chmod +x $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh
+$tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh $ADD
+rm $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh
+
+shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then
+ sed -i '' "s%*%'*'%g" ./temp
+else
+ sed -i "s%*%'*'%g" ./temp
+fi
+
+# read the flags from `./temp`
+dumpconfigflags=$(head -1 ./temp)
+
+# run the dumpconfig command with the flags from `./temp`
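+# (e.g. `bor dumpconfig --chain mumbai --http --metrics > ./start-config.toml`; the exact
+# flags depend on what start.sh passed)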
+command="bor dumpconfig "$dumpconfigflags" > "$confPath
+bash -c "$command"
+
+rm ./temp
+
+printf "\n"
+
+if [[ -f ./tempStaticNodes.json ]]
+then
+ echo "JSON StaticNodes found"
+ staticnodesjson=$(head -1 ./tempStaticNodes.json)
+ shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then
+ sed -i '' "s%static-nodes = \[\]%static-nodes = \[\"${staticnodesjson}\"\]%" $confPath
+ else
+ sed -i "s%static-nodes = \[\]%static-nodes = \[\"${staticnodesjson}\"\]%" $confPath
+ fi
+ rm ./tempStaticNodes.json
+elif [[ -f ./tempStaticNodes.toml ]]
+then
+ echo "TOML StaticNodes found"
+ staticnodestoml=$(head -1 ./tempStaticNodes.toml)
+ shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then
+ sed -i '' "s%static-nodes = \[\]%static-nodes = \[\"${staticnodestoml}\"\]%" $confPath
+ else
+ sed -i "s%static-nodes = \[\]%static-nodes = \[\"${staticnodestoml}\"\]%" $confPath
+ fi
+ rm ./tempStaticNodes.toml
+else
+ echo "neither JSON nor TOML StaticNodes found"
+fi
+
+if [[ -f ./tempTrustedNodes.toml ]]
+then
+ echo "TOML TrustedNodes found"
+ trustednodestoml=$(head -1 ./tempTrustedNodes.toml)
+ shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then
+ sed -i '' "s%trusted-nodes = \[\]%trusted-nodes = \[\"${trustednodestoml}\"\]%" $confPath
+ else
+ sed -i "s%trusted-nodes = \[\]%trusted-nodes = \[\"${trustednodestoml}\"\]%" $confPath
+ fi
+ rm ./tempTrustedNodes.toml
+else
+ echo "neither JSON nor TOML TrustedNodes found"
+fi
+
+if [[ -f ./tempHTTPTimeoutsReadTimeout.toml ]]
+then
+ echo "HTTPTimeouts.ReadTimeout found"
+ read=$(head -1 ./tempHTTPTimeoutsReadTimeout.toml)
+ shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then
+ sed -i '' "s%read = \"30s\"%read = \"${read}\"%" $confPath
+ else
+ sed -i "s%read = \"30s\"%read = \"${read}\"%" $confPath
+ fi
+ rm ./tempHTTPTimeoutsReadTimeout.toml
+fi
+
+if [[ -f ./tempHTTPTimeoutsWriteTimeout.toml ]]
+then
+ echo "HTTPTimeouts.WriteTimeout found"
+ write=$(head -1 ./tempHTTPTimeoutsWriteTimeout.toml)
+ shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then
+ sed -i '' "s%write = \"30s\"%write = \"${write}\"%" $confPath
+ else
+ sed -i "s%write = \"30s\"%write = \"${write}\"%" $confPath
+ fi
+ rm ./tempHTTPTimeoutsWriteTimeout.toml
+fi
+
+if [[ -f ./tempHTTPTimeoutsIdleTimeout.toml ]]
+then
+ echo "HTTPTimeouts.IdleTimeout found"
+ idle=$(head -1 ./tempHTTPTimeoutsIdleTimeout.toml)
+ shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then
+ sed -i '' "s%idle = \"2m0s\"%idle = \"${idle}\"%" $confPath
+ else
+ sed -i "s%idle = \"2m0s\"%idle = \"${idle}\"%" $confPath
+ fi
+ rm ./tempHTTPTimeoutsIdleTimeout.toml
+fi
+
+if [[ -f ./tempHTTPTimeoutsTrieTimeout.toml ]]
+then
+ echo "Eth.TrieTimeout found"
+ timeout=$(head -1 ./tempHTTPTimeoutsTrieTimeout.toml)
+ shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then
+ sed -i '' "s%timeout = \"1h0m0s\"%timeout = \"${timeout}\"%" $confPath
+ else
+ sed -i "s%timeout = \"1h0m0s\"%timeout = \"${timeout}\"%" $confPath
+ fi
+ rm ./tempHTTPTimeoutsTrieTimeout.toml
+fi
+
+printf "\n"
+
+# comment out the flags in $confPath that were not passed through $startPath
+# SHA1 hash of `tempStart` -> `3305fe263dd4a999d58f96deb064e21bb70123d9`
+sed "s%bor --%go run getconfig.go ${confPath} --%" $startPath > $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh
+chmod +x $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh
+$tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh $ADD
+rm $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh
+
+if [[ -f $HOME/.bor/data/bor/static-nodes.json ]]
+then
+rm ./static-nodes.json
+fi
+
+exit 0
diff --git a/scripts/tools-protobuf.sh b/scripts/tools-protobuf.sh
index fe03cab3cb..04144134f4 100755
--- a/scripts/tools-protobuf.sh
+++ b/scripts/tools-protobuf.sh
@@ -1,15 +1,24 @@
#!/bin/bash
# Install protobuf
-PROTOC_ZIP=protoc-3.12.0-linux-x86_64.zip
-curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.12.0/$PROTOC_ZIP
+if [[ "$OSTYPE" == "linux-gnu"* ]]; then
+ os="linux"
+elif [[ "$OSTYPE" == "darwin"* ]]; then
+ os="osx"
+else
+ echo "Unsupported platform"
+ exit 1
+fi
+
+PROTOC_ZIP=protoc-3.19.3-$os-x86_64.zip
+curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.19.3/$PROTOC_ZIP
sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
rm -f $PROTOC_ZIP
# Change permissions to use the binary
-sudo chmod 755 -R /usr/local/bin/protoc
-sudo chmod 755 -R /usr/local/include
+sudo chmod -R 755 /usr/local/bin/protoc
+sudo chmod -R 755 /usr/local/include
# Install golang extensions (DO NOT CHANGE THE VERSIONS)
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.25.0
diff --git a/tests/block_test.go b/tests/block_test.go
index 74c7ed8197..591bff6e07 100644
--- a/tests/block_test.go
+++ b/tests/block_test.go
@@ -14,6 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build integration
+// +build integration
+
package tests
import (
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index bcf861e09b..d28f3a1237 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -15,6 +15,7 @@
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package tests implements execution of Ethereum JSON tests.
+
package tests
import (
diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go
index 7d0ca9372b..243b0182bb 100644
--- a/tests/bor/bor_test.go
+++ b/tests/bor/bor_test.go
@@ -1,15 +1,24 @@
+//go:build integration
+// +build integration
+
package bor
import (
"encoding/hex"
- "encoding/json"
"io"
"math/big"
"testing"
"time"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "golang.org/x/crypto/sha3"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/bor"
+ "github.com/ethereum/go-ethereum/consensus/bor/clerk"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/checkpoint"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
@@ -19,15 +28,6 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests/bor/mocks"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/mock"
- "golang.org/x/crypto/sha3"
-)
-
-var (
- spanPath = "bor/span/1"
- clerkPath = "clerk/event-record/list"
- clerkQueryParams = "from-time=%d&to-time=%d&page=%d&limit=50"
)
func TestInsertingSpanSizeBlocks(t *testing.T) {
@@ -35,7 +35,21 @@ func TestInsertingSpanSizeBlocks(t *testing.T) {
chain := init.ethereum.BlockChain()
engine := init.ethereum.Engine()
_bor := engine.(*bor.Bor)
- h, heimdallSpan := getMockedHeimdallClient(t)
+
+ defer _bor.Close()
+
+ h, heimdallSpan, ctrl := getMockedHeimdallClient(t)
+ defer ctrl.Finish()
+
+ _, span := loadSpanFromFile(t)
+
+ h.EXPECT().Close().AnyTimes()
+ h.EXPECT().FetchLatestCheckpoint().Return(&checkpoint.Checkpoint{
+ Proposer: span.SelectedProducers[0].Address,
+ StartBlock: big.NewInt(0),
+ EndBlock: big.NewInt(int64(spanSize)),
+ }, nil).AnyTimes()
+
_bor.SetHeimdallClient(h)
db := init.ethereum.ChainDb()
@@ -48,7 +62,6 @@ func TestInsertingSpanSizeBlocks(t *testing.T) {
insertNewBlock(t, chain, block)
}
- assert.True(t, h.AssertCalled(t, "FetchWithRetry", spanPath, ""))
validators, err := _bor.GetCurrentValidators(block.Hash(), spanSize) // check validator set at the first block of new span
if err != nil {
t.Fatalf("%s", err)
@@ -67,6 +80,8 @@ func TestFetchStateSyncEvents(t *testing.T) {
engine := init.ethereum.Engine()
_bor := engine.(*bor.Bor)
+ defer _bor.Close()
+
// A. Insert blocks for 0th sprint
db := init.ethereum.ChainDb()
block := init.genesis.ToBlock(db)
@@ -79,8 +94,18 @@ func TestFetchStateSyncEvents(t *testing.T) {
// B. Before inserting 1st block of the next sprint, mock heimdall deps
// B.1 Mock /bor/span/1
res, _ := loadSpanFromFile(t)
- h := &mocks.IHeimdallClient{}
- h.On("FetchWithRetry", spanPath, "").Return(res, nil)
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ h := mocks.NewMockIHeimdallClient(ctrl)
+ h.EXPECT().Close().AnyTimes()
+ h.EXPECT().Span(uint64(1)).Return(&res.Result, nil).AnyTimes()
+ h.EXPECT().FetchLatestCheckpoint().Return(&checkpoint.Checkpoint{
+ StartBlock: big.NewInt(1),
+ EndBlock: big.NewInt(2),
+ RootHash: common.Hash{},
+ }, nil).AnyTimes()
// B.2 Mock State Sync events
fromID := uint64(1)
@@ -91,14 +116,12 @@ func TestFetchStateSyncEvents(t *testing.T) {
sample := getSampleEventRecord(t)
sample.Time = time.Unix(to-int64(eventCount+1), 0) // last event.Time will be just < to
eventRecords := generateFakeStateSyncEvents(sample, eventCount)
- h.On("FetchStateSyncEvents", fromID, to).Return(eventRecords, nil)
+
+ h.EXPECT().StateSyncEvents(fromID, to).Return(eventRecords, nil).AnyTimes()
_bor.SetHeimdallClient(h)
block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor)
insertNewBlock(t, chain, block)
-
- assert.True(t, h.AssertCalled(t, "FetchWithRetry", spanPath, ""))
- assert.True(t, h.AssertCalled(t, "FetchStateSyncEvents", fromID, to))
}
func TestFetchStateSyncEvents_2(t *testing.T) {
@@ -107,10 +130,22 @@ func TestFetchStateSyncEvents_2(t *testing.T) {
engine := init.ethereum.Engine()
_bor := engine.(*bor.Bor)
+ defer _bor.Close()
+
// Mock /bor/span/1
res, _ := loadSpanFromFile(t)
- h := &mocks.IHeimdallClient{}
- h.On("FetchWithRetry", spanPath, "").Return(res, nil)
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ h := mocks.NewMockIHeimdallClient(ctrl)
+ h.EXPECT().Close().AnyTimes()
+ h.EXPECT().Span(uint64(1)).Return(&res.Result, nil).AnyTimes()
+ h.EXPECT().FetchLatestCheckpoint().Return(&checkpoint.Checkpoint{
+ StartBlock: big.NewInt(1),
+ EndBlock: big.NewInt(2),
+ RootHash: common.Hash{},
+ }, nil).AnyTimes()
// Mock State Sync events
// at # sprintSize, events are fetched for [fromID, (block-sprint).Time)
@@ -120,7 +155,7 @@ func TestFetchStateSyncEvents_2(t *testing.T) {
// First query will be from [id=1, (block-sprint).Time]
// Insert 5 events in this time range
- eventRecords := []*bor.EventRecordWithTime{
+ eventRecords := []*clerk.EventRecordWithTime{
buildStateEvent(sample, 1, 3), // id = 1, time = 1
buildStateEvent(sample, 2, 1), // id = 2, time = 3
buildStateEvent(sample, 3, 2), // id = 3, time = 2
@@ -128,7 +163,8 @@ func TestFetchStateSyncEvents_2(t *testing.T) {
buildStateEvent(sample, 4, 5), // id = 4, time = 5
buildStateEvent(sample, 6, 4), // id = 6, time = 4
}
- h.On("FetchStateSyncEvents", fromID, to).Return(eventRecords, nil)
+
+ h.EXPECT().StateSyncEvents(fromID, to).Return(eventRecords, nil).AnyTimes()
_bor.SetHeimdallClient(h)
// Insert blocks for 0th sprint
@@ -138,25 +174,27 @@ func TestFetchStateSyncEvents_2(t *testing.T) {
block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor)
insertNewBlock(t, chain, block)
}
- assert.True(t, h.AssertCalled(t, "FetchWithRetry", spanPath, ""))
- assert.True(t, h.AssertCalled(t, "FetchStateSyncEvents", fromID, to))
+
lastStateID, _ := _bor.GenesisContractsClient.LastStateId(sprintSize)
+
// state 6 was not written
assert.Equal(t, uint64(4), lastStateID.Uint64())
//
fromID = uint64(5)
to = int64(chain.GetHeaderByNumber(sprintSize).Time)
- eventRecords = []*bor.EventRecordWithTime{
+
+ eventRecords = []*clerk.EventRecordWithTime{
buildStateEvent(sample, 5, 7),
buildStateEvent(sample, 6, 4),
}
- h.On("FetchStateSyncEvents", fromID, to).Return(eventRecords, nil)
+ h.EXPECT().StateSyncEvents(fromID, to).Return(eventRecords, nil).AnyTimes()
+
for i := sprintSize + 1; i <= spanSize; i++ {
block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor)
insertNewBlock(t, chain, block)
}
- assert.True(t, h.AssertCalled(t, "FetchStateSyncEvents", fromID, to))
+
lastStateID, _ = _bor.GenesisContractsClient.LastStateId(spanSize)
assert.Equal(t, uint64(6), lastStateID.Uint64())
}
@@ -166,7 +204,13 @@ func TestOutOfTurnSigning(t *testing.T) {
chain := init.ethereum.BlockChain()
engine := init.ethereum.Engine()
_bor := engine.(*bor.Bor)
- h, _ := getMockedHeimdallClient(t)
+
+ defer _bor.Close()
+
+ h, _, ctrl := getMockedHeimdallClient(t)
+ defer ctrl.Finish()
+
+ h.EXPECT().Close().AnyTimes()
_bor.SetHeimdallClient(h)
db := init.ethereum.ChainDb()
@@ -197,6 +241,7 @@ func TestOutOfTurnSigning(t *testing.T) {
bor.CalcProducerDelay(header.Number.Uint64(), 0, init.genesis.Config.Bor))
sign(t, header, signerKey, init.genesis.Config.Bor)
block = types.NewBlockWithHeader(header)
+
_, err = chain.InsertChain([]*types.Block{block})
assert.Equal(t,
*err.(*bor.WrongDifficultyError),
@@ -205,6 +250,7 @@ func TestOutOfTurnSigning(t *testing.T) {
header.Difficulty = new(big.Int).SetUint64(expectedDifficulty)
sign(t, header, signerKey, init.genesis.Config.Bor)
block = types.NewBlockWithHeader(header)
+
_, err = chain.InsertChain([]*types.Block{block})
assert.Nil(t, err)
}
@@ -214,7 +260,14 @@ func TestSignerNotFound(t *testing.T) {
chain := init.ethereum.BlockChain()
engine := init.ethereum.Engine()
_bor := engine.(*bor.Bor)
- h, _ := getMockedHeimdallClient(t)
+
+ defer _bor.Close()
+
+ h, _, ctrl := getMockedHeimdallClient(t)
+ defer ctrl.Finish()
+
+ h.EXPECT().Close().AnyTimes()
+
_bor.SetHeimdallClient(h)
db := init.ethereum.ChainDb()
@@ -233,58 +286,63 @@ func TestSignerNotFound(t *testing.T) {
bor.UnauthorizedSignerError{Number: 0, Signer: addr.Bytes()})
}
-func getMockedHeimdallClient(t *testing.T) (*mocks.IHeimdallClient, *bor.HeimdallSpan) {
- res, heimdallSpan := loadSpanFromFile(t)
- h := &mocks.IHeimdallClient{}
- h.On("FetchWithRetry", "bor/span/1", "").Return(res, nil)
- h.On(
- "FetchStateSyncEvents",
- mock.AnythingOfType("uint64"),
- mock.AnythingOfType("int64")).Return([]*bor.EventRecordWithTime{getSampleEventRecord(t)}, nil)
- return h, heimdallSpan
+func getMockedHeimdallClient(t *testing.T) (*mocks.MockIHeimdallClient, *span.HeimdallSpan, *gomock.Controller) {
+ ctrl := gomock.NewController(t)
+ h := mocks.NewMockIHeimdallClient(ctrl)
+
+ _, heimdallSpan := loadSpanFromFile(t)
+
+ h.EXPECT().Span(uint64(1)).Return(heimdallSpan, nil).AnyTimes()
+
+ h.EXPECT().StateSyncEvents(gomock.Any(), gomock.Any()).
+ Return([]*clerk.EventRecordWithTime{getSampleEventRecord(t)}, nil).AnyTimes()
+
+ // h.EXPECT().FetchLatestCheckpoint().Return([]*clerk.EventRecordWithTime{getSampleEventRecord(t)}, nil).AnyTimes()
+
+ return h, heimdallSpan, ctrl
}
-func generateFakeStateSyncEvents(sample *bor.EventRecordWithTime, count int) []*bor.EventRecordWithTime {
- events := make([]*bor.EventRecordWithTime, count)
+func generateFakeStateSyncEvents(sample *clerk.EventRecordWithTime, count int) []*clerk.EventRecordWithTime {
+ events := make([]*clerk.EventRecordWithTime, count)
event := *sample
event.ID = 1
- events[0] = &bor.EventRecordWithTime{}
+ events[0] = &clerk.EventRecordWithTime{}
*events[0] = event
for i := 1; i < count; i++ {
event.ID = uint64(i)
event.Time = event.Time.Add(1 * time.Second)
- events[i] = &bor.EventRecordWithTime{}
+ events[i] = &clerk.EventRecordWithTime{}
*events[i] = event
}
return events
}
-func buildStateEvent(sample *bor.EventRecordWithTime, id uint64, timeStamp int64) *bor.EventRecordWithTime {
+func buildStateEvent(sample *clerk.EventRecordWithTime, id uint64, timeStamp int64) *clerk.EventRecordWithTime {
event := *sample
event.ID = id
event.Time = time.Unix(timeStamp, 0)
return &event
}
-func getSampleEventRecord(t *testing.T) *bor.EventRecordWithTime {
- res := stateSyncEventsPayload(t)
- var _eventRecords []*bor.EventRecordWithTime
- if err := json.Unmarshal(res.Result, &_eventRecords); err != nil {
- t.Fatalf("%s", err)
- }
- _eventRecords[0].Time = time.Unix(1, 0)
- return _eventRecords[0]
+func getSampleEventRecord(t *testing.T) *clerk.EventRecordWithTime {
+ eventRecords := stateSyncEventsPayload(t)
+ eventRecords.Result[0].Time = time.Unix(1, 0)
+ return eventRecords.Result[0]
+}
+
+func getEventRecords(t *testing.T) []*clerk.EventRecordWithTime {
+ return stateSyncEventsPayload(t).Result
}
// TestEIP1559Transition tests the following:
//
-// 1. A transaction whose gasFeeCap is greater than the baseFee is valid.
-// 2. Gas accounting for access lists on EIP-1559 transactions is correct.
-// 3. Only the transaction's tip will be received by the coinbase.
-// 4. The transaction sender pays for both the tip and baseFee.
-// 5. The coinbase receives only the partially realized tip when
-// gasFeeCap - gasTipCap < baseFee.
-// 6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap).
+// 1. A transaction whose gasFeeCap is greater than the baseFee is valid.
+// 2. Gas accounting for access lists on EIP-1559 transactions is correct.
+// 3. Only the transaction's tip will be received by the coinbase.
+// 4. The transaction sender pays for both the tip and baseFee.
+// 5. The coinbase receives only the partially realized tip when
+// gasFeeCap - gasTipCap < baseFee.
+// 6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap).
func TestEIP1559Transition(t *testing.T) {
var (
aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
diff --git a/tests/bor/helper.go b/tests/bor/helper.go
index a1b6a19742..a8a3ae4ea6 100644
--- a/tests/bor/helper.go
+++ b/tests/bor/helper.go
@@ -11,6 +11,9 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/bor"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall"
+ "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span"
+ "github.com/ethereum/go-ethereum/consensus/bor/valset"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
@@ -46,17 +49,23 @@ type initializeData struct {
}
func buildEthereumInstance(t *testing.T, db ethdb.Database) *initializeData {
+ t.Helper()
+
genesisData, err := ioutil.ReadFile("./testdata/genesis.json")
if err != nil {
t.Fatalf("%s", err)
}
+
gen := &core.Genesis{}
+
if err := json.Unmarshal(genesisData, gen); err != nil {
t.Fatalf("%s", err)
}
+
	ethConf := &eth.Config{
Genesis: gen,
}
+
ethConf.Genesis.MustCommit(db)
ethereum := utils.CreateBorEthereum(ethConf)
@@ -65,6 +74,7 @@ func buildEthereumInstance(t *testing.T, db ethdb.Database) *initializeData {
}
ethConf.Genesis.MustCommit(ethereum.ChainDb())
+
return &initializeData{
genesis: gen,
ethereum: ethereum,
@@ -72,12 +82,16 @@ func buildEthereumInstance(t *testing.T, db ethdb.Database) *initializeData {
}
func insertNewBlock(t *testing.T, chain *core.BlockChain, block *types.Block) {
+ t.Helper()
+
if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
t.Fatalf("%s", err)
}
}
func buildNextBlock(t *testing.T, _bor *bor.Bor, chain *core.BlockChain, block *types.Block, signer []byte, borConfig *params.BorConfig) *types.Block {
+ t.Helper()
+
header := block.Header()
header.Number.Add(header.Number, big.NewInt(1))
number := header.Number.Uint64()
@@ -90,11 +104,12 @@ func buildNextBlock(t *testing.T, _bor *bor.Bor, chain *core.BlockChain, block *
header.Time += bor.CalcProducerDelay(header.Number.Uint64(), 0, borConfig)
header.Extra = make([]byte, 32+65) // vanity + extraSeal
- currentValidators := []*bor.Validator{bor.NewValidator(addr, 10)}
+ currentValidators := []*valset.Validator{valset.NewValidator(addr, 10)}
isSpanEnd := (number+1)%spanSize == 0
isSpanStart := number%spanSize == 0
isSprintEnd := (header.Number.Uint64()+1)%sprintSize == 0
+
if isSpanEnd {
_, heimdallSpan := loadSpanFromFile(t)
// this is to stash the validator bytes in the header
@@ -102,18 +117,23 @@ func buildNextBlock(t *testing.T, _bor *bor.Bor, chain *core.BlockChain, block *
} else if isSpanStart {
header.Difficulty = new(big.Int).SetInt64(3)
}
+
if isSprintEnd {
- sort.Sort(bor.ValidatorsByAddress(currentValidators))
+ sort.Sort(valset.ValidatorsByAddress(currentValidators))
+
validatorBytes := make([]byte, len(currentValidators)*validatorHeaderBytesLength)
header.Extra = make([]byte, 32+len(validatorBytes)+65) // vanity + validatorBytes + extraSeal
+
for i, val := range currentValidators {
copy(validatorBytes[i*validatorHeaderBytesLength:], val.HeaderBytes())
}
+
copy(header.Extra[32:], validatorBytes)
}
if chain.Config().IsLondon(header.Number) {
header.BaseFee = misc.CalcBaseFee(chain.Config(), block.Header())
+
if !chain.Config().IsLondon(block.Number()) {
parentGasLimit := block.GasLimit() * params.ElasticityMultiplier
header.GasLimit = core.CalcGasLimit(parentGasLimit, parentGasLimit)
@@ -124,58 +144,73 @@ func buildNextBlock(t *testing.T, _bor *bor.Bor, chain *core.BlockChain, block *
if err != nil {
t.Fatalf("%s", err)
}
+
_, err = _bor.FinalizeAndAssemble(chain, header, state, nil, nil, nil)
if err != nil {
t.Fatalf("%s", err)
}
+
sign(t, header, signer, borConfig)
+
return types.NewBlockWithHeader(header)
}
func sign(t *testing.T, header *types.Header, signer []byte, c *params.BorConfig) {
+ t.Helper()
+
sig, err := secp256k1.Sign(crypto.Keccak256(bor.BorRLP(header, c)), signer)
if err != nil {
t.Fatalf("%s", err)
}
+
copy(header.Extra[len(header.Extra)-extraSeal:], sig)
}
-func stateSyncEventsPayload(t *testing.T) *bor.ResponseWithHeight {
+//nolint:unused,deadcode
+func stateSyncEventsPayload(t *testing.T) *heimdall.StateSyncEventsResponse {
+ t.Helper()
+
stateData, err := ioutil.ReadFile("./testdata/states.json")
if err != nil {
t.Fatalf("%s", err)
}
- res := &bor.ResponseWithHeight{}
+
+ res := &heimdall.StateSyncEventsResponse{}
if err := json.Unmarshal(stateData, res); err != nil {
t.Fatalf("%s", err)
}
+
return res
}
-func loadSpanFromFile(t *testing.T) (*bor.ResponseWithHeight, *bor.HeimdallSpan) {
+//nolint:unused,deadcode
+func loadSpanFromFile(t *testing.T) (*heimdall.SpanResponse, *span.HeimdallSpan) {
+ t.Helper()
+
spanData, err := ioutil.ReadFile("./testdata/span.json")
if err != nil {
t.Fatalf("%s", err)
}
- res := &bor.ResponseWithHeight{}
+
+ res := &heimdall.SpanResponse{}
+
if err := json.Unmarshal(spanData, res); err != nil {
t.Fatalf("%s", err)
}
- heimdallSpan := &bor.HeimdallSpan{}
- if err := json.Unmarshal(res.Result, heimdallSpan); err != nil {
- t.Fatalf("%s", err)
- }
- return res, heimdallSpan
+ return res, &res.Result
}
func getSignerKey(number uint64) []byte {
signerKey := privKey
+
isSpanStart := number%spanSize == 0
if isSpanStart {
// validator set in the new span has changed
signerKey = privKey2
}
+
_key, _ := hex.DecodeString(signerKey)
+
return _key
}
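
For reference, a minimal sketch (not part of this diff) of how these helpers typically compose in a block-building loop; rawdb.NewMemoryDatabase, the *bor.Bor type assertion on the engine, the genesis Config.Bor field, and the loop bound spanSize are assumptions for illustration, not taken from this change:

    idata := buildEthereumInstance(t, rawdb.NewMemoryDatabase())
    chain := idata.ethereum.BlockChain()
    engine := idata.ethereum.Engine().(*bor.Bor) // assumed: the instance is configured with the bor engine

    block := chain.Genesis()
    for i := uint64(1); i <= spanSize; i++ {
        // pick the key expected to seal block i (illustrative), build on the parent and insert
        block = buildNextBlock(t, engine, chain, block, getSignerKey(i), idata.genesis.Config.Bor)
        insertNewBlock(t, chain, block)
    }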
diff --git a/tests/bor/mocks/IHeimdallClient.go b/tests/bor/mocks/IHeimdallClient.go
index ac89487d3b..f770ed9fa8 100644
--- a/tests/bor/mocks/IHeimdallClient.go
+++ b/tests/bor/mocks/IHeimdallClient.go
@@ -1,87 +1,94 @@
-// Code generated by mockery v2.10.0. DO NOT EDIT.
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/ethereum/go-ethereum/consensus/bor (interfaces: IHeimdallClient)
+// Package mocks is a generated GoMock package.
package mocks
import (
- bor "github.com/ethereum/go-ethereum/consensus/bor"
- mock "github.com/stretchr/testify/mock"
+ reflect "reflect"
+
+ clerk "github.com/ethereum/go-ethereum/consensus/bor/clerk"
+ checkpoint "github.com/ethereum/go-ethereum/consensus/bor/heimdall/checkpoint"
+ span "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span"
+ gomock "github.com/golang/mock/gomock"
)
-// IHeimdallClient is an autogenerated mock type for the IHeimdallClient type
-type IHeimdallClient struct {
- mock.Mock
+// MockIHeimdallClient is a mock of IHeimdallClient interface.
+type MockIHeimdallClient struct {
+ ctrl *gomock.Controller
+ recorder *MockIHeimdallClientMockRecorder
}
-// Close provides a mock function with given fields:
-func (_m *IHeimdallClient) Close() {
- _m.Called()
+// MockIHeimdallClientMockRecorder is the mock recorder for MockIHeimdallClient.
+type MockIHeimdallClientMockRecorder struct {
+ mock *MockIHeimdallClient
}
-// Fetch provides a mock function with given fields: path, query
-func (_m *IHeimdallClient) Fetch(path string, query string) (*bor.ResponseWithHeight, error) {
- ret := _m.Called(path, query)
-
- var r0 *bor.ResponseWithHeight
- if rf, ok := ret.Get(0).(func(string, string) *bor.ResponseWithHeight); ok {
- r0 = rf(path, query)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*bor.ResponseWithHeight)
- }
- }
-
- var r1 error
- if rf, ok := ret.Get(1).(func(string, string) error); ok {
- r1 = rf(path, query)
- } else {
- r1 = ret.Error(1)
- }
+// NewMockIHeimdallClient creates a new mock instance.
+func NewMockIHeimdallClient(ctrl *gomock.Controller) *MockIHeimdallClient {
+ mock := &MockIHeimdallClient{ctrl: ctrl}
+ mock.recorder = &MockIHeimdallClientMockRecorder{mock}
+ return mock
+}
- return r0, r1
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockIHeimdallClient) EXPECT() *MockIHeimdallClientMockRecorder {
+ return m.recorder
}
-// FetchStateSyncEvents provides a mock function with given fields: fromID, to
-func (_m *IHeimdallClient) FetchStateSyncEvents(fromID uint64, to int64) ([]*bor.EventRecordWithTime, error) {
- ret := _m.Called(fromID, to)
+// Close mocks base method.
+func (m *MockIHeimdallClient) Close() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Close")
+}
- var r0 []*bor.EventRecordWithTime
- if rf, ok := ret.Get(0).(func(uint64, int64) []*bor.EventRecordWithTime); ok {
- r0 = rf(fromID, to)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]*bor.EventRecordWithTime)
- }
- }
+// Close indicates an expected call of Close.
+func (mr *MockIHeimdallClientMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIHeimdallClient)(nil).Close))
+}
- var r1 error
- if rf, ok := ret.Get(1).(func(uint64, int64) error); ok {
- r1 = rf(fromID, to)
- } else {
- r1 = ret.Error(1)
- }
+// FetchLatestCheckpoint mocks base method.
+func (m *MockIHeimdallClient) FetchLatestCheckpoint() (*checkpoint.Checkpoint, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "FetchLatestCheckpoint")
+ ret0, _ := ret[0].(*checkpoint.Checkpoint)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
- return r0, r1
+// FetchLatestCheckpoint indicates an expected call of FetchLatestCheckpoint.
+func (mr *MockIHeimdallClientMockRecorder) FetchLatestCheckpoint() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchLatestCheckpoint", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchLatestCheckpoint))
}
-// FetchWithRetry provides a mock function with given fields: path, query
-func (_m *IHeimdallClient) FetchWithRetry(path string, query string) (*bor.ResponseWithHeight, error) {
- ret := _m.Called(path, query)
+// Span mocks base method.
+func (m *MockIHeimdallClient) Span(arg0 uint64) (*span.HeimdallSpan, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Span", arg0)
+ ret0, _ := ret[0].(*span.HeimdallSpan)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
- var r0 *bor.ResponseWithHeight
- if rf, ok := ret.Get(0).(func(string, string) *bor.ResponseWithHeight); ok {
- r0 = rf(path, query)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*bor.ResponseWithHeight)
- }
- }
+// Span indicates an expected call of Span.
+func (mr *MockIHeimdallClientMockRecorder) Span(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Span", reflect.TypeOf((*MockIHeimdallClient)(nil).Span), arg0)
+}
- var r1 error
- if rf, ok := ret.Get(1).(func(string, string) error); ok {
- r1 = rf(path, query)
- } else {
- r1 = ret.Error(1)
- }
+// StateSyncEvents mocks base method.
+func (m *MockIHeimdallClient) StateSyncEvents(arg0 uint64, arg1 int64) ([]*clerk.EventRecordWithTime, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSyncEvents", arg0, arg1)
+ ret0, _ := ret[0].([]*clerk.EventRecordWithTime)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
- return r0, r1
+// StateSyncEvents indicates an expected call of StateSyncEvents.
+func (mr *MockIHeimdallClientMockRecorder) StateSyncEvents(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSyncEvents", reflect.TypeOf((*MockIHeimdallClient)(nil).StateSyncEvents), arg0, arg1)
}
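
The regenerated mock follows gomock's controller/recorder pattern instead of testify's Called/Return plumbing. A minimal usage sketch, where the return values are placeholders assumed for illustration rather than anything defined in this diff:

    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    h := mocks.NewMockIHeimdallClient(ctrl)
    h.EXPECT().Span(gomock.Any()).Return(&span.HeimdallSpan{}, nil).AnyTimes()
    h.EXPECT().StateSyncEvents(gomock.Any(), gomock.Any()).Return([]*clerk.EventRecordWithTime{}, nil).AnyTimes()
    h.EXPECT().Close().AnyTimes()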
diff --git a/tests/deps/fake.go b/tests/deps/fake.go
new file mode 100644
index 0000000000..cd04ffa070
--- /dev/null
+++ b/tests/deps/fake.go
@@ -0,0 +1,7 @@
+package deps
+
+// This is a fake file used only to lock the mock-generation dependencies in go.mod.
+//nolint:typecheck
+import (
+ _ "github.com/golang/mock/mockgen/model"
+)
diff --git a/tests/difficulty_test.go b/tests/difficulty_test.go
index 192dff12cc..0b030c1485 100644
--- a/tests/difficulty_test.go
+++ b/tests/difficulty_test.go
@@ -14,6 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build integration
+// +build integration
+
package tests
import (
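
The upstream test harnesses under tests/ are now gated behind the integration build tag (the same tag is added to the sibling files below), so a plain go test run skips them; the CI's make test-integration step presumably enables them with something like:

    go test -tags integration ./tests/...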
diff --git a/tests/init_test.go b/tests/init_test.go
index 7e2f3ff7f5..4ade0bfb90 100644
--- a/tests/init_test.go
+++ b/tests/init_test.go
@@ -14,6 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build integration
+// +build integration
+
package tests
import (
diff --git a/tests/rlp_test.go b/tests/rlp_test.go
index 79a1683eb2..dbca73efc6 100644
--- a/tests/rlp_test.go
+++ b/tests/rlp_test.go
@@ -14,6 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build integration
+// +build integration
+
package tests
import (
diff --git a/tests/state_test.go b/tests/state_test.go
index d2c92b211c..8fcf35b864 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -14,6 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build integration
+// +build integration
+
package tests
import (
diff --git a/tests/transaction_test.go b/tests/transaction_test.go
index cb0f262318..1197eebe19 100644
--- a/tests/transaction_test.go
+++ b/tests/transaction_test.go
@@ -14,6 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build integration
+// +build integration
+
package tests
import (
diff --git a/trie/hasher.go b/trie/hasher.go
index 7f0748c13d..c085970a3f 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -19,9 +19,10 @@ package trie
import (
"sync"
+ "golang.org/x/crypto/sha3"
+
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
- "golang.org/x/crypto/sha3"
)
// hasher is a type used for the trie Hash operation. A hasher has some